1*e4b17023SJohn Marino /* GNU Objective C Runtime @synchronized implementation
2*e4b17023SJohn Marino Copyright (C) 2010 Free Software Foundation, Inc.
3*e4b17023SJohn Marino Contributed by Nicola Pero <nicola.pero@meta-innovation.com>
4*e4b17023SJohn Marino
5*e4b17023SJohn Marino This file is part of GCC.
6*e4b17023SJohn Marino
7*e4b17023SJohn Marino GCC is free software; you can redistribute it and/or modify it under the
8*e4b17023SJohn Marino terms of the GNU General Public License as published by the Free Software
9*e4b17023SJohn Marino Foundation; either version 3, or (at your option) any later version.
10*e4b17023SJohn Marino
11*e4b17023SJohn Marino GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12*e4b17023SJohn Marino WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
13*e4b17023SJohn Marino FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
14*e4b17023SJohn Marino details.
15*e4b17023SJohn Marino
16*e4b17023SJohn Marino Under Section 7 of GPL version 3, you are granted additional
17*e4b17023SJohn Marino permissions described in the GCC Runtime Library Exception, version
18*e4b17023SJohn Marino 3.1, as published by the Free Software Foundation.
19*e4b17023SJohn Marino
20*e4b17023SJohn Marino You should have received a copy of the GNU General Public License and
21*e4b17023SJohn Marino a copy of the GCC Runtime Library Exception along with this program;
22*e4b17023SJohn Marino see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23*e4b17023SJohn Marino <http://www.gnu.org/licenses/>. */
24*e4b17023SJohn Marino
25*e4b17023SJohn Marino /* This file implements objc_sync_enter() and objc_sync_exit(), the
26*e4b17023SJohn Marino two functions required to support @synchronized().
27*e4b17023SJohn Marino
28*e4b17023SJohn Marino objc_sync_enter(object) needs to get a recursive lock associated
29*e4b17023SJohn Marino with 'object', and lock it.
30*e4b17023SJohn Marino
31*e4b17023SJohn Marino objc_sync_exit(object) needs to get the recursive lock associated
32*e4b17023SJohn Marino with 'object', and unlock it. */
33*e4b17023SJohn Marino
34*e4b17023SJohn Marino /* To avoid the overhead of continuously allocating and deallocating
35*e4b17023SJohn Marino locks, we implement a pool of locks. When a lock is needed for an
36*e4b17023SJohn Marino object, we get a lock from the pool and associate it with the
37*e4b17023SJohn Marino object.
38*e4b17023SJohn Marino
   The lock pool needs to be protected by its own lock (the
40*e4b17023SJohn Marino "protection" lock), which has to be locked then unlocked each time
41*e4b17023SJohn Marino objc_sync_enter() and objc_sync_exit() are called. To reduce the
42*e4b17023SJohn Marino contention on the protection lock, instead of a single pool with a
43*e4b17023SJohn Marino single (global) protection lock we use a number of smaller pools,
44*e4b17023SJohn Marino each with its own pool protection lock. To decide which lock pool
45*e4b17023SJohn Marino to use for each object, we compute a hash from the object pointer.
46*e4b17023SJohn Marino
47*e4b17023SJohn Marino The implementation of each lock pool uses a linked list of all the
48*e4b17023SJohn Marino locks in the pool (both unlocked, and locked); this works in the
49*e4b17023SJohn Marino assumption that the number of locks concurrently required is very
50*e4b17023SJohn Marino low. In practice, it seems that you rarely see more than a few
51*e4b17023SJohn Marino locks ever concurrently required.
52*e4b17023SJohn Marino
53*e4b17023SJohn Marino A standard case is a thread acquiring a lock recursively, over and
54*e4b17023SJohn Marino over again: for example when most methods of a class are protected
55*e4b17023SJohn Marino by @synchronized(self) but they also call each other. We use
56*e4b17023SJohn Marino thread-local storage to implement a cache and optimize this case.
57*e4b17023SJohn Marino The cache stores locks that the thread successfully acquired,
58*e4b17023SJohn Marino allowing objc_sync_enter() and objc_sync_exit() to locate a lock
59*e4b17023SJohn Marino which is already held by the current thread without having to use
   any protection lock or synchronization mechanism.  It can thus
   detect recursive locks/unlocks, and transform them into no-ops that
62*e4b17023SJohn Marino require no actual locking or synchronization mechanisms at all. */
63*e4b17023SJohn Marino
64*e4b17023SJohn Marino /* You can disable the thread-local cache (most likely to benchmark
65*e4b17023SJohn Marino the code with and without it) by compiling with
66*e4b17023SJohn Marino -DSYNC_CACHE_DISABLE, or commenting out the following line. */
67*e4b17023SJohn Marino /* #define SYNC_CACHE_DISABLE */
68*e4b17023SJohn Marino
69*e4b17023SJohn Marino /* If thread-local storage is not available, automatically disable the
70*e4b17023SJohn Marino cache. */
71*e4b17023SJohn Marino #ifndef HAVE_TLS
72*e4b17023SJohn Marino # define SYNC_CACHE_DISABLE
73*e4b17023SJohn Marino #endif
74*e4b17023SJohn Marino
75*e4b17023SJohn Marino #include "objc-private/common.h"
76*e4b17023SJohn Marino #include "objc/objc-sync.h" /* For objc_sync_enter(), objc_sync_exit() */
77*e4b17023SJohn Marino #include "objc/runtime.h" /* For objc_malloc() */
#include "objc/thr.h"                /* For objc_mutex_lock() and similar */
79*e4b17023SJohn Marino #include "objc-private/objc-sync.h" /* For __objc_sync_init() */
80*e4b17023SJohn Marino
81*e4b17023SJohn Marino /* We have 32 pools of locks, each of them protected by its own
82*e4b17023SJohn Marino protection lock. It's tempting to increase this number to reduce
83*e4b17023SJohn Marino contention; but in our tests it is high enough. */
84*e4b17023SJohn Marino #define SYNC_NUMBER_OF_POOLS 32
85*e4b17023SJohn Marino
86*e4b17023SJohn Marino /* Given an object, it determines which pool contains the associated
87*e4b17023SJohn Marino lock. */
88*e4b17023SJohn Marino #define SYNC_OBJECT_HASH(OBJECT) ((((size_t)OBJECT >> 8) ^ (size_t)OBJECT) & (SYNC_NUMBER_OF_POOLS - 1))
89*e4b17023SJohn Marino
90*e4b17023SJohn Marino /* The locks protecting each pool. */
91*e4b17023SJohn Marino static objc_mutex_t sync_pool_protection_locks[SYNC_NUMBER_OF_POOLS];
92*e4b17023SJohn Marino
93*e4b17023SJohn Marino /* The data structure (linked list) holding the locks. */
/* The data structure (linked list node) holding one recursive lock.
   A node may be unused (usage_count == 0), in use by one or more
   threads, or cached in a thread's thread-local lock_cache.  */
typedef struct lock_node
{
  /* Pointer to next entry on the list.  NULL indicates end of list.
     You need to hold the appropriate sync_pool_protection_locks[N] to
     read or write this variable.  */
  struct lock_node *next;

  /* The (recursive) lock.  Allocated when the node is created, and
     always not-NULL, and unchangeable, after that.  */
  objc_mutex_t lock;

  /* This is how many times the objc_mutex_lock() has been called on
     the lock (it is 0 when the lock is unused).  Used to track when
     the lock is no longer associated with an object and can be reused
     for another object.  It records "real" locks, potentially (but
     not necessarily) by multiple threads.  You need to hold the
     appropriate sync_pool_protection_locks[N] to read or write this
     variable.  */
  unsigned int usage_count;

  /* The object that the lock is associated with.  This variable can
     only be written when holding the sync_pool_protection_locks[N]
     and when node->usage_count == 0, ie, the lock is not being used.
     You can read this variable either when you hold the
     sync_pool_protection_locks[N] or when you hold node->lock,
     because in that case you know that node->usage_count can't get to
     zero until you release the lock.  It is valid to have usage_count
     == 0 and object != nil; in that case, the lock is not currently
     being used, but is still currently associated with the
     object.  */
  id object;

  /* This is a counter reserved for use by the thread currently
     holding the lock.  So, you need to hold node->lock to read or
     write this variable.  It is normally 0, and if the cache is not
     being used, it is kept at 0 (even if recursive locks are being
     done; in that case, no difference is made between recursive and
     non-recursive locks: they all increase usage_count, and call
     objc_mutex_lock()).  When the cache is being used, a thread may
     be able to find a lock that it already holds using the cache; in
     that case, to perform additional locks/unlocks it can
     increase/decrease the recursive_usage_count (which does not
     require any synchronization with other threads, since it's
     protected by the node->lock itself) instead of the usage_count
     (which requires locking the pool protection lock).  And it can
     skip the call to objc_mutex_lock/unlock too.  */
  unsigned int recursive_usage_count;
} *lock_node_ptr;
142*e4b17023SJohn Marino
143*e4b17023SJohn Marino
144*e4b17023SJohn Marino /* The pools of locks. Each of them is a linked list of lock_nodes.
145*e4b17023SJohn Marino In the list we keep both unlocked and locked nodes. */
146*e4b17023SJohn Marino static lock_node_ptr sync_pool_array[SYNC_NUMBER_OF_POOLS];
147*e4b17023SJohn Marino
148*e4b17023SJohn Marino #ifndef SYNC_CACHE_DISABLE
149*e4b17023SJohn Marino /* We store a cache of locks acquired by each thread in thread-local
150*e4b17023SJohn Marino storage. */
151*e4b17023SJohn Marino static __thread lock_node_ptr *lock_cache = NULL;
152*e4b17023SJohn Marino
153*e4b17023SJohn Marino /* This is a conservative implementation that uses a static array of
154*e4b17023SJohn Marino fixed size as cache. Because the cache is an array that we scan
155*e4b17023SJohn Marino linearly, the bigger it is, the slower it gets. This does not
156*e4b17023SJohn Marino matter much at small sizes (eg, the overhead of checking 8 cache
157*e4b17023SJohn Marino slots instead of 4 is very small compared to the other overheads
158*e4b17023SJohn Marino involved such as function calls and lock/unlock operations), but at
159*e4b17023SJohn Marino large sizes it becomes important as obviously there is a size over
160*e4b17023SJohn Marino which using the cache backfires: the lookup is so slow that the
161*e4b17023SJohn Marino cache slows down the software instead of speeding it up. In
162*e4b17023SJohn Marino practice, it seems that most threads use a small number of
163*e4b17023SJohn Marino concurrent locks, so we have a conservative implementation with a
164*e4b17023SJohn Marino fixed-size cache of 8 locks which gives a very predictable
165*e4b17023SJohn Marino behaviour. If a thread locks lots of different locks, only the
166*e4b17023SJohn Marino first 8 get the speed benefits of the cache, but the cache remains
167*e4b17023SJohn Marino always small, fast and predictable.
168*e4b17023SJohn Marino
169*e4b17023SJohn Marino SYNC_CACHE_SIZE is the size of the lock cache for each thread. */
170*e4b17023SJohn Marino #define SYNC_CACHE_SIZE 8
171*e4b17023SJohn Marino #endif /* SYNC_CACHE_DISABLE */
172*e4b17023SJohn Marino
173*e4b17023SJohn Marino /* Called at startup by init.c. */
174*e4b17023SJohn Marino void
__objc_sync_init(void)175*e4b17023SJohn Marino __objc_sync_init (void)
176*e4b17023SJohn Marino {
177*e4b17023SJohn Marino int i;
178*e4b17023SJohn Marino
179*e4b17023SJohn Marino for (i = 0; i < SYNC_NUMBER_OF_POOLS; i++)
180*e4b17023SJohn Marino {
181*e4b17023SJohn Marino lock_node_ptr new_node;
182*e4b17023SJohn Marino
183*e4b17023SJohn Marino /* Create a protection lock for each pool. */
184*e4b17023SJohn Marino sync_pool_protection_locks[i] = objc_mutex_allocate ();
185*e4b17023SJohn Marino
186*e4b17023SJohn Marino /* Preallocate a lock per pool. */
187*e4b17023SJohn Marino new_node = objc_malloc (sizeof (struct lock_node));
188*e4b17023SJohn Marino new_node->lock = objc_mutex_allocate ();
189*e4b17023SJohn Marino new_node->object = nil;
190*e4b17023SJohn Marino new_node->usage_count = 0;
191*e4b17023SJohn Marino new_node->recursive_usage_count = 0;
192*e4b17023SJohn Marino new_node->next = NULL;
193*e4b17023SJohn Marino
194*e4b17023SJohn Marino sync_pool_array[i] = new_node;
195*e4b17023SJohn Marino }
196*e4b17023SJohn Marino }
197*e4b17023SJohn Marino
/* Acquire the recursive lock associated with 'object'; called by
   compiler-generated code on entry to a @synchronized (object) block.
   Always returns OBJC_SYNC_SUCCESS.  A nil object is a no-op.
   Fast path: if the thread-local cache shows this thread already
   holds the lock, only recursive_usage_count is bumped and no real
   locking happens.  Slow path: look the lock up (or create it) in
   the hashed pool, under that pool's protection lock.  */
int
objc_sync_enter (id object)
{
#ifndef SYNC_CACHE_DISABLE
  /* Index of the first free slot found while scanning the
     thread-local cache, or -1 if none was seen (cache full).  */
  int free_cache_slot;
#endif
  int hash;
  lock_node_ptr node;
  lock_node_ptr unused_node;

  if (object == nil)
    return OBJC_SYNC_SUCCESS;

#ifndef SYNC_CACHE_DISABLE
  if (lock_cache == NULL)
    {
      /* Note that this calloc happens only once per thread, the
	 very first time a thread does a objc_sync_enter().  */
      lock_cache = objc_calloc (SYNC_CACHE_SIZE, sizeof (lock_node_ptr));
    }

  /* Check the cache to see if we have a record of having already
     locked the lock corresponding to this object.  While doing so,
     keep track of the first free cache node in case we need it
     later.  */
  node = NULL;
  free_cache_slot = -1;

  {
    int i;
    for (i = 0; i < SYNC_CACHE_SIZE; i++)
      {
	lock_node_ptr locked_node = lock_cache[i];

	if (locked_node == NULL)
	  {
	    if (free_cache_slot == -1)
	      free_cache_slot = i;
	  }
	else if (locked_node->object == object)
	  {
	    node = locked_node;
	    break;
	  }
      }
  }

  if (node != NULL)
    {
      /* We found the lock.  Increase recursive_usage_count, which is
	 protected by node->lock, which we already hold.  */
      node->recursive_usage_count++;

      /* There is no need to actually lock anything, since we already
	 hold the lock.  Correspondingly, objc_sync_exit() will just
	 decrease recursive_usage_count and do nothing to unlock.  */
      return OBJC_SYNC_SUCCESS;
    }
#endif /* SYNC_CACHE_DISABLE */

  /* The following is the standard lookup for the lock in the standard
     pool lock.  It requires a pool protection lock.  */
  hash = SYNC_OBJECT_HASH(object);

  /* Search for an existing lock for 'object'.  While searching, make
     note of any unused lock if we find any.  */
  unused_node = NULL;

  objc_mutex_lock (sync_pool_protection_locks[hash]);

  node = sync_pool_array[hash];

  while (node != NULL)
    {
      if (node->object == object)
	{
	  /* We found the lock.  Claim it (bump usage_count) while we
	     still hold the protection lock, then drop the protection
	     lock BEFORE blocking on node->lock, so that other threads
	     can keep using this pool while we wait.  */
	  node->usage_count++;
	  objc_mutex_unlock (sync_pool_protection_locks[hash]);

#ifndef SYNC_CACHE_DISABLE
	  /* Put it in the cache.  Safe to do before we hold
	     node->lock: the cache is thread-local, and usage_count
	     already guarantees the node won't be reassigned to a
	     different object under us.  */
	  if (free_cache_slot != -1)
	    lock_cache[free_cache_slot] = node;
#endif

	  /* Lock it.  May block if another thread holds it.  */
	  objc_mutex_lock (node->lock);

	  return OBJC_SYNC_SUCCESS;
	}

      if (unused_node == NULL && node->usage_count == 0)
	{
	  /* We found the first unused node.  Record it.  */
	  unused_node = node;
	}

      node = node->next;
    }

  /* An existing lock for 'object' could not be found.  */
  if (unused_node != NULL)
    {
      /* But we found an unused lock; use it.  Re-associating it with
	 'object' is legal because usage_count == 0 and we hold the
	 pool protection lock (see lock_node comments).  */
      unused_node->object = object;
      unused_node->usage_count = 1;
      unused_node->recursive_usage_count = 0;
      objc_mutex_unlock (sync_pool_protection_locks[hash]);

#ifndef SYNC_CACHE_DISABLE
      if (free_cache_slot != -1)
	lock_cache[free_cache_slot] = unused_node;
#endif

      objc_mutex_lock (unused_node->lock);

      return OBJC_SYNC_SUCCESS;
    }
  else
    {
      /* There are no unused nodes; allocate a new node.  */
      lock_node_ptr new_node;

      /* Create the node.  */
      new_node = objc_malloc (sizeof (struct lock_node));
      new_node->lock = objc_mutex_allocate ();
      new_node->object = object;
      new_node->usage_count = 1;
      new_node->recursive_usage_count = 0;

      /* Attach it at the beginning of the pool.  */
      new_node->next = sync_pool_array[hash];
      sync_pool_array[hash] = new_node;
      objc_mutex_unlock (sync_pool_protection_locks[hash]);

#ifndef SYNC_CACHE_DISABLE
      if (free_cache_slot != -1)
	lock_cache[free_cache_slot] = new_node;
#endif

      /* Nobody else can hold a freshly created lock, so this never
	 blocks.  */
      objc_mutex_lock (new_node->lock);

      return OBJC_SYNC_SUCCESS;
    }
}
344*e4b17023SJohn Marino
/* Release the recursive lock associated with 'object'; called by
   compiler-generated code on exit from a @synchronized (object)
   block.  A nil object is a no-op returning OBJC_SYNC_SUCCESS.
   Returns OBJC_SYNC_NOT_OWNING_THREAD_ERROR if no lock for 'object'
   exists in the pool (ie, an unbalanced exit).  */
int
objc_sync_exit (id object)
{
  int hash;
  lock_node_ptr node;

  if (object == nil)
    return OBJC_SYNC_SUCCESS;

#ifndef SYNC_CACHE_DISABLE
  if (lock_cache != NULL)
    {
      int i;

      /* Find the lock in the cache.  */
      node = NULL;
      for (i = 0; i < SYNC_CACHE_SIZE; i++)
	{
	  lock_node_ptr locked_node = lock_cache[i];

	  if (locked_node != NULL && locked_node->object == object)
	    {
	      node = locked_node;
	      break;
	    }
	}
      /* Note that, if a node was found in the cache, the variable i
	 now holds the index where it was found, which will be used to
	 remove it from the cache.  */
      if (node != NULL)
	{
	  if (node->recursive_usage_count > 0)
	    {
	      /* Recursive exit: just undo the matching recursive
		 enter; no real unlocking needed.  */
	      node->recursive_usage_count--;
	      return OBJC_SYNC_SUCCESS;
	    }
	  else
	    {
	      /* We need to do a real unlock.  */
	      hash = SYNC_OBJECT_HASH(object);

	      /* TODO: If we had atomic increase/decrease operations
		 with memory barriers, we could avoid the lock
		 here!  */
	      objc_mutex_lock (sync_pool_protection_locks[hash]);
	      node->usage_count--;
	      /* Normally, we do not reset object to nil here.  We'll
		 leave the lock associated with that object, at zero
		 usage count.  This makes it slightly more efficient to
		 provide a lock for that object if (as likely)
		 requested again.  If the object is deallocated, we
		 don't care.  It will never match a new lock that is
		 requested, and the node will be reused at some point.

		 But, if garbage collection is enabled, leaving a
		 pointer to the object in memory might prevent the
		 object from being released.  In that case, we remove
		 it (TODO: maybe we should avoid using the garbage
		 collector at all ?  Nothing is ever deallocated in
		 this file).  */
#if OBJC_WITH_GC
	      node->object = nil;
#endif
	      objc_mutex_unlock (sync_pool_protection_locks[hash]);

	      /* PS: Between objc_mutex_unlock
		 (sync_pool_protection_locks[hash]) and
		 objc_mutex_unlock (node->lock), the pool is unlocked
		 so other threads may allocate this same lock to
		 another object (!).  This is not a problem, but it is
		 curious.  */
	      objc_mutex_unlock (node->lock);

	      /* Remove the node from the cache.  */
	      lock_cache[i] = NULL;

	      return OBJC_SYNC_SUCCESS;
	    }
	}
    }
#endif

  /* The cache either wasn't there, or didn't work (eg, we overflowed
     it at some point and stopped recording new locks in the cache).
     Proceed with a full search of the lock pool.  */
  hash = SYNC_OBJECT_HASH(object);

  objc_mutex_lock (sync_pool_protection_locks[hash]);

  /* Search for an existing lock for 'object'.  */
  node = sync_pool_array[hash];

  while (node != NULL)
    {
      if (node->object == object)
	{
	  /* We found the lock.  */
	  node->usage_count--;
	  objc_mutex_unlock (sync_pool_protection_locks[hash]);

	  objc_mutex_unlock (node->lock);

	  /* No need to remove the node from the cache, since it
	     wasn't found in the cache when we looked for it!  */
	  return OBJC_SYNC_SUCCESS;
	}

      node = node->next;
    }

  objc_mutex_unlock (sync_pool_protection_locks[hash]);

  /* A lock for 'object' to unlock could not be found (!!).  */
  return OBJC_SYNC_NOT_OWNING_THREAD_ERROR;
}
460