/* GNU Objective C Runtime @synchronized implementation
   Copyright (C) 2010-2020 Free Software Foundation, Inc.
   Contributed by Nicola Pero <nicola.pero@meta-innovation.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation; either version 3, or (at your option) any later version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* This file implements objc_sync_enter() and objc_sync_exit(), the
   two functions required to support @synchronized().

   objc_sync_enter(object) needs to get a recursive lock associated
   with 'object', and lock it.

   objc_sync_exit(object) needs to get the recursive lock associated
   with 'object', and unlock it.  */
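
/* As a rough illustration (the code the compiler actually emits also
   includes exception-handling logic, omitted here),

     @synchronized (obj)
       {
         do_something ();
       }

   behaves like

     objc_sync_enter (obj);
     do_something ();
     objc_sync_exit (obj);

   where 'do_something' is just a placeholder for the protected
   code.  */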

/* To avoid the overhead of continuously allocating and deallocating
   locks, we implement a pool of locks.  When a lock is needed for an
   object, we get a lock from the pool and associate it with the
   object.

   The lock pool needs to be protected by its own lock (the
   "protection" lock), which has to be locked and then unlocked each
   time objc_sync_enter() and objc_sync_exit() are called.  To reduce
   the contention on the protection lock, instead of a single pool
   with a single (global) protection lock we use a number of smaller
   pools, each with its own pool protection lock.  To decide which
   lock pool to use for each object, we compute a hash from the
   object pointer.

   The implementation of each lock pool uses a linked list of all the
   locks in the pool (both unlocked and locked); this works on the
   assumption that the number of locks concurrently required is very
   low.  In practice, it seems that you rarely see more than a few
   locks required concurrently.

   A standard case is a thread acquiring a lock recursively, over and
   over again: for example when most methods of a class are protected
   by @synchronized(self) but they also call each other.  We use
   thread-local storage to implement a cache and optimize this case.
   The cache stores locks that the thread successfully acquired,
   allowing objc_sync_enter() and objc_sync_exit() to locate a lock
   which is already held by the current thread without having to use
   any protection lock or synchronization mechanism.  It can thus
   detect recursive locks/unlocks, and transform them into no-ops
   that require no actual locking or synchronization mechanisms at
   all.  */
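
/* For example (an illustrative sketch; 'add:' and 'check' are
   hypothetical methods of a class that synchronizes on self):

     - (void) add: (id)x
     {
       @synchronized (self)
         {
           [self check];   // 'check' also does @synchronized (self)
         }
     }

   The inner @synchronized (self) finds the lock in the thread's
   cache, so the recursive lock/unlock reduces to incrementing and
   then decrementing a counter that only this thread touches.  */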

/* You can disable the thread-local cache (most likely to benchmark
   the code with and without it) by compiling with
   -DSYNC_CACHE_DISABLE, or commenting out the following line.  */
/* #define SYNC_CACHE_DISABLE */

/* If thread-local storage is not available, automatically disable the
   cache.  */
#ifndef HAVE_TLS
# define SYNC_CACHE_DISABLE
#endif

#include "objc-private/common.h"
#include "objc/objc-sync.h"         /* For objc_sync_enter(), objc_sync_exit() */
#include "objc/runtime.h"           /* For objc_malloc() */
#include "objc/thr.h"               /* For objc_mutex_lock() and similar */
#include "objc-private/objc-sync.h" /* For __objc_sync_init() */

/* We have 32 pools of locks, each of them protected by its own
   protection lock.  It's tempting to increase this number to reduce
   contention, but in our tests 32 is already high enough.  */
#define SYNC_NUMBER_OF_POOLS 32

/* Given an object, determine which pool contains the associated
   lock.  */
#define SYNC_OBJECT_HASH(OBJECT) ((((size_t)OBJECT >> 8) ^ (size_t)OBJECT) & (SYNC_NUMBER_OF_POOLS - 1))
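
/* A worked example (assuming SYNC_NUMBER_OF_POOLS == 32, so the mask
   is 0x1f): for a hypothetical object at address 0x100af0,

     (0x100af0 >> 8) ^ 0x100af0 = 0x100a ^ 0x100af0 = 0x101afa

   and 0x101afa & 0x1f = 26, so the lock for that object lives in
   pool 26.  XORing in the bits above bit 8 helps spread aligned
   pointers (whose low bits tend to repeat) across the pools.  */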

/* The locks protecting each pool.  */
static objc_mutex_t sync_pool_protection_locks[SYNC_NUMBER_OF_POOLS];

/* The data structure (linked list) holding the locks.  */
typedef struct lock_node
{
  /* Pointer to next entry on the list.  NULL indicates end of list.
     You need to hold the appropriate sync_pool_protection_locks[N] to
     read or write this variable.  */
  struct lock_node *next;

  /* The (recursive) lock.  Allocated when the node is created; after
     that, it is always non-NULL and never changes.  */
  objc_mutex_t lock;

  /* This is how many times the objc_mutex_lock() has been called on
     the lock (it is 0 when the lock is unused).  Used to track when
     the lock is no longer associated with an object and can be reused
     for another object.  It records "real" locks, potentially (but
     not necessarily) by multiple threads.  You need to hold the
     appropriate sync_pool_protection_locks[N] to read or write this
     variable.  */
  unsigned int usage_count;

  /* The object that the lock is associated with.  This variable can
     only be written when holding the sync_pool_protection_locks[N]
     and when node->usage_count == 0, i.e., the lock is not being
     used.  You can read this variable either when you hold the
     sync_pool_protection_locks[N] or when you hold node->lock,
     because in that case you know that node->usage_count can't get to
     zero until you release the lock.  It is valid to have usage_count
     == 0 and object != nil; in that case, the lock is not currently
     being used, but is still currently associated with the
     object.  */
  id object;

  /* This is a counter reserved for use by the thread currently
     holding the lock.  So, you need to hold node->lock to read or
     write this variable.  It is normally 0, and if the cache is not
     being used, it is kept at 0 (even if recursive locks are being
     done; in that case, no difference is made between recursive and
     non-recursive locks: they all increase usage_count, and call
     objc_mutex_lock()).  When the cache is being used, a thread may
     be able to find a lock that it already holds using the cache; in
     that case, to perform additional locks/unlocks it can
     increase/decrease the recursive_usage_count (which does not
     require any synchronization with other threads, since it's
     protected by the node->lock itself) instead of the usage_count
     (which requires locking the pool protection lock).  And it can
     skip the call to objc_mutex_lock/unlock too.  */
  unsigned int recursive_usage_count;
} *lock_node_ptr;
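
/* To summarize the locking discipline described above:

     next                   -- read/write while holding the pool
                               protection lock
     lock                   -- immutable once the node is created
     usage_count            -- read/write while holding the pool
                               protection lock
     object                 -- write while holding the pool protection
                               lock with usage_count == 0; read while
                               holding either the pool protection lock
                               or node->lock
     recursive_usage_count  -- read/write while holding node->lock  */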


/* The pools of locks.  Each of them is a linked list of lock_nodes.
   In the list we keep both unlocked and locked nodes.  */
static lock_node_ptr sync_pool_array[SYNC_NUMBER_OF_POOLS];

#ifndef SYNC_CACHE_DISABLE
/* We store a cache of locks acquired by each thread in thread-local
   storage.  */
static __thread lock_node_ptr *lock_cache = NULL;

/* This is a conservative implementation that uses a static array of
   fixed size as cache.  Because the cache is an array that we scan
   linearly, the bigger it is, the slower it gets.  This does not
   matter much at small sizes (e.g., the overhead of checking 8 cache
   slots instead of 4 is very small compared to the other overheads
   involved such as function calls and lock/unlock operations), but at
   large sizes it becomes important as obviously there is a size over
   which using the cache backfires: the lookup is so slow that the
   cache slows down the software instead of speeding it up.  In
   practice, it seems that most threads use a small number of
   concurrent locks, so we have a conservative implementation with a
   fixed-size cache of 8 locks which gives a very predictable
   behaviour.  If a thread locks lots of different locks, only the
   first 8 get the speed benefits of the cache, but the cache remains
   always small, fast and predictable.

   SYNC_CACHE_SIZE is the size of the lock cache for each thread.  */
#define SYNC_CACHE_SIZE 8
#endif /* SYNC_CACHE_DISABLE */

/* Called at startup by init.c.  */
void
__objc_sync_init (void)
{
  int i;

  for (i = 0; i < SYNC_NUMBER_OF_POOLS; i++)
    {
      lock_node_ptr new_node;

      /* Create a protection lock for each pool.  */
      sync_pool_protection_locks[i] = objc_mutex_allocate ();

      /* Preallocate a lock per pool.  */
      new_node = objc_malloc (sizeof (struct lock_node));
      new_node->lock = objc_mutex_allocate ();
      new_node->object = nil;
      new_node->usage_count = 0;
      new_node->recursive_usage_count = 0;
      new_node->next = NULL;

      sync_pool_array[i] = new_node;
    }
}

int
objc_sync_enter (id object)
{
#ifndef SYNC_CACHE_DISABLE
  int free_cache_slot;
#endif
  int hash;
  lock_node_ptr node;
  lock_node_ptr unused_node;

  if (object == nil)
    return OBJC_SYNC_SUCCESS;

#ifndef SYNC_CACHE_DISABLE
  if (lock_cache == NULL)
    {
      /* Note that this calloc happens only once per thread, the
	 very first time a thread does an objc_sync_enter().  */
      lock_cache = objc_calloc (SYNC_CACHE_SIZE, sizeof (lock_node_ptr));
    }

  /* Check the cache to see if we have a record of having already
     locked the lock corresponding to this object.  While doing so,
     keep track of the first free cache node in case we need it
     later.  */
  node = NULL;
  free_cache_slot = -1;

  {
    int i;
    for (i = 0; i < SYNC_CACHE_SIZE; i++)
      {
	lock_node_ptr locked_node = lock_cache[i];

	if (locked_node == NULL)
	  {
	    if (free_cache_slot == -1)
	      free_cache_slot = i;
	  }
	else if (locked_node->object == object)
	  {
	    node = locked_node;
	    break;
	  }
      }
  }

  if (node != NULL)
    {
      /* We found the lock.  Increase recursive_usage_count, which is
	 protected by node->lock, which we already hold.  */
      node->recursive_usage_count++;

      /* There is no need to actually lock anything, since we already
	 hold the lock.  Correspondingly, objc_sync_exit() will just
	 decrease recursive_usage_count and do nothing to unlock.  */
      return OBJC_SYNC_SUCCESS;
    }
#endif /* SYNC_CACHE_DISABLE */

  /* The following is the standard lookup for the lock in the lock
     pool.  It requires taking the pool protection lock.  */
  hash = SYNC_OBJECT_HASH(object);

  /* Search for an existing lock for 'object'.  While searching, make
     note of any unused lock if we find any.  */
  unused_node = NULL;

  objc_mutex_lock (sync_pool_protection_locks[hash]);

  node = sync_pool_array[hash];

  while (node != NULL)
    {
      if (node->object == object)
	{
	  /* We found the lock.  */
	  node->usage_count++;
	  objc_mutex_unlock (sync_pool_protection_locks[hash]);

#ifndef SYNC_CACHE_DISABLE
	  /* Put it in the cache.  */
	  if (free_cache_slot != -1)
	    lock_cache[free_cache_slot] = node;
#endif

	  /* Lock it.  */
	  objc_mutex_lock (node->lock);

	  return OBJC_SYNC_SUCCESS;
	}

      if (unused_node == NULL  &&  node->usage_count == 0)
	{
	  /* We found the first unused node.  Record it.  */
	  unused_node = node;
	}

      node = node->next;
    }

  /* An existing lock for 'object' could not be found.  */
  if (unused_node != NULL)
    {
      /* But we found an unused lock; use it.  */
      unused_node->object = object;
      unused_node->usage_count = 1;
      unused_node->recursive_usage_count = 0;
      objc_mutex_unlock (sync_pool_protection_locks[hash]);

#ifndef SYNC_CACHE_DISABLE
      if (free_cache_slot != -1)
	lock_cache[free_cache_slot] = unused_node;
#endif

      objc_mutex_lock (unused_node->lock);

      return OBJC_SYNC_SUCCESS;
    }
  else
    {
      /* There are no unused nodes; allocate a new node.  */
      lock_node_ptr new_node;

      /* Create the node.  */
      new_node = objc_malloc (sizeof (struct lock_node));
      new_node->lock = objc_mutex_allocate ();
      new_node->object = object;
      new_node->usage_count = 1;
      new_node->recursive_usage_count = 0;

      /* Attach it at the beginning of the pool.  */
      new_node->next = sync_pool_array[hash];
      sync_pool_array[hash] = new_node;
      objc_mutex_unlock (sync_pool_protection_locks[hash]);

#ifndef SYNC_CACHE_DISABLE
      if (free_cache_slot != -1)
	lock_cache[free_cache_slot] = new_node;
#endif

      objc_mutex_lock (new_node->lock);

      return OBJC_SYNC_SUCCESS;
    }
}

int
objc_sync_exit (id object)
{
  int hash;
  lock_node_ptr node;

  if (object == nil)
    return OBJC_SYNC_SUCCESS;

#ifndef SYNC_CACHE_DISABLE
  if (lock_cache != NULL)
    {
      int i;

      /* Find the lock in the cache.  */
      node = NULL;
      for (i = 0; i < SYNC_CACHE_SIZE; i++)
	{
	  lock_node_ptr locked_node = lock_cache[i];

	  if (locked_node != NULL  &&  locked_node->object == object)
	    {
	      node = locked_node;
	      break;
	    }
	}
      /* Note that, if a node was found in the cache, the variable i
	 now holds the index where it was found, which will be used to
	 remove it from the cache.  */
      if (node != NULL)
	{
	  if (node->recursive_usage_count > 0)
	    {
	      node->recursive_usage_count--;
	      return OBJC_SYNC_SUCCESS;
	    }
	  else
	    {
	      /* We need to do a real unlock.  */
	      hash = SYNC_OBJECT_HASH(object);

	      /* TODO: If we had atomic increase/decrease operations
		 with memory barriers, we could avoid the lock
		 here!  */
	      objc_mutex_lock (sync_pool_protection_locks[hash]);
	      node->usage_count--;
	      /* Normally, we do not reset object to nil here.  We'll
		 leave the lock associated with that object, at zero
		 usage count.  This makes it slightly more efficient to
		 provide a lock for that object if (as likely)
		 requested again.  If the object is deallocated, we
		 don't care.  It will never match a new lock that is
		 requested, and the node will be reused at some point.

		 But, if garbage collection is enabled, leaving a
		 pointer to the object in memory might prevent the
		 object from being released.  In that case, we remove
		 it (TODO: maybe we should avoid using the garbage
		 collector at all?  Nothing is ever deallocated in
		 this file).  */
#if OBJC_WITH_GC
	      node->object = nil;
#endif
	      objc_mutex_unlock (sync_pool_protection_locks[hash]);

	      /* PS: Between objc_mutex_unlock
		 (sync_pool_protection_locks[hash]) and
		 objc_mutex_unlock (node->lock), the pool is unlocked
		 so other threads may allocate this same lock to
		 another object (!).  This is not a problem, but it is
		 curious.  */
	      objc_mutex_unlock (node->lock);

	      /* Remove the node from the cache.  */
	      lock_cache[i] = NULL;

	      return OBJC_SYNC_SUCCESS;
	    }
	}
    }
#endif

  /* The cache either wasn't there, or didn't work (e.g., we
     overflowed it at some point and stopped recording new locks in
     the cache).  Proceed with a full search of the lock pool.  */
  hash = SYNC_OBJECT_HASH(object);

  objc_mutex_lock (sync_pool_protection_locks[hash]);

  /* Search for an existing lock for 'object'.  */
  node = sync_pool_array[hash];

  while (node != NULL)
    {
      if (node->object == object)
	{
	  /* We found the lock.  */
	  node->usage_count--;
	  objc_mutex_unlock (sync_pool_protection_locks[hash]);

	  objc_mutex_unlock (node->lock);

	  /* No need to remove the node from the cache, since it
	     wasn't found in the cache when we looked for it!  */
	  return OBJC_SYNC_SUCCESS;
	}

      node = node->next;
    }

  objc_mutex_unlock (sync_pool_protection_locks[hash]);

  /* A lock for 'object' to unlock could not be found (!!).  */
  return OBJC_SYNC_NOT_OWNING_THREAD_ERROR;
}