/* GNU Objective C Runtime @synchronized implementation
   Copyright (C) 2010-2022 Free Software Foundation, Inc.
   Contributed by Nicola Pero <nicola.pero@meta-innovation.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation; either version 3, or (at your option) any later version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* This file implements objc_sync_enter() and objc_sync_exit(), the
   two functions required to support @synchronized().

   objc_sync_enter(object) needs to get a recursive lock associated
   with 'object', and lock it.

   objc_sync_exit(object) needs to get the recursive lock associated
   with 'object', and unlock it.  */
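
/* As an illustration (a sketch of the compiler's lowering, using a
   hypothetical do_something() function; this is not part of this
   file), a construct such as:

     @synchronized (obj)
       {
	 do_something ();
       }

   is emitted roughly as:

     objc_sync_enter (obj);
     do_something ();
     objc_sync_exit (obj);

   with exception handlers arranged so that objc_sync_exit() is also
   called if do_something() throws.  */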

/* To avoid the overhead of continuously allocating and deallocating
   locks, we implement a pool of locks.  When a lock is needed for an
   object, we get a lock from the pool and associate it with the
   object.

   The lock pool needs to be protected by its own lock (the
   "protection" lock), which has to be locked then unlocked each time
   objc_sync_enter() and objc_sync_exit() are called.  To reduce the
   contention on the protection lock, instead of a single pool with a
   single (global) protection lock we use a number of smaller pools,
   each with its own pool protection lock.  To decide which lock pool
   to use for each object, we compute a hash from the object pointer.

   The implementation of each lock pool uses a linked list of all the
   locks in the pool (both unlocked, and locked); this works on the
   assumption that the number of locks concurrently required is very
   low.  In practice, it seems that you rarely see more than a few
   locks ever concurrently required.

   A standard case is a thread acquiring a lock recursively, over and
   over again: for example when most methods of a class are protected
   by @synchronized(self) but they also call each other.  We use
   thread-local storage to implement a cache and optimize this case.
   The cache stores locks that the thread successfully acquired,
   allowing objc_sync_enter() and objc_sync_exit() to locate a lock
   which is already held by the current thread without having to use
   any protection lock or synchronization mechanism.  It can thus
   detect recursive locks/unlocks, and transform them into no-ops
   that require no actual locking or synchronization mechanisms at
   all.  */
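
/* As an example of the pattern that the thread-local cache
   optimizes, consider a hypothetical class whose methods are all
   protected by @synchronized (self) and call each other:

     - (void) foo
     {
       @synchronized (self)
	 {
	   [self bar];
	 }
     }

     - (void) bar
     {
       @synchronized (self)
	 {
	   ...
	 }
     }

   When foo calls bar, the outer objc_sync_enter() performs a real
   objc_mutex_lock(), while the inner one finds the lock in this
   thread's cache and merely increases recursive_usage_count, with
   no locking at all.  */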

/* You can disable the thread-local cache (most likely to benchmark
   the code with and without it) by compiling with
   -DSYNC_CACHE_DISABLE, or by uncommenting the following line.  */
/* #define SYNC_CACHE_DISABLE */

/* If thread-local storage is not available, automatically disable the
   cache.  */
#ifndef HAVE_TLS
# define SYNC_CACHE_DISABLE
#endif

#include "objc-private/common.h"
#include "objc/objc-sync.h"         /* For objc_sync_enter(), objc_sync_exit() */
#include "objc/runtime.h"           /* For objc_malloc() */
#include "objc/thr.h"               /* For objc_mutex_lock() and similar */
#include "objc-private/objc-sync.h" /* For __objc_sync_init() */

/* We have 32 pools of locks, each of them protected by its own
   protection lock.  It's tempting to increase this number to reduce
   contention; but in our tests 32 is already high enough.  */
#define SYNC_NUMBER_OF_POOLS 32

/* Given an object, this macro determines which pool contains the
   associated lock.  */
#define SYNC_OBJECT_HASH(OBJECT) ((((size_t)OBJECT >> 8) ^ (size_t)OBJECT) & (SYNC_NUMBER_OF_POOLS - 1))
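
/* For example, for a (hypothetical) object at address 0x1004:

     (0x1004 >> 8) == 0x10;  0x10 ^ 0x1004 == 0x1014;
     0x1014 & (SYNC_NUMBER_OF_POOLS - 1) == 0x14

   so that object would use pool number 20.  Xoring in the shifted
   high bits spreads objects whose addresses differ only above the
   low 5 bits across different pools.  */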

/* The locks protecting each pool.  */
static objc_mutex_t sync_pool_protection_locks[SYNC_NUMBER_OF_POOLS];

/* The data structure (linked list) holding the locks.  */
typedef struct lock_node
{
  /* Pointer to next entry on the list.  NULL indicates end of list.
     You need to hold the appropriate sync_pool_protection_locks[N] to
     read or write this variable.  */
  struct lock_node *next;

  /* The (recursive) lock.  Allocated when the node is created;
     always non-NULL and never changed after that.  */
  objc_mutex_t lock;

  /* This is how many times objc_mutex_lock() has been called on the
     lock (it is 0 when the lock is unused).  Used to track when the
     lock is no longer associated with an object and can be reused
     for another object.  It records "real" locks, potentially (but
     not necessarily) by multiple threads.  You need to hold the
     appropriate sync_pool_protection_locks[N] to read or write this
     variable.  */
  unsigned int usage_count;

  /* The object that the lock is associated with.  This variable can
     only be written when holding the sync_pool_protection_locks[N]
     and when node->usage_count == 0, ie, the lock is not being used.
     You can read this variable either when you hold the
     sync_pool_protection_locks[N] or when you hold node->lock,
     because in that case you know that node->usage_count can't get to
     zero until you release the lock.  It is valid to have usage_count
     == 0 and object != nil; in that case, the lock is not currently
     being used, but is still currently associated with the
     object.  */
  id object;

  /* This is a counter reserved for use by the thread currently
     holding the lock.  So, you need to hold node->lock to read or
     write this variable.  It is normally 0, and if the cache is not
     being used, it is kept at 0 (even if recursive locks are being
     done; in that case, no difference is made between recursive and
     non-recursive locks: they all increase usage_count, and call
     objc_mutex_lock()).  When the cache is being used, a thread may
     be able to find a lock that it already holds using the cache; in
     that case, to perform additional locks/unlocks it can
     increase/decrease the recursive_usage_count (which does not
     require any synchronization with other threads, since it's
     protected by the node->lock itself) instead of the usage_count
     (which requires locking the pool protection lock).  And it can
     skip the call to objc_mutex_lock/unlock too.  */
  unsigned int recursive_usage_count;
} *lock_node_ptr;
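
/* As a sketch of the resulting data structure, a pool in which
   object A is currently locked (two outstanding objc_sync_enter()
   calls) and a second node is unused might look like:

     sync_pool_array[N] -> { object = A,   usage_count = 2 }
			-> { object = nil, usage_count = 0 }
			-> NULL

   The unused node can be reassigned to any new object hashing to
   pool N; the first node can only be reused once its usage_count
   drops back to 0.  */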


/* The pools of locks.  Each of them is a linked list of lock_nodes.
   In the list we keep both unlocked and locked nodes.  */
static lock_node_ptr sync_pool_array[SYNC_NUMBER_OF_POOLS];

#ifndef SYNC_CACHE_DISABLE
/* We store a cache of locks acquired by each thread in thread-local
   storage.  */
static __thread lock_node_ptr *lock_cache = NULL;

/* This is a conservative implementation that uses a static array of
   fixed size as cache.  Because the cache is an array that we scan
   linearly, the bigger it is, the slower it gets.  This does not
   matter much at small sizes (eg, the overhead of checking 8 cache
   slots instead of 4 is very small compared to the other overheads
   involved such as function calls and lock/unlock operations), but at
   large sizes it becomes important as obviously there is a size over
   which using the cache backfires: the lookup is so slow that the
   cache slows down the software instead of speeding it up.  In
   practice, it seems that most threads use a small number of
   concurrent locks, so we have a conservative implementation with a
   fixed-size cache of 8 locks which gives a very predictable
   behaviour.  If a thread locks lots of different locks, only the
   first 8 get the speed benefits of the cache, but the cache remains
   always small, fast and predictable.

   SYNC_CACHE_SIZE is the size of the lock cache for each thread.  */
#define SYNC_CACHE_SIZE 8
#endif /* SYNC_CACHE_DISABLE */
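
/* Conceptually, each thread's cache is then simply:

     lock_node_ptr lock_cache[SYNC_CACHE_SIZE];

   (in practice it is allocated lazily, with objc_calloc(), on the
   thread's first objc_sync_enter()).  It is scanned linearly: a NULL
   slot is free, while a non-NULL slot points to a node whose lock
   the thread currently holds.  */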

/* Called at startup by init.c.  */
void
__objc_sync_init (void)
{
  int i;

  for (i = 0; i < SYNC_NUMBER_OF_POOLS; i++)
    {
      lock_node_ptr new_node;

      /* Create a protection lock for each pool.  */
      sync_pool_protection_locks[i] = objc_mutex_allocate ();

      /* Preallocate a lock per pool.  */
      new_node = objc_malloc (sizeof (struct lock_node));
      new_node->lock = objc_mutex_allocate ();
      new_node->object = nil;
      new_node->usage_count = 0;
      new_node->recursive_usage_count = 0;
      new_node->next = NULL;

      sync_pool_array[i] = new_node;
    }
}

int
objc_sync_enter (id object)
{
#ifndef SYNC_CACHE_DISABLE
  int free_cache_slot;
#endif
  int hash;
  lock_node_ptr node;
  lock_node_ptr unused_node;

  if (object == nil)
    return OBJC_SYNC_SUCCESS;

#ifndef SYNC_CACHE_DISABLE
  if (lock_cache == NULL)
    {
      /* Note that this calloc happens only once per thread, the
	 very first time a thread does an objc_sync_enter().  */
      lock_cache = objc_calloc (SYNC_CACHE_SIZE, sizeof (lock_node_ptr));
    }

  /* Check the cache to see if we have a record of having already
     locked the lock corresponding to this object.  While doing so,
     keep track of the first free cache slot in case we need it
     later.  */
  node = NULL;
  free_cache_slot = -1;

  {
    int i;
    for (i = 0; i < SYNC_CACHE_SIZE; i++)
      {
	lock_node_ptr locked_node = lock_cache[i];

	if (locked_node == NULL)
	  {
	    if (free_cache_slot == -1)
	      free_cache_slot = i;
	  }
	else if (locked_node->object == object)
	  {
	    node = locked_node;
	    break;
	  }
      }
  }

  if (node != NULL)
    {
      /* We found the lock.  Increase recursive_usage_count, which is
	 protected by node->lock, which we already hold.  */
      node->recursive_usage_count++;

      /* There is no need to actually lock anything, since we already
	 hold the lock.  Correspondingly, objc_sync_exit() will just
	 decrease recursive_usage_count and do nothing to unlock.  */
      return OBJC_SYNC_SUCCESS;
    }
#endif /* SYNC_CACHE_DISABLE */

  /* The following is the standard lookup for the lock in the lock
     pool.  It requires taking the pool protection lock.  */
  hash = SYNC_OBJECT_HASH(object);

  /* Search for an existing lock for 'object'.  While searching, make
     note of any unused lock if we find one.  */
  unused_node = NULL;

  objc_mutex_lock (sync_pool_protection_locks[hash]);

  node = sync_pool_array[hash];

  while (node != NULL)
    {
      if (node->object == object)
	{
	  /* We found the lock.  */
	  node->usage_count++;
	  objc_mutex_unlock (sync_pool_protection_locks[hash]);

#ifndef SYNC_CACHE_DISABLE
	  /* Put it in the cache.  */
	  if (free_cache_slot != -1)
	    lock_cache[free_cache_slot] = node;
#endif

	  /* Lock it.  */
	  objc_mutex_lock (node->lock);

	  return OBJC_SYNC_SUCCESS;
	}

      if (unused_node == NULL  &&  node->usage_count == 0)
	{
	  /* We found the first unused node.  Record it.  */
	  unused_node = node;
	}

      node = node->next;
    }

  /* An existing lock for 'object' could not be found.  */
  if (unused_node != NULL)
    {
      /* But we found an unused lock; use it.  */
      unused_node->object = object;
      unused_node->usage_count = 1;
      unused_node->recursive_usage_count = 0;
      objc_mutex_unlock (sync_pool_protection_locks[hash]);

#ifndef SYNC_CACHE_DISABLE
      if (free_cache_slot != -1)
	lock_cache[free_cache_slot] = unused_node;
#endif

      objc_mutex_lock (unused_node->lock);

      return OBJC_SYNC_SUCCESS;
    }
  else
    {
      /* There are no unused nodes; allocate a new node.  */
      lock_node_ptr new_node;

      /* Create the node.  */
      new_node = objc_malloc (sizeof (struct lock_node));
      new_node->lock = objc_mutex_allocate ();
      new_node->object = object;
      new_node->usage_count = 1;
      new_node->recursive_usage_count = 0;

      /* Attach it at the beginning of the pool.  */
      new_node->next = sync_pool_array[hash];
      sync_pool_array[hash] = new_node;
      objc_mutex_unlock (sync_pool_protection_locks[hash]);

#ifndef SYNC_CACHE_DISABLE
      if (free_cache_slot != -1)
	lock_cache[free_cache_slot] = new_node;
#endif

      objc_mutex_lock (new_node->lock);

      return OBJC_SYNC_SUCCESS;
    }
}

int
objc_sync_exit (id object)
{
  int hash;
  lock_node_ptr node;

  if (object == nil)
    return OBJC_SYNC_SUCCESS;

#ifndef SYNC_CACHE_DISABLE
  if (lock_cache != NULL)
    {
      int i;

      /* Find the lock in the cache.  */
      node = NULL;
      for (i = 0; i < SYNC_CACHE_SIZE; i++)
	{
	  lock_node_ptr locked_node = lock_cache[i];

	  if (locked_node != NULL  &&  locked_node->object == object)
	    {
	      node = locked_node;
	      break;
	    }
	}
      /* Note that, if a node was found in the cache, the variable i
	 now holds the index where it was found, which will be used to
	 remove it from the cache.  */
      if (node != NULL)
	{
	  if (node->recursive_usage_count > 0)
	    {
	      node->recursive_usage_count--;
	      return OBJC_SYNC_SUCCESS;
	    }
	  else
	    {
	      /* We need to do a real unlock.  */
	      hash = SYNC_OBJECT_HASH(object);

	      /* TODO: If we had atomic increase/decrease operations
		 with memory barriers, we could avoid the lock
		 here!  */
	      objc_mutex_lock (sync_pool_protection_locks[hash]);
	      node->usage_count--;
	      /* Normally, we do not reset object to nil here.  We'll
		 leave the lock associated with that object, at zero
		 usage count.  This makes it slightly more efficient to
		 provide a lock for that object if (as likely)
		 requested again.  If the object is deallocated, we
		 don't care.  It will never match a new lock that is
		 requested, and the node will be reused at some point.

		 But, if garbage collection is enabled, leaving a
		 pointer to the object in memory might prevent the
		 object from being released.  In that case, we remove
		 it (TODO: maybe we should avoid using the garbage
		 collector at all?  Nothing is ever deallocated in
		 this file).  */
#if OBJC_WITH_GC
	      node->object = nil;
#endif
	      objc_mutex_unlock (sync_pool_protection_locks[hash]);

	      /* PS: Between objc_mutex_unlock
		 (sync_pool_protection_locks[hash]) and
		 objc_mutex_unlock (node->lock), the pool is unlocked
		 so other threads may allocate this same lock to
		 another object (!).  This is not a problem, but it is
		 curious.  */
	      objc_mutex_unlock (node->lock);

	      /* Remove the node from the cache.  */
	      lock_cache[i] = NULL;

	      return OBJC_SYNC_SUCCESS;
	    }
	}
    }
#endif

  /* The cache either wasn't there, or didn't work (eg, we overflowed
     it at some point and stopped recording new locks in the cache).
     Proceed with a full search of the lock pool.  */
  hash = SYNC_OBJECT_HASH(object);

  objc_mutex_lock (sync_pool_protection_locks[hash]);

  /* Search for an existing lock for 'object'.  */
  node = sync_pool_array[hash];

  while (node != NULL)
    {
      if (node->object == object)
	{
	  /* We found the lock.  */
	  node->usage_count--;
	  objc_mutex_unlock (sync_pool_protection_locks[hash]);

	  objc_mutex_unlock (node->lock);

	  /* No need to remove the node from the cache, since it
	     wasn't found in the cache when we looked for it!  */
	  return OBJC_SYNC_SUCCESS;
	}

      node = node->next;
    }

  objc_mutex_unlock (sync_pool_protection_locks[hash]);

  /* A lock for 'object' to unlock could not be found (!!).  */
  return OBJC_SYNC_NOT_OWNING_THREAD_ERROR;
}
45948fb7bfaSmrg }
460