/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
   Contributed by Jakub Jelinek <jakub@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This is a Linux-specific implementation of the pointer-lock (ptrlock)
   synchronization mechanism for libgomp.  This type is private to the
   library.  This implementation uses atomic instructions and the futex
   syscall.  */
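
/* A note on the protocol, inferred from the code below and from the fast
   paths that live in config/linux/ptrlock.h (paraphrased here, not quoted):

   A gomp_ptrlock_t word holds either the final pointer being published or
   one of two small sentinel values:
     1 - some thread has claimed the lock and is computing the pointer;
     2 - as 1, but at least one other thread is (or soon will be) blocked
	 in the futex waiting for the pointer to appear.
   The fast-path gomp_ptrlock_get is expected to return the pointer if it
   is already published, claim the lock (presumably NULL -> 1) and return
   NULL so the caller computes the value, or otherwise fall into
   gomp_ptrlock_get_slow below and block.  The fast-path gomp_ptrlock_set
   publishes the pointer and, when the old word was 2, calls
   gomp_ptrlock_set_slow to wake the sleepers.  */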

#include <endian.h>
#include <limits.h>
#include "wait.h"

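/* Slow path of gomp_ptrlock_get: the pointer has not been published yet
   and another thread owns the lock, so mark the lock as contended and
   sleep on the futex until the owner stores the final pointer, which is
   then returned.  Presumably reached only from the gomp_ptrlock_get fast
   path in ptrlock.h.  */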
void *
gomp_ptrlock_get_slow (gomp_ptrlock_t *ptrlock)
{
  int *intptr;
  uintptr_t oldval = 1;

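  /* Advertise that there is at least one waiter: move the lock word from
     1 (owner still computing) to 2 (owner computing, waiters present) so
     the setter knows it has to futex_wake.  If the CAS fails, the word is
     already 2 or already holds the final pointer; the wait loop below
     rechecks, so nothing more is needed here.  */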
  __atomic_compare_exchange_n (ptrlock, &oldval, 2, false,
                               MEMMODEL_RELAXED, MEMMODEL_RELAXED);

  /* futex works on ints, not pointers.
     But a valid work share pointer will be at least
     8 byte aligned, so it is safe to assume the low
     32-bits of the pointer won't contain values 1 or 2.  */
  __asm volatile ("" : "=r" (intptr) : "0" (ptrlock));
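  /* The empty asm above copies ptrlock into intptr through a register,
     giving an int * view of the lock word without a plain cast, presumably
     to sidestep type-based aliasing issues.  On 64-bit big-endian targets
     the low 32 bits of the pointer live in the higher-addressed word,
     hence the adjustment below.  */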
#if __BYTE_ORDER == __BIG_ENDIAN
  if (sizeof (*ptrlock) > sizeof (int))
    intptr += (sizeof (*ptrlock) / sizeof (int)) - 1;
#endif
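  /* Sleep in the futex as long as the low word still reads 2; the loop
     guards against spurious and unrelated wakeups.  Once the word changes,
     the owner has stored the real pointer, which the acquire load below
     picks up and returns.  */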
  do
    do_wait (intptr, 2);
  while (__atomic_load_n (intptr, MEMMODEL_RELAXED) == 2);
  __asm volatile ("" : : : "memory");
  return (void *) __atomic_load_n (ptrlock, MEMMODEL_ACQUIRE);
}

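/* Slow path of gomp_ptrlock_set: the previous lock word was 2, i.e. at
   least one thread is blocked in gomp_ptrlock_get_slow, so wake every
   waiter on the futex word.  The pointer itself has already been stored
   by the fast path, presumably in ptrlock.h.  */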
void
gomp_ptrlock_set_slow (gomp_ptrlock_t *ptrlock)
{
  int *intptr;

  __asm volatile ("" : "=r" (intptr) : "0" (ptrlock));
#if __BYTE_ORDER == __BIG_ENDIAN
  if (sizeof (*ptrlock) > sizeof (int))
    intptr += (sizeof (*ptrlock) / sizeof (int)) - 1;
#endif
  futex_wake (intptr, INT_MAX);
}