xref: /netbsd-src/common/lib/libc/atomic/atomic_add_64_cas.c (revision 504e8e687f4f6c5bcfd35885ec51e14ae6b54f44)
1 /*	$NetBSD: atomic_add_64_cas.c,v 1.9 2014/06/23 21:53:45 joerg Exp $	*/
2 
3 /*-
4  * Copyright (c) 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include "atomic_op_namespace.h"
33 
34 #include <sys/atomic.h>
35 
36 #ifdef __HAVE_ATOMIC64_OPS
37 
/*
 * Declare the local implementation under the internal name
 * fetch_and_add_8, but give the symbol the assembler-level name
 * __sync_fetch_and_add_8 so it satisfies the compiler's libcall
 * for the GCC/Clang __sync_fetch_and_add builtin on 64-bit values.
 * The trailing "..." matches the variadic libcall signature the
 * compiler may emit.
 */
uint64_t fetch_and_add_8(volatile uint64_t *, uint64_t, ...)
    asm("__sync_fetch_and_add_8");
40 
/*
 * fetch_and_add_8 --
 *	Atomically add 'val' to *addr and return the previous value,
 *	emulated with a compare-and-swap retry loop for platforms
 *	without a native 64-bit atomic add.
 */
uint64_t
fetch_and_add_8(volatile uint64_t *addr, uint64_t val, ...)
{
	uint64_t expected;

	for (;;) {
		expected = *addr;
		/* Retry if another CPU changed *addr between the
		 * read above and the CAS below. */
		if (atomic_cas_64(addr, expected, expected + val) ==
		    expected)
			break;
	}
	return expected;
}
52 
/*
 * atomic_add_64 --
 *	Atomically add the (possibly negative) signed value 'val'
 *	to *addr; the result of the underlying fetch-and-add is
 *	deliberately discarded.
 */
void
atomic_add_64(volatile uint64_t *addr, int64_t val)
{
	/* Signed-to-unsigned conversion is well defined (mod 2^64). */
	(void)fetch_and_add_8(addr, (uint64_t)val);
}
58 
/* Let the C11-style __atomic builtin resolve to the same code. */
__strong_alias(__atomic_fetch_add_8,__sync_fetch_and_add_8)

/* Export the public atomic_add_64 name as a weak alias of the
 * reserved-namespace implementation. */
#undef atomic_add_64
atomic_op_alias(atomic_add_64,_atomic_add_64)

#if defined(_LP64)
/* On LP64 platforms, long and pointers are 64 bits, so the long-
 * and pointer-flavored adds are the same routine. */
#undef atomic_add_long
atomic_op_alias(atomic_add_long,_atomic_add_64)
__strong_alias(_atomic_add_long,_atomic_add_64)

#undef atomic_add_ptr
atomic_op_alias(atomic_add_ptr,_atomic_add_64)
__strong_alias(_atomic_add_ptr,_atomic_add_64)
#endif /* _LP64 */
73 
74 #endif
75