/*	$NetBSD: atomic.h,v 1.7 2014/07/17 14:30:33 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

static inline int
atomic_read(const atomic_t *atomic)
{
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	atomic->a_u.au_int = value;
}
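
/*
 * Example usage (count and n are hypothetical names): atomic_read and
 * atomic_set are plain loads and stores with no memory ordering
 * implied, as in Linux.
 *
 *	static atomic_t count = ATOMIC_INIT(0);
 *
 *	atomic_set(&count, 42);
 *	n = atomic_read(&count);
 */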

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	return (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
}
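
/*
 * Example usage (next_ticket and ticket are hypothetical names):
 * atomic_add_return returns the value after the addition, so it can
 * hand out unique, monotonically increasing tickets without a
 * separate read:
 *
 *	static atomic_t next_ticket = ATOMIC_INIT(0);
 *
 *	ticket = atomic_add_return(1, &next_ticket);
 */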

static inline void
atomic_inc(atomic_t *atomic)
{
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	return (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	return (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	return (0 == (int)atomic_dec_uint_nv(&atomic->a_u.au_uint));
}
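
/*
 * Example usage (obj and obj_free are hypothetical): the usual Linux
 * reference-counting idiom.  atomic_dec_and_test returns nonzero iff
 * the decremented count reached zero, so exactly one releaser frees
 * the object:
 *
 *	atomic_inc(&obj->refcnt);
 *	...
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		obj_free(obj);
 */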

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}
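
/*
 * Example usage (FLAG_BUSY and state are hypothetical): the mask
 * operations treat the atomic as a small set of flag bits:
 *
 *	atomic_set_mask(FLAG_BUSY, &state);
 *	...
 *	atomic_clear_mask(FLAG_BUSY, &state);
 */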

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			return 0;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != value);

	return 1;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	return atomic_add_unless(atomic, 1, 0);
}
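
/*
 * Example usage (obj and obj_use are hypothetical): in Linux,
 * atomic_inc_not_zero is the idiom for taking a reference to an
 * object whose count may concurrently drop to zero; the increment
 * happens only if the count is still nonzero:
 *
 *	if (atomic_inc_not_zero(&obj->refcnt))
 *		obj_use(obj);
 */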

static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	return (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int old, int new)
{
	return (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)old,
	    (unsigned)new);
}
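
/*
 * Example usage (a, old, new, and transform are hypothetical): as in
 * Linux, atomic_cmpxchg returns the value observed before the
 * exchange, so a compare-and-swap update loop retries until the
 * observed value matches what it read:
 *
 *	do {
 *		old = atomic_read(&a);
 *		new = transform(old);
 *	} while (atomic_cmpxchg(&a, old, new) != old);
 */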

struct atomic64 {
	volatile uint64_t a_v;
};

typedef struct atomic64 atomic64_t;

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	a->a_v = v;
}

static inline void
atomic64_add(long long d, struct atomic64 *a)
{
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(long long d, struct atomic64 *a)
{
	atomic_add_64(&a->a_v, -d);
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t v)
{
	return atomic_swap_64(&a->a_v, v);
}
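
/*
 * Example usage (bytes_rx, len, and total are hypothetical): a 64-bit
 * statistics counter.  No ATOMIC64_INIT is provided here, so use
 * atomic64_set before the first access:
 *
 *	static atomic64_t bytes_rx;
 *
 *	atomic64_set(&bytes_rx, 0);
 *	atomic64_add(len, &bytes_rx);
 *	total = atomic64_read(&bytes_rx);
 */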

static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
}

static inline void
clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}

static inline void
change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
}
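
/*
 * Example usage (bitmap and NWORDS are hypothetical): the bit
 * operations address bit (bit % units) of word (bit / units) in an
 * array of unsigned longs, so the bit number may exceed the width of
 * a single word:
 *
 *	static volatile unsigned long bitmap[NWORDS];
 *
 *	set_bit(137, bitmap);
 *	change_bit(137, bitmap);
 *	clear_bit(137, bitmap);
 */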

static inline unsigned long
test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);

	return ((v & mask) != 0);
}

static inline unsigned long
test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v & ~mask)) != v);

	return ((v & mask) != 0);
}

static inline unsigned long
test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);

	return ((v & mask) != 0);
}
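
/*
 * Example usage (SLOT_BUSY, slot, and slot_init are hypothetical):
 * because test_and_set_bit returns the bit's previous value, it can
 * claim a resource exactly once; only the caller that saw the bit
 * clear proceeds:
 *
 *	if (!test_and_set_bit(SLOT_BUSY, &slot->flags))
 *		slot_init(slot);
 */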

#if defined(MULTIPROCESSOR) && !defined(__HAVE_ATOMIC_AS_MEMBAR)
/*
 * XXX These memory barriers are doubtless overkill, but I am having
 * trouble understanding the intent and use of the Linux atomic membar
 * API.  I think that for reference counting purposes, the sequences
 * should be insn/inc/enter and exit/dec/insn, but the use of the
 * before/after memory barriers is not consistent throughout Linux.
 */
#  define	smp_mb__before_atomic_inc()	membar_sync()
#  define	smp_mb__after_atomic_inc()	membar_sync()
#  define	smp_mb__before_atomic_dec()	membar_sync()
#  define	smp_mb__after_atomic_dec()	membar_sync()
#else
#  define	smp_mb__before_atomic_inc()	__insn_barrier()
#  define	smp_mb__after_atomic_inc()	__insn_barrier()
#  define	smp_mb__before_atomic_dec()	__insn_barrier()
#  define	smp_mb__after_atomic_dec()	__insn_barrier()
#endif
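
/*
 * Example usage (obj is hypothetical): a Linux caller pairs these
 * barriers with the unordered atomics above, e.g. to make prior
 * stores to an object visible before its reference count is bumped:
 *
 *	smp_mb__before_atomic_inc();
 *	atomic_inc(&obj->refcnt);
 */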

#endif	/* _LINUX_ATOMIC_H_ */