/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>          /* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
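
/*
 * Illustration (assuming, purely for the example, L1_CACHE_BYTES == 64):
 * two atomic_t objects at addresses 0x1000 and 0x1020 share a cacheline,
 * so ATOMIC_HASH() picks the same lock for both, while an object at
 * 0x1040 hashes to the next of the four locks.  Atomics that live in
 * different cachelines can therefore be updated in parallel on SMP.
 */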

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {     \
        arch_spinlock_t *s = ATOMIC_HASH(l);    \
        local_irq_save(f);                      \
        arch_spin_lock(s);                      \
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {        \
        arch_spinlock_t *s = ATOMIC_HASH(l);            \
        arch_spin_unlock(s);                            \
        local_irq_restore(f);                           \
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
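
/*
 * In the non-SMP case no cross-CPU exclusion is needed: disabling local
 * interrupts is enough to make the read-modify-write sequences below
 * atomic with respect to anything else that can run on the single CPU.
 */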

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
        unsigned long flags;
        _atomic_spin_lock_irqsave(v, flags);

        v->counter = i;

        _atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
        return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
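
/*
 * atomic_cmpxchg() returns the value that was in v->counter before the
 * operation, so callers can tell whether the exchange took place by
 * comparing the return value with the expected old value, e.g.
 * (illustrative sketch only, old/tmp are arbitrary local names):
 *
 *      old = atomic_read(v);
 *      while ((tmp = atomic_cmpxchg(v, old, old + 1)) != old)
 *              old = tmp;
 *
 * __atomic_add_unless() below is a real user of this pattern.
 */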

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;
        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c;
}
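
/*
 * Note that __atomic_add_unless() returns the old value rather than a
 * success flag; the generic atomic_add_unless()/atomic_inc_not_zero()
 * wrappers in <linux/atomic.h> turn that into a boolean by comparing
 * the result with @u.
 */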

#define ATOMIC_OP(op, c_op)                                             \
static __inline__ void atomic_##op(int i, atomic_t *v)                  \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        _atomic_spin_lock_irqsave(v, flags);                            \
        v->counter c_op i;                                              \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
}                                                                       \

#define ATOMIC_OP_RETURN(op, c_op)                                      \
static __inline__ int atomic_##op##_return(int i, atomic_t *v)          \
{                                                                       \
        unsigned long flags;                                            \
        int ret;                                                        \
                                                                        \
        _atomic_spin_lock_irqsave(v, flags);                            \
        ret = (v->counter c_op i);                                      \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
                                                                        \
        return ret;                                                     \
}

#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)
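
/*
 * The expansions above provide atomic_add(), atomic_sub(),
 * atomic_add_return(), atomic_sub_return(), atomic_and(), atomic_or()
 * and atomic_xor(), each performing its read-modify-write under the
 * hashed spinlock (or simply with interrupts disabled on UP).
 */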

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_inc(v)   (atomic_add(   1,(v)))
#define atomic_dec(v)   (atomic_add(  -1,(v)))

#define atomic_inc_return(v)    (atomic_add_return(   1,(v)))
#define atomic_dec_return(v)    (atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)       (atomic_add_return((a), (v)) < 0)

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)  (atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)        (atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)  { (i) }
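
/*
 * ATOMIC_INIT() is meant for static initialization, e.g. (illustrative,
 * nr_widgets is an arbitrary example name):
 *
 *      static atomic_t nr_widgets = ATOMIC_INIT(0);
 */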

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)                                           \
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)              \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        _atomic_spin_lock_irqsave(v, flags);                            \
        v->counter c_op i;                                              \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
}                                                                       \

#define ATOMIC64_OP_RETURN(op, c_op)                                    \
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)      \
{                                                                       \
        unsigned long flags;                                            \
        s64 ret;                                                        \
                                                                        \
        _atomic_spin_lock_irqsave(v, flags);                            \
        ret = (v->counter c_op i);                                      \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
                                                                        \
        return ret;                                                     \
}

#define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
ATOMIC64_OP(and, &=)
ATOMIC64_OP(or, |=)
ATOMIC64_OP(xor, ^=)
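
/*
 * As above, but for 64-bit counters: atomic64_add(), atomic64_sub(),
 * atomic64_add_return(), atomic64_sub_return(), atomic64_and(),
 * atomic64_or() and atomic64_xor().
 */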

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
        unsigned long flags;
        _atomic_spin_lock_irqsave(v, flags);

        v->counter = i;

        _atomic_spin_unlock_irqrestore(v, flags);
}

#define atomic64_set_release(v, i)      atomic64_set((v), (i))

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
        return READ_ONCE((v)->counter);
}

#define atomic64_inc(v)         (atomic64_add(   1,(v)))
#define atomic64_dec(v)         (atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)          (atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)          (atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)     (atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)        (atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)        (atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)      (atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
        ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns non-zero if the addition was performed, zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long c, old;
        c = atomic64_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic64_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
        long c, old, dec;
        c = atomic64_read(v);
        for (;;) {
                dec = c - 1;
                if (unlikely(dec < 0))
                        break;
                old = atomic64_cmpxchg((v), c, dec);
                if (likely(old == c))
                        break;
                c = old;
        }
        return dec;
}
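
/*
 * A typical (purely illustrative) use is a reference count that must not
 * drop below zero, with obj and release() being hypothetical names:
 *
 *      if (atomic64_dec_if_positive(&obj->refs) == 0)
 *              release(obj);
 *
 * A negative return value means the counter was already zero (or
 * negative) and was left untouched.
 */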

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */