/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)	{ (i) }

#define __atomic_acquire_fence()					\
	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory")
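
/*
 * RISCV_ACQUIRE_BARRIER and RISCV_RELEASE_BARRIER come from asm/fence.h;
 * on CONFIG_SMP they are expected to expand to "fence r, rw" and
 * "fence rw, w" respectively, and to nothing on !CONFIG_SMP.
 */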

static __always_inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
static __always_inline long atomic64_read(const atomic64_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void atomic64_set(atomic64_t *v, long i)
{
	WRITE_ONCE(v->counter, i);
}
#endif

/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)		\
static __always_inline							\
void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)		\
{									\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " zero, %1, %0"	\
		: "+A" (v->counter)					\
		: "r" (I)						\
		: "memory");						\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_OP (op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_OP (op, asm_op, I, w, int,   )				\
	ATOMIC_OP (op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)

#undef ATOMIC_OP
#undef ATOMIC_OPS
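
/*
 * As a sketch (illustrative only), the 32-bit leg of ATOMIC_OPS(add, add, i)
 * above expands to roughly:
 *
 *	static __always_inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__ (
 *			"	amoadd.w zero, %1, %0"
 *			: "+A" (v->counter)
 *			: "r" (i)
 *			: "memory");
 *	}
 *
 * The AMO's result is written to the zero register, so the old value is
 * simply discarded.
 */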

/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type atomic##prefix##_fetch_##op##_relaxed(c_type i,			\
					     atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}									\
static __always_inline							\
c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type ".aqrl %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type atomic##prefix##_##op##_return_relaxed(c_type i,			\
					      atomic##prefix##_t *v)	\
{									\
	return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I;	\
}									\
static __always_inline							\
c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)	\
{									\
	return atomic##prefix##_fetch_##op(i, v) c_op I;		\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )		\
	ATOMIC_FETCH_OP( op, asm_op,       I, d, long, 64)		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)
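
/*
 * Again as an illustrative sketch, the fully-ordered 32-bit functions
 * generated by ATOMIC_OPS(add, add, +, i) above come out to roughly:
 *
 *	static __always_inline int atomic_fetch_add(int i, atomic_t *v)
 *	{
 *		register int ret;
 *		__asm__ __volatile__ (
 *			"	amoadd.w.aqrl %1, %2, %0"
 *			: "+A" (v->counter), "=r" (ret)
 *			: "r" (i)
 *			: "memory");
 *		return ret;
 *	}
 *
 *	static __always_inline int atomic_add_return(int i, atomic_t *v)
 *	{
 *		return atomic_fetch_add(i, v) + i;
 *	}
 */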

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_add_return		atomic_add_return
#define atomic_sub_return		atomic_sub_return

#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
#define atomic_fetch_add		atomic_fetch_add
#define atomic_fetch_sub		atomic_fetch_sub

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_add_return		atomic64_add_return
#define atomic64_sub_return		atomic64_sub_return

#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
#define atomic64_fetch_add		atomic64_fetch_add
#define atomic64_fetch_sub		atomic64_fetch_sub
#endif

#undef ATOMIC_OPS

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )			\
	ATOMIC_FETCH_OP(op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
#define atomic_fetch_and		atomic_fetch_and
#define atomic_fetch_or			atomic_fetch_or
#define atomic_fetch_xor		atomic_fetch_xor

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
#define atomic64_fetch_and		atomic64_fetch_and
#define atomic64_fetch_or		atomic64_fetch_or
#define atomic64_fetch_xor		atomic64_fetch_xor
#endif

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/* This is required to provide a full barrier on success. */
static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
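
/*
 * Usage sketch (these helpers live in the generic atomic layer, not here):
 * atomic_add_unless() and atomic_inc_not_zero() are built on top of this
 * primitive, roughly as
 *
 *	atomic_add_unless(v, a, u)  == (atomic_fetch_add_unless(v, a, u) != u)
 *	atomic_inc_not_zero(v)      == atomic_add_unless(v, 1, 0)
 *
 * which is why the LR/SC loop above must provide a full barrier whenever
 * the store succeeds.
 */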

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif

/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics
 * as {cmp,}xchg and the value-returning atomic ops, so they need a full
 * barrier.
 */
#define ATOMIC_OP(c_t, prefix, size)					\
static __always_inline							\
c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_relaxed(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_acquire(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_release(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n)			\
{									\
	return __xchg(&(v->counter), n, size);				\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_relaxed(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_acquire(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_release(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n)	\
{									\
	return __cmpxchg(&(v->counter), o, n, size);			\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS()							\
	ATOMIC_OP( int,   , 4)
#else
#define ATOMIC_OPS()							\
	ATOMIC_OP( int,   , 4)						\
	ATOMIC_OP(long, 64, 8)
#endif

ATOMIC_OPS()

#undef ATOMIC_OPS
#undef ATOMIC_OP
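
/*
 * Illustrative sketch only: with (c_t, prefix, size) = (int, , 4), the
 * fully-ordered variant above expands to
 *
 *	static __always_inline int atomic_xchg(atomic_t *v, int n)
 *	{
 *		return __xchg(&(v->counter), n, 4);
 *	}
 *
 * so all of the actual ordering decisions are delegated to the helpers in
 * asm/cmpxchg.h.
 */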

static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic_dec_if_positive(v)	atomic_sub_if_positive(v, 1)
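
/*
 * Illustrative usage (foo->refs and handle_underflow() are hypothetical):
 * the return value is the decremented counter, so a decrement that was
 * skipped shows up as a negative result:
 *
 *	if (atomic_dec_if_positive(&foo->refs) < 0)
 *		handle_underflow();
 *
 * A negative result means the counter was already <= 0 and was left
 * untouched.
 */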

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_sub_if_positive(atomic64_t *v, int offset)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(v, 1)
#endif

#endif /* _ASM_RISCV_ATOMIC_H */