/*
 * include/asm-xtensa/spinlock.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_SPINLOCK_H
#define _XTENSA_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * spinlock
 *
 * A spinlock has at most one owner.  Unlike rwlocks (see below),
 * there is only one kind of spinlock owner.
 *
 * When trying to obtain a spinlock, arch_spin_lock() busy-waits
 * ("spins") until the lock becomes available, on the assumption that
 * the current owner will release it soon.  Use the trylock function
 * to avoid spinning indefinitely.
 *
 * possible values:
 *
 *    0         nobody owns the spinlock
 *    1         somebody owns the spinlock
 */
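
/*
 * For reference, here is the same acquire/release protocol expressed
 * with GCC __atomic builtins on a plain int.  This is an editor's
 * sketch for illustration only: the example_* names are hypothetical
 * and the memory orders are an assumption; the real implementations
 * below use the Xtensa S32C1I compare-and-swap instruction directly.
 */
typedef struct { int slock; } example_spinlock_t;

static inline void example_spin_lock(example_spinlock_t *lock)
{
        int expected;

        do {
                expected = 0;   /* succeed only if nobody owns the lock */
        } while (!__atomic_compare_exchange_n(&lock->slock, &expected, 1,
                                              0, __ATOMIC_ACQUIRE,
                                              __ATOMIC_RELAXED));
}

static inline void example_spin_unlock(example_spinlock_t *lock)
{
        /* Release store of 0, like the s32ri in arch_spin_unlock(). */
        __atomic_store_n(&lock->slock, 0, __ATOMIC_RELEASE);
}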

#define arch_spin_is_locked(x) ((x)->slock != 0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned long tmp;

        /*
         * S32C1I writes %0 to the lock word only if the word equals
         * SCOMPARE1 (set to 0 here) and returns the word's previous
         * value in %0.  A non-zero result means someone else holds
         * the lock, so branch back and try again.
         */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       wsr     %0, scompare1\n"
                        "1:     movi    %0, 1\n"
                        "       s32c1i  %0, %1, 0\n"
                        "       bnez    %0, 1b\n"
                        : "=&a" (tmp)
                        : "a" (&lock->slock)
                        : "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned long tmp;

        /*
         * Single compare-and-swap attempt: tmp ends up holding the
         * previous value of the lock word, so 0 means we took the lock.
         */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       wsr     %0, scompare1\n"
                        "       movi    %0, 1\n"
                        "       s32c1i  %0, %1, 0\n"
                        : "=&a" (tmp)
                        : "a" (&lock->slock)
                        : "memory");

        return tmp == 0 ? 1 : 0;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        unsigned long tmp;

        /* S32RI is a store with release semantics; 0 frees the lock. */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       s32ri   %0, %1, 0\n"
                        : "=&a" (tmp)
                        : "a" (&lock->slock)
                        : "memory");
}
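
/*
 * Usage sketch (editor's addition; the helper name is hypothetical):
 * a caller can use the trylock variant to avoid spinning forever and
 * fall back to other work when the lock is busy.
 */
static inline int example_do_work_if_free(arch_spinlock_t *lock)
{
        if (arch_spin_trylock(lock)) {
                /* ... critical section ... */
                arch_spin_unlock(lock);
                return 1;
        }
        return 0;       /* lock was busy; caller may retry later */
}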

/*
 * rwlock
 *
 * Read-write locks are a more flexible kind of spinlock.  They allow
 * multiple readers but only one writer.  Write ownership is exclusive
 * (i.e., all other readers and writers are blocked from ownership while
 * there is a write owner).  These rwlocks are unfair to writers: a
 * steady stream of readers can starve a writer indefinitely.
 *
 * possible values:
 *
 *   0          nobody owns the rwlock
 *  >0          one or more readers own the rwlock
 *                (the positive value is the actual number of readers)
 *  0x80000000  one writer owns the rwlock, no other writers, no readers
 */
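
/*
 * For reference, the same counting protocol with GCC __atomic
 * builtins (editor's sketch; the example_* names are hypothetical
 * and the memory orders are an assumption).  A writer swaps 0 for
 * 0x80000000; a reader increments the count, but only while the
 * sign bit (the writer bit) is clear.
 */
typedef struct { int lock; } example_rwlock_t;

static inline void example_write_lock(example_rwlock_t *rw)
{
        int expected;

        do {
                expected = 0;   /* no readers and no writer */
        } while (!__atomic_compare_exchange_n(&rw->lock, &expected,
                                              (int)0x80000000, 0,
                                              __ATOMIC_ACQUIRE,
                                              __ATOMIC_RELAXED));
}

static inline void example_read_lock(example_rwlock_t *rw)
{
        int old;

        for (;;) {
                old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);
                if (old < 0)    /* a writer holds it; keep waiting */
                        continue;
                if (__atomic_compare_exchange_n(&rw->lock, &old, old + 1,
                                                0, __ATOMIC_ACQUIRE,
                                                __ATOMIC_RELAXED))
                        break;
        }
}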

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        /*
         * Build the writer value 0x80000000 (movi 1; slli 31) and
         * compare-and-swap it in, expecting 0; retry until the rwlock
         * was free.
         */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       wsr     %0, scompare1\n"
                        "1:     movi    %0, 1\n"
                        "       slli    %0, %0, 31\n"
                        "       s32c1i  %0, %1, 0\n"
                        "       bnez    %0, 1b\n"
                        : "=&a" (tmp)
                        : "a" (&rw->lock)
                        : "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        /* One-shot version of arch_write_lock(); tmp holds the old value. */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       wsr     %0, scompare1\n"
                        "       movi    %0, 1\n"
                        "       slli    %0, %0, 31\n"
                        "       s32c1i  %0, %1, 0\n"
                        : "=&a" (tmp)
                        : "a" (&rw->lock)
                        : "memory");

        return tmp == 0 ? 1 : 0;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        /* Release-store 0: clears the writer bit, freeing the rwlock. */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       s32ri   %0, %1, 0\n"
                        : "=&a" (tmp)
                        : "a" (&rw->lock)
                        : "memory");
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned long tmp;
        unsigned long result;

        /*
         * Spin while the sign bit (a writer) is set, then try to
         * compare-and-swap the reader count up by one; if the word
         * changed underneath us, start over.
         */
        __asm__ __volatile__(
                        "1:     l32i    %1, %2, 0\n"
                        "       bltz    %1, 1b\n"
                        "       wsr     %1, scompare1\n"
                        "       addi    %0, %1, 1\n"
                        "       s32c1i  %0, %2, 0\n"
                        "       bne     %0, %1, 1b\n"
                        : "=&a" (result), "=&a" (tmp)
                        : "a" (&rw->lock)
                        : "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned long result;
        unsigned long tmp;

        /*
         * Compute count + 1 speculatively; if that would set the sign
         * bit (a writer holds the lock, or the counter would overflow),
         * give up with a non-zero result.  Otherwise attempt the
         * compare-and-swap; the final sub leaves 0 in result exactly
         * when s32c1i returned the value we expected, i.e. on success.
         */
        __asm__ __volatile__(
                        "       l32i    %1, %2, 0\n"
                        "       addi    %0, %1, 1\n"
                        "       bltz    %0, 1f\n"
                        "       wsr     %1, scompare1\n"
                        "       s32c1i  %0, %2, 0\n"
                        "       sub     %0, %0, %1\n"
                        "1:\n"
                        : "=&a" (result), "=&a" (tmp)
                        : "a" (&rw->lock)
                        : "memory");

        return result == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long tmp1, tmp2;

        /* Atomically decrement the reader count, retrying on contention. */
        __asm__ __volatile__(
                        "1:     l32i    %1, %2, 0\n"
                        "       addi    %0, %1, -1\n"
                        "       wsr     %1, scompare1\n"
                        "       s32c1i  %0, %2, 0\n"
                        "       bne     %0, %1, 1b\n"
                        : "=&a" (tmp1), "=&a" (tmp2)
                        : "a" (&rw->lock)
                        : "memory");
}

#endif  /* _XTENSA_SPINLOCK_H */