GNU Linux-libre 4.14.290-gnu1
[releases.git] / arch / parisc / include / asm / spinlock.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __ASM_SPINLOCK_H
3 #define __ASM_SPINLOCK_H
4
5 #include <asm/barrier.h>
6 #include <asm/ldcw.h>
7 #include <asm/processor.h>
8 #include <asm/spinlock_types.h>
9
10 static inline int arch_spin_is_locked(arch_spinlock_t *x)
11 {
12         volatile unsigned int *a = __ldcw_align(x);
13         return *a == 0;
14 }
15
16 #define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
17
18 static inline void arch_spin_lock_flags(arch_spinlock_t *x,
19                                          unsigned long flags)
20 {
21         volatile unsigned int *a;
22
23         a = __ldcw_align(x);
24         while (__ldcw(a) == 0)
25                 while (*a == 0)
26                         if (flags & PSW_SM_I) {
27                                 local_irq_enable();
28                                 cpu_relax();
29                                 local_irq_disable();
30                         } else
31                                 cpu_relax();
32 }
33
34 static inline void arch_spin_unlock(arch_spinlock_t *x)
35 {
36         volatile unsigned int *a;
37
38         a = __ldcw_align(x);
39         mb();
40         *a = 1;
41 }
42
43 static inline int arch_spin_trylock(arch_spinlock_t *x)
44 {
45         volatile unsigned int *a;
46         int ret;
47
48         a = __ldcw_align(x);
49         ret = __ldcw(a) != 0;
50
51         return ret;
52 }
53
54 /*
55  * Read-write spinlocks, allowing multiple readers but only one writer.
56  * Linux rwlocks are unfair to writers; they can be starved for an indefinite
57  * time by readers.  With care, they can also be taken in interrupt context.
58  *
59  * In the PA-RISC implementation, we have a spinlock and a counter.
60  * Readers use the lock to serialise their access to the counter (which
61  * records how many readers currently hold the lock).
62  * Writers hold the spinlock, preventing any readers or other writers from
63  * grabbing the rwlock.
64  */
65
66 /* Note that we have to ensure interrupts are disabled in case we're
67  * interrupted by some other code that wants to grab the same read lock */
68 static  __inline__ void arch_read_lock(arch_rwlock_t *rw)
69 {
70         unsigned long flags;
71         local_irq_save(flags);
72         arch_spin_lock_flags(&rw->lock, flags);
73         rw->counter++;
74         arch_spin_unlock(&rw->lock);
75         local_irq_restore(flags);
76 }
77
78 /* Note that we have to ensure interrupts are disabled in case we're
79  * interrupted by some other code that wants to grab the same read lock */
80 static  __inline__ void arch_read_unlock(arch_rwlock_t *rw)
81 {
82         unsigned long flags;
83         local_irq_save(flags);
84         arch_spin_lock_flags(&rw->lock, flags);
85         rw->counter--;
86         arch_spin_unlock(&rw->lock);
87         local_irq_restore(flags);
88 }
89
90 /* Note that we have to ensure interrupts are disabled in case we're
91  * interrupted by some other code that wants to grab the same read lock */
92 static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
93 {
94         unsigned long flags;
95  retry:
96         local_irq_save(flags);
97         if (arch_spin_trylock(&rw->lock)) {
98                 rw->counter++;
99                 arch_spin_unlock(&rw->lock);
100                 local_irq_restore(flags);
101                 return 1;
102         }
103
104         local_irq_restore(flags);
105         /* If write-locked, we fail to acquire the lock */
106         if (rw->counter < 0)
107                 return 0;
108
109         /* Wait until we have a realistic chance at the lock */
110         while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
111                 cpu_relax();
112
113         goto retry;
114 }
115
116 /* Note that we have to ensure interrupts are disabled in case we're
117  * interrupted by some other code that wants to read_trylock() this lock */
118 static __inline__ void arch_write_lock(arch_rwlock_t *rw)
119 {
120         unsigned long flags;
121 retry:
122         local_irq_save(flags);
123         arch_spin_lock_flags(&rw->lock, flags);
124
125         if (rw->counter != 0) {
126                 arch_spin_unlock(&rw->lock);
127                 local_irq_restore(flags);
128
129                 while (rw->counter != 0)
130                         cpu_relax();
131
132                 goto retry;
133         }
134
135         rw->counter = -1; /* mark as write-locked */
136         mb();
137         local_irq_restore(flags);
138 }
139
140 static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
141 {
142         rw->counter = 0;
143         arch_spin_unlock(&rw->lock);
144 }
145
146 /* Note that we have to ensure interrupts are disabled in case we're
147  * interrupted by some other code that wants to read_trylock() this lock */
148 static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
149 {
150         unsigned long flags;
151         int result = 0;
152
153         local_irq_save(flags);
154         if (arch_spin_trylock(&rw->lock)) {
155                 if (rw->counter == 0) {
156                         rw->counter = -1;
157                         result = 1;
158                 } else {
159                         /* Read-locked.  Oh well. */
160                         arch_spin_unlock(&rw->lock);
161                 }
162         }
163         local_irq_restore(flags);
164
165         return result;
166 }
167
168 /*
169  * read_can_lock - would read_trylock() succeed?
170  * @lock: the rwlock in question.
171  */
172 static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
173 {
174         return rw->counter >= 0;
175 }
176
177 /*
178  * write_can_lock - would write_trylock() succeed?
179  * @lock: the rwlock in question.
180  */
181 static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
182 {
183         return !rw->counter;
184 }
185
/* arch_read_lock()/arch_write_lock() already save and restore the
 * interrupt state internally (local_irq_save/restore), so the _flags
 * variants can simply ignore the @flags argument. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
188
189 #endif /* __ASM_SPINLOCK_H */