GNU Linux-libre 4.14.266-gnu1
arch/x86/include/asm/qspinlock.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>

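/*
 * Bound on how long the generic slowpath spins while the lock word is in
 * the transient pending-but-not-yet-locked state.  The generic default is
 * a single iteration; x86 allows up to 512.
 */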
#define _Q_PENDING_LOOPS        (1 << 9)

#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire

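/*
 * Atomically set the pending bit with LOCK BTSL; GEN_BINARY_RMWcc turns
 * the resulting carry flag into the return value, i.e. whether the bit
 * was already set.
 */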
static __always_inline bool __queued_RMW_btsl(struct qspinlock *lock)
{
        GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter,
                         "I", _Q_PENDING_OFFSET, "%0", c);
}

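/*
 * Set the pending bit and return a lock value whose pending bit reflects
 * the state prior to the operation, with the locked byte and tail filled
 * in from a subsequent read.  The LOCK-prefixed BTSL provides the acquire
 * ordering required by the generic code.
 */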
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
        u32 val = 0;

        if (__queued_RMW_btsl(lock))
                val |= _Q_PENDING_VAL;

        val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

        return val;
}

#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
        smp_store_release(&lock->locked, 0);
}

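/*
 * Two configurations follow: with CONFIG_PARAVIRT_SPINLOCKS the lock
 * slowpath and unlock are routed through paravirt calls, otherwise the
 * native implementations are used directly.
 */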
#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);

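/*
 * The slowpath is a paravirt call: at boot it is pointed at either the
 * native slowpath or the hypervisor-aware __pv_queued_spin_lock_slowpath().
 */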
static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
        pv_queued_spin_lock_slowpath(lock, val);
}

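/*
 * Unlock also goes through a paravirt call so that the PV flavour can
 * kick a vCPU that went to sleep waiting for this lock.
 */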
static inline void queued_spin_unlock(struct qspinlock *lock)
{
        pv_queued_spin_unlock(lock);
}

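/*
 * Ask the hypervisor whether a vCPU is currently preempted; spinners use
 * this to avoid busy-waiting on a lock holder that is not running.
 */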
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
        return pv_vcpu_is_preempted(cpu);
}
#else
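/* Without paravirt spinlocks, unlocking is simply the native release store. */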
static inline void queued_spin_unlock(struct qspinlock *lock)
{
        native_queued_spin_unlock(lock);
}
#endif

#ifdef CONFIG_PARAVIRT
#define virt_spin_lock virt_spin_lock
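/*
 * Called from the generic slowpath before any queueing; returning true
 * means the lock was taken here and the queueing code is bypassed.
 */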
static inline bool virt_spin_lock(struct qspinlock *lock)
{
        if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
                return false;

        /*
         * On hypervisors without PARAVIRT_SPINLOCKS support we fall
         * back to a Test-and-Set spinlock, because fair locks have
         * horrible lock 'holder' preemption issues.
         */

        do {
                while (atomic_read(&lock->val) != 0)
                        cpu_relax();
        } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

        return true;
}
#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */