blob: f07a40d33871e2b9414ea40b53ee154108932d4e [file] [log] [blame]
Linus Torvalds2f4f12e2013-09-02 11:58:20 -07001#include <linux/export.h>
2#include <linux/lockref.h>
Will Deacon14058d22013-11-27 13:52:53 +00003#include <linux/mutex.h>
Linus Torvalds2f4f12e2013-09-02 11:58:20 -07004
#if USE_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * CMPXCHG_LOOP(CODE, SUCCESS): lockless update of the combined
 * spinlock+count word via 64-bit cmpxchg.
 *
 * CODE runs with "old" holding the current snapshot and "new" a
 * modifiable copy; it may "return" or "break" out of the loop to
 * reject the fast path.  SUCCESS runs only when the cmpxchg
 * committed "new".  "prev" preserves the pre-cmpxchg snapshot for
 * the success comparison, because the cmpxchg return value
 * overwrites "old".
 *
 * The loop only spins while the embedded spinlock is observed
 * unlocked; otherwise it falls through so the caller takes the
 * slow (spin_lock) path.  The BUILD_BUG_ON enforces that the whole
 * lockref fits in the 64 bits the cmpxchg operates on.
 *
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {  	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		arch_mutex_cpu_relax();						\
	}									\
} while (0)

#else

/* No usable 64-bit cmpxchg: callers always fall through to spin_lock. */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
41
Linus Torvalds2f4f12e2013-09-02 11:58:20 -070042/**
43 * lockref_get - Increments reference count unconditionally
Linus Torvalds44a0cf92013-09-07 15:30:29 -070044 * @lockref: pointer to lockref structure
Linus Torvalds2f4f12e2013-09-02 11:58:20 -070045 *
46 * This operation is only valid if you already hold a reference
47 * to the object, so you know the count cannot be zero.
48 */
49void lockref_get(struct lockref *lockref)
50{
Linus Torvaldsbc08b442013-09-02 12:12:15 -070051 CMPXCHG_LOOP(
52 new.count++;
53 ,
54 return;
55 );
56
Linus Torvalds2f4f12e2013-09-02 11:58:20 -070057 spin_lock(&lockref->lock);
58 lockref->count++;
59 spin_unlock(&lockref->lock);
60}
61EXPORT_SYMBOL(lockref_get);
62
63/**
64 * lockref_get_not_zero - Increments count unless the count is 0
Linus Torvalds44a0cf92013-09-07 15:30:29 -070065 * @lockref: pointer to lockref structure
Linus Torvalds2f4f12e2013-09-02 11:58:20 -070066 * Return: 1 if count updated successfully or 0 if count was zero
67 */
68int lockref_get_not_zero(struct lockref *lockref)
69{
Linus Torvaldsbc08b442013-09-02 12:12:15 -070070 int retval;
71
72 CMPXCHG_LOOP(
73 new.count++;
74 if (!old.count)
75 return 0;
76 ,
77 return 1;
78 );
Linus Torvalds2f4f12e2013-09-02 11:58:20 -070079
80 spin_lock(&lockref->lock);
Linus Torvaldsbc08b442013-09-02 12:12:15 -070081 retval = 0;
Linus Torvalds2f4f12e2013-09-02 11:58:20 -070082 if (lockref->count) {
83 lockref->count++;
84 retval = 1;
85 }
86 spin_unlock(&lockref->lock);
87 return retval;
88}
89EXPORT_SYMBOL(lockref_get_not_zero);
90
91/**
92 * lockref_get_or_lock - Increments count unless the count is 0
Linus Torvalds44a0cf92013-09-07 15:30:29 -070093 * @lockref: pointer to lockref structure
Linus Torvalds2f4f12e2013-09-02 11:58:20 -070094 * Return: 1 if count updated successfully or 0 if count was zero
95 * and we got the lock instead.
96 */
97int lockref_get_or_lock(struct lockref *lockref)
98{
Linus Torvaldsbc08b442013-09-02 12:12:15 -070099 CMPXCHG_LOOP(
100 new.count++;
101 if (!old.count)
102 break;
103 ,
104 return 1;
105 );
106
Linus Torvalds2f4f12e2013-09-02 11:58:20 -0700107 spin_lock(&lockref->lock);
108 if (!lockref->count)
109 return 0;
110 lockref->count++;
111 spin_unlock(&lockref->lock);
112 return 1;
113}
114EXPORT_SYMBOL(lockref_get_or_lock);
115
116/**
117 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
Linus Torvalds44a0cf92013-09-07 15:30:29 -0700118 * @lockref: pointer to lockref structure
Linus Torvalds2f4f12e2013-09-02 11:58:20 -0700119 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
120 */
121int lockref_put_or_lock(struct lockref *lockref)
122{
Linus Torvaldsbc08b442013-09-02 12:12:15 -0700123 CMPXCHG_LOOP(
124 new.count--;
125 if (old.count <= 1)
126 break;
127 ,
128 return 1;
129 );
130
Linus Torvalds2f4f12e2013-09-02 11:58:20 -0700131 spin_lock(&lockref->lock);
132 if (lockref->count <= 1)
133 return 0;
134 lockref->count--;
135 spin_unlock(&lockref->lock);
136 return 1;
137}
138EXPORT_SYMBOL(lockref_put_or_lock);
Linus Torvaldse7d33bb2013-09-07 15:49:18 -0700139
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * Caller must already hold lockref->lock (enforced by the assert).
 * A dead lockref has a negative count, which lockref_get_not_dead()
 * tests for to refuse new references.
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	/*
	 * -128 makes "(int)count < 0" dead checks true; presumably chosen
	 * well below zero rather than -1 for robustness margin — confirm.
	 */
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
Linus Torvaldse7d33bb2013-09-07 15:49:18 -0700150
151/**
152 * lockref_get_not_dead - Increments count unless the ref is dead
153 * @lockref: pointer to lockref structure
154 * Return: 1 if count updated successfully or 0 if lockref was dead
155 */
156int lockref_get_not_dead(struct lockref *lockref)
157{
158 int retval;
159
160 CMPXCHG_LOOP(
161 new.count++;
162 if ((int)old.count < 0)
163 return 0;
164 ,
165 return 1;
166 );
167
168 spin_lock(&lockref->lock);
169 retval = 0;
170 if ((int) lockref->count >= 0) {
171 lockref->count++;
172 retval = 1;
173 }
174 spin_unlock(&lockref->lock);
175 return retval;
176}
177EXPORT_SYMBOL(lockref_get_not_dead);