#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters; we don't
 * try to detect the ref hitting 0, which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine; when we go to shut
 * down, the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the pcpu_count vars will be equal to what it would have been if all the
 * gets and puts were done to a single integer, even if some of the percpu
 * integers overflow or underflow.)
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we
 * know the ref can't hit 0 before the user drops the initial ref, so as long
 * as we convert to non-percpu mode before the initial ref is dropped
 * everything works.
 *
 * Converting to non-percpu mode is done in percpu_ref_kill(): the ref is
 * marked PCPU_REF_DEAD and the percpu counters are summed after an RCU-sched
 * grace period. Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */
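
/*
 * A minimal usage sketch, illustrative only: struct foo_obj, foo_release()
 * and foo_obj_example() below are hypothetical and not part of this file.
 * They show the intended lifecycle: init with a refcount of 1, get/put on
 * hot paths, then percpu_ref_kill() to drop the initial ref at shutdown.
 */
#if 0	/* example only, not compiled */
#include <linux/slab.h>

struct foo_obj {
	struct percpu_ref ref;
};

static void foo_release(struct percpu_ref *ref)
{
	struct foo_obj *foo = container_of(ref, struct foo_obj, ref);

	kfree(foo);	/* may run from RCU callback context - must not sleep */
}

static int foo_obj_example(void)
{
	struct foo_obj *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	int ret;

	if (!foo)
		return -ENOMEM;

	ret = percpu_ref_init(&foo->ref, foo_release, GFP_KERNEL);
	if (ret) {
		kfree(foo);
		return ret;
	}

	percpu_ref_get(&foo->ref);	/* hot path: percpu increment */
	percpu_ref_put(&foo->ref);	/* hot path: percpu decrement */

	/* drops the initial ref; foo_release() runs once all refs are gone */
	percpu_ref_kill(&foo->ref);
	return 0;
}
#endif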

#define PCPU_COUNT_BIAS		(1LU << (BITS_PER_LONG - 1))
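
/*
 * Worked example (illustrative): with BITS_PER_LONG == 64, PCPU_COUNT_BIAS
 * is 2^63 and percpu_ref_init() sets the atomic counter to 2^63 + 1.  No
 * matter how puts racing ahead of the summation in percpu_ref_kill_rcu()
 * drain the atomic counter, it stays far above zero until a single
 * atomic_long_add() of (sum - PCPU_COUNT_BIAS) folds everything in and
 * yields the true refcount.
 */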
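
/*
 * PCPU_REF_DEAD is stored in a low bit of ->pcpu_count_ptr; this is safe
 * because percpu allocations are at least naturally aligned for unsigned
 * long. Masking the flag off recovers the actual percpu pointer.
 */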
static unsigned long __percpu *pcpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned long __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @gfp: allocation mask to use
 *
 * Initializes @ref in percpu mode with a refcount of 1; the overall effect
 * is analogous to atomic_long_set(ref, 1).
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		    gfp_t gfp)
{
	atomic_long_set(&ref->count, 1 + PCPU_COUNT_BIAS);

	ref->pcpu_count_ptr = (unsigned long)alloc_percpu_gfp(unsigned long, gfp);
	if (!ref->pcpu_count_ptr)
		return -ENOMEM;

	ref->release = release;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref. The caller is responsible for ensuring that
 * @ref is no longer in active use. The usual places to invoke this
 * function are the @ref->release() callback and the init failure path,
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);

	if (pcpu_count) {
		free_percpu(pcpu_count);
		ref->pcpu_count_ptr = PCPU_REF_DEAD;
	}
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
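
/*
 * A sketch of the init-failure path described above (illustrative only:
 * struct bar_obj, bar_release() and bar_setup_other_parts() are
 * hypothetical). If a later step of constructing the embedding object
 * fails, percpu_ref_exit() frees the percpu counters without going
 * through kill/release.
 */
#if 0	/* example only, not compiled */
struct bar_obj {
	struct percpu_ref ref;
	/* ... other members ... */
};

static void bar_release(struct percpu_ref *ref)
{
	/* free the embedding bar_obj; omitted in this sketch */
}

static int bar_obj_init(struct bar_obj *bar)
{
	int ret;

	ret = percpu_ref_init(&bar->ref, bar_release, GFP_KERNEL);
	if (ret)
		return ret;

	ret = bar_setup_other_parts(bar);	/* hypothetical helper */
	if (ret) {
		/* undo percpu_ref_init(); bar_release() is never called */
		percpu_ref_exit(&bar->ref);
		return ret;
	}
	return 0;
}
#endif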

static void percpu_ref_kill_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
	unsigned long count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(pcpu_count, cpu);

	pr_debug("global %ld pcpu %ld",
		 atomic_long_read(&ref->count), (long)count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
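
	/*
	 * Illustrative numbers (hypothetical): on a two-cpu system where
	 * cpu0 saw one more put than get (its counter wrapped to ULONG_MAX)
	 * and cpu1 saw two more gets than puts (counter == 2), the unsigned
	 * sum is ULONG_MAX + 2 == 1 modulo 2^BITS_PER_LONG - one net percpu
	 * get, which the initial ref held in &ref->count tops up to the
	 * true refcount.
	 */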

	atomic_long_add((long)count - PCPU_COUNT_BIAS, &ref->count);

	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
		  "percpu ref (%pf) <= 0 (%ld) after being killed",
		  ref->release, atomic_long_read(&ref->count));

	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
	if (ref->confirm_kill)
		ref->confirm_kill(ref);

	/*
	 * Now we're in single atomic_long_t mode with a consistent refcount,
	 * so it's safe to drop our initial ref:
	 */
	percpu_ref_put(ref);
}

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs - all further
 * invocations of percpu_ref_tryget() will fail. See percpu_ref_tryget()
 * for more details.
 *
 * Due to the way percpu_ref is implemented, @confirm_kill will be called
 * after at least one full RCU grace period has passed, but this is an
 * implementation detail and callers must not depend on it.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
		  "percpu_ref_kill() called more than once on %pf!",
		  ref->release);

	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
	ref->confirm_kill = confirm_kill;

	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
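
/*
 * A sketch of the confirmation pattern (illustrative only: struct baz_obj
 * and its members are hypothetical). A caller that must know when no CPU
 * can take new references anymore can complete a completion from
 * @confirm_kill and wait for it.
 */
#if 0	/* example only, not compiled */
#include <linux/completion.h>

struct baz_obj {
	struct percpu_ref ref;
	struct completion kill_done;
};

static void baz_confirm_kill(struct percpu_ref *ref)
{
	struct baz_obj *baz = container_of(ref, struct baz_obj, ref);

	complete(&baz->kill_done);	/* no new tryget can succeed now */
}

static void baz_shutdown(struct baz_obj *baz)
{
	init_completion(&baz->kill_done);
	percpu_ref_kill_and_confirm(&baz->ref, baz_confirm_kill);
	wait_for_completion(&baz->kill_done);
	/* from here on, percpu_ref_tryget(&baz->ref) always fails */
}
#endif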

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init(). @ref must have been initialized successfully, killed
 * and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
	int cpu;

	BUG_ON(!pcpu_count);
	WARN_ON(!percpu_ref_is_zero(ref));

	atomic_long_set(&ref->count, 1 + PCPU_COUNT_BIAS);

	/*
	 * Restore per-cpu operation. smp_store_release() is paired with
	 * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
	 * that the zeroing is visible to all percpu accesses which can see
	 * the following PCPU_REF_DEAD clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(pcpu_count, cpu) = 0;

	smp_store_release(&ref->pcpu_count_ptr,
			  ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
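
/*
 * A sketch of a kill/reinit cycle (illustrative only: struct qux_obj, its
 * members and a qux_release() that wakes ->release_wq are hypothetical).
 * A ref can be killed, drained to zero and then recycled with
 * percpu_ref_reinit() instead of being exited.
 */
#if 0	/* example only, not compiled */
#include <linux/wait.h>

struct qux_obj {
	struct percpu_ref ref;
	wait_queue_head_t release_wq;	/* woken by qux_release() */
};

static void qux_quiesce_and_resume(struct qux_obj *qux)
{
	percpu_ref_kill(&qux->ref);		/* drop the initial ref */
	wait_event(qux->release_wq,
		   percpu_ref_is_zero(&qux->ref));
	percpu_ref_reinit(&qux->ref);		/* percpu mode again, count 1 */
}
#endif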