/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer, or
 * whatever, with that id; we treat it as a (void *).  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep MAX_IDR_FREE idr_layers in a local
 * pool) so we don't need to go to the memory "store" during an id
 * allocate and you don't need to be too concerned about locking and
 * conflicts with the slab allocator.
 */
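
/*
 * Example (illustrative sketch only, not built as part of this file):
 * the basic pattern described above, using idr_alloc()/idr_find()/
 * idr_remove().  "struct my_obj" and its "id" member are assumptions of
 * the example; the caller must also serialize the modifying calls.
 *
 *	static DEFINE_IDR(my_idr);
 *
 *	int register_obj(struct my_obj *obj)
 *	{
 *		int id = idr_alloc(&my_idr, obj, 0, 0, GFP_KERNEL);
 *
 *		if (id < 0)
 *			return id;
 *		obj->id = id;
 *		return 0;
 *	}
 *
 *	struct my_obj *lookup_obj(int id)
 *	{
 *		return idr_find(&my_idr, id);
 *	}
 *
 *	void unregister_obj(struct my_obj *obj)
 *	{
 *		idr_remove(&my_idr, obj->id);
 *	}
 */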

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>

#define MAX_IDR_SHIFT		(sizeof(int) * 8 - 1)
#define MAX_IDR_BIT		(1U << MAX_IDR_SHIFT)

/* Leave the possibility of an incomplete final layer */
#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)

/* Number of id_layer structs to leave in free list */
#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)

static struct kmem_cache *idr_layer_cache;
static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
static DEFINE_PER_CPU(int, idr_preload_cnt);
static DEFINE_SPINLOCK(simple_ida_lock);

/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
	int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);

	return (1 << bits) - 1;
}

/*
 * Prefix mask for an idr_layer at @layer.  For layer 0, the prefix mask is
 * all bits except for the lower IDR_BITS.  For layer 1, 2 * IDR_BITS, and
 * so on.
 */
static int idr_layer_prefix_mask(int layer)
{
	return ~idr_max(layer + 1);
}
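
/*
 * For example, assuming IDR_BITS is 8 (see <linux/idr.h> for the value
 * actually configured), idr_layer_prefix_mask(0) is ~0xff and
 * idr_layer_prefix_mask(1) is ~0xffff: an id belongs to a cached layer
 * when the id's bits outside the layer's own range match ->prefix.
 */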

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return(p);
}

/**
 * idr_layer_alloc - allocate a new idr_layer
 * @gfp_mask: allocation mask
 * @layer_idr: optional idr to allocate from
 *
 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
 * one from the per-cpu preload buffer.  If @layer_idr is not %NULL, fetch
 * an idr_layer from @layer_idr->id_free.
 *
 * @layer_idr is to maintain backward compatibility with the old alloc
 * interface - idr_pre_get() and idr_get_new*() - and will be removed
 * together with per-pool preload buffer.
 */
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
{
	struct idr_layer *new;

	/* this is the old path, bypass to get_from_free_list() */
	if (layer_idr)
		return get_from_free_list(layer_idr);

	/*
	 * Try to allocate directly from kmem_cache.  We want to try this
	 * before preload buffer; otherwise, non-preloading idr_alloc()
	 * users will end up taking advantage of preloading ones.  As the
	 * following is allowed to fail for preloaded cases, suppress
	 * warning this time.
	 */
	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
	if (new)
		return new;

	/*
	 * Try to fetch one from the per-cpu preload buffer if in process
	 * context.  See idr_preload() for details.
	 */
	if (!in_interrupt()) {
		preempt_disable();
		new = __this_cpu_read(idr_preload_head);
		if (new) {
			__this_cpu_write(idr_preload_head, new->ary[0]);
			__this_cpu_dec(idr_preload_cnt);
			new->ary[0] = NULL;
		}
		preempt_enable();
		if (new)
			return new;
	}

	/*
	 * Both failed.  Try kmem_cache again w/o adding __GFP_NOWARN so
	 * that memory allocation failure warning is printed as intended.
	 */
	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
	if (idr->hint && idr->hint == p)
		RCU_INIT_POINTER(idr->hint, NULL);
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (bitmap_full(p->bitmap, IDR_SIZE)) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), p->bitmap);
	}
}

int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < MAX_IDR_FREE) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return (0);
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(__idr_pre_get);

/**
 * sub_alloc - try to allocate an id without growing the tree depth
 * @idp: idr handle
 * @starting_id: id to start search at
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 * @gfp_mask: allocation mask for idr_layer_alloc()
 * @layer_idr: optional idr passed to idr_layer_alloc()
 *
 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 * growing its depth.  Returns
 *
 *  the allocated id >= 0 if successful,
 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 *  -ENOSPC if the id space is exhausted,
 *  -ENOMEM if more idr_layers need to be allocated.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
		     gfp_t gfp_mask, struct idr *layer_idr)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return -EAGAIN;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_IDR_BIT) || (id < 0))
			return -ENOSPC;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = idr_layer_alloc(gfp_mask, layer_idr);
			if (!new)
				return -ENOMEM;
			new->layer = l-1;
			new->prefix = id & idr_layer_prefix_mask(new->layer);
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa, gfp_t gfp_mask,
			      struct idr *layer_idr)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
			return -ENOMEM;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while (id > idr_max(layers)) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			WARN_ON_ONCE(p->prefix);
			continue;
		}
		if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->count = 0;
				bitmap_clear(new->bitmap, 0, IDR_SIZE);
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -ENOMEM;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		new->prefix = id & idr_layer_prefix_mask(new->layer);
		if (bitmap_full(p->bitmap, IDR_SIZE))
			__set_bit(0, new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
	if (v == -EAGAIN)
		goto build_up;
	return(v);
}
361
Tejun Heo3594eb22013-02-27 17:03:54 -0800362/*
363 * @id and @pa are from a successful allocation from idr_get_empty_slot().
364 * Install the user pointer @ptr and mark the slot full.
365 */
Tejun Heo0ffc2a92013-02-27 17:05:08 -0800366static void idr_fill_slot(struct idr *idr, void *ptr, int id,
367 struct idr_layer **pa)
Tejun Heoe33ac8b2007-06-14 03:45:12 +0900368{
Tejun Heo0ffc2a92013-02-27 17:05:08 -0800369 /* update hint used for lookup, cleared from free_layer() */
370 rcu_assign_pointer(idr->hint, pa[0]);
371
Tejun Heo3594eb22013-02-27 17:03:54 -0800372 rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
373 pa[0]->count++;
374 idr_mark_full(pa, id);
Tejun Heoe33ac8b2007-06-14 03:45:12 +0900375}
376
Tejun Heoc8615d32013-03-13 14:59:42 -0700377int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378{
Tejun Heo326cf0f2013-02-27 17:05:02 -0800379 struct idr_layer *pa[MAX_IDR_LEVEL + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700380 int rv;
Jesper Juhle15ae2d2005-10-30 15:02:14 -0800381
Tejun Heod5c74092013-02-27 17:03:55 -0800382 rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
Nadia Derbey944ca052008-07-25 01:47:59 -0700383 if (rv < 0)
Tejun Heo12d1b432013-02-27 17:03:53 -0800384 return rv == -ENOMEM ? -EAGAIN : rv;
Tejun Heo3594eb22013-02-27 17:03:54 -0800385
Tejun Heo0ffc2a92013-02-27 17:05:08 -0800386 idr_fill_slot(idp, ptr, rv, pa);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700387 *id = rv;
388 return 0;
389}
Tejun Heoc8615d32013-03-13 14:59:42 -0700390EXPORT_SYMBOL(__idr_get_new_above);

/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preload per-cpu layer buffer for idr_alloc().  Can only be used from
 * process context and each idr_preload() invocation should be matched with
 * idr_preload_end().  Note that preemption is disabled while preloaded.
 *
 * The first idr_alloc() in the preloaded section can be treated as if it
 * were invoked with @gfp_mask used for preloading.  This allows using more
 * permissive allocation masks for idrs protected by spinlocks.
 *
 * For example, if idr_alloc() below fails, the failure can be treated as
 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(lock);
 *
 *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 *
 *	spin_unlock(lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		error;
 */
void idr_preload(gfp_t gfp_mask)
{
	/*
	 * Consuming preload buffer from non-process context breaks preload
	 * allocation guarantee.  Disallow usage from those contexts.
	 */
	WARN_ON_ONCE(in_interrupt());
	might_sleep_if(gfp_mask & __GFP_WAIT);

	preempt_disable();

	/*
	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
	 * return value from idr_alloc() needs to be checked for failure
	 * anyway.  Silently give up if allocation fails.  The caller can
	 * treat failures from idr_alloc() as if idr_alloc() were called
	 * with @gfp_mask which should be enough.
	 */
	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
		struct idr_layer *new;

		preempt_enable();
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		preempt_disable();
		if (!new)
			break;

		/* link the new one to per-cpu preload list */
		new->ary[0] = __this_cpu_read(idr_preload_head);
		__this_cpu_write(idr_preload_head, new);
		__this_cpu_inc(idr_preload_cnt);
	}
}
EXPORT_SYMBOL(idr_preload);

/**
 * idr_alloc - allocate new idr entry
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
 * available in the specified range, returns -ENOSPC.  On memory allocation
 * failure, returns -ENOMEM.
 *
 * Note that @end is treated as max when <= 0.  This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * The user is responsible for exclusively synchronizing all operations
 * which may modify @idr.  However, read-only accesses such as idr_find()
 * or iteration can be performed under RCU read lock provided the user
 * destroys @ptr in RCU-safe way after removal from idr.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int max = end > 0 ? end - 1 : INT_MAX;	/* inclusive upper limit */
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int id;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	/* sanity checks */
	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;
	if (unlikely(max < start))
		return -ENOSPC;

	/* allocate id */
	id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
	if (unlikely(id < 0))
		return id;
	if (unlikely(id > max))
		return -ENOSPC;

	idr_fill_slot(idr, ptr, id, pa);
	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
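
/*
 * Example (illustrative sketch only): combining idr_preload() with
 * idr_alloc() under a caller-owned spinlock, as described in the
 * idr_preload() comment above.  "struct foo", its "id" member and
 * "foo_lock" are assumptions of the example, not part of this file.
 *
 *	int foo_insert(struct idr *idr, spinlock_t *foo_lock, struct foo *foo)
 *	{
 *		int id;
 *
 *		idr_preload(GFP_KERNEL);
 *		spin_lock(foo_lock);
 *		id = idr_alloc(idr, foo, 1, 0, GFP_NOWAIT);
 *		spin_unlock(foo_lock);
 *		idr_preload_end();
 *
 *		if (id < 0)
 *			return id;
 *		foo->id = id;
 *		return 0;
 *	}
 */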

/**
 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Essentially the same as idr_alloc, but prefers to allocate progressively
 * higher ids if it can.  If the "cur" counter wraps, then it will start again
 * at the "start" end of the range and allocate one that has already been used.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
			gfp_t gfp_mask)
{
	int id;

	id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
	if (id == -ENOSPC)
		id = idr_alloc(idr, ptr, start, end, gfp_mask);

	if (likely(id >= 0))
		idr->cur = id + 1;
	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
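
/*
 * Example (illustrative sketch only): cyclic allocation is typically used
 * where quick id reuse is undesirable, e.g. for handles exposed to
 * userspace.  With start == 1 and end == 0, ids grow upwards from the
 * previous allocation and wrap back to 1 only once the id space is
 * exhausted:
 *
 *	id = idr_alloc_cyclic(&my_idr, ptr, 1, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 */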

static void idr_remove_warning(int id)
{
	WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_IDR_LEVEL + 1];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, p->bitmap))) {
		__clear_bit(n, p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(idp, to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(idp, to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	if (id < 0)
		return;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->count = 0;
		bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
		free_layer(idp, to_free);
	}
	while (idp->id_free_cnt >= MAX_IDR_FREE) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);

void __idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (p)
				free_layer(idp, p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(__idr_remove_all);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idr_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * idr_destroy().
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers.
 */
void idr_destroy(struct idr *idp)
{
	__idr_remove_all(idp);

	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

void *idr_find_slowpath(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	if (id < 0)
		return NULL;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	if (id > idr_max(p->layer + 1))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find_slowpath);

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
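
/*
 * Example (illustrative sketch only): a typical idr_for_each() callback
 * that frees every stored object before tearing down the tree, assuming
 * the objects were allocated with kmalloc() and "my_idr" is the caller's
 * idr:
 *
 *	static int free_one(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_one, NULL);
 *	idr_destroy(&my_idr);
 */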

/**
 * idr_get_next - lookup the next object starting from the given id
 * @idp: idr handle
 * @nextidp: pointer to the lookup key
 *
 * Returns a pointer to the registered object with the smallest id that is
 * greater than or equal to *@nextidp.  On success, *@nextidp is updated to
 * that id so the caller can continue the iteration from there.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;
	max = idr_max(p->layer + 1);

	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		/*
		 * Proceed to the next layer at the current level.  Unlike
		 * idr_for_each(), @id isn't guaranteed to be aligned to
		 * layer boundary at this point and adding 1 << n may
		 * incorrectly skip IDs.  Make sure we jump to the
		 * beginning of the next layer using round_up().
		 */
		id = round_up(id + 1, 1 << n);
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);
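
/*
 * Example (illustrative sketch only): walking every live entry with
 * idr_get_next().  The caller must bump the id after each hit, otherwise
 * the same entry is returned forever; "do_something" is an assumption of
 * the example:
 *
 *	int id;
 *	void *entry;
 *
 *	for (id = 0; (entry = idr_get_next(&my_idr, &id)) != NULL; id++)
 *		do_something(id, entry);
 */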


/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	if (id < 0)
		return ERR_PTR(-EINVAL);

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);


/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than with a full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling
 * ida_get_new_above().  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!__idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks held.
 *
 * If memory is required, it will return %-EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
	if (t < 0)
		return t == -ENOMEM ? -EAGAIN : t;

	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_IDR_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have small memory foot print.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
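
/*
 * Example (illustrative sketch only): the classic ida_pre_get() /
 * ida_get_new_above() retry loop.  "my_ida" and "my_lock" stand in for
 * whatever ida and lock the caller uses; a non-zero result other than
 * %-EAGAIN is %-ENOSPC:
 *
 *	int id, ret;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = ida_get_new_above(&my_ida, 0, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 *
 *	if (ret)
 *		return ret;
 */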

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000 or 0)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);
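
/*
 * Example (illustrative sketch only): the simple ida interface handles the
 * pre_get/retry dance and locking internally, so most users only need:
 *
 *	static DEFINE_IDA(my_ida);
 *
 *	int nr = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (nr < 0)
 *		return nr;
 *	...
 *	ida_simple_remove(&my_ida, nr);
 */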

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);