/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU	128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

enum {
	RHT_LOCK_NORMAL,
	RHT_LOCK_NESTED,
	RHT_LOCK_NESTED2,
};

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}
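
/* Worked example (hypothetical values, not taken from any caller): with
 * tbl->locks_mask == 127, the hashes 5 and 133 select the same lock since
 * 133 & 127 == 5, so a single spinlock may guard several buckets.
 */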

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#define ASSERT_BUCKET_LOCK(TBL, HASH) \
	BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH))

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#endif

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return hash & (tbl->size - 1);
}

static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
	u32 hash;

	if (unlikely(!ht->p.key_len))
		hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
	else
		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
				    ht->p.hash_rnd);

	return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	u32 hash;

	hash = ht->p.hashfn(key, len, ht->p.hash_rnd);
	hash >>= HASH_RESERVED_SPACE;

	return rht_bucket_index(tbl, hash);
}

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}

static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
	struct rhash_head __rcu **pprev;

	for (pprev = &tbl->buckets[n];
	     !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
	     pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
		;

	return pprev;
}

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than one lock per bucket */
	size = min_t(unsigned int, size, tbl->size);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
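
/* Sizing sketch (illustrative figures, assumed rather than measured): on a
 * 16-CPU system with the default locks_mul of BUCKET_LOCKS_PER_CPU (128),
 * the request is roundup_pow_of_two(16 * 128) == 2048 locks; for a table of
 * only 1024 buckets this is capped at 1024, yielding locks_mask == 1023.
 */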

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (tbl == NULL)
		tbl = vzalloc(size);

	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);
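
/* Worked example (illustrative): for new_size == 64 the threshold is
 * 64 / 4 * 3 == 48, so the table grows once it holds 49 or more entries,
 * provided max_shift still permits another doubling.
 */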

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
	       (atomic_read(&ht->shift) > ht->p.min_shift);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
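
/* Worked example (illustrative): for new_size == 64 the threshold is
 * 64 * 3 / 10 == 19, so the table shrinks once it drops below 19 entries,
 * provided the current shift is still above min_shift. Note the hysteresis:
 * growth triggers above 75% load but shrinking only below 30%, so a table
 * oscillating around one threshold does not thrash.
 */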

static void hashtable_chain_unzip(const struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl,
				  size_t old_hash)
{
	struct rhash_head *he, *p, *next;
	spinlock_t *new_bucket_lock, *new_bucket_lock2 = NULL;
	unsigned int new_hash, new_hash2;

	ASSERT_BUCKET_LOCK(old_tbl, old_hash);

	/* Old bucket empty, no work needed. */
	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);
	if (rht_is_a_nulls(p))
		return;

	new_hash = new_hash2 = head_hashfn(ht, new_tbl, p);
	new_bucket_lock = bucket_lock(new_tbl, new_hash);

	/* Advance the old bucket pointer one or more times until it
	 * reaches a node that doesn't hash to the same new-table bucket
	 * as its predecessor; p is left pointing at the last node of
	 * the leading chain segment.
	 */
	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
		new_hash2 = head_hashfn(ht, new_tbl, he);
		if (new_hash != new_hash2)
			break;
		p = he;
	}
	rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

	spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);

	/* If we have encountered an entry that maps to a different bucket
	 * in the new table, lock down that bucket as well, since we might
	 * cut off the end of its chain.
	 */
	new_bucket_lock2 = bucket_lock(new_tbl, new_hash2);
	if (new_bucket_lock != new_bucket_lock2)
		spin_lock_bh_nested(new_bucket_lock2, RHT_LOCK_NESTED2);

	/* Find the subsequent node which does hash to the same bucket
	 * as node p, or the nulls marker if no such node exists.
	 */
	INIT_RHT_NULLS_HEAD(next, ht, old_hash);
	if (!rht_is_a_nulls(he)) {
		rht_for_each_continue(he, he->next, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket.
	 */
	rcu_assign_pointer(p->next, next);

	if (new_bucket_lock != new_bucket_lock2)
		spin_unlock_bh(new_bucket_lock2);
	spin_unlock_bh(new_bucket_lock);
}

static void link_old_to_new(struct bucket_table *new_tbl,
			    unsigned int new_hash, struct rhash_head *entry)
{
	spinlock_t *new_bucket_lock;

	new_bucket_lock = bucket_lock(new_tbl, new_hash);

	spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
	spin_unlock_bh(new_bucket_lock);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	spinlock_t *old_bucket_lock;
	unsigned int new_hash, old_hash;
	bool complete = false;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	atomic_inc(&ht->shift);

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees that the new table pointer is
	 * picked up, so no new additions go into the old table while we
	 * relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* For each new bucket, search the corresponding old bucket for the
	 * first entry that hashes to the new bucket, and link the end of
	 * the newly formed bucket chain (containing entries added to the
	 * future table) to that entry. Since all the entries which will end
	 * up in the new bucket appear in the same old bucket, this constructs
	 * an entirely valid new hash table, but with multiple buckets
	 * "zipped" together into a single imprecise chain.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_hash = rht_bucket_index(old_tbl, new_hash);
		old_bucket_lock = bucket_lock(old_tbl, old_hash);

		spin_lock_bh(old_bucket_lock);
		rht_for_each(he, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				link_old_to_new(new_tbl, new_hash, he);
				break;
			}
		}
		spin_unlock_bh(old_bucket_lock);
	}

	/* Publish the new table pointer. Lookups may now traverse
	 * the new table, but they will not benefit from any
	 * additional efficiency until later steps unzip the buckets.
	 */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Unzip interleaved hash chains */
	while (!complete && !ht->being_destroyed) {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table): ...
		 */
		complete = true;
		for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
			struct rhash_head *head;

			old_bucket_lock = bucket_lock(old_tbl, old_hash);
			spin_lock_bh(old_bucket_lock);

			hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash);
			head = rht_dereference_bucket(old_tbl->buckets[old_hash],
						      old_tbl, old_hash);
			if (!rht_is_a_nulls(head))
				complete = false;

			spin_unlock_bh(old_bucket_lock);
		}
	}

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);
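
/* Usage sketch (illustrative; resizing is normally driven by the deferred
 * worker below, but a caller may force a grow while holding ht->mutex in a
 * context where synchronize_rcu() is allowed):
 *
 *	int err;
 *
 *	mutex_lock(&ht->mutex);
 *	err = rhashtable_expand(ht);
 *	mutex_unlock(&ht->mutex);
 */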

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *new_bucket_lock, *old_bucket_lock1, *old_bucket_lock2;
	unsigned int new_hash;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* Link the first entry in the old bucket to the end of the
	 * bucket in the new table. As entries are concurrently being
	 * added to the new table, lock down the new bucket. As we
	 * always divide the size in half when shrinking, each bucket
	 * in the new table maps to exactly two buckets in the old
	 * table.
	 *
	 * As removals can occur concurrently on the old table, we need
	 * to lock down both matching buckets in the old table.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_bucket_lock1 = bucket_lock(tbl, new_hash);
		old_bucket_lock2 = bucket_lock(tbl, new_hash + new_tbl->size);
		new_bucket_lock = bucket_lock(new_tbl, new_hash);

		spin_lock_bh(old_bucket_lock1);

		/* Depending on the lock-to-buckets mapping, the buckets in
		 * the lower and upper region may map to the same lock.
		 */
		if (old_bucket_lock1 != old_bucket_lock2) {
			spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
			spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);
		} else {
			spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
		}

		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash]);
		/* bucket_tail() is re-evaluated here: the second old bucket
		 * is appended after the entries just linked from the first.
		 */
		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash + new_tbl->size]);

		spin_unlock_bh(new_bucket_lock);
		if (old_bucket_lock1 != old_bucket_lock2)
			spin_unlock_bh(old_bucket_lock2);
		spin_unlock_bh(old_bucket_lock1);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, new_tbl);
	atomic_dec(&ht->shift);

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	struct rhashtable_walker *walker;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);

	list_for_each_entry(walker, &ht->walkers, list)
		walker->resize = true;

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht);
	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht);

unlock:
	mutex_unlock(&ht->mutex);
}

static void rhashtable_wakeup_worker(struct rhashtable *ht)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	size_t size = tbl->size;

	/* Only adjust the table if no resizing is currently in progress. */
	if (tbl == new_tbl &&
	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
		schedule_work(&ht->run_work);
}

static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				struct bucket_table *tbl, u32 hash)
{
	struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
							 tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

	rhashtable_wakeup_worker(ht);
}

/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl;
	spinlock_t *lock;
	unsigned hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = head_hashfn(ht, tbl, obj);
	lock = bucket_lock(tbl, hash);

	spin_lock_bh(lock);
	__rhashtable_insert(ht, obj, tbl, hash);
	spin_unlock_bh(lock);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
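
/* Usage sketch (illustrative; assumes the hypothetical struct test_obj from
 * the rhashtable_init() examples below, embedding a struct rhash_head):
 *
 *	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	rhashtable_insert(ht, &obj->node);
 */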

/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * Concurrent insertions and deletions are valid as mutations are
 * serialized by the per bucket locks; it is likewise valid to have
 * concurrent RCU protected lookups.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	bool ret = false;

	rcu_read_lock();
	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = head_hashfn(ht, tbl, obj);

	lock = bucket_lock(tbl, hash);
	spin_lock_bh(lock);

restart:
	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);

		ret = true;
		break;
	}

	/* The entry may be linked in either 'tbl', 'future_tbl', or both.
	 * 'future_tbl' only exists for a short period of time during
	 * resizing. Thus traversing both is fine and the added cost is
	 * very rare.
	 */
	if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) {
		spin_unlock_bh(lock);

		tbl = rht_dereference_rcu(ht->future_tbl, ht);
		hash = head_hashfn(ht, tbl, obj);

		lock = bucket_lock(tbl, hash);
		spin_lock_bh(lock);
		goto restart;
	}

	spin_unlock_bh(lock);

	if (ret) {
		atomic_dec(&ht->nelems);
		rhashtable_wakeup_worker(ht);
	}

	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);
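
/* Usage sketch (illustrative, same hypothetical test_obj, additionally
 * assumed to embed a struct rcu_head named 'rcu'; the object must only be
 * freed after a grace period since RCU lookups may still reference it):
 *
 *	if (rhashtable_remove(ht, &obj->node))
 *		kfree_rcu(obj, rcu);
 */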

struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
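
/* Usage sketch (illustrative, with the hypothetical test_obj from below):
 * the caller only needs rcu_read_lock() to keep the returned object alive
 * while it is being used:
 *
 *	int key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(ht, &key);
 *	if (obj)
 *		use(obj);		// hypothetical consumer
 *	rcu_read_unlock();
 */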

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @key:	the pointer to the key
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = key_hashfn(ht, key, ht->p.key_len);
restart:
	rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	if (unlikely(tbl != old_tbl)) {
		tbl = old_tbl;
		goto restart;
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
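
/* Usage sketch (illustrative; my_cmp, struct my_filter and the 'generation'
 * field are hypothetical): a caller can match on more than the raw key
 * bytes, as long as the key pointer still hashes with ht->p.key_len:
 *
 *	static bool my_cmp(void *ptr, void *arg)
 *	{
 *		struct test_obj *obj = ptr;
 *		struct my_filter *f = arg;
 *
 *		return obj->key == f->key && obj->generation == f->generation;
 *	}
 *
 *	obj = rhashtable_lookup_compare(ht, &f->key, my_cmp, f);
 */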

/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = rht_obj(ht, obj) + ht->p.key_offset,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
						&arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);

/**
 * rhashtable_lookup_compare_insert - search and insert object to hash table
 *                                    with compare function
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg)
{
	struct bucket_table *new_tbl, *old_tbl;
	spinlock_t *new_bucket_lock, *old_bucket_lock;
	u32 new_hash, old_hash;
	bool success = true;

	BUG_ON(!ht->p.key_len);

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	old_hash = head_hashfn(ht, old_tbl, obj);
	old_bucket_lock = bucket_lock(old_tbl, old_hash);
	spin_lock_bh(old_bucket_lock);

	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = head_hashfn(ht, new_tbl, obj);
	new_bucket_lock = bucket_lock(new_tbl, new_hash);
	if (unlikely(old_tbl != new_tbl))
		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);

	if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
				      compare, arg)) {
		success = false;
		goto exit;
	}

	__rhashtable_insert(ht, obj, new_tbl, new_hash);

exit:
	if (unlikely(old_tbl != new_tbl))
		spin_unlock_bh(new_bucket_lock);
	spin_unlock_bh(old_bucket_lock);

	rcu_read_unlock();

	return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	list_add(&iter->walker->list, &ht->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
{
	rcu_read_lock();

	if (iter->walker->resize) {
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	const struct bucket_table *tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	tbl = rht_dereference_rcu(ht->tbl, ht);

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	iter->p = NULL;

out:
	if (iter->walker->resize) {
		iter->p = NULL;
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return ERR_PTR(-EAGAIN);
	}

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
{
	rcu_read_unlock();
	iter->p = NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
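
/* Usage sketch (illustrative; use() is a hypothetical consumer): a walk that
 * tolerates concurrent resizes by rewinding on -EAGAIN, as documented above:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(ht, &iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);	// -EAGAIN here only rewinds
 *	while ((obj = rhashtable_walk_next(&iter))) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	// resize: iterator rewound
 *			break;
 *		}
 *		use(obj);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */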

static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));
	INIT_LIST_HEAD(&ht->walkers);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);
	atomic_set(&ht->shift, ilog2(tbl->size));
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	if (ht->p.grow_decision || ht->p.shrink_decision)
		INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
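
/* Lifecycle sketch (illustrative, reusing the fixed-length-key parameters
 * from Configuration Example 1 above):
 *
 *	static struct rhashtable ht;
 *	int err;
 *
 *	err = rhashtable_init(&ht, &params);
 *	if (err)
 *		return err;
 *	...
 *	rhashtable_destroy(&ht);
 */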

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not RCU safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	if (ht->p.grow_decision || ht->p.shrink_decision)
		cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);