/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

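/* Size the per-bucket lock array: roughly locks_mul locks per possible CPU,
 * rounded up to a power of two and capped at half the bucket count. For
 * example, 4 possible CPUs with the default locks_mul of 32 give 128 locks,
 * which a 64-bucket table then caps at 32.
 */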
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
		tbl->locks = NULL;
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
#endif
		if (gfp != GFP_KERNEL)
			gfp |= __GFP_NOWARN | __GFP_NORETRY;

		if (!tbl->locks)
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

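/* Return the newest table in the future_tbl chain, i.e. the last table in
 * an ongoing sequence of rehashes.
 */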
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

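/* Move one entry (the tail of the old bucket chain) to its bucket in the
 * new table. Returns -ENOENT once the old chain is empty.
 */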
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		rhashtable_rehash_chain(ht, old_hash);
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht: the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht: the hash table to shrink
 *
 * This function shrinks the hash table to the smallest size that will
 * not cause it to expand again right away.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;
	int err;

	ASSERT_RHT_MUTEX(ht);

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

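/* Look up @key in @tbl. Returns the matching object, NULL after appending
 * @obj to an existing rhlist chain, ERR_PTR(-ENOENT) if the key is absent,
 * or ERR_PTR(-EAGAIN) if the chain exceeded the elasticity limit.
 */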
static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct rhash_head *head;
	int elasticity;

	elasticity = ht->elasticity;
	pprev = &tbl->buckets[hash];
	rht_for_each(head, tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

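/* Insert @obj into @tbl based on the lookup result in @data. Returns the
 * next table to retry in, NULL on success, or an ERR_PTR() on failure.
 */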
static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rcu_dereference(tbl->future_tbl);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rcu_dereference(tbl->future_tbl);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht: Table to walk over
 * @iter: Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter: Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter: Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter: Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter: Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
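
/*
 * Walker usage sketch (illustrative only; assumes a caller-defined
 * struct test_obj as in the rhashtable_init() examples below):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	(resize: iterator rewound)
 *			break;
 *		}
 *		[... process obj; duplicates are possible across resizes ...]
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */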

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht: hash table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void			*my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
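
/*
 * Usage sketch (illustrative only), tying the pieces together with the
 * fast-path helpers from <linux/rhashtable.h>. "test_rht_params" and
 * "struct test_obj" are assumed to be defined as in the examples above;
 * note that with nelem_hint = 100 the initial table would be
 * roundup_pow_of_two(100 * 4 / 3) = 256 buckets.
 *
 *	struct rhashtable ht;
 *	struct test_obj *obj;
 *	int key = 1, err;
 *
 *	err = rhashtable_init(&ht, &test_rht_params);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_rht_params);
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_fast(&ht, &key, test_rht_params);
 *	rcu_read_unlock();
 *
 *	rhashtable_free_and_destroy(&ht, NULL, NULL);
 */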

/**
 * rhltable_init - initialize a new hash list table
 * @hlt: hash list table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	/* No rhlist NULLs marking for now. */
	if (params->nulls_base)
		return -EINVAL;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);
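
/*
 * rhltable usage sketch (illustrative only): unlike a plain rhashtable, an
 * rhltable keeps all objects sharing one key on a single rhlist chain.
 * Assumes a caller-defined object embedding a struct rhlist_head
 * "list_node" and the rhltable helpers from <linux/rhashtable.h>:
 *
 *	struct rhlist_head *list, *pos;
 *	struct test_obj *obj;
 *
 *	err = rhltable_insert(&hlt, &obj->list_node, test_rht_params);
 *
 *	rcu_read_lock();
 *	list = rhltable_lookup(&hlt, &key, test_rht_params);
 *	rhl_for_each_entry_rcu(obj, pos, list, list_node)
 *		[... each object whose key matches ...]
 *	rcu_read_unlock();
 */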

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht: the hash table to destroy
 * @free_fn: callback to release resources of element
 * @arg: pointer passed to free_fn
 *
 * Stops any pending async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller must ensure that no further write operations
 * occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);