#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store.  Keys need not be unique; however,
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete_block()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * They use a hash of the block's contents as the key and the block number as
 * the value.  That is why keys need not be unique (different xattr blocks may
 * end up having the same hash).  However, the block number always uniquely
 * identifies a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation.  A
 * fixed-size hash table is used for fast key lookups.
 */
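
/*
 * Illustrative usage sketch (not part of this file's logic): the typical
 * ext2/ext4 pattern is "key = hash of the xattr block contents, value =
 * block number".  The helper below is hypothetical caller code:
 *
 *	static void example_remember_xattr_block(struct mb_cache *cache,
 *						 u32 hash, sector_t block)
 *	{
 *		mb_cache_entry_create(cache, GFP_NOFS, hash, block, true);
 *	}
 *
 * A later lookup with the same hash (mb_cache_entry_find_first() and
 * friends) yields candidate blocks whose contents may be reused instead of
 * writing out a duplicate block.
 */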

struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head	*c_hash;
	/* log2 of hash table size */
	int			c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	int			c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t		c_list_lock;
	struct list_head	c_list;
	/* Number of entries in cache */
	unsigned long		c_entry_count;
	struct shrinker		c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct	c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned int nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
							u32 key)
{
	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache: cache where the entry should be created
 * @mask: gfp mask with which the entry should be allocated
 * @key: key of the entry
 * @block: block that contains data
 * @reusable: is the block reusable by other inodes?
 *
 * Creates an entry in @cache with key @key and records that data is stored
 * in block @block.  Returns -EBUSY if an entry with the same key and block
 * already exists in the cache, 0 otherwise.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  sector_t block, bool reusable)
{
	struct mb_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2*cache->c_max_entries)
		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/* One ref for the hash table; the LRU list ref is grabbed below */
	atomic_set(&entry->e_refcnt, 1);
	entry->e_key = key;
	entry->e_block = block;
	entry->e_reusable = reusable;
	entry->e_referenced = 0;
	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_block == block) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);

	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	/* Grab ref for LRU list */
	atomic_inc(&entry->e_refcnt);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);

	return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);
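
/*
 * Caller-side error handling sketch (hypothetical function and variable
 * names): -EBUSY only means the key/block pair is already cached, so it is
 * normally not treated as an error and only -ENOMEM is propagated:
 *
 *	static int example_cache_block(struct mb_cache *cache, u32 hash,
 *				       sector_t block)
 *	{
 *		int ret;
 *
 *		ret = mb_cache_entry_create(cache, GFP_NOFS, hash, block,
 *					    true);
 *		if (ret == -EBUSY)
 *			ret = 0;
 *		return ret;
 *	}
 */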

void __mb_cache_entry_free(struct mb_cache_entry *entry)
{
	kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key && entry->e_reusable) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb_cache_entry_find_first - find the first entry in cache with given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs a reference to
 * the first such entry found and returns it.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next entry in cache with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds the next reusable entry in the hash chain which has the same key as
 * @entry. If @entry is unhashed (which can happen when deletion of entry
 * races with the search), finds the first reusable entry in the hash chain.
 * The function drops the reference to @entry and returns with a reference to
 * the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);
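
/*
 * Lookup sketch (hypothetical caller code; example_block_matches() is a
 * made-up predicate): walk all reusable entries with a given key.  Each call
 * returns its own reference, and mb_cache_entry_find_next() drops the
 * reference to the entry passed in, so a reference only has to be dropped
 * explicitly when the walk stops early:
 *
 *	static sector_t example_find_block(struct mb_cache *cache, u32 hash)
 *	{
 *		struct mb_cache_entry *ce;
 *		sector_t block = 0;
 *
 *		for (ce = mb_cache_entry_find_first(cache, hash); ce;
 *		     ce = mb_cache_entry_find_next(cache, ce)) {
 *			if (example_block_matches(ce->e_block)) {
 *				block = ce->e_block;
 *				mb_cache_entry_put(cache, ce);
 *				break;
 *			}
 *		}
 *		return block;
 *	}
 */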

/*
 * mb_cache_entry_get - get a cache entry by block number (and key)
 * @cache: cache we work with
 * @key: key of block number @block
 * @block: block number
 *
 * Grabs a reference to the entry recorded for block @block under key @key.
 * Returns the entry, or NULL if no such entry is cached.
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  sector_t block)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_block == block) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);
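
/*
 * Usage sketch (hypothetical caller code): look up the entry for a known
 * key/block pair, mark it as recently used so the shrinker skips it once,
 * and drop the reference again:
 *
 *	static void example_mark_block_used(struct mb_cache *cache, u32 hash,
 *					    sector_t block)
 *	{
 *		struct mb_cache_entry *ce;
 *
 *		ce = mb_cache_entry_get(cache, hash, block);
 *		if (ce) {
 *			mb_cache_entry_touch(cache, ce);
 *			mb_cache_entry_put(cache, ce);
 *		}
 *	}
 */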

/*
 * mb_cache_entry_delete_block - remove information about block from cache
 * @cache: cache we work with
 * @key: key of block @block
 * @block: block number
 *
 * Remove entry from cache @cache with key @key and data stored in @block.
 */
void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
				 sector_t block)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_block == block) {
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				list_del_init(&entry->e_list);
				cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			mb_cache_entry_put(cache, entry);
			return;
		}
	}
	hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete_block);
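
/*
 * Usage sketch (hypothetical caller code): when a filesystem frees an xattr
 * block it should also drop the corresponding cache entry, so that a later
 * lookup can never hand out a stale block number:
 *
 *	static void example_forget_block(struct mb_cache *cache, u32 hash,
 *					 sector_t block)
 *	{
 *		mb_cache_entry_delete_block(cache, hash, block);
 *	}
 */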

/*
 * mb_cache_entry_touch - cache entry got used
 * @cache: cache the entry belongs to
 * @entry: entry that got used
 *
 * Marks entry as used to give it higher chances of surviving in the cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	entry->e_referenced = 1;
}
EXPORT_SYMBOL(mb_cache_entry_touch);

static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned int nr_to_scan)
{
	struct mb_cache_entry *entry;
	struct hlist_bl_head *head;
	unsigned int shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
		if (entry->e_referenced) {
			entry->e_referenced = 0;
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		/*
		 * We keep LRU list reference so that entry doesn't go away
		 * from under us.
		 */
		spin_unlock(&cache->c_list_lock);
		head = mb_cache_entry_head(cache, entry->e_key);
		hlist_bl_lock(head);
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		}
		hlist_bl_unlock(head);
		if (mb_cache_entry_put(cache, entry))
			shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	int nr_to_scan = sc->nr_to_scan;
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);
	return mb_cache_shrink(cache, nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);
	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create a cache for keys with a hash table of 2^bucket_bits buckets.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	int bucket_count = 1 << bucket_bits;
	int i;

	if (!try_module_get(THIS_MODULE))
		return NULL;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head),
				GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb_cache_count;
	cache->c_shrink.scan_objects = mb_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	if (register_shrinker(&cache->c_shrink)) {
		kfree(cache->c_hash);
		kfree(cache);
		goto err_out;
	}

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	module_put(THIS_MODULE);
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);
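
/*
 * Lifecycle sketch (hypothetical caller code): one cache is typically
 * created per mounted filesystem and destroyed at unmount.  The bucket_bits
 * value of 10 (1024 hash buckets) is only an illustration; mb_cache_create()
 * returns NULL on failure:
 *
 *	static struct mb_cache *example_mount_cache(void)
 *	{
 *		return mb_cache_create(10);
 *	}
 *
 *	static void example_umount_cache(struct mb_cache *cache)
 *	{
 *		if (cache)
 *			mb_cache_destroy(cache);
 *	}
 */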

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in cache and cache itself. Caller must make sure nobody
 * (except shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		} else
			WARN_ON(1);
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
	mb_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	BUG_ON(!mb_entry_cache);
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");