/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include <linux/dm-bufio.h>

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when the number of dirty buffers exceeds
 *	DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_RATIO	3
#define DM_BUFIO_LOW_WATERMARK_RATIO	16

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	30

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	300

/*
 * The nr of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)

/*
 * Align buffer writes to this boundary.
 * Tests show that SSDs have the highest IOPS when using 4k writes.
 */
#define DM_BUFIO_WRITE_ALIGN		4096

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked to buffer_tree with their node field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too.  They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	s8 sectors_per_block_bits;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct kmem_cache *slab_buffer;
	struct kmem_cache *slab_cache;
	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	unsigned minimum_buffers;

	struct rb_root buffer_tree;
	wait_queue_head_t free_buffer_wait;

	sector_t start;

	int async_write_error;

	struct list_head client_list;
	struct shrinker shrinker;
};

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct rb_node node;
	struct list_head lru_list;
	struct list_head global_list;
	sector_t block;
	void *data;
	unsigned char data_mode;		/* DATA_MODE_* */
	unsigned char list_mode;		/* LIST_* */
	blk_status_t read_error;
	blk_status_t write_error;
	unsigned accessed;
	unsigned hold_count;
	unsigned long state;
	unsigned long last_accessed;
	unsigned dirty_start;
	unsigned dirty_end;
	unsigned write_start;
	unsigned write_end;
	struct dm_bufio_client *c;
	struct list_head write_list;
	void (*end_io)(struct dm_buffer *, blk_status_t);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
	unsigned int stack_len;
	unsigned long stack_entries[MAX_STACK];
#endif
};

/*----------------------------------------------------------------*/

#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time.  If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(global_spinlock);

static LIST_HEAD(global_queue);

static unsigned long global_num = 0;

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_cleanup_old_work;
static struct work_struct dm_bufio_replacement_work;

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
}
#endif

/*----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = (b->block < block) ? n->rb_left : n->rb_right;
	}

	return NULL;
}

static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block) {
			BUG_ON(found != b);
			return;
		}

		parent = *new;
		new = (found->block < b->block) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
	rb_erase(&b->node, &c->buffer_tree);
}

/*----------------------------------------------------------------*/

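/*
 * Descriptive note: account a buffer's data in the per-allocation-mode and
 * global counters and add it to (or remove it from) the global queue.  This
 * runs under global_spinlock; when the cache grows past dm_bufio_cache_size,
 * the replacement work is queued to shrink it in the background.
 */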
static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
{
	unsigned char data_mode;
	long diff;

	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	data_mode = b->data_mode;
	diff = (long)b->c->block_size;
	if (unlink)
		diff = -diff;

	spin_lock(&global_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	b->accessed = 1;

	if (!unlink) {
		list_add(&b->global_list, &global_queue);
		global_num++;
		if (dm_bufio_current_allocated > dm_bufio_cache_size)
			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
	} else {
		list_del(&b->global_list);
		global_num--;
	}

	spin_unlock(&global_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       unsigned char *data_mode)
{
	if (unlikely(c->slab_cache != NULL)) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(c->slab_cache, gfp_mask);
	}

	if (c->block_size <= KMALLOC_MAX_SIZE &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
				c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
	 * all allocations done by this process (including pagetables) are done
	 * as if GFP_NOIO was specified.
	 */
	if (gfp_mask & __GFP_NORETRY) {
		unsigned noio_flag = memalloc_noio_save();
		void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);

		memalloc_noio_restore(noio_flag);
		return ptr;
	}

	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
}

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, unsigned char data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(c->slab_cache, data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data,
			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kmem_cache_free(c->slab_buffer, b);
		return NULL;
	}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	b->stack_len = 0;
#endif
	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	free_buffer_data(c, b->data, b->data_mode);
	kmem_cache_free(c->slab_buffer, b);
}

/*
 * Link buffer to the buffer tree and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	__insert(b->c, b);
	b->last_accessed = jiffies;

	adjust_total_allocated(b, false);
}

/*
 * Unlink buffer from the buffer tree and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	__remove(b->c, b);
	list_del(&b->lru_list);

	adjust_total_allocated(b, true);
}

/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	b->accessed = 1;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
	b->last_accessed = jiffies;
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 * If the buffer is not vmalloced, try using the bio interface.
 *
 * If the buffer is vmalloced, or if the bio cannot be allocated or filled,
 * use the dm-io layer to do the I/O.  The dm-io layer splits the I/O into
 * multiple requests, avoiding the above shortcomings.
 *--------------------------------------------------------------*/

/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}

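/*
 * Descriptive note: issue the request through the dm-io interface.  This
 * path handles vmalloc-backed buffers and is also the fallback when a bio
 * cannot be allocated or filled in use_bio below.
 */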
static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
		     unsigned n_sectors, unsigned offset)
{
	int r;
	struct dm_io_request io_req = {
		.bi_op = rw,
		.bi_op_flags = 0,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = sector,
		.count = n_sectors,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = (char *)b->data + offset;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = (char *)b->data + offset;
	}

	r = dm_io(&io_req, 1, &region, NULL);
	if (unlikely(r))
		b->end_io(b, errno_to_blk_status(r));
}

static void bio_complete(struct bio *bio)
{
	struct dm_buffer *b = bio->bi_private;
	blk_status_t status = bio->bi_status;
	bio_put(bio);
	b->end_io(b, status);
}

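/*
 * Descriptive note: try to issue the request with a directly allocated bio.
 * If the bio cannot be allocated, or a page cannot be added to it, fall back
 * to use_dmio.
 */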
static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
		    unsigned n_sectors, unsigned offset)
{
	struct bio *bio;
	char *ptr;
	unsigned vec_size, len;

	vec_size = b->c->block_size >> PAGE_SHIFT;
	if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
		vec_size += 2;

	bio = bio_kmalloc(GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN, vec_size);
	if (!bio) {
dmio:
		use_dmio(b, rw, sector, n_sectors, offset);
		return;
	}

	bio->bi_iter.bi_sector = sector;
	bio_set_dev(bio, b->c->bdev);
	bio_set_op_attrs(bio, rw, 0);
	bio->bi_end_io = bio_complete;
	bio->bi_private = b;

	ptr = (char *)b->data + offset;
	len = n_sectors << SECTOR_SHIFT;

	do {
		unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
		if (!bio_add_page(bio, virt_to_page(ptr), this_step,
				  offset_in_page(ptr))) {
			bio_put(bio);
			goto dmio;
		}

		len -= this_step;
		ptr += this_step;
	} while (len > 0);

	submit_bio(bio);
}

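/*
 * Descriptive note: compute the sector range to transfer (for writes, only
 * the write-aligned dirty region) and submit it either through the bio path
 * or through dm-io.
 */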
static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
{
	unsigned n_sectors;
	sector_t sector;
	unsigned offset, end;

	b->end_io = end_io;

	if (likely(b->c->sectors_per_block_bits >= 0))
		sector = b->block << b->c->sectors_per_block_bits;
	else
		sector = b->block * (b->c->block_size >> SECTOR_SHIFT);
	sector += b->c->start;

	if (rw != REQ_OP_WRITE) {
		n_sectors = b->c->block_size >> SECTOR_SHIFT;
		offset = 0;
	} else {
		if (b->c->write_callback)
			b->c->write_callback(b);
		offset = b->write_start;
		end = b->write_end;
		offset &= -DM_BUFIO_WRITE_ALIGN;
		end += DM_BUFIO_WRITE_ALIGN - 1;
		end &= -DM_BUFIO_WRITE_ALIGN;
		if (unlikely(end > b->c->block_size))
			end = b->c->block_size;

		sector += offset >> SECTOR_SHIFT;
		n_sectors = (end - offset) >> SECTOR_SHIFT;
	}

	if (b->data_mode != DATA_MODE_VMALLOC)
		use_bio(b, rw, sector, n_sectors, offset);
	else
		use_dmio(b, rw, sector, n_sectors, offset);
}

/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct dm_buffer *b, blk_status_t status)
{
	b->write_error = status;
	if (unlikely(status)) {
		struct dm_bufio_client *c = b->c;

		(void)cmpxchg(&c->async_write_error, 0,
			      blk_status_to_errno(status));
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we can't
 *   have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	b->write_start = b->dirty_start;
	b->write_end = b->dirty_end;

	if (!write_list)
		submit_io(b, REQ_OP_WRITE, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;
	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, REQ_OP_WRITE, write_endio);
		cond_resched();
	}
	blk_finish_plug(&plug);
}

/*
 * Wait until any activity on the buffer finishes.  Possibly write the
 * buffer if it is dirty.  When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	return NULL;
}

/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;
	bool tried_noio_alloc = false;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
	 *		    mutex and wait ourselves.
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
			dm_bufio_unlock(c);
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			dm_bufio_lock(c);
			if (b)
				return b;
			tried_noio_alloc = true;
		}

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}

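/*
 * Descriptive note: start asynchronous writes on the dirty buffers, oldest
 * first.  Buffers that turn out to be clean and idle are moved back to the
 * clean list.  With no_wait set, stop at the first buffer that already has a
 * write in flight.
 */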
static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b, write_list);
		cond_resched();
	}
}

/*
 * Check if we're over the writeback watermark: if the number of dirty
 * buffers exceeds DM_BUFIO_WRITEBACK_RATIO times the number of clean
 * buffers, start writing them back asynchronously.
 */
static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
{
	if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO)
		__write_dirty_buffers_async(c, 1, write_list);
}

/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

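/*
 * Descriptive note: look up the buffer for the requested block, allocating
 * and linking a new one if needed.  This is the common helper behind
 * dm_bufio_get, dm_bufio_read, dm_bufio_new and dm_bufio_prefetch;
 * *need_submit tells the caller whether a read must still be issued.
 */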
static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the buffer tree.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c, write_list);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct dm_buffer *b, blk_status_t status)
{
	b->read_error = status;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_READING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit, &write_list);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	if (b && b->hold_count == 1)
		buffer_record_stack(b);
#endif
	dm_bufio_unlock(c);

	__flush_write_list(&write_list);

	if (!b)
		return NULL;

	if (need_submit)
		submit_io(b, REQ_OP_READ, read_endio);

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = blk_status_to_errno(b->read_error);

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);

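/*
 * Illustrative usage sketch (not part of this driver; the "client" pointer,
 * "block_nr" and "new_contents" below are hypothetical).  A typical caller
 * reads a block, modifies it, marks it dirty, drops its hold and later
 * flushes before committing metadata:
 *
 *	struct dm_buffer *buf;
 *	void *data = dm_bufio_read(client, block_nr, &buf);
 *
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memcpy(data, new_contents, dm_bufio_get_block_size(client));
 *	dm_bufio_mark_buffer_dirty(buf);
 *	dm_bufio_release(buf);
 *	dm_bufio_write_dirty_buffers(client);
 *
 * dm_bufio_prefetch below can be called beforehand to start the reads early.
 */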
void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
			dm_bufio_unlock(c);
			blk_finish_plug(&plug);
			__flush_write_list(&write_list);
			blk_start_plug(&plug);
			dm_bufio_lock(c);
		}
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, REQ_OP_READ, read_endio);
			dm_bufio_release(b);

			cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);

void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in caching
		 * an invalid buffer.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
					unsigned start, unsigned end)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(start >= end);
	BUG_ON(end > b->c->block_size);

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state)) {
		b->dirty_start = start;
		b->dirty_end = end;
		__relink_lru(b, LIST_DIRTY);
	} else {
		if (start < b->dirty_start)
			b->dirty_start = start;
		if (end > b->dirty_end)
			b->dirty_end = end;
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

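/*
 * Descriptive note: start writeback of all dirty buffers without waiting for
 * completion; dm_bufio_write_dirty_buffers below is the waiting variant.
 */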
void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
	dm_bufio_lock(c);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked
		 * and if it exceeds the total number of buffers, it means that
		 * someone is doing some writes simultaneously with us.  In
		 * this case, stop, dropping the lock.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);

/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_WRITE,
		.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the buffer tree for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but not relink it, because that other user needs to have the buffer
 * at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b, NULL);
	if (b->hold_count == 1) {
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		b->dirty_start = 0;
		b->dirty_end = c->block_size;
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock_io(&b->state, B_WRITING,
				    TASK_UNINTERRUPTIBLE);
		/*
		 * Relink buffer to "new_block" so that write_callback
		 * sees "new_block" as a block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done in bufio lock, so that block number
		 * change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, REQ_OP_WRITE, write_endio);
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);
	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

Mikulas Patocka55494bf2014-01-13 19:12:36 -05001404/*
1405 * Free the given buffer.
1406 *
1407 * This is just a hint, if the buffer is in use or dirty, this function
1408 * does nothing.
1409 */
1410void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1411{
1412 struct dm_buffer *b;
1413
1414 dm_bufio_lock(c);
1415
1416 b = __find(c, block);
1417 if (b && likely(!b->hold_count) && likely(!b->state)) {
1418 __unlink_buffer(b);
1419 __free_buffer_wake(b);
1420 }
1421
1422 dm_bufio_unlock(c);
1423}
Mikulas Patockaafa53df2018-03-15 16:02:31 -04001424EXPORT_SYMBOL_GPL(dm_bufio_forget);
Mikulas Patocka55494bf2014-01-13 19:12:36 -05001425
Mikulas Patocka55b082e2014-01-13 19:13:05 -05001426void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
1427{
1428 c->minimum_buffers = n;
1429}
Mikulas Patockaafa53df2018-03-15 16:02:31 -04001430EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
Mikulas Patocka55b082e2014-01-13 19:13:05 -05001431
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001432unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1433{
1434 return c->block_size;
1435}
1436EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1437
1438sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1439{
Mikulas Patockaf51f2e02018-03-26 20:29:46 +02001440 sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
1441 if (likely(c->sectors_per_block_bits >= 0))
1442 s >>= c->sectors_per_block_bits;
1443 else
1444 sector_div(s, c->block_size >> SECTOR_SHIFT);
1445 return s;
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001446}
1447EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1448
1449sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1450{
1451 return b->block;
1452}
1453EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1454
1455void *dm_bufio_get_block_data(struct dm_buffer *b)
1456{
1457 return b->data;
1458}
1459EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1460
1461void *dm_bufio_get_aux_data(struct dm_buffer *b)
1462{
1463 return b + 1;
1464}
1465EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1466
1467struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1468{
1469 return b->c;
1470}
1471EXPORT_SYMBOL_GPL(dm_bufio_get_client);
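
/*
 * Hypothetical sketch, not part of this file, tying the accessors above to
 * a typical read-modify-write cycle.  "client", "block" and the zeroing of
 * the payload are illustrative only; real callers interpret the block data
 * and the aux area according to their own on-disk format.
 */
static int example_update_block(struct dm_bufio_client *client, sector_t block)
{
	struct dm_buffer *b;
	void *data;

	data = dm_bufio_read(client, block, &b);	/* brings the block into the cache */
	if (IS_ERR(data))
		return PTR_ERR(data);

	memset(data, 0, dm_bufio_get_block_size(client));	/* "modify" the payload */
	dm_bufio_mark_buffer_dirty(b);	/* schedule the whole block for writeback */
	dm_bufio_release(b);		/* drop the hold count; the data stays cached */

	return 0;
}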
1472
1473static void drop_buffers(struct dm_bufio_client *c)
1474{
1475 struct dm_buffer *b;
1476 int i;
Mikulas Patocka86bad0c2015-11-23 19:20:06 -05001477 bool warned = false;
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001478
1479 BUG_ON(dm_bufio_in_request());
1480
1481 /*
1482 * An optimization so that the buffers are not written one-by-one.
1483 */
1484 dm_bufio_write_dirty_buffers_async(c);
1485
1486 dm_bufio_lock(c);
1487
1488 while ((b = __get_unclaimed_buffer(c)))
1489 __free_buffer_wake(b);
1490
1491 for (i = 0; i < LIST_SIZE; i++)
Mikulas Patocka86bad0c2015-11-23 19:20:06 -05001492 list_for_each_entry(b, &c->lru[i], lru_list) {
1493 WARN_ON(!warned);
1494 warned = true;
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001495 DMERR("leaked buffer %llx, hold count %u, list %d",
1496 (unsigned long long)b->block, b->hold_count, i);
Mikulas Patocka86bad0c2015-11-23 19:20:06 -05001497#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
Thomas Gleixner741b58f2019-04-25 11:45:07 +02001498 stack_trace_print(b->stack_entries, b->stack_len, 1);
1499 /* mark unclaimed to avoid BUG_ON below */
1500 b->hold_count = 0;
Mikulas Patocka86bad0c2015-11-23 19:20:06 -05001501#endif
1502 }
1503
1504#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1505 while ((b = __get_unclaimed_buffer(c)))
1506 __free_buffer_wake(b);
1507#endif
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001508
1509 for (i = 0; i < LIST_SIZE; i++)
1510 BUG_ON(!list_empty(&c->lru[i]));
1511
1512 dm_bufio_unlock(c);
1513}
1514
1515/*
Joe Thornber33096a72014-10-09 11:10:25 +01001516 * We may not be able to evict this buffer if I/O is pending or the client
 1517 * is still using it.  The caller is expected to know that the buffer is too old.
 1518 *
Mikulas Patocka9d28eb12014-10-16 14:45:20 -04001519 * And if GFP_NOFS is used, we must not do any I/O because we hold
 1520 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 1521 * rerouted to a different bufio client.
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001522 */
Joe Thornber33096a72014-10-09 11:10:25 +01001523static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001524{
Mikulas Patocka9d28eb12014-10-16 14:45:20 -04001525 if (!(gfp & __GFP_FS)) {
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001526 if (test_bit(B_READING, &b->state) ||
1527 test_bit(B_WRITING, &b->state) ||
1528 test_bit(B_DIRTY, &b->state))
Joe Thornber33096a72014-10-09 11:10:25 +01001529 return false;
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001530 }
1531
1532 if (b->hold_count)
Joe Thornber33096a72014-10-09 11:10:25 +01001533 return false;
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001534
1535 __make_buffer_clean(b);
1536 __unlink_buffer(b);
1537 __free_buffer_wake(b);
1538
Joe Thornber33096a72014-10-09 11:10:25 +01001539 return true;
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001540}
1541
Mikulas Patocka13840d32017-04-30 17:32:28 -04001542static unsigned long get_retain_buffers(struct dm_bufio_client *c)
Joe Thornber33096a72014-10-09 11:10:25 +01001543{
Mikulas Patockaf51f2e02018-03-26 20:29:46 +02001544 unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
1545 if (likely(c->sectors_per_block_bits >= 0))
1546 retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
1547 else
1548 retain_bytes /= c->block_size;
1549 return retain_bytes;
Joe Thornber33096a72014-10-09 11:10:25 +01001550}
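
/*
 * Worked example for the conversion above (assuming the default
 * retain_bytes of 256 KiB): with 4 KiB blocks, sectors_per_block_bits is 3,
 * so the target is 262144 >> (3 + 9) = 64 buffers; with a non-power-of-two
 * 12 KiB block size the division path gives 262144 / 12288 = 21 buffers.
 */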
1551
1552static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1553 gfp_t gfp_mask)
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001554{
1555 int l;
1556 struct dm_buffer *b, *tmp;
Joe Thornber33096a72014-10-09 11:10:25 +01001557 unsigned long freed = 0;
Suren Baghdasaryanfbc7c072017-12-06 09:27:30 -08001558 unsigned long count = c->n_buffers[LIST_CLEAN] +
1559 c->n_buffers[LIST_DIRTY];
Mikulas Patocka13840d32017-04-30 17:32:28 -04001560 unsigned long retain_target = get_retain_buffers(c);
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001561
1562 for (l = 0; l < LIST_SIZE; l++) {
Dave Chinner7dc19d52013-08-28 10:18:11 +10001563 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
Joe Thornber33096a72014-10-09 11:10:25 +01001564 if (__try_evict_buffer(b, gfp_mask))
1565 freed++;
1566 if (!--nr_to_scan || ((count - freed) <= retain_target))
Mikulas Patocka0e825862014-10-01 13:29:48 -04001567 return freed;
Peter Zijlstra7cd32672016-09-13 10:45:20 +02001568 cond_resched();
Dave Chinner7dc19d52013-08-28 10:18:11 +10001569 }
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001570 }
Dave Chinner7dc19d52013-08-28 10:18:11 +10001571 return freed;
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001572}
1573
Dave Chinner7dc19d52013-08-28 10:18:11 +10001574static unsigned long
1575dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001576{
Dave Chinner7dc19d52013-08-28 10:18:11 +10001577 struct dm_bufio_client *c;
1578 unsigned long freed;
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001579
Dave Chinner7dc19d52013-08-28 10:18:11 +10001580 c = container_of(shrink, struct dm_bufio_client, shrinker);
Mikulas Patockacf3591e2019-08-08 05:40:04 -04001581 if (sc->gfp_mask & __GFP_FS)
1582 dm_bufio_lock(c);
1583 else if (!dm_bufio_trylock(c))
Dave Chinner7dc19d52013-08-28 10:18:11 +10001584 return SHRINK_STOP;
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001585
Dave Chinner7dc19d52013-08-28 10:18:11 +10001586 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001587 dm_bufio_unlock(c);
Dave Chinner7dc19d52013-08-28 10:18:11 +10001588 return freed;
1589}
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001590
Dave Chinner7dc19d52013-08-28 10:18:11 +10001591static unsigned long
1592dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1593{
Mikulas Patockad12067f2016-11-23 16:52:01 -05001594 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
Suren Baghdasaryanfbc7c072017-12-06 09:27:30 -08001595 unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
1596 READ_ONCE(c->n_buffers[LIST_DIRTY]);
1597 unsigned long retain_target = get_retain_buffers(c);
Dave Chinner7dc19d52013-08-28 10:18:11 +10001598
Suren Baghdasaryanfbc7c072017-12-06 09:27:30 -08001599 return (count < retain_target) ? 0 : (count - retain_target);
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001600}
1601
1602/*
1603 * Create the buffering interface
1604 */
1605struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1606 unsigned reserved_buffers, unsigned aux_size,
1607 void (*alloc_callback)(struct dm_buffer *),
1608 void (*write_callback)(struct dm_buffer *))
1609{
1610 int r;
1611 struct dm_bufio_client *c;
1612 unsigned i;
Mikulas Patocka359dbf12018-03-26 20:29:45 +02001613 char slab_name[27];
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001614
Mikulas Patockaf51f2e02018-03-26 20:29:46 +02001615 if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
 1616		DMERR("%s: block size not specified or is not a multiple of 512 bytes", __func__);
1617 r = -EINVAL;
1618 goto bad_client;
1619 }
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001620
Greg Thelend8c712e2014-07-31 09:07:19 -07001621 c = kzalloc(sizeof(*c), GFP_KERNEL);
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001622 if (!c) {
1623 r = -ENOMEM;
1624 goto bad_client;
1625 }
Joe Thornber4e420c42014-10-06 13:48:51 +01001626 c->buffer_tree = RB_ROOT;
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001627
1628 c->bdev = bdev;
1629 c->block_size = block_size;
Mikulas Patockaf51f2e02018-03-26 20:29:46 +02001630 if (is_power_of_2(block_size))
1631 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
1632 else
1633 c->sectors_per_block_bits = -1;
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001634
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001635 c->alloc_callback = alloc_callback;
1636 c->write_callback = write_callback;
1637
1638 for (i = 0; i < LIST_SIZE; i++) {
1639 INIT_LIST_HEAD(&c->lru[i]);
1640 c->n_buffers[i] = 0;
1641 }
1642
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001643 mutex_init(&c->lock);
1644 INIT_LIST_HEAD(&c->reserved_buffers);
1645 c->need_reserved_buffers = reserved_buffers;
1646
Mikulas Patockaafa53df2018-03-15 16:02:31 -04001647 dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
Mikulas Patocka55b082e2014-01-13 19:13:05 -05001648
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001649 init_waitqueue_head(&c->free_buffer_wait);
1650 c->async_write_error = 0;
1651
1652 c->dm_io = dm_io_client_create();
1653 if (IS_ERR(c->dm_io)) {
1654 r = PTR_ERR(c->dm_io);
1655 goto bad_dm_io;
1656 }
1657
Mikulas Patockaf51f2e02018-03-26 20:29:46 +02001658 if (block_size <= KMALLOC_MAX_SIZE &&
1659 (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
Mikulas Patockaf7879b42018-04-19 08:33:00 -04001660 unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
1661 snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
1662 c->slab_cache = kmem_cache_create(slab_name, block_size, align,
Mikulas Patocka6b5e7182018-03-15 17:22:00 -04001663 SLAB_RECLAIM_ACCOUNT, NULL);
Mikulas Patocka21bb1322018-03-26 20:29:42 +02001664 if (!c->slab_cache) {
1665 r = -ENOMEM;
1666 goto bad;
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001667 }
1668 }
Mikulas Patocka359dbf12018-03-26 20:29:45 +02001669 if (aux_size)
1670 snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer-%u", aux_size);
1671 else
1672 snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer");
1673 c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
1674 0, SLAB_RECLAIM_ACCOUNT, NULL);
1675 if (!c->slab_buffer) {
1676 r = -ENOMEM;
1677 goto bad;
1678 }
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001679
1680 while (c->need_reserved_buffers) {
1681 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1682
1683 if (!b) {
1684 r = -ENOMEM;
Mike Snitzer0e696d32018-01-04 12:14:57 -05001685 goto bad;
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001686 }
1687 __free_buffer_wake(b);
1688 }
1689
Aliaksei Karaliou46898e92017-12-23 13:27:04 +03001690 c->shrinker.count_objects = dm_bufio_shrink_count;
1691 c->shrinker.scan_objects = dm_bufio_shrink_scan;
1692 c->shrinker.seeks = 1;
1693 c->shrinker.batch = 0;
1694 r = register_shrinker(&c->shrinker);
1695 if (r)
Mike Snitzer0e696d32018-01-04 12:14:57 -05001696 goto bad;
Aliaksei Karaliou46898e92017-12-23 13:27:04 +03001697
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001698 mutex_lock(&dm_bufio_clients_lock);
1699 dm_bufio_client_count++;
1700 list_add(&c->client_list, &dm_bufio_all_clients);
1701 __cache_size_refresh();
1702 mutex_unlock(&dm_bufio_clients_lock);
1703
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001704 return c;
1705
Mike Snitzer0e696d32018-01-04 12:14:57 -05001706bad:
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001707 while (!list_empty(&c->reserved_buffers)) {
1708 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1709 struct dm_buffer, lru_list);
1710 list_del(&b->lru_list);
1711 free_buffer(b);
1712 }
Mikulas Patocka21bb1322018-03-26 20:29:42 +02001713 kmem_cache_destroy(c->slab_cache);
Mikulas Patocka359dbf12018-03-26 20:29:45 +02001714 kmem_cache_destroy(c->slab_buffer);
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001715 dm_io_client_destroy(c->dm_io);
1716bad_dm_io:
Aliaksei Karalioubde14182017-12-23 13:27:03 +03001717 mutex_destroy(&c->lock);
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001718 kfree(c);
1719bad_client:
1720 return ERR_PTR(r);
1721}
1722EXPORT_SYMBOL_GPL(dm_bufio_client_create);
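
/*
 * Hypothetical sketch, not part of this file, of how a device-mapper target
 * might create and destroy a bufio client.  The 4096-byte block size, the
 * single reserved buffer, "example_target" and its fields are all made-up
 * values; no aux data or callbacks are used here.
 */
struct example_target {
	struct dm_dev *dev;
	struct dm_bufio_client *bufio;
};

static int example_target_init_bufio(struct example_target *t)
{
	t->bufio = dm_bufio_client_create(t->dev->bdev, 4096, 1, 0, NULL, NULL);
	if (IS_ERR(t->bufio))
		return PTR_ERR(t->bufio);	/* e.g. -ENOMEM */
	return 0;
}

static void example_target_exit_bufio(struct example_target *t)
{
	/* all buffers must have been released before this point */
	dm_bufio_client_destroy(t->bufio);
}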
1723
1724/*
1725 * Free the buffering interface.
 1726 * It is required that there are no references to any buffers.
1727 */
1728void dm_bufio_client_destroy(struct dm_bufio_client *c)
1729{
1730 unsigned i;
1731
1732 drop_buffers(c);
1733
1734 unregister_shrinker(&c->shrinker);
1735
1736 mutex_lock(&dm_bufio_clients_lock);
1737
1738 list_del(&c->client_list);
1739 dm_bufio_client_count--;
1740 __cache_size_refresh();
1741
1742 mutex_unlock(&dm_bufio_clients_lock);
1743
Joe Thornber4e420c42014-10-06 13:48:51 +01001744 BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001745 BUG_ON(c->need_reserved_buffers);
1746
1747 while (!list_empty(&c->reserved_buffers)) {
1748 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1749 struct dm_buffer, lru_list);
1750 list_del(&b->lru_list);
1751 free_buffer(b);
1752 }
1753
1754 for (i = 0; i < LIST_SIZE; i++)
1755 if (c->n_buffers[i])
1756 DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1757
1758 for (i = 0; i < LIST_SIZE; i++)
1759 BUG_ON(c->n_buffers[i]);
1760
Mikulas Patocka21bb1322018-03-26 20:29:42 +02001761 kmem_cache_destroy(c->slab_cache);
Mikulas Patocka359dbf12018-03-26 20:29:45 +02001762 kmem_cache_destroy(c->slab_buffer);
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001763 dm_io_client_destroy(c->dm_io);
Aliaksei Karalioubde14182017-12-23 13:27:03 +03001764 mutex_destroy(&c->lock);
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001765 kfree(c);
1766}
1767EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1768
Mikulas Patocka400a0be2017-01-04 20:23:52 +01001769void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
1770{
1771 c->start = start;
1772}
1773EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
1774
Joe Thornber33096a72014-10-09 11:10:25 +01001775static unsigned get_max_age_hz(void)
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001776{
Mark Rutland6aa7de02017-10-23 14:07:29 -07001777 unsigned max_age = READ_ONCE(dm_bufio_max_age);
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001778
Joe Thornber33096a72014-10-09 11:10:25 +01001779 if (max_age > UINT_MAX / HZ)
1780 max_age = UINT_MAX / HZ;
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001781
Joe Thornber33096a72014-10-09 11:10:25 +01001782 return max_age * HZ;
1783}
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001784
Joe Thornber33096a72014-10-09 11:10:25 +01001785static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1786{
Asaf Vertzf4953392015-01-06 15:44:15 +02001787 return time_after_eq(jiffies, b->last_accessed + age_hz);
Joe Thornber33096a72014-10-09 11:10:25 +01001788}
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001789
Joe Thornber33096a72014-10-09 11:10:25 +01001790static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1791{
1792 struct dm_buffer *b, *tmp;
Mikulas Patocka13840d32017-04-30 17:32:28 -04001793 unsigned long retain_target = get_retain_buffers(c);
1794 unsigned long count;
Mikulas Patocka390020a2017-04-30 17:34:53 -04001795 LIST_HEAD(write_list);
Joe Thornber33096a72014-10-09 11:10:25 +01001796
1797 dm_bufio_lock(c);
1798
Mikulas Patocka390020a2017-04-30 17:34:53 -04001799 __check_watermark(c, &write_list);
1800 if (unlikely(!list_empty(&write_list))) {
1801 dm_bufio_unlock(c);
1802 __flush_write_list(&write_list);
1803 dm_bufio_lock(c);
1804 }
1805
Joe Thornber33096a72014-10-09 11:10:25 +01001806 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1807 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1808 if (count <= retain_target)
1809 break;
1810
1811 if (!older_than(b, age_hz))
1812 break;
1813
1814 if (__try_evict_buffer(b, 0))
1815 count--;
1816
Peter Zijlstra7cd32672016-09-13 10:45:20 +02001817 cond_resched();
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001818 }
Joe Thornber33096a72014-10-09 11:10:25 +01001819
1820 dm_bufio_unlock(c);
1821}
1822
Mikulas Patocka6e913b22019-09-12 12:07:23 -04001823static void do_global_cleanup(struct work_struct *w)
1824{
1825 struct dm_bufio_client *locked_client = NULL;
1826 struct dm_bufio_client *current_client;
1827 struct dm_buffer *b;
1828 unsigned spinlock_hold_count;
1829 unsigned long threshold = dm_bufio_cache_size -
1830 dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
1831 unsigned long loops = global_num * 2;
1832
1833 mutex_lock(&dm_bufio_clients_lock);
1834
1835 while (1) {
1836 cond_resched();
1837
1838 spin_lock(&global_spinlock);
1839 if (unlikely(dm_bufio_current_allocated <= threshold))
1840 break;
1841
1842 spinlock_hold_count = 0;
1843get_next:
1844 if (!loops--)
1845 break;
1846 if (unlikely(list_empty(&global_queue)))
1847 break;
1848 b = list_entry(global_queue.prev, struct dm_buffer, global_list);
1849
1850 if (b->accessed) {
1851 b->accessed = 0;
1852 list_move(&b->global_list, &global_queue);
1853 if (likely(++spinlock_hold_count < 16))
1854 goto get_next;
1855 spin_unlock(&global_spinlock);
1856 continue;
1857 }
1858
1859 current_client = b->c;
1860 if (unlikely(current_client != locked_client)) {
1861 if (locked_client)
1862 dm_bufio_unlock(locked_client);
1863
1864 if (!dm_bufio_trylock(current_client)) {
1865 spin_unlock(&global_spinlock);
1866 dm_bufio_lock(current_client);
1867 locked_client = current_client;
1868 continue;
1869 }
1870
1871 locked_client = current_client;
1872 }
1873
1874 spin_unlock(&global_spinlock);
1875
1876 if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) {
1877 spin_lock(&global_spinlock);
1878 list_move(&b->global_list, &global_queue);
1879 spin_unlock(&global_spinlock);
1880 }
1881 }
1882
1883 spin_unlock(&global_spinlock);
1884
1885 if (locked_client)
1886 dm_bufio_unlock(locked_client);
1887
1888 mutex_unlock(&dm_bufio_clients_lock);
1889}
1890
Joe Thornber33096a72014-10-09 11:10:25 +01001891static void cleanup_old_buffers(void)
1892{
1893 unsigned long max_age_hz = get_max_age_hz();
1894 struct dm_bufio_client *c;
1895
1896 mutex_lock(&dm_bufio_clients_lock);
1897
Mikulas Patocka390020a2017-04-30 17:34:53 -04001898 __cache_size_refresh();
1899
Joe Thornber33096a72014-10-09 11:10:25 +01001900 list_for_each_entry(c, &dm_bufio_all_clients, client_list)
1901 __evict_old_buffers(c, max_age_hz);
1902
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001903 mutex_unlock(&dm_bufio_clients_lock);
1904}
1905
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001906static void work_fn(struct work_struct *w)
1907{
1908 cleanup_old_buffers();
1909
Mikulas Patocka6e913b22019-09-12 12:07:23 -04001910 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001911 DM_BUFIO_WORK_TIMER_SECS * HZ);
1912}
1913
1914/*----------------------------------------------------------------
1915 * Module setup
1916 *--------------------------------------------------------------*/
1917
1918/*
1919 * This is called only once for the whole dm_bufio module.
1920 * It initializes memory limit.
1921 */
1922static int __init dm_bufio_init(void)
1923{
1924 __u64 mem;
1925
Mikulas Patocka4cb57ab2013-12-05 17:33:29 -05001926 dm_bufio_allocated_kmem_cache = 0;
1927 dm_bufio_allocated_get_free_pages = 0;
1928 dm_bufio_allocated_vmalloc = 0;
1929 dm_bufio_current_allocated = 0;
1930
Arun KSca79b0c2018-12-28 00:34:29 -08001931 mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
Eric Biggers74d41082017-11-15 16:38:09 -08001932 DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001933
1934 if (mem > ULONG_MAX)
1935 mem = ULONG_MAX;
1936
1937#ifdef CONFIG_MMU
Eric Biggers74d41082017-11-15 16:38:09 -08001938 if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
1939 mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001940#endif
1941
1942 dm_bufio_default_cache_size = mem;
1943
1944 mutex_lock(&dm_bufio_clients_lock);
1945 __cache_size_refresh();
1946 mutex_unlock(&dm_bufio_clients_lock);
1947
Bhaktipriya Shridharedd1ea22016-08-30 22:19:11 +05301948 dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001949 if (!dm_bufio_wq)
1950 return -ENOMEM;
1951
Mikulas Patocka6e913b22019-09-12 12:07:23 -04001952 INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
1953 INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
1954 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001955 DM_BUFIO_WORK_TIMER_SECS * HZ);
1956
1957 return 0;
1958}
1959
1960/*
1961 * This is called once when unloading the dm_bufio module.
1962 */
1963static void __exit dm_bufio_exit(void)
1964{
1965 int bug = 0;
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001966
Mikulas Patocka6e913b22019-09-12 12:07:23 -04001967 cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
1968 flush_workqueue(dm_bufio_wq);
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001969 destroy_workqueue(dm_bufio_wq);
1970
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001971 if (dm_bufio_client_count) {
1972 DMCRIT("%s: dm_bufio_client_count leaked: %d",
1973 __func__, dm_bufio_client_count);
1974 bug = 1;
1975 }
1976
1977 if (dm_bufio_current_allocated) {
1978 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
1979 __func__, dm_bufio_current_allocated);
1980 bug = 1;
1981 }
1982
1983 if (dm_bufio_allocated_get_free_pages) {
1984 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
1985 __func__, dm_bufio_allocated_get_free_pages);
1986 bug = 1;
1987 }
1988
1989 if (dm_bufio_allocated_vmalloc) {
1990 DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
1991 __func__, dm_bufio_allocated_vmalloc);
1992 bug = 1;
1993 }
1994
Anup Limbu86a49e22015-11-25 15:46:05 +05301995 BUG_ON(bug);
Mikulas Patocka95d402f2011-10-31 20:19:09 +00001996}
1997
1998module_init(dm_bufio_init)
1999module_exit(dm_bufio_exit)
2000
2001module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
2002MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2003
2004module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
2005MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
2006
Mikulas Patocka13840d32017-04-30 17:32:28 -04002007module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
Joe Thornber33096a72014-10-09 11:10:25 +01002008MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
2009
Mikulas Patocka95d402f2011-10-31 20:19:09 +00002010module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
2011MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
2012
2013module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
2014MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
2015
2016module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
2017MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
2018
2019module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
2020MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
2021
2022module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
2023MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
2024
2025MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2026MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
2027MODULE_LICENSE("GPL");