/*
 * Copyright (C) 2006-2009 Red Hat, Inc.
 *
 * This file is released under the LGPL.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/dm-dirty-log.h>
#include <linux/device-mapper.h>
#include <linux/dm-log-userspace.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#include "dm-log-userspace-transfer.h"

#define DM_LOG_USERSPACE_VSN "1.3.0"

#define FLUSH_ENTRY_POOL_SIZE 16

struct dm_dirty_log_flush_entry {
	int type;
	region_t region;
	struct list_head list;
};

/*
 * This limit on the number of mark and clear requests is, to a degree,
 * arbitrary.  However, there is some basis for the choice in the limits
 * imposed on the size of the data payload by dm-log-userspace-transfer.c:
 * dm_consult_userspace().
 */
#define MAX_FLUSH_GROUP_COUNT 32

struct log_c {
	struct dm_target *ti;
	struct dm_dev *log_dev;

	char *usr_argv_str;
	uint32_t usr_argc;

	uint32_t region_size;
	region_t region_count;
	uint64_t luid;
	char uuid[DM_UUID_LEN];

	/*
	 * Mark and clear requests are held until a flush is issued
	 * so that we can group, and thereby limit, the amount of
	 * network traffic between kernel and userspace.  The 'flush_lock'
	 * is used to protect these lists.
	 */
	spinlock_t flush_lock;
	struct list_head mark_list;
	struct list_head clear_list;

	/*
	 * in_sync_hint gets set when doing is_remote_recovering.  It
	 * represents the first region that needs recovery.  IOW, the
	 * first zero bit of sync_bits.  This can be useful to limit
	 * traffic for calls like is_remote_recovering and get_resync_work,
	 * but take care in its use for anything else.
	 */
	uint64_t in_sync_hint;

	/*
	 * Workqueue for flush of clear region requests.
	 */
	struct workqueue_struct *dmlog_wq;
	struct delayed_work flush_log_work;
	atomic_t sched_flush;

	/*
	 * Combine userspace flush and mark requests for efficiency.
	 */
	uint32_t integrated_flush;

	mempool_t *flush_entry_pool;
};

static struct kmem_cache *_flush_entry_cache;

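/*
 * Pass a request to the userspace log server.  If the server has gone
 * away (-ESRCH), keep retrying: reissue DM_ULOG_CTR with the saved
 * constructor string until a new server answers, resume it, and then
 * retry the original request.
 */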
static int userspace_do_request(struct log_c *lc, const char *uuid,
				int request_type, char *data, size_t data_size,
				char *rdata, size_t *rdata_size)
{
	int r;

	/*
	 * If the server isn't there, -ESRCH is returned,
	 * and we must keep trying until the server is
	 * restored.
	 */
retry:
	r = dm_consult_userspace(uuid, lc->luid, request_type, data,
				 data_size, rdata, rdata_size);

	if (r != -ESRCH)
		return r;

	DMERR("Userspace log server not found.");
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(2*HZ);
		DMWARN("Attempting to contact userspace log server...");
		r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
					 lc->usr_argv_str,
					 strlen(lc->usr_argv_str) + 1,
					 NULL, NULL);
		if (!r)
			break;
	}
	DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
	r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
				 0, NULL, NULL);
	if (!r)
		goto retry;

	DMERR("Error trying to resume userspace log: %d", r);

	return -ESRCH;
}

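/*
 * Build the constructor string handed to the userspace server:
 *	"<ti->len> <argv[0]> <argv[1]> ..."
 * e.g. (hypothetical values) "2097152 clustered-disk 3 253:4 1024 nosync".
 * Returns the length of the string on success, or -ENOMEM.
 */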
static int build_constructor_string(struct dm_target *ti,
				    unsigned argc, char **argv,
				    char **ctr_str)
{
	int i, str_size;
	char *str = NULL;

	*ctr_str = NULL;

	/*
	 * Determine overall size of the string.
	 */
	for (i = 0, str_size = 0; i < argc; i++)
		str_size += strlen(argv[i]) + 1; /* +1 for space between args */

	str_size += 20; /* Max number of chars in a printed u64 number */

	str = kzalloc(str_size, GFP_KERNEL);
	if (!str) {
		DMWARN("Unable to allocate memory for constructor string");
		return -ENOMEM;
	}

	str_size = sprintf(str, "%llu", (unsigned long long)ti->len);
	for (i = 0; i < argc; i++)
		str_size += sprintf(str + str_size, " %s", argv[i]);

	*ctr_str = str;
	return str_size;
}

static void do_flush(struct work_struct *work)
{
	int r;
	struct log_c *lc = container_of(work, struct log_c, flush_log_work.work);

	atomic_set(&lc->sched_flush, 0);

	r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, NULL, 0, NULL, NULL);

	if (r)
		dm_table_event(lc->ti->table);
}

/*
 * userspace_ctr
 *
 * argv contains:
 *	<UUID> [integrated_flush] <other args>
 * Where 'other args' are the userspace implementation-specific log
 * arguments.
 *
 * Example:
 *	<UUID> [integrated_flush] clustered-disk <arg count> <log dev>
 *	<region_size> [[no]sync]
 *
 * This module strips off the <UUID> and uses it for identification
 * purposes when communicating with userspace about a log.
 *
 * If integrated_flush is defined, the kernel combines flush
 * and mark requests.
 *
 * The rest of the line, beginning with 'clustered-disk', is passed
 * to the userspace ctr function.
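 *
 * A concrete (hypothetical) instance of the argument string, following
 * the format above, might be:
 *	<UUID> integrated_flush clustered-disk 3 253:4 1024 nosync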
 */
static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
			 unsigned argc, char **argv)
{
	int r = 0;
	int str_size;
	char *ctr_str = NULL;
	struct log_c *lc = NULL;
	uint64_t rdata;
	size_t rdata_size = sizeof(rdata);
	char *devices_rdata = NULL;
	size_t devices_rdata_size = DM_NAME_LEN;

	if (argc < 3) {
		DMWARN("Too few arguments to userspace dirty log");
		return -EINVAL;
	}

	lc = kzalloc(sizeof(*lc), GFP_KERNEL);
	if (!lc) {
		DMWARN("Unable to allocate userspace log context.");
		return -ENOMEM;
	}

	/* The ptr value is sufficient for local unique id */
	lc->luid = (unsigned long)lc;

	lc->ti = ti;

	if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
		DMWARN("UUID argument too long.");
		kfree(lc);
		return -EINVAL;
	}

	lc->usr_argc = argc;

	strncpy(lc->uuid, argv[0], DM_UUID_LEN);
	argc--;
	argv++;
	spin_lock_init(&lc->flush_lock);
	INIT_LIST_HEAD(&lc->mark_list);
	INIT_LIST_HEAD(&lc->clear_list);

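	/*
	 * After the UUID, an optional "integrated_flush" token selects
	 * combined flush/mark requests; whatever remains is passed
	 * through to the userspace ctr.
	 */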
	if (!strcasecmp(argv[0], "integrated_flush")) {
		lc->integrated_flush = 1;
		argc--;
		argv++;
	}

	str_size = build_constructor_string(ti, argc, argv, &ctr_str);
	if (str_size < 0) {
		kfree(lc);
		return str_size;
	}

	devices_rdata = kzalloc(devices_rdata_size, GFP_KERNEL);
	if (!devices_rdata) {
		DMERR("Failed to allocate memory for device information");
		r = -ENOMEM;
		goto out;
	}

	lc->flush_entry_pool = mempool_create_slab_pool(FLUSH_ENTRY_POOL_SIZE,
							_flush_entry_cache);
	if (!lc->flush_entry_pool) {
		DMERR("Failed to create flush_entry_pool");
		r = -ENOMEM;
		goto out;
	}

	/*
	 * Send table string and get back any opened device.
	 */
	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
				 ctr_str, str_size,
				 devices_rdata, &devices_rdata_size);

	if (r < 0) {
		if (r == -ESRCH)
			DMERR("Userspace log server not found");
		else
			DMERR("Userspace log server failed to create log");
		goto out;
	}

	/* Since the region size does not change, get it now */
	rdata_size = sizeof(rdata);
	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
				 NULL, 0, (char *)&rdata, &rdata_size);

	if (r) {
		DMERR("Failed to get region size of dirty log");
		goto out;
	}

	lc->region_size = (uint32_t)rdata;
	lc->region_count = dm_sector_div_up(ti->len, lc->region_size);

	if (devices_rdata_size) {
		if (devices_rdata[devices_rdata_size - 1] != '\0') {
			DMERR("DM_ULOG_CTR device return string not properly terminated");
			r = -EINVAL;
			goto out;
		}
		r = dm_get_device(ti, devices_rdata,
				  dm_table_get_mode(ti->table), &lc->log_dev);
		if (r)
			DMERR("Failed to register %s with device-mapper",
			      devices_rdata);
	}

	if (lc->integrated_flush) {
		lc->dmlog_wq = alloc_workqueue("dmlogd", WQ_MEM_RECLAIM, 0);
		if (!lc->dmlog_wq) {
			DMERR("couldn't start dmlogd");
			r = -ENOMEM;
			goto out;
		}

		INIT_DELAYED_WORK(&lc->flush_log_work, do_flush);
		atomic_set(&lc->sched_flush, 0);
	}

out:
	kfree(devices_rdata);
	if (r) {
		mempool_destroy(lc->flush_entry_pool);
		kfree(lc);
		kfree(ctr_str);
	} else {
		lc->usr_argv_str = ctr_str;
		log->context = lc;
	}

	return r;
}

static void userspace_dtr(struct dm_dirty_log *log)
{
	struct log_c *lc = log->context;

	if (lc->integrated_flush) {
		/* flush workqueue */
		if (atomic_read(&lc->sched_flush))
			flush_delayed_work(&lc->flush_log_work);

		destroy_workqueue(lc->dmlog_wq);
	}

	(void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
				    NULL, 0, NULL, NULL);

	if (lc->log_dev)
		dm_put_device(lc->ti, lc->log_dev);

	mempool_destroy(lc->flush_entry_pool);

	kfree(lc->usr_argv_str);
	kfree(lc);

	return;
}

static int userspace_presuspend(struct dm_dirty_log *log)
{
	int r;
	struct log_c *lc = log->context;

	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
				 NULL, 0, NULL, NULL);

	return r;
}

static int userspace_postsuspend(struct dm_dirty_log *log)
{
	int r;
	struct log_c *lc = log->context;

	/*
	 * Run planned flush earlier.
	 */
	if (lc->integrated_flush && atomic_read(&lc->sched_flush))
		flush_delayed_work(&lc->flush_log_work);

	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
				 NULL, 0, NULL, NULL);

	return r;
}

static int userspace_resume(struct dm_dirty_log *log)
{
	int r;
	struct log_c *lc = log->context;

	lc->in_sync_hint = 0;
	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
				 NULL, 0, NULL, NULL);

	return r;
}

static uint32_t userspace_get_region_size(struct dm_dirty_log *log)
{
	struct log_c *lc = log->context;

	return lc->region_size;
}

/*
 * userspace_is_clean
 *
 * Check whether a region is clean.  If there is any sort of
 * failure when consulting the server, we return not clean.
 *
 * Returns: 1 if clean, 0 otherwise
 */
static int userspace_is_clean(struct dm_dirty_log *log, region_t region)
{
	int r;
	uint64_t region64 = (uint64_t)region;
	int64_t is_clean;
	size_t rdata_size;
	struct log_c *lc = log->context;

	rdata_size = sizeof(is_clean);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_CLEAN,
				 (char *)&region64, sizeof(region64),
				 (char *)&is_clean, &rdata_size);

	return (r) ? 0 : (int)is_clean;
}

/*
 * userspace_in_sync
 *
 * Check if the region is in-sync.  If there is any sort
 * of failure when consulting the server, we assume that
 * the region is not in sync.
 *
 * If 'can_block' is set, return immediately
 *
 * Returns: 1 if in-sync, 0 if not-in-sync, -EWOULDBLOCK
 */
static int userspace_in_sync(struct dm_dirty_log *log, region_t region,
			     int can_block)
{
	int r;
	uint64_t region64 = region;
	int64_t in_sync;
	size_t rdata_size;
	struct log_c *lc = log->context;

	/*
	 * We can never respond directly - even if in_sync_hint is
	 * set.  This is because another machine could see a device
	 * failure and mark the region out-of-sync.  If we don't go
	 * to userspace to ask, we might think the region is in-sync
	 * and allow a read to pick up data that is stale.  (This is
	 * very unlikely if a device actually fails; but it is very
	 * likely if a connection to one device from one machine fails.)
	 *
	 * There still might be a problem if the mirror caches the region
	 * state as in-sync... but then this call would not be made.  So,
	 * that is a mirror problem.
	 */
	if (!can_block)
		return -EWOULDBLOCK;

	rdata_size = sizeof(in_sync);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IN_SYNC,
				 (char *)&region64, sizeof(region64),
				 (char *)&in_sync, &rdata_size);
	return (r) ? 0 : (int)in_sync;
}

static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
{
	int r = 0;
	struct dm_dirty_log_flush_entry *fe;

	list_for_each_entry(fe, flush_list, list) {
		r = userspace_do_request(lc, lc->uuid, fe->type,
					 (char *)&fe->region,
					 sizeof(fe->region),
					 NULL, NULL);
		if (r)
			break;
	}

	return r;
}

static int flush_by_group(struct log_c *lc, struct list_head *flush_list,
			  int flush_with_payload)
{
	int r = 0;
	int count;
	uint32_t type = 0;
	struct dm_dirty_log_flush_entry *fe, *tmp_fe;
	LIST_HEAD(tmp_list);
	uint64_t group[MAX_FLUSH_GROUP_COUNT];

	/*
	 * Group process the requests
	 */
	while (!list_empty(flush_list)) {
		count = 0;

		list_for_each_entry_safe(fe, tmp_fe, flush_list, list) {
			group[count] = fe->region;
			count++;

			list_move(&fe->list, &tmp_list);

			type = fe->type;
			if (count >= MAX_FLUSH_GROUP_COUNT)
				break;
		}

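		/*
		 * Each batch holds up to MAX_FLUSH_GROUP_COUNT region
		 * numbers.  With integrated flush the array itself is sent
		 * as the DM_ULOG_FLUSH payload; otherwise it is sent as a
		 * grouped mark/clear request of the entries' type.
		 */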
		if (flush_with_payload) {
			r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
						 (char *)(group),
						 count * sizeof(uint64_t),
						 NULL, NULL);
			/*
			 * Integrated flush failed.
			 */
			if (r)
				break;
		} else {
			r = userspace_do_request(lc, lc->uuid, type,
						 (char *)(group),
						 count * sizeof(uint64_t),
						 NULL, NULL);
			if (r) {
				/*
				 * Group send failed.  Attempt one-by-one.
				 */
				list_splice_init(&tmp_list, flush_list);
				r = flush_one_by_one(lc, flush_list);
				break;
			}
		}
	}

	/*
	 * Must collect the flush entries that were successfully processed
	 * as a group so that they will be freed by the caller.
	 */
	list_splice_init(&tmp_list, flush_list);

	return r;
}

/*
 * userspace_flush
 *
 * This function is ok to block.
 * The flush happens in two stages.  First, it sends all
 * clear/mark requests that are on the list.  Then it
 * tells the server to commit them.  This gives the
 * server a chance to optimise the commit, instead of
 * doing it for every request.
 *
 * Additionally, we could implement another thread that
 * sends the requests up to the server - reducing the
 * load on flush.  Then the flush would have less in
 * the list and be responsible for the finishing commit.
 *
 * Returns: 0 on success, < 0 on failure
 */
static int userspace_flush(struct dm_dirty_log *log)
{
	int r = 0;
	unsigned long flags;
	struct log_c *lc = log->context;
	LIST_HEAD(mark_list);
	LIST_HEAD(clear_list);
	int mark_list_is_empty;
	int clear_list_is_empty;
	struct dm_dirty_log_flush_entry *fe, *tmp_fe;
	mempool_t *flush_entry_pool = lc->flush_entry_pool;

	spin_lock_irqsave(&lc->flush_lock, flags);
	list_splice_init(&lc->mark_list, &mark_list);
	list_splice_init(&lc->clear_list, &clear_list);
	spin_unlock_irqrestore(&lc->flush_lock, flags);

	mark_list_is_empty = list_empty(&mark_list);
	clear_list_is_empty = list_empty(&clear_list);

	if (mark_list_is_empty && clear_list_is_empty)
		return 0;

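	/*
	 * Clear requests are always sent as their own batches; only mark
	 * requests can be folded into an integrated flush below.
	 */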
	r = flush_by_group(lc, &clear_list, 0);
	if (r)
		goto out;

	if (!lc->integrated_flush) {
		r = flush_by_group(lc, &mark_list, 0);
		if (r)
			goto out;
		r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
					 NULL, 0, NULL, NULL);
		goto out;
	}

	/*
	 * Send integrated flush request with mark_list as payload.
	 */
	r = flush_by_group(lc, &mark_list, 1);
	if (r)
		goto out;

	if (mark_list_is_empty && !atomic_read(&lc->sched_flush)) {
		/*
		 * When there are only clear region requests,
		 * we schedule a flush in the future.
		 */
		queue_delayed_work(lc->dmlog_wq, &lc->flush_log_work, 3 * HZ);
		atomic_set(&lc->sched_flush, 1);
	} else {
		/*
		 * Cancel pending flush because we
		 * have already flushed in mark_region.
		 */
		cancel_delayed_work(&lc->flush_log_work);
		atomic_set(&lc->sched_flush, 0);
	}

out:
	/*
	 * We can safely remove these entries, even after failure.
	 * Calling code will receive an error and will know that
	 * the log facility has failed.
	 */
	list_for_each_entry_safe(fe, tmp_fe, &mark_list, list) {
		list_del(&fe->list);
		mempool_free(fe, flush_entry_pool);
	}
	list_for_each_entry_safe(fe, tmp_fe, &clear_list, list) {
		list_del(&fe->list);
		mempool_free(fe, flush_entry_pool);
	}

	if (r)
		dm_table_event(lc->ti->table);

	return r;
}

/*
 * userspace_mark_region
 *
 * This function should avoid blocking unless absolutely required.
 * (Memory allocation is valid for blocking.)
 */
static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
{
	unsigned long flags;
	struct log_c *lc = log->context;
	struct dm_dirty_log_flush_entry *fe;

	/* Wait for an allocation, but _never_ fail */
	fe = mempool_alloc(lc->flush_entry_pool, GFP_NOIO);
	BUG_ON(!fe);

	spin_lock_irqsave(&lc->flush_lock, flags);
	fe->type = DM_ULOG_MARK_REGION;
	fe->region = region;
	list_add(&fe->list, &lc->mark_list);
	spin_unlock_irqrestore(&lc->flush_lock, flags);

	return;
}

/*
 * userspace_clear_region
 *
 * This function must not block.
 * So, the alloc can't block.  In the worst case, it is ok to
 * fail.  It would simply mean we can't clear the region.
 * Does nothing to current sync context, but does mean
 * the region will be re-sync'ed on a reload of the mirror
 * even though it is in-sync.
 */
static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
{
	unsigned long flags;
	struct log_c *lc = log->context;
	struct dm_dirty_log_flush_entry *fe;

	/*
	 * If we fail to allocate, we skip the clearing of
	 * the region.  This doesn't hurt us in any way, except
	 * to cause the region to be resync'ed when the
	 * device is activated next time.
	 */
	fe = mempool_alloc(lc->flush_entry_pool, GFP_ATOMIC);
	if (!fe) {
		DMERR("Failed to allocate memory to clear region.");
		return;
	}

	spin_lock_irqsave(&lc->flush_lock, flags);
	fe->type = DM_ULOG_CLEAR_REGION;
	fe->region = region;
	list_add(&fe->list, &lc->clear_list);
	spin_unlock_irqrestore(&lc->flush_lock, flags);

	return;
}

/*
 * userspace_get_resync_work
 *
 * Get a region that needs recovery.  It is valid to return
 * an error for this function.
 *
 * Returns: 1 if region filled, 0 if no work, <0 on error
 */
static int userspace_get_resync_work(struct dm_dirty_log *log, region_t *region)
{
	int r;
	size_t rdata_size;
	struct log_c *lc = log->context;
	struct {
		int64_t i; /* 64-bit for mixed-arch compatibility */
		region_t r;
	} pkg;

	if (lc->in_sync_hint >= lc->region_count)
		return 0;

	rdata_size = sizeof(pkg);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK,
				 NULL, 0, (char *)&pkg, &rdata_size);

	*region = pkg.r;
	return (r) ? r : (int)pkg.i;
}

/*
 * userspace_set_region_sync
 *
 * Set the sync status of a given region.  This function
 * must not fail.
 */
static void userspace_set_region_sync(struct dm_dirty_log *log,
				      region_t region, int in_sync)
{
	struct log_c *lc = log->context;
	struct {
		region_t r;
		int64_t i;
	} pkg;

	pkg.r = region;
	pkg.i = (int64_t)in_sync;

	(void) userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
				    (char *)&pkg, sizeof(pkg), NULL, NULL);

	/*
	 * It would be nice to be able to report failures.
	 * However, it is easy enough to detect and resolve.
	 */
	return;
}

/*
 * userspace_get_sync_count
 *
 * If there is any sort of failure when consulting the server,
 * we assume that the sync count is zero.
 *
 * Returns: sync count on success, 0 on failure
 */
static region_t userspace_get_sync_count(struct dm_dirty_log *log)
{
	int r;
	size_t rdata_size;
	uint64_t sync_count;
	struct log_c *lc = log->context;

	rdata_size = sizeof(sync_count);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT,
				 NULL, 0, (char *)&sync_count, &rdata_size);

	if (r)
		return 0;

	if (sync_count >= lc->region_count)
		lc->in_sync_hint = lc->region_count;

	return (region_t)sync_count;
}

/*
 * userspace_status
 *
 * Returns: amount of space consumed
 */
static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
			    char *result, unsigned maxlen)
{
	int r = 0;
	char *table_args;
	size_t sz = (size_t)maxlen;
	struct log_c *lc = log->context;

	switch (status_type) {
	case STATUSTYPE_INFO:
		r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO,
					 NULL, 0, result, &sz);

		if (r) {
			sz = 0;
			DMEMIT("%s 1 COM_FAILURE", log->type->name);
		}
		break;
	case STATUSTYPE_TABLE:
		sz = 0;
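		/*
		 * Reconstruct the constructor line: usr_argv_str begins
		 * with the target length, so skip past the first token
		 * and re-emit the uuid (and integrated_flush, if set)
		 * explicitly.
		 */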
		table_args = strchr(lc->usr_argv_str, ' ');
		BUG_ON(!table_args); /* There will always be a ' ' */
		table_args++;

		DMEMIT("%s %u %s ", log->type->name, lc->usr_argc, lc->uuid);
		if (lc->integrated_flush)
			DMEMIT("integrated_flush ");
		DMEMIT("%s ", table_args);
		break;
	}
	return (r) ? 0 : (int)sz;
}

/*
 * userspace_is_remote_recovering
 *
 * Returns: 1 if region recovering, 0 otherwise
 */
static int userspace_is_remote_recovering(struct dm_dirty_log *log,
					  region_t region)
{
	int r;
	uint64_t region64 = region;
	struct log_c *lc = log->context;
	static unsigned long limit;
	struct {
		int64_t is_recovering;
		uint64_t in_sync_hint;
	} pkg;
	size_t rdata_size = sizeof(pkg);

	/*
	 * Once the mirror has been reported to be in-sync,
	 * it will never again ask for recovery work.  So,
	 * we can safely say there is not a remote machine
	 * recovering if the device is in-sync.  (in_sync_hint
	 * must be reset at resume time.)
	 */
	if (region < lc->in_sync_hint)
		return 0;
	else if (time_after(limit, jiffies))
		return 1;

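	/*
	 * Limit trips to userspace to roughly four per second.  Note
	 * that 'limit' is static, so the throttle is shared by every
	 * userspace log instance in the system.
	 */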
	limit = jiffies + (HZ / 4);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING,
				 (char *)&region64, sizeof(region64),
				 (char *)&pkg, &rdata_size);
	if (r)
		return 1;

	lc->in_sync_hint = pkg.in_sync_hint;

	return (int)pkg.is_recovering;
}

static struct dm_dirty_log_type _userspace_type = {
	.name = "userspace",
	.module = THIS_MODULE,
	.ctr = userspace_ctr,
	.dtr = userspace_dtr,
	.presuspend = userspace_presuspend,
	.postsuspend = userspace_postsuspend,
	.resume = userspace_resume,
	.get_region_size = userspace_get_region_size,
	.is_clean = userspace_is_clean,
	.in_sync = userspace_in_sync,
	.flush = userspace_flush,
	.mark_region = userspace_mark_region,
	.clear_region = userspace_clear_region,
	.get_resync_work = userspace_get_resync_work,
	.set_region_sync = userspace_set_region_sync,
	.get_sync_count = userspace_get_sync_count,
	.status = userspace_status,
	.is_remote_recovering = userspace_is_remote_recovering,
};

static int __init userspace_dirty_log_init(void)
{
	int r = 0;

	_flush_entry_cache = KMEM_CACHE(dm_dirty_log_flush_entry, 0);
	if (!_flush_entry_cache) {
		DMWARN("Unable to create flush_entry_cache: No memory.");
		return -ENOMEM;
	}

	r = dm_ulog_tfr_init();
	if (r) {
		DMWARN("Unable to initialize userspace log communications");
		kmem_cache_destroy(_flush_entry_cache);
		return r;
	}

	r = dm_dirty_log_type_register(&_userspace_type);
	if (r) {
		DMWARN("Couldn't register userspace dirty log type");
		dm_ulog_tfr_exit();
		kmem_cache_destroy(_flush_entry_cache);
		return r;
	}

	DMINFO("version " DM_LOG_USERSPACE_VSN " loaded");
	return 0;
}

static void __exit userspace_dirty_log_exit(void)
{
	dm_dirty_log_type_unregister(&_userspace_type);
	dm_ulog_tfr_exit();
	kmem_cache_destroy(_flush_entry_cache);

	DMINFO("version " DM_LOG_USERSPACE_VSN " unloaded");
	return;
}

module_init(userspace_dirty_log_init);
module_exit(userspace_dirty_log_exit);

MODULE_DESCRIPTION(DM_NAME " userspace dirty log link");
MODULE_AUTHOR("Jonathan Brassow <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");