// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

/* Default is -1; we skip past it for struct cached_dev's cache mode */
static const char * const bch_cache_modes[] = {
	"default",
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};
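
/*
 * The cache mode of a backing device is selected by writing one of the
 * strings above (other than "default") to its cache_mode attribute, e.g.
 * (the device name here is only an example):
 *	echo writeback > /sys/block/bcache0/bcache/cache_mode
 */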

/* Default is -1; we skip past it for stop_when_cache_set_failed */
static const char * const bch_stop_on_failure_modes[] = {
	"default",
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};

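/*
 * write_attribute(), read_attribute() and rw_attribute() (see sysfs.h)
 * declare bare "struct attribute sysfs_<name>" objects; the show/store
 * handlers below dispatch by comparing attr against their addresses.
 */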
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);

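/*
 * show() handler for a cached (backing) device's sysfs directory; SHOW() and
 * SHOW_LOCKED() (see sysfs.h) expand this into __bch_cached_dev_show() plus a
 * wrapper that takes bch_register_lock.  Each sysfs_ and var_ helper below
 * returns the attribute's value when attr matches it.
 */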
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes + 1,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes + 1,
					       dc->stop_when_cache_set_failed);

	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,	dc->writeback_rate.rate << 9);
	sysfs_hprint(io_errors,		atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit,	"%i", dc->error_limit);
	sysfs_printf(io_disable,	"%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);

	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		bch_hprint(rate,	dc->writeback_rate.rate << 9);
		bch_hprint(dirty,	bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target,	dc->writeback_rate_target << 9);
		bch_hprint(proportional, dc->writeback_rate_proportional << 9);
		bch_hprint(integral,	dc->writeback_rate_integral_scaled << 9);
		bch_hprint(change,	dc->writeback_rate_change << 9);

		next_io = div64_s64(dc->writeback_rate.next - local_clock(),
				    NSEC_PER_MSEC);

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size,	((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

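/*
 * store() handler for a backing device; STORE(__cached_dev) expands into
 * __cached_dev_store() (see sysfs.h).  The sysfs_strtoul()/d_strtoul() style
 * macros parse buf and update the matching field only when attr matches,
 * otherwise they fall through.
 */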
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	sysfs_strtoul_clamp(writeback_rate,
			    dc->writeback_rate.rate, 1, INT_MAX);

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	d_strtoul(writeback_rate_i_term_inverse);
	d_strtoul_nonzero(writeback_rate_p_term_inverse);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		v = bch_read_string_list(buf, bch_cache_modes + 1);

		if (v < 0)
			return v;

		if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = bch_read_string_list(buf, bch_stop_on_failure_modes + 1);

		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(
			&disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		uint8_t set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}

		pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

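/*
 * Locked wrapper around __cached_dev_store(): runs under bch_register_lock
 * and kicks the writeback thread / writeback rate update worker when the
 * corresponding attributes are written.
 */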
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
			schedule_delayed_work(&dc->writeback_rate_update,
					      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_debug,
	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);

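/* Flash-only volumes: size and label live in the cache set's uuid entry */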
SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;
		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

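/*
 * bset_tree_stats support: a btree map operation that visits every node and
 * accumulates per-node bset statistics into struct bset_stats_op.
 */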
struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:	%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:		%zu\n"
			"failed:		%zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}

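/*
 * Percentage of the root btree node's space occupied by good keys.  The root
 * may be replaced while we sleep on its lock, so retry until the node we
 * locked is still the current root.
 */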
static unsigned bch_root_usage(struct cache_set *c)
{
	unsigned bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

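/* Length of the longest collision chain in the btree node hash table */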
static unsigned bch_cache_max_chain(struct cache_set *c)
{
	unsigned ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

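/* show() for the cache set kobject, surfaced under /sys/fs/bcache/<set-uuid>/ */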
SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		bch_root_usage(c));

	sysfs_hprint(btree_cache_size,		bch_cache_size(c));
	sysfs_print(btree_cache_max_chain,	bch_cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);

	sysfs_print(btree_used_percent,	bch_btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(retry_flush_write,
		    atomic_long_read(&c->retry_flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
	sysfs_printf(io_disable,		"%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;
		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done, 0);
		atomic_long_set(&c->writeback_keys_failed, 0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc) {
		/*
		 * The garbage collection thread only runs while
		 * sectors_to_gc < 0.  When a user writes to the trigger_gc
		 * sysfs entry, they usually want to force a garbage
		 * collection pass, so set c->sectors_to_gc to -1 to give
		 * gc_should_run() a chance to let the gc thread run.  It is
		 * only a chance: before gc_should_run() is evaluated,
		 * c->sectors_to_gc may be set back to a positive value, so
		 * writing trigger_gc does not guarantee that a gc pass
		 * actually happens.
		 */
		atomic_set(&c->sectors_to_gc, -1);
		wake_up_gc(c);
	}

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;
		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_errors) {
		ssize_t v = bch_read_string_list(buf, error_actions);

		if (v < 0)
			return v;

		c->on_error = v;
	}

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared");
		}
	}

	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
	sysfs_strtoul(verify,			c->verify);
	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_flush_write,
	&sysfs_retry_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_io_disable,
	NULL
};
KTYPE(bch_cache_set_internal);

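/* sort() comparator for priority_stats: descending order of bucket priority */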
static int __bch_cache_cmp(const void *l, const void *r)
{
	return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:		%zu%%\n"
				"Clean:			%zu%%\n"
				"Dirty:			%zu%%\n"
				"Metadata:	%zu%%\n"
				"Average:	%llu\n"
				"Sectors per Q:	%zu\n"
				"Quantiles:	[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

		if (v < 0)
			return v;

		if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);