/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"

#include <linux/blkdev.h>
#include <linux/sort.h>

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);
sysfs_time_stats_attribute(try_harder, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(sequential_merge);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_d_smooth);
read_attribute(writeback_rate_debug);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(freelist_percent);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);

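/*
 * show() for a cached (backing) device's sysfs directory,
 * /sys/block/<bdev>/bcache: formats the writeback tunables, cache mode,
 * label and runtime state into @buf, one attribute per call.
 */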
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes + 1,
					       BDEV_CACHE_MODE(&dc->sb));

	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_print(writeback_rate,	dc->writeback_rate.rate);

	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_d_term);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_d_smooth);

	if (attr == &sysfs_writeback_rate_debug) {
		char dirty[20];
		char derivative[20];
		char target[20];
		bch_hprint(dirty,
			   atomic_long_read(&dc->disk.sectors_dirty) << 9);
		bch_hprint(derivative,	dc->writeback_rate_derivative << 9);
		bch_hprint(target,	dc->writeback_rate_target << 9);

		return sprintf(buf,
			       "rate:\t\t%u\n"
			       "change:\t\t%i\n"
			       "dirty:\t\t%s\n"
			       "derivative:\t%s\n"
			       "target:\t\t%s\n",
			       dc->writeback_rate.rate,
			       dc->writeback_rate_change,
			       dirty, derivative, target);
	}

	sysfs_hprint(dirty_data,
		     atomic_long_read(&dc->disk.sectors_dirty) << 9);

	var_printf(sequential_merge,	"%i");
	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

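/*
 * store() for a backing device's attributes; runs under bch_register_lock
 * via bch_cached_dev_store() below. Parses @buf, updates the matching
 * field, and rewrites the backing-device superblock when on-disk state
 * (cache mode, label, cache set UUID) changes.
 */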
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	unsigned v = size;
	struct cache_set *c;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);
	sysfs_strtoul_clamp(writeback_rate,
			    dc->writeback_rate.rate, 1, 1000000);
	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	d_strtoul(writeback_rate_update_seconds);
	d_strtoul(writeback_rate_d_term);
	d_strtoul(writeback_rate_p_term_inverse);
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse, 1, INT_MAX);
	d_strtoul(writeback_rate_d_smooth);

	d_strtoul(sequential_merge);
	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);

		if (v < 0)
			return v;

		if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_label) {
		memcpy(dc->sb.label, buf, SB_LABEL_SIZE);
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
	}

	if (attr == &sysfs_attach) {
		if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
			return -EINVAL;

		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c);
			if (!v)
				return size;
		}

		pr_err("Can't attach %s: cache set not found", buf);
		size = v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

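/*
 * Locked wrapper: takes bch_register_lock around __cached_dev_store() and
 * kicks the writeback thread / rate-update worker when their controlling
 * attributes are written.
 */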
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_d_term,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_d_smooth,
	&sysfs_writeback_rate_debug,
	&sysfs_dirty_data,
	&sysfs_sequential_cutoff,
	&sysfs_sequential_merge,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
#endif
	NULL
};
KTYPE(bch_cached_dev);

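/*
 * show()/store() for flash-only volumes: block devices backed entirely by
 * the cache set, whose size and label live in the cache set's uuid entries.
 */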
SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;
		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		atomic_set(&d->detaching, 1);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

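/*
 * show() for a cache set, /sys/fs/bcache/<set-uuid>. The nested helper
 * functions below walk the btree root and the btree node cache to compute
 * derived statistics (root node usage, cache size, longest bucket hash
 * chain, btree utilization, mean key size).
 */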
SHOW(__bch_cache_set)
{
	unsigned root_usage(struct cache_set *c)
	{
		unsigned bytes = 0;
		struct bkey *k;
		struct btree *b;
		struct btree_iter iter;

		goto lock_root;

		do {
			rw_unlock(false, b);
lock_root:
			b = c->root;
			rw_lock(false, b, b->level);
		} while (b != c->root);

		for_each_key_filter(b, k, &iter, bch_ptr_bad)
			bytes += bkey_bytes(k);

		rw_unlock(false, b);

		return (bytes * 100) / btree_bytes(c);
	}

	size_t cache_size(struct cache_set *c)
	{
		size_t ret = 0;
		struct btree *b;

		mutex_lock(&c->bucket_lock);
		list_for_each_entry(b, &c->btree_cache, list)
			ret += 1 << (b->page_order + PAGE_SHIFT);

		mutex_unlock(&c->bucket_lock);
		return ret;
	}

	unsigned cache_max_chain(struct cache_set *c)
	{
		unsigned ret = 0;
		struct hlist_head *h;

		mutex_lock(&c->bucket_lock);

		for (h = c->bucket_hash;
		     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
		     h++) {
			unsigned i = 0;
			struct hlist_node *p;

			hlist_for_each(p, h)
				i++;

			ret = max(ret, i);
		}

		mutex_unlock(&c->bucket_lock);
		return ret;
	}

	unsigned btree_used(struct cache_set *c)
	{
		return div64_u64(c->gc_stats.key_bytes * 100,
				 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
	}

	unsigned average_key_size(struct cache_set *c)
	{
		return c->gc_stats.nkeys
			? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
			: 0;
	}

	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		root_usage(c));

	sysfs_hprint(btree_cache_size,		cache_size(c));
	sysfs_print(btree_cache_max_chain,	cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort_time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);
	sysfs_print_time_stats(&c->try_harder_time,	try_harder, ms, us);

	sysfs_print(btree_used_percent,	btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(dirty_data,	c->gc_stats.dirty);
	sysfs_hprint(average_key_size,	average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	/* See count_io_errors for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit >> IO_ERROR_SHIFT);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

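/*
 * store() for a cache set: lifecycle controls (unregister/stop), flash
 * volume creation, gc and shrinker triggers, and the runtime tunables.
 */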
STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;
		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		bch_queue_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;
		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.shrink(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;

	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
	sysfs_strtoul(verify,			c->verify);
	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,
	&sysfs_dirty_data,

	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)
	sysfs_time_stats_attribute_list(try_harder, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	NULL
};
KTYPE(bch_cache_set_internal);

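/*
 * show() for an individual cache device: bucket/block geometry, sector
 * write counters, I/O error count, freelist size and bucket priority
 * statistics.
 */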
SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	sysfs_print(freelist_percent, ca->free.size * 100 /
		    ((size_t) ca->sb.nbuckets));

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

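	/*
	 * priority_stats: snapshot every bucket's priority, sort, and report
	 * the fraction of unused and metadata buckets plus the average and
	 * 31 quantiles of the remaining cached-data bucket priorities.
	 */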
	if (attr == &sysfs_priority_stats) {
		int cmp(const void *l, const void *r)
		{	return *((uint16_t *) r) - *((uint16_t *) l); }

		size_t n = ca->sb.nbuckets, i, unused, btree;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++;

		btree = cached - p;
		n -= btree;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:		%zu%%\n"
				"Metadata:	%zu%%\n"
				"Average:	%llu\n"
				"Sectors per Q:	%zu\n"
				"Quantiles:	[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				btree * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

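/*
 * store() for an individual cache device: toggles discard, switches the
 * bucket replacement policy, resizes the free-bucket FIFO
 * (freelist_percent) and clears the per-device statistics.
 */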
STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

		if (v < 0)
			return v;

		if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_freelist_percent) {
		DECLARE_FIFO(long, free);
		long i;
		size_t p = strtoul_or_return(buf);

		p = clamp_t(size_t,
			    ((size_t) ca->sb.nbuckets * p) / 100,
			    roundup_pow_of_two(ca->sb.nbuckets) >> 9,
			    ca->sb.nbuckets / 2);

		if (!init_fifo_exact(&free, p, GFP_KERNEL))
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);

		fifo_move(&free, &ca->free);
		fifo_swap(&free, &ca->free);

		mutex_unlock(&ca->set->bucket_lock);

		while (fifo_pop(&free, i))
			atomic_dec(&ca->buckets[i].pin);

		free_fifo(&free);
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_freelist_percent,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);