blob: 230d9e1fc85c2b413839bf51496122b50d6c6754 [file] [log] [blame]
Lars Ellenberg4d3d5aa2014-05-02 13:19:51 +02001#define pr_fmt(fmt) "drbd debugfs: " fmt
2#include <linux/kernel.h>
3#include <linux/module.h>
4#include <linux/debugfs.h>
Lars Ellenbergdb1866ff2014-05-02 13:20:05 +02005#include <linux/seq_file.h>
Lars Ellenberg4d3d5aa2014-05-02 13:19:51 +02006#include <linux/stat.h>
Lars Ellenbergdb1866ff2014-05-02 13:20:05 +02007#include <linux/jiffies.h>
Lars Ellenberg4d3d5aa2014-05-02 13:19:51 +02008#include <linux/list.h>
9
10#include "drbd_int.h"
11#include "drbd_req.h"
12#include "drbd_debugfs.h"
13
/* Top-level debugfs hierarchy: /sys/kernel/debug/drbd/{resources,minors}.
 * All three are created in drbd_debugfs_init() and remain NULL if that fails;
 * the *_add() helpers below check for NULL before creating children. */
static struct dentry *drbd_debugfs_root;
static struct dentry *drbd_debugfs_resources;
static struct dentry *drbd_debugfs_minors;
17
Lars Ellenbergdb1866ff2014-05-02 13:20:05 +020018static void seq_print_age_or_dash(struct seq_file *m, bool valid, unsigned long dt)
19{
20 if (valid)
21 seq_printf(m, "\t%d", jiffies_to_msecs(dt));
22 else
23 seq_printf(m, "\t-");
24}
25
Lars Ellenbergf4188152014-05-05 23:05:47 +020026static void __seq_print_rq_state_bit(struct seq_file *m,
27 bool is_set, char *sep, const char *set_name, const char *unset_name)
Lars Ellenbergdb1866ff2014-05-02 13:20:05 +020028{
Lars Ellenbergf4188152014-05-05 23:05:47 +020029 if (is_set && set_name) {
30 seq_putc(m, *sep);
31 seq_puts(m, set_name);
32 *sep = '|';
33 } else if (!is_set && unset_name) {
34 seq_putc(m, *sep);
35 seq_puts(m, unset_name);
36 *sep = '|';
37 }
38}
39
/* Convenience wrapper: print @set_name only when the bit is set,
 * stay silent when it is clear. */
static void seq_print_rq_state_bit(struct seq_file *m,
		bool is_set, char *sep, const char *set_name)
{
	__seq_print_rq_state_bit(m, is_set, sep, set_name, NULL);
}
45
/* pretty print enum drbd_req_state_bits req->rq_state */
static void seq_print_request_state(struct seq_file *m, struct drbd_request *req)
{
	unsigned int s = req->rq_state;
	char sep = ' ';
	/* raw hex first, then decoded groups: local disk, network, ack mode */
	seq_printf(m, "\t0x%08x", s);
	seq_printf(m, "\tmaster: %s", req->master_bio ? "pending" : "completed");

	/* RQ_WRITE ignored, already reported */
	seq_puts(m, "\tlocal:");
	seq_print_rq_state_bit(m, s & RQ_IN_ACT_LOG, &sep, "in-AL");
	seq_print_rq_state_bit(m, s & RQ_POSTPONED, &sep, "postponed");
	seq_print_rq_state_bit(m, s & RQ_COMPLETION_SUSP, &sep, "suspended");
	/* reset separator: next group starts with a space again */
	sep = ' ';
	seq_print_rq_state_bit(m, s & RQ_LOCAL_PENDING, &sep, "pending");
	seq_print_rq_state_bit(m, s & RQ_LOCAL_COMPLETED, &sep, "completed");
	seq_print_rq_state_bit(m, s & RQ_LOCAL_ABORTED, &sep, "aborted");
	seq_print_rq_state_bit(m, s & RQ_LOCAL_OK, &sep, "ok");
	/* sep unchanged means nothing was printed for this group */
	if (sep == ' ')
		seq_puts(m, " -");

	/* for_each_connection ... */
	seq_printf(m, "\tnet:");
	sep = ' ';
	seq_print_rq_state_bit(m, s & RQ_NET_PENDING, &sep, "pending");
	seq_print_rq_state_bit(m, s & RQ_NET_QUEUED, &sep, "queued");
	seq_print_rq_state_bit(m, s & RQ_NET_SENT, &sep, "sent");
	seq_print_rq_state_bit(m, s & RQ_NET_DONE, &sep, "done");
	seq_print_rq_state_bit(m, s & RQ_NET_SIS, &sep, "sis");
	seq_print_rq_state_bit(m, s & RQ_NET_OK, &sep, "ok");
	if (sep == ' ')
		seq_puts(m, " -");

	/* expected-ack flavor: B = receive-ack, C = write-ack (protocol names) */
	seq_printf(m, " :");
	sep = ' ';
	seq_print_rq_state_bit(m, s & RQ_EXP_RECEIVE_ACK, &sep, "B");
	seq_print_rq_state_bit(m, s & RQ_EXP_WRITE_ACK, &sep, "C");
	seq_print_rq_state_bit(m, s & RQ_EXP_BARR_ACK, &sep, "barr");
	if (sep == ' ')
		seq_puts(m, " -");
	seq_printf(m, "\n");
}
88
/* Print one request as a single tab-separated row.
 * The RQ_HDR_* macros next to each seq_printf() spell out the column headers
 * for that group; RQ_HDR below concatenates them for callers printing the
 * table header, so the two must be kept in sync. */
static void seq_print_one_request(struct seq_file *m, struct drbd_request *req, unsigned long now)
{
	/* change anything here, fixup header below! */
	unsigned int s = req->rq_state;

#define RQ_HDR_1 "epoch\tsector\tsize\trw"
	seq_printf(m, "0x%x\t%llu\t%u\t%s",
		req->epoch,
		(unsigned long long)req->i.sector, req->i.size >> 9,
		(s & RQ_WRITE) ? "W" : "R");

#define RQ_HDR_2 "\tstart\tin AL\tsubmit"
	/* ages in ms since the respective event; "-" when not (yet) applicable */
	seq_printf(m, "\t%d", jiffies_to_msecs(now - req->start_jif));
	seq_print_age_or_dash(m, s & RQ_IN_ACT_LOG, now - req->in_actlog_jif);
	seq_print_age_or_dash(m, s & RQ_LOCAL_PENDING, now - req->pre_submit_jif);

#define RQ_HDR_3 "\tsent\tacked\tdone"
	seq_print_age_or_dash(m, s & RQ_NET_SENT, now - req->pre_send_jif);
	/* "acked": sent and no longer pending an ack */
	seq_print_age_or_dash(m, (s & RQ_NET_SENT) && !(s & RQ_NET_PENDING), now - req->acked_jif);
	seq_print_age_or_dash(m, s & RQ_NET_DONE, now - req->net_done_jif);

#define RQ_HDR_4 "\tstate\n"
	seq_print_request_state(m, req);
}
#define RQ_HDR RQ_HDR_1 RQ_HDR_2 RQ_HDR_3 RQ_HDR_4
114
115static void seq_print_minor_vnr_req(struct seq_file *m, struct drbd_request *req, unsigned long now)
116{
117 seq_printf(m, "%u\t%u\t", req->device->minor, req->device->vnr);
118 seq_print_one_request(m, req, now);
119}
120
/* Print one row per device that currently has its meta-data IO buffer in use:
 * minor, vnr, age since start, age since submit (or "-"), and intended use. */
static void seq_print_resource_pending_meta_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
	struct drbd_device *device;
	unsigned int i;

	seq_puts(m, "minor\tvnr\tstart\tsubmit\tintent\n");
	rcu_read_lock();
	idr_for_each_entry(&resource->devices, device, i) {
		struct drbd_md_io tmp;
		/* In theory this is racy,
		 * in the sense that there could have been a
		 * drbd_md_put_buffer(); drbd_md_get_buffer();
		 * between accessing these members here. */
		/* struct copy taken deliberately without locking: a slightly
		 * inconsistent diagnostic snapshot is acceptable here */
		tmp = device->md_io;
		if (atomic_read(&tmp.in_use)) {
			seq_printf(m, "%u\t%u\t%d\t",
				device->minor, device->vnr,
				jiffies_to_msecs(now - tmp.start_jif));
			/* submit_jif older than start_jif means the IO of this
			 * use has not been submitted yet */
			if (time_before(tmp.submit_jif, tmp.start_jif))
				seq_puts(m, "-\t");
			else
				seq_printf(m, "%d\t", jiffies_to_msecs(now - tmp.submit_jif));
			seq_printf(m, "%s\n", tmp.current_use);
		}
	}
	rcu_read_unlock();
}
148
/* For each device, report how many application requests are waiting for an
 * activity log transaction, and the age of the oldest such request. */
static void seq_print_waiting_for_AL(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
	struct drbd_device *device;
	unsigned int i;

	seq_puts(m, "minor\tvnr\tage\t#waiting\n");
	rcu_read_lock();
	idr_for_each_entry(&resource->devices, device, i) {
		unsigned long jif;
		struct drbd_request *req;
		int n = atomic_read(&device->ap_actlog_cnt);
		if (n) {
			/* take the req_lock only to safely peek at the oldest
			 * pending-master-completion write request */
			spin_lock_irq(&device->resource->req_lock);
			req = list_first_entry_or_null(&device->pending_master_completion[1],
				struct drbd_request, req_pending_master_completion);
			/* if the oldest request does not wait for the activity log
			 * it is not interesting for us here */
			if (req && !(req->rq_state & RQ_IN_ACT_LOG))
				jif = req->start_jif;
			else
				req = NULL;
			/* jif was copied under the lock; req must not be
			 * dereferenced after the unlock below */
			spin_unlock_irq(&device->resource->req_lock);
		}
		/* printing happens outside the spinlock on purpose */
		if (n) {
			seq_printf(m, "%u\t%u\t", device->minor, device->vnr);
			if (req)
				seq_printf(m, "%u\t", jiffies_to_msecs(now - jif));
			else
				seq_puts(m, "-\t");
			seq_printf(m, "%u\n", n);
		}
	}
	rcu_read_unlock();
}
183
/* Print the oldest still-pending bitmap IO context of @device, if any:
 * minor, vnr, R/W, age in ms, and number of in-flight pages. */
static void seq_print_device_bitmap_io(struct seq_file *m, struct drbd_device *device, unsigned long now)
{
	struct drbd_bm_aio_ctx *ctx;
	unsigned long start_jif;
	unsigned int in_flight;
	unsigned int flags;
	spin_lock_irq(&device->resource->req_lock);
	ctx = list_first_entry_or_null(&device->pending_bitmap_io, struct drbd_bm_aio_ctx, list);
	/* a context marked done is no longer interesting */
	if (ctx && ctx->done)
		ctx = NULL;
	/* snapshot the fields under the lock; after the unlock, ctx is only
	 * used as a boolean and must not be dereferenced again */
	if (ctx) {
		start_jif = ctx->start_jif;
		in_flight = atomic_read(&ctx->in_flight);
		flags = ctx->flags;
	}
	spin_unlock_irq(&device->resource->req_lock);
	if (ctx) {
		seq_printf(m, "%u\t%u\t%c\t%u\t%u\n",
			device->minor, device->vnr,
			(flags & BM_AIO_READ) ? 'R' : 'W',
			jiffies_to_msecs(now - start_jif),
			in_flight);
	}
}
208
209static void seq_print_resource_pending_bitmap_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
210{
211 struct drbd_device *device;
212 unsigned int i;
213
214 seq_puts(m, "minor\tvnr\trw\tage\t#in-flight\n");
215 rcu_read_lock();
216 idr_for_each_entry(&resource->devices, device, i) {
217 seq_print_device_bitmap_io(m, device, now);
218 }
219 rcu_read_unlock();
220}
221
222/* pretty print enum peer_req->flags */
223static void seq_print_peer_request_flags(struct seq_file *m, struct drbd_peer_request *peer_req)
224{
225 unsigned long f = peer_req->flags;
226 char sep = ' ';
227
228 __seq_print_rq_state_bit(m, f & EE_SUBMITTED, &sep, "submitted", "preparing");
229 __seq_print_rq_state_bit(m, f & EE_APPLICATION, &sep, "application", "internal");
230 seq_print_rq_state_bit(m, f & EE_CALL_AL_COMPLETE_IO, &sep, "in-AL");
231 seq_print_rq_state_bit(m, f & EE_SEND_WRITE_ACK, &sep, "C");
232 seq_print_rq_state_bit(m, f & EE_MAY_SET_IN_SYNC, &sep, "set-in-sync");
233
234 if (f & EE_IS_TRIM) {
235 seq_putc(m, sep);
236 sep = '|';
237 if (f & EE_IS_TRIM_USE_ZEROOUT)
238 seq_puts(m, "zero-out");
239 else
240 seq_puts(m, "trim");
241 }
242 seq_putc(m, '\n');
243}
244
/* Print the interesting peer requests from list @lh (caller holds req_lock):
 * at most one request still in "preparing" state, then up to the first
 * submitted one, at which point we stop. */
static void seq_print_peer_request(struct seq_file *m,
	struct drbd_device *device, struct list_head *lh,
	unsigned long now)
{
	bool reported_preparing = false;
	struct drbd_peer_request *peer_req;
	list_for_each_entry(peer_req, lh, w.list) {
		/* only the first not-yet-submitted request is worth showing */
		if (reported_preparing && !(peer_req->flags & EE_SUBMITTED))
			continue;

		/* device may be NULL for per-device files that already
		 * identify the device in their path */
		if (device)
			seq_printf(m, "%u\t%u\t", device->minor, device->vnr);

		seq_printf(m, "%llu\t%u\t%c\t%u\t",
			(unsigned long long)peer_req->i.sector, peer_req->i.size >> 9,
			(peer_req->flags & EE_WRITE) ? 'W' : 'R',
			jiffies_to_msecs(now - peer_req->submit_jif));
		seq_print_peer_request_flags(m, peer_req);
		/* first submitted request printed: done with this list */
		if (peer_req->flags & EE_SUBMITTED)
			break;
		else
			reported_preparing = true;
	}
}
269
/* Print the oldest peer requests of @device from all three ee lists
 * (active, read, sync), plus a pseudo row for a pending flush, if any. */
static void seq_print_device_peer_requests(struct seq_file *m,
	struct drbd_device *device, unsigned long now)
{
	seq_puts(m, "minor\tvnr\tsector\tsize\trw\tage\tflags\n");
	/* req_lock protects the ee lists while we walk them */
	spin_lock_irq(&device->resource->req_lock);
	seq_print_peer_request(m, device, &device->active_ee, now);
	seq_print_peer_request(m, device, &device->read_ee, now);
	seq_print_peer_request(m, device, &device->sync_ee, now);
	spin_unlock_irq(&device->resource->req_lock);
	if (test_bit(FLUSH_PENDING, &device->flags)) {
		/* flushes have no sector/size; 'F' marks the row */
		seq_printf(m, "%u\t%u\t-\t-\tF\t%u\tflush\n",
			device->minor, device->vnr,
			jiffies_to_msecs(now - device->flush_jif));
	}
}
285
286static void seq_print_resource_pending_peer_requests(struct seq_file *m,
287 struct drbd_resource *resource, unsigned long now)
288{
289 struct drbd_device *device;
290 unsigned int i;
291
292 rcu_read_lock();
293 idr_for_each_entry(&resource->devices, device, i) {
294 seq_print_device_peer_requests(m, device, now);
295 }
296 rcu_read_unlock();
297}
298
/* Summarize the transfer log: walk all requests oldest-first, but print a
 * request only if it exhibits a timing aspect (see bitmask below) that no
 * older request already showed.  Intended to tell local disk problems from
 * network problems without dumping the whole (potentially huge) log. */
static void seq_print_resource_transfer_log_summary(struct seq_file *m,
	struct drbd_resource *resource,
	struct drbd_connection *connection,
	unsigned long now)
{
	struct drbd_request *req;
	unsigned int count = 0;
	unsigned int show_state = 0;

	seq_puts(m, "n\tdevice\tvnr\t" RQ_HDR);
	spin_lock_irq(&resource->req_lock);
	list_for_each_entry(req, &connection->transfer_log, tl_requests) {
		unsigned int tmp = 0;
		unsigned int s;
		++count;

		/* don't disable irq "forever" */
		/* every 512 requests: briefly drop the lock and reschedule;
		 * hold a kref on req so it cannot be freed meanwhile */
		if (!(count & 0x1ff)) {
			struct drbd_request *req_next;
			kref_get(&req->kref);
			spin_unlock_irq(&resource->req_lock);
			cond_resched();
			spin_lock_irq(&resource->req_lock);
			req_next = list_next_entry(req, tl_requests);
			/* if our kref was the last one, req was about to die:
			 * continue from its (pre-recorded) successor.
			 * NOTE(review): req_next is read from req's list
			 * linkage after re-locking — assumes the linkage is
			 * still meaningful at that point; verify against
			 * drbd_req_destroy()/list removal ordering. */
			if (kref_put(&req->kref, drbd_req_destroy))
				req = req_next;
			/* reached the list head again: done */
			if (&req->tl_requests == &connection->transfer_log)
				break;
		}

		s = req->rq_state;

		/* This is meant to summarize timing issues, to be able to tell
		 * local disk problems from network problems.
		 * Skip requests, if we have shown an even older request with
		 * similar aspects already. */
		if (req->master_bio == NULL)
			tmp |= 1;	/* completed towards upper layers */
		if ((s & RQ_LOCAL_MASK) && (s & RQ_LOCAL_PENDING))
			tmp |= 2;	/* local IO still pending */
		if (s & RQ_NET_MASK) {
			if (!(s & RQ_NET_SENT))
				tmp |= 4;	/* not yet on the wire */
			if (s & RQ_NET_PENDING)
				tmp |= 8;	/* waiting for peer ack */
			if (!(s & RQ_NET_DONE))
				tmp |= 16;	/* network handling unfinished */
		}
		/* no new aspect compared to what we already printed: skip */
		if ((tmp & show_state) == tmp)
			continue;
		show_state |= tmp;
		seq_printf(m, "%u\t", count);
		seq_print_minor_vnr_req(m, req, now);
		/* all aspects seen: nothing new can follow */
		if (show_state == 0x1f)
			break;
	}
	spin_unlock_irq(&resource->req_lock);
}
357
/* TODO: transfer_log and friends should be moved to resource */
/* seq_file show() for the per-resource "in_flight_summary" debugfs file:
 * dumps pending bitmap/meta-data IO, socket buffer fill levels, oldest peer
 * and application requests, and how long generating all that took. */
static int in_flight_summary_show(struct seq_file *m, void *pos)
{
	struct drbd_resource *resource = m->private;
	struct drbd_connection *connection;
	unsigned long jif = jiffies;

	connection = first_connection(resource);
	/* This does not happen, actually.
	 * But be robust and prepare for future code changes. */
	if (!connection || !kref_get_unless_zero(&connection->kref))
		return -ESTALE;

	seq_puts(m, "oldest bitmap IO\n");
	seq_print_resource_pending_bitmap_io(m, resource, jif);
	seq_putc(m, '\n');

	seq_puts(m, "meta data IO\n");
	seq_print_resource_pending_meta_io(m, resource, jif);
	seq_putc(m, '\n');

	seq_puts(m, "socket buffer stats\n");
	/* for each connection ... once we have more than one */
	rcu_read_lock();
	if (connection->data.socket) {
		/* open coded SIOCINQ, the "relevant" part */
		struct tcp_sock *tp = tcp_sk(connection->data.socket->sk);
		int answ = tp->rcv_nxt - tp->copied_seq;
		seq_printf(m, "unread receive buffer: %u Byte\n", answ);
		/* open coded SIOCOUTQ, the "relevant" part */
		answ = tp->write_seq - tp->snd_una;
		seq_printf(m, "unacked send buffer: %u Byte\n", answ);
	}
	rcu_read_unlock();
	seq_putc(m, '\n');

	seq_puts(m, "oldest peer requests\n");
	seq_print_resource_pending_peer_requests(m, resource, jif);
	seq_putc(m, '\n');

	seq_puts(m, "application requests waiting for activity log\n");
	seq_print_waiting_for_AL(m, resource, jif);
	seq_putc(m, '\n');

	seq_puts(m, "oldest application requests\n");
	seq_print_resource_transfer_log_summary(m, resource, connection, jif);
	seq_putc(m, '\n');

	/* reuse jif as "time spent generating this output" */
	jif = jiffies - jif;
	if (jif)
		seq_printf(m, "generated in %d ms\n", jiffies_to_msecs(jif));
	kref_put(&connection->kref, drbd_destroy_connection);
	return 0;
}
412
/* simple_positive(file->f_dentry) respectively debugfs_positive(),
 * but neither is "reachable" from here.
 * So we have our own inline version of it above. :-( */
/* Returns nonzero if @dentry still refers to a live, hashed inode,
 * i.e. debugfs_remove() has not run on it yet. */
static inline int debugfs_positive(struct dentry *dentry)
{
	return dentry->d_inode && !d_unhashed(dentry);
}
420
/* make sure at *open* time that the respective object won't go away. */
/* Wrapper around single_open(): under the parent directory's i_mutex,
 * verify the dentry has not been debugfs_remove()d, then pin @kref so the
 * object (@data) outlives the open file.  On any failure the kref is not
 * held (or released again) and a negative errno is returned. */
static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, void *),
				void *data, struct kref *kref,
				void (*release)(struct kref *))
{
	struct dentry *parent;
	int ret = -ESTALE;

	/* Are we still linked,
	 * or has debugfs_remove() already been called? */
	parent = file->f_dentry->d_parent;
	/* not sure if this can happen: */
	if (!parent || !parent->d_inode)
		goto out;
	/* serialize with d_delete() */
	mutex_lock(&parent->d_inode->i_mutex);
	if (!debugfs_positive(file->f_dentry))
		goto out_unlock;
	/* Make sure the object is still alive */
	if (kref_get_unless_zero(kref))
		ret = 0;
out_unlock:
	mutex_unlock(&parent->d_inode->i_mutex);
	if (!ret) {
		ret = single_open(file, show, data);
		/* undo the kref_get if single_open() failed */
		if (ret)
			kref_put(kref, release);
	}
out:
	return ret;
}
452
/* open(): pin the resource for the lifetime of the open file */
static int in_flight_summary_open(struct inode *inode, struct file *file)
{
	struct drbd_resource *resource = inode->i_private;
	return drbd_single_open(file, in_flight_summary_show, resource,
				&resource->kref, drbd_destroy_resource);
}
459
/* release(): drop the reference taken in in_flight_summary_open() */
static int in_flight_summary_release(struct inode *inode, struct file *file)
{
	struct drbd_resource *resource = inode->i_private;
	kref_put(&resource->kref, drbd_destroy_resource);
	return single_release(inode, file);
}
466
/* file_operations for <debugfs>/drbd/resources/<name>/in_flight_summary */
static const struct file_operations in_flight_summary_fops = {
	.owner		= THIS_MODULE,
	.open		= in_flight_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= in_flight_summary_release,
};
474
/* Create the debugfs subtree for @resource:
 * resources/<name>/{volumes/,connections/,in_flight_summary}.
 * On any failure, tear down whatever was created and log an error;
 * the function itself returns void (debugfs is best-effort). */
void drbd_debugfs_resource_add(struct drbd_resource *resource)
{
	struct dentry *dentry;
	/* module-level init failed or was not run: silently skip */
	if (!drbd_debugfs_resources)
		return;

	dentry = debugfs_create_dir(resource->name, drbd_debugfs_resources);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res = dentry;

	dentry = debugfs_create_dir("volumes", resource->debugfs_res);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res_volumes = dentry;

	dentry = debugfs_create_dir("connections", resource->debugfs_res);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res_connections = dentry;

	/* read-only for owner and group */
	dentry = debugfs_create_file("in_flight_summary", S_IRUSR|S_IRGRP,
			resource->debugfs_res, resource,
			&in_flight_summary_fops);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res_in_flight_summary = dentry;
	return;

fail:
	drbd_debugfs_resource_cleanup(resource);
	drbd_err(resource, "failed to create debugfs dentry\n");
}
508
509static void drbd_debugfs_remove(struct dentry **dp)
510{
511 debugfs_remove(*dp);
512 *dp = NULL;
513}
514
/* Tear down the per-resource debugfs subtree.
 * Children are removed before their parent directory; entries that were
 * never created are NULL and harmlessly skipped. */
void drbd_debugfs_resource_cleanup(struct drbd_resource *resource)
{
	/* it is ok to call debugfs_remove(NULL) */
	drbd_debugfs_remove(&resource->debugfs_res_in_flight_summary);
	drbd_debugfs_remove(&resource->debugfs_res_connections);
	drbd_debugfs_remove(&resource->debugfs_res_volumes);
	drbd_debugfs_remove(&resource->debugfs_res);
}
523
/* Print one thread-timing ring-buffer slot: callback number, age in ms,
 * call site (file:line) and callback address. */
static void seq_print_one_timing_detail(struct seq_file *m,
	const struct drbd_thread_timing_details *tdp,
	unsigned long now)
{
	struct drbd_thread_timing_details td;
	/* No locking...
	 * use temporary assignment to get at consistent data. */
	/* re-read until cb_nr is stable: the writer updates the slot without
	 * locks, so a torn read would show a mixed-up entry */
	do {
		td = *tdp;
	} while (td.cb_nr != tdp->cb_nr);
	/* slot never written yet */
	if (!td.cb_addr)
		return;
	seq_printf(m, "%u\t%d\t%s:%u\t%ps\n",
			td.cb_nr,
			jiffies_to_msecs(now - td.start_jif),
			td.caller_fn, td.line,
			td.cb_addr);
}
542
543static void seq_print_timing_details(struct seq_file *m,
544 const char *title,
545 unsigned int cb_nr, struct drbd_thread_timing_details *tdp, unsigned long now)
546{
547 unsigned int start_idx;
548 unsigned int i;
549
550 seq_printf(m, "%s\n", title);
551 /* If not much is going on, this will result in natural ordering.
552 * If it is very busy, we will possibly skip events, or even see wrap
553 * arounds, which could only be avoided with locking.
554 */
555 start_idx = cb_nr % DRBD_THREAD_DETAILS_HIST;
556 for (i = start_idx; i < DRBD_THREAD_DETAILS_HIST; i++)
557 seq_print_one_timing_detail(m, tdp+i, now);
558 for (i = 0; i < start_idx; i++)
559 seq_print_one_timing_detail(m, tdp+i, now);
560}
561
/* seq_file show() for the per-connection "callback_history" file:
 * dumps the worker and receiver thread timing rings. */
static int callback_history_show(struct seq_file *m, void *ignored)
{
	struct drbd_connection *connection = m->private;
	unsigned long jif = jiffies;

	seq_puts(m, "n\tage\tcallsite\tfn\n");
	seq_print_timing_details(m, "worker", connection->w_cb_nr, connection->w_timing_details, jif);
	seq_print_timing_details(m, "receiver", connection->r_cb_nr, connection->r_timing_details, jif);
	return 0;
}
572
/* open(): pin the connection for the lifetime of the open file */
static int callback_history_open(struct inode *inode, struct file *file)
{
	struct drbd_connection *connection = inode->i_private;
	return drbd_single_open(file, callback_history_show, connection,
				&connection->kref, drbd_destroy_connection);
}
579
/* release(): drop the reference taken in callback_history_open() */
static int callback_history_release(struct inode *inode, struct file *file)
{
	struct drbd_connection *connection = inode->i_private;
	kref_put(&connection->kref, drbd_destroy_connection);
	return single_release(inode, file);
}
586
/* file_operations for .../connections/peer/callback_history */
static const struct file_operations connection_callback_history_fops = {
	.owner		= THIS_MODULE,
	.open		= callback_history_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= callback_history_release,
};
594
/* Create the per-connection debugfs subtree:
 * connections/peer/{callback_history}.  Best-effort; on failure everything
 * created so far is removed and an error is logged. */
void drbd_debugfs_connection_add(struct drbd_connection *connection)
{
	struct dentry *conns_dir = connection->resource->debugfs_res_connections;
	struct dentry *dentry;
	/* resource-level debugfs setup failed: nothing to attach to */
	if (!conns_dir)
		return;

	/* Once we enable mutliple peers,
	 * these connections will have descriptive names.
	 * For now, it is just the one connection to the (only) "peer". */
	dentry = debugfs_create_dir("peer", conns_dir);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	connection->debugfs_conn = dentry;

	dentry = debugfs_create_file("callback_history", S_IRUSR|S_IRGRP,
			connection->debugfs_conn, connection,
			&connection_callback_history_fops);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	connection->debugfs_conn_callback_history = dentry;
	return;

fail:
	drbd_debugfs_connection_cleanup(connection);
	drbd_err(connection, "failed to create debugfs dentry\n");
}
622
/* Tear down the per-connection debugfs entries; children before parent.
 * (debugfs_conn_oldest_requests may never have been created; removing a
 * NULL dentry is a no-op.) */
void drbd_debugfs_connection_cleanup(struct drbd_connection *connection)
{
	drbd_debugfs_remove(&connection->debugfs_conn_callback_history);
	drbd_debugfs_remove(&connection->debugfs_conn_oldest_requests);
	drbd_debugfs_remove(&connection->debugfs_conn);
}
629
630void drbd_debugfs_device_add(struct drbd_device *device)
631{
632 struct dentry *vols_dir = device->resource->debugfs_res_volumes;
633 char minor_buf[8]; /* MINORMASK, MINORBITS == 20; */
634 char vnr_buf[8]; /* volume number vnr is even 16 bit only; */
635 char *slink_name = NULL;
636
637 struct dentry *dentry;
638 if (!vols_dir || !drbd_debugfs_minors)
639 return;
640
641 snprintf(vnr_buf, sizeof(vnr_buf), "%u", device->vnr);
642 dentry = debugfs_create_dir(vnr_buf, vols_dir);
643 if (IS_ERR_OR_NULL(dentry))
644 goto fail;
645 device->debugfs_vol = dentry;
646
647 snprintf(minor_buf, sizeof(minor_buf), "%u", device->minor);
648 slink_name = kasprintf(GFP_KERNEL, "../resources/%s/volumes/%u",
649 device->resource->name, device->vnr);
650 if (!slink_name)
651 goto fail;
652 dentry = debugfs_create_symlink(minor_buf, drbd_debugfs_minors, slink_name);
653 if (IS_ERR_OR_NULL(dentry))
654 goto fail;
655 device->debugfs_minor = dentry;
656 kfree(slink_name);
657
658fail:
659 drbd_debugfs_device_cleanup(device);
660 drbd_err(device, "failed to create debugfs entries\n");
661}
662
/* Tear down the per-device debugfs entries: the minors/<minor> symlink,
 * any per-volume files, then the volume directory itself.  Entries that
 * were never created are NULL and harmlessly skipped. */
void drbd_debugfs_device_cleanup(struct drbd_device *device)
{
	drbd_debugfs_remove(&device->debugfs_minor);
	drbd_debugfs_remove(&device->debugfs_vol_oldest_requests);
	drbd_debugfs_remove(&device->debugfs_vol_act_log_extents);
	drbd_debugfs_remove(&device->debugfs_vol_resync_extents);
	drbd_debugfs_remove(&device->debugfs_vol_data_gen_id);
	drbd_debugfs_remove(&device->debugfs_vol);
}
672
/* Create the per-peer-device debugfs directory,
 * connections/peer/<vnr>/.  Best-effort, like the other *_add helpers. */
void drbd_debugfs_peer_device_add(struct drbd_peer_device *peer_device)
{
	struct dentry *conn_dir = peer_device->connection->debugfs_conn;
	struct dentry *dentry;
	char vnr_buf[8];

	/* connection-level setup failed: nothing to attach to */
	if (!conn_dir)
		return;

	snprintf(vnr_buf, sizeof(vnr_buf), "%u", peer_device->device->vnr);
	dentry = debugfs_create_dir(vnr_buf, conn_dir);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	peer_device->debugfs_peer_dev = dentry;
	return;

fail:
	drbd_debugfs_peer_device_cleanup(peer_device);
	drbd_err(peer_device, "failed to create debugfs entries\n");
}
693
/* Remove the per-peer-device debugfs directory (no-op if never created). */
void drbd_debugfs_peer_device_cleanup(struct drbd_peer_device *peer_device)
{
	drbd_debugfs_remove(&peer_device->debugfs_peer_dev);
}
698
/* not __exit, may be indirectly called
 * from the module-load-failure path as well. */
/* Remove the module-level debugfs tree; children before the root.
 * Safe to call even when drbd_debugfs_init() partially failed. */
void drbd_debugfs_cleanup(void)
{
	drbd_debugfs_remove(&drbd_debugfs_resources);
	drbd_debugfs_remove(&drbd_debugfs_minors);
	drbd_debugfs_remove(&drbd_debugfs_root);
}
707
/* Create the module-level debugfs tree: drbd/{resources,minors}.
 * Returns 0 on success or a negative errno; on failure everything created
 * so far is cleaned up again. */
int __init drbd_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_dir("drbd", NULL);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_root = dentry;

	dentry = debugfs_create_dir("resources", drbd_debugfs_root);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_resources = dentry;

	dentry = debugfs_create_dir("minors", drbd_debugfs_root);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_minors = dentry;
	return 0;

fail:
	drbd_debugfs_cleanup();
	/* debugfs_create_dir() may return an ERR_PTR or NULL on failure;
	 * map NULL to -EINVAL since it carries no errno */
	if (dentry)
		return PTR_ERR(dentry);
	else
		return -EINVAL;
}