blob: 45b52056f88d0f5ffe8730b4e79d379c126106c5 [file] [log] [blame]
Lars Ellenberg4d3d5aa2014-05-02 13:19:51 +02001#define pr_fmt(fmt) "drbd debugfs: " fmt
2#include <linux/kernel.h>
3#include <linux/module.h>
4#include <linux/debugfs.h>
Lars Ellenbergdb1866ff2014-05-02 13:20:05 +02005#include <linux/seq_file.h>
Lars Ellenberg4d3d5aa2014-05-02 13:19:51 +02006#include <linux/stat.h>
Lars Ellenbergdb1866ff2014-05-02 13:20:05 +02007#include <linux/jiffies.h>
Lars Ellenberg4d3d5aa2014-05-02 13:19:51 +02008#include <linux/list.h>
9
10#include "drbd_int.h"
11#include "drbd_req.h"
12#include "drbd_debugfs.h"
13
/* Top-level debugfs directories: drbd/, drbd/resources/ and drbd/minors/.
 * Created in drbd_debugfs_init(), torn down in drbd_debugfs_cleanup(). */
static struct dentry *drbd_debugfs_root;
static struct dentry *drbd_debugfs_resources;
static struct dentry *drbd_debugfs_minors;
17
Lars Ellenbergdb1866ff2014-05-02 13:20:05 +020018static void seq_print_age_or_dash(struct seq_file *m, bool valid, unsigned long dt)
19{
20 if (valid)
21 seq_printf(m, "\t%d", jiffies_to_msecs(dt));
22 else
23 seq_printf(m, "\t-");
24}
25
Lars Ellenbergf4188152014-05-05 23:05:47 +020026static void __seq_print_rq_state_bit(struct seq_file *m,
27 bool is_set, char *sep, const char *set_name, const char *unset_name)
Lars Ellenbergdb1866ff2014-05-02 13:20:05 +020028{
Lars Ellenbergf4188152014-05-05 23:05:47 +020029 if (is_set && set_name) {
30 seq_putc(m, *sep);
31 seq_puts(m, set_name);
32 *sep = '|';
33 } else if (!is_set && unset_name) {
34 seq_putc(m, *sep);
35 seq_puts(m, unset_name);
36 *sep = '|';
37 }
38}
39
/* Convenience wrapper: print @set_name if @is_set, nothing otherwise. */
static void seq_print_rq_state_bit(struct seq_file *m,
	bool is_set, char *sep, const char *set_name)
{
	__seq_print_rq_state_bit(m, is_set, sep, set_name, NULL);
}
45
46/* pretty print enum drbd_req_state_bits req->rq_state */
47static void seq_print_request_state(struct seq_file *m, struct drbd_request *req)
48{
49 unsigned int s = req->rq_state;
50 char sep = ' ';
51 seq_printf(m, "\t0x%08x", s);
52 seq_printf(m, "\tmaster: %s", req->master_bio ? "pending" : "completed");
53
54 /* RQ_WRITE ignored, already reported */
55 seq_puts(m, "\tlocal:");
56 seq_print_rq_state_bit(m, s & RQ_IN_ACT_LOG, &sep, "in-AL");
57 seq_print_rq_state_bit(m, s & RQ_POSTPONED, &sep, "postponed");
58 seq_print_rq_state_bit(m, s & RQ_COMPLETION_SUSP, &sep, "suspended");
59 sep = ' ';
60 seq_print_rq_state_bit(m, s & RQ_LOCAL_PENDING, &sep, "pending");
61 seq_print_rq_state_bit(m, s & RQ_LOCAL_COMPLETED, &sep, "completed");
62 seq_print_rq_state_bit(m, s & RQ_LOCAL_ABORTED, &sep, "aborted");
63 seq_print_rq_state_bit(m, s & RQ_LOCAL_OK, &sep, "ok");
64 if (sep == ' ')
65 seq_puts(m, " -");
66
67 /* for_each_connection ... */
68 seq_printf(m, "\tnet:");
69 sep = ' ';
70 seq_print_rq_state_bit(m, s & RQ_NET_PENDING, &sep, "pending");
71 seq_print_rq_state_bit(m, s & RQ_NET_QUEUED, &sep, "queued");
72 seq_print_rq_state_bit(m, s & RQ_NET_SENT, &sep, "sent");
73 seq_print_rq_state_bit(m, s & RQ_NET_DONE, &sep, "done");
74 seq_print_rq_state_bit(m, s & RQ_NET_SIS, &sep, "sis");
75 seq_print_rq_state_bit(m, s & RQ_NET_OK, &sep, "ok");
76 if (sep == ' ')
77 seq_puts(m, " -");
78
79 seq_printf(m, " :");
80 sep = ' ';
81 seq_print_rq_state_bit(m, s & RQ_EXP_RECEIVE_ACK, &sep, "B");
82 seq_print_rq_state_bit(m, s & RQ_EXP_WRITE_ACK, &sep, "C");
83 seq_print_rq_state_bit(m, s & RQ_EXP_BARR_ACK, &sep, "barr");
84 if (sep == ' ')
85 seq_puts(m, " -");
86 seq_printf(m, "\n");
87}
88
89static void seq_print_one_request(struct seq_file *m, struct drbd_request *req, unsigned long now)
90{
91 /* change anything here, fixup header below! */
92 unsigned int s = req->rq_state;
93
94#define RQ_HDR_1 "epoch\tsector\tsize\trw"
95 seq_printf(m, "0x%x\t%llu\t%u\t%s",
96 req->epoch,
97 (unsigned long long)req->i.sector, req->i.size >> 9,
98 (s & RQ_WRITE) ? "W" : "R");
99
100#define RQ_HDR_2 "\tstart\tin AL\tsubmit"
101 seq_printf(m, "\t%d", jiffies_to_msecs(now - req->start_jif));
102 seq_print_age_or_dash(m, s & RQ_IN_ACT_LOG, now - req->in_actlog_jif);
103 seq_print_age_or_dash(m, s & RQ_LOCAL_PENDING, now - req->pre_submit_jif);
104
105#define RQ_HDR_3 "\tsent\tacked\tdone"
106 seq_print_age_or_dash(m, s & RQ_NET_SENT, now - req->pre_send_jif);
107 seq_print_age_or_dash(m, (s & RQ_NET_SENT) && !(s & RQ_NET_PENDING), now - req->acked_jif);
108 seq_print_age_or_dash(m, s & RQ_NET_DONE, now - req->net_done_jif);
109
110#define RQ_HDR_4 "\tstate\n"
111 seq_print_request_state(m, req);
112}
113#define RQ_HDR RQ_HDR_1 RQ_HDR_2 RQ_HDR_3 RQ_HDR_4
114
115static void seq_print_minor_vnr_req(struct seq_file *m, struct drbd_request *req, unsigned long now)
116{
117 seq_printf(m, "%u\t%u\t", req->device->minor, req->device->vnr);
118 seq_print_one_request(m, req, now);
119}
120
Lars Ellenbergf4188152014-05-05 23:05:47 +0200121static void seq_print_resource_pending_meta_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
122{
123 struct drbd_device *device;
124 unsigned int i;
125
126 seq_puts(m, "minor\tvnr\tstart\tsubmit\tintent\n");
127 rcu_read_lock();
128 idr_for_each_entry(&resource->devices, device, i) {
129 struct drbd_md_io tmp;
130 /* In theory this is racy,
131 * in the sense that there could have been a
132 * drbd_md_put_buffer(); drbd_md_get_buffer();
133 * between accessing these members here. */
134 tmp = device->md_io;
135 if (atomic_read(&tmp.in_use)) {
136 seq_printf(m, "%u\t%u\t%d\t",
137 device->minor, device->vnr,
138 jiffies_to_msecs(now - tmp.start_jif));
139 if (time_before(tmp.submit_jif, tmp.start_jif))
140 seq_puts(m, "-\t");
141 else
142 seq_printf(m, "%d\t", jiffies_to_msecs(now - tmp.submit_jif));
143 seq_printf(m, "%s\n", tmp.current_use);
144 }
145 }
146 rcu_read_unlock();
147}
148
/* For each device of @resource, print one "minor vnr age #waiting" line
 * describing application requests still waiting for an activity log
 * transaction.  Age is that of the oldest such request, or "-" if the
 * oldest pending request is already in the AL. */
static void seq_print_waiting_for_AL(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
	struct drbd_device *device;
	unsigned int i;

	seq_puts(m, "minor\tvnr\tage\t#waiting\n");
	rcu_read_lock();
	idr_for_each_entry(&resource->devices, device, i) {
		unsigned long jif;
		struct drbd_request *req;
		int n = atomic_read(&device->ap_actlog_cnt);
		if (n) {
			spin_lock_irq(&device->resource->req_lock);
			/* index [1]: presumably the WRITE list — matches the
			 * "WRITE, then READ" iteration in
			 * device_oldest_requests_show() below */
			req = list_first_entry_or_null(&device->pending_master_completion[1],
				struct drbd_request, req_pending_master_completion);
			/* if the oldest request does not wait for the activity log
			 * it is not interesting for us here */
			if (req && !(req->rq_state & RQ_IN_ACT_LOG))
				jif = req->start_jif;
			else
				req = NULL;
			spin_unlock_irq(&device->resource->req_lock);
		}
		if (n) {
			seq_printf(m, "%u\t%u\t", device->minor, device->vnr);
			if (req)
				/* jif was snapshotted under req_lock above */
				seq_printf(m, "%u\t", jiffies_to_msecs(now - jif));
			else
				seq_puts(m, "-\t");
			seq_printf(m, "%u\n", n);
		}
	}
	rcu_read_unlock();
}
183
/* Print one "minor vnr rw age #in-flight" line for the oldest pending
 * bitmap IO of @device; prints nothing if none is pending. */
static void seq_print_device_bitmap_io(struct seq_file *m, struct drbd_device *device, unsigned long now)
{
	struct drbd_bm_aio_ctx *ctx;
	unsigned long start_jif;
	unsigned int in_flight;
	unsigned int flags;
	spin_lock_irq(&device->resource->req_lock);
	ctx = list_first_entry_or_null(&device->pending_bitmap_io, struct drbd_bm_aio_ctx, list);
	/* an already completed context is not interesting */
	if (ctx && ctx->done)
		ctx = NULL;
	if (ctx) {
		/* snapshot under the lock; printed after unlock */
		start_jif = ctx->start_jif;
		in_flight = atomic_read(&ctx->in_flight);
		flags = ctx->flags;
	}
	spin_unlock_irq(&device->resource->req_lock);
	/* NOTE(review): after unlock, ctx is used only as a "found one"
	 * flag — the snapshot locals carry the data */
	if (ctx) {
		seq_printf(m, "%u\t%u\t%c\t%u\t%u\n",
			device->minor, device->vnr,
			(flags & BM_AIO_READ) ? 'R' : 'W',
			jiffies_to_msecs(now - start_jif),
			in_flight);
	}
}
208
209static void seq_print_resource_pending_bitmap_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
210{
211 struct drbd_device *device;
212 unsigned int i;
213
214 seq_puts(m, "minor\tvnr\trw\tage\t#in-flight\n");
215 rcu_read_lock();
216 idr_for_each_entry(&resource->devices, device, i) {
217 seq_print_device_bitmap_io(m, device, now);
218 }
219 rcu_read_unlock();
220}
221
222/* pretty print enum peer_req->flags */
223static void seq_print_peer_request_flags(struct seq_file *m, struct drbd_peer_request *peer_req)
224{
225 unsigned long f = peer_req->flags;
226 char sep = ' ';
227
228 __seq_print_rq_state_bit(m, f & EE_SUBMITTED, &sep, "submitted", "preparing");
229 __seq_print_rq_state_bit(m, f & EE_APPLICATION, &sep, "application", "internal");
230 seq_print_rq_state_bit(m, f & EE_CALL_AL_COMPLETE_IO, &sep, "in-AL");
231 seq_print_rq_state_bit(m, f & EE_SEND_WRITE_ACK, &sep, "C");
232 seq_print_rq_state_bit(m, f & EE_MAY_SET_IN_SYNC, &sep, "set-in-sync");
233
234 if (f & EE_IS_TRIM) {
235 seq_putc(m, sep);
236 sep = '|';
237 if (f & EE_IS_TRIM_USE_ZEROOUT)
238 seq_puts(m, "zero-out");
239 else
240 seq_puts(m, "trim");
241 }
242 seq_putc(m, '\n');
243}
244
/* Print the oldest peer requests on list @lh.
 * At most one "preparing" (not yet submitted) request is reported;
 * further not-yet-submitted entries are skipped.  Iteration stops at
 * the first submitted request, so only the head of the submitted
 * backlog is shown.  Caller must hold the appropriate lock for @lh. */
static void seq_print_peer_request(struct seq_file *m,
	struct drbd_device *device, struct list_head *lh,
	unsigned long now)
{
	bool reported_preparing = false;
	struct drbd_peer_request *peer_req;
	list_for_each_entry(peer_req, lh, w.list) {
		/* only one "preparing" entry is interesting */
		if (reported_preparing && !(peer_req->flags & EE_SUBMITTED))
			continue;

		/* device may be NULL when the caller prints it already */
		if (device)
			seq_printf(m, "%u\t%u\t", device->minor, device->vnr);

		seq_printf(m, "%llu\t%u\t%c\t%u\t",
			(unsigned long long)peer_req->i.sector, peer_req->i.size >> 9,
			(peer_req->flags & EE_WRITE) ? 'W' : 'R',
			jiffies_to_msecs(now - peer_req->submit_jif));
		seq_print_peer_request_flags(m, peer_req);
		if (peer_req->flags & EE_SUBMITTED)
			break;
		else
			reported_preparing = true;
	}
}
269
/* Print the oldest peer requests of all three ee lists of @device
 * (active, read, sync), plus a synthetic "flush" line if a flush is
 * pending.  Takes req_lock to walk the lists safely. */
static void seq_print_device_peer_requests(struct seq_file *m,
	struct drbd_device *device, unsigned long now)
{
	seq_puts(m, "minor\tvnr\tsector\tsize\trw\tage\tflags\n");
	spin_lock_irq(&device->resource->req_lock);
	seq_print_peer_request(m, device, &device->active_ee, now);
	seq_print_peer_request(m, device, &device->read_ee, now);
	seq_print_peer_request(m, device, &device->sync_ee, now);
	spin_unlock_irq(&device->resource->req_lock);
	if (test_bit(FLUSH_PENDING, &device->flags)) {
		/* no sector/size for a flush; rw column shows 'F' */
		seq_printf(m, "%u\t%u\t-\t-\tF\t%u\tflush\n",
			device->minor, device->vnr,
			jiffies_to_msecs(now - device->flush_jif));
	}
}
285
286static void seq_print_resource_pending_peer_requests(struct seq_file *m,
287 struct drbd_resource *resource, unsigned long now)
288{
289 struct drbd_device *device;
290 unsigned int i;
291
292 rcu_read_lock();
293 idr_for_each_entry(&resource->devices, device, i) {
294 seq_print_device_peer_requests(m, device, now);
295 }
296 rcu_read_unlock();
297}
298
/* Summarize the transfer log: print at most one request per distinct
 * "interesting state" combination (master completed / local pending /
 * not sent / net pending / not net done), oldest first, so local disk
 * problems can be told apart from network problems at a glance.
 * Stops early once all five aspects have been shown (show_state == 0x1f). */
static void seq_print_resource_transfer_log_summary(struct seq_file *m,
	struct drbd_resource *resource,
	struct drbd_connection *connection,
	unsigned long now)
{
	struct drbd_request *req;
	unsigned int count = 0;
	unsigned int show_state = 0;

	seq_puts(m, "n\tdevice\tvnr\t" RQ_HDR);
	spin_lock_irq(&resource->req_lock);
	list_for_each_entry(req, &connection->transfer_log, tl_requests) {
		unsigned int tmp = 0;
		unsigned int s;
		++count;

		/* don't disable irq "forever" */
		if (!(count & 0x1ff)) {
			struct drbd_request *req_next;
			/* pin req so it cannot be freed while we drop the lock */
			kref_get(&req->kref);
			spin_unlock_irq(&resource->req_lock);
			cond_resched();
			spin_lock_irq(&resource->req_lock);
			/* NOTE(review): req_next is read before the final put;
			 * if we held the last reference (req was completed and
			 * unlinked while unlocked), advance to the successor */
			req_next = list_next_entry(req, tl_requests);
			if (kref_put(&req->kref, drbd_req_destroy))
				req = req_next;
			/* reached the list head: done */
			if (&req->tl_requests == &connection->transfer_log)
				break;
		}

		s = req->rq_state;

		/* This is meant to summarize timing issues, to be able to tell
		 * local disk problems from network problems.
		 * Skip requests, if we have shown an even older request with
		 * similar aspects already. */
		if (req->master_bio == NULL)
			tmp |= 1;
		if ((s & RQ_LOCAL_MASK) && (s & RQ_LOCAL_PENDING))
			tmp |= 2;
		if (s & RQ_NET_MASK) {
			if (!(s & RQ_NET_SENT))
				tmp |= 4;
			if (s & RQ_NET_PENDING)
				tmp |= 8;
			if (!(s & RQ_NET_DONE))
				tmp |= 16;
		}
		/* all aspects of this request already shown by older ones? */
		if ((tmp & show_state) == tmp)
			continue;
		show_state |= tmp;
		seq_printf(m, "%u\t", count);
		seq_print_minor_vnr_req(m, req, now);
		if (show_state == 0x1f)
			break;
	}
	spin_unlock_irq(&resource->req_lock);
}
357
358/* TODO: transfer_log and friends should be moved to resource */
359static int in_flight_summary_show(struct seq_file *m, void *pos)
360{
361 struct drbd_resource *resource = m->private;
362 struct drbd_connection *connection;
363 unsigned long jif = jiffies;
364
365 connection = first_connection(resource);
366 /* This does not happen, actually.
367 * But be robust and prepare for future code changes. */
Lars Ellenberg4a521cc2014-05-05 12:05:54 +0000368 if (!connection || !kref_get_unless_zero(&connection->kref))
369 return -ESTALE;
Lars Ellenbergdb1866ff2014-05-02 13:20:05 +0200370
Lars Ellenbergf4188152014-05-05 23:05:47 +0200371 seq_puts(m, "oldest bitmap IO\n");
372 seq_print_resource_pending_bitmap_io(m, resource, jif);
373 seq_putc(m, '\n');
374
375 seq_puts(m, "meta data IO\n");
376 seq_print_resource_pending_meta_io(m, resource, jif);
377 seq_putc(m, '\n');
378
379 seq_puts(m, "socket buffer stats\n");
380 /* for each connection ... once we have more than one */
381 rcu_read_lock();
382 if (connection->data.socket) {
383 /* open coded SIOCINQ, the "relevant" part */
384 struct tcp_sock *tp = tcp_sk(connection->data.socket->sk);
385 int answ = tp->rcv_nxt - tp->copied_seq;
386 seq_printf(m, "unread receive buffer: %u Byte\n", answ);
387 /* open coded SIOCOUTQ, the "relevant" part */
388 answ = tp->write_seq - tp->snd_una;
389 seq_printf(m, "unacked send buffer: %u Byte\n", answ);
390 }
391 rcu_read_unlock();
392 seq_putc(m, '\n');
393
394 seq_puts(m, "oldest peer requests\n");
395 seq_print_resource_pending_peer_requests(m, resource, jif);
396 seq_putc(m, '\n');
397
398 seq_puts(m, "application requests waiting for activity log\n");
399 seq_print_waiting_for_AL(m, resource, jif);
400 seq_putc(m, '\n');
401
Lars Ellenbergdb1866ff2014-05-02 13:20:05 +0200402 seq_puts(m, "oldest application requests\n");
403 seq_print_resource_transfer_log_summary(m, resource, connection, jif);
404 seq_putc(m, '\n');
405
406 jif = jiffies - jif;
407 if (jif)
408 seq_printf(m, "generated in %d ms\n", jiffies_to_msecs(jif));
Lars Ellenberg4a521cc2014-05-05 12:05:54 +0000409 kref_put(&connection->kref, drbd_destroy_connection);
Lars Ellenbergdb1866ff2014-05-02 13:20:05 +0200410 return 0;
411}
412
Lars Ellenberg4a521cc2014-05-05 12:05:54 +0000413/* simple_positive(file->f_dentry) respectively debugfs_positive(),
414 * but neither is "reachable" from here.
415 * So we have our own inline version of it above. :-( */
416static inline int debugfs_positive(struct dentry *dentry)
417{
418 return dentry->d_inode && !d_unhashed(dentry);
419}
420
/* make sure at *open* time that the respective object won't go away. */
/* Wrapper around single_open() that first takes a reference (@kref) on
 * the object backing the file, but only if the debugfs entry has not
 * been removed yet and the object is still alive.
 * Returns 0 on success, -ESTALE if the entry or object is already gone,
 * or the single_open() error (with the kref dropped again via @release). */
static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, void *),
		void *data, struct kref *kref,
		void (*release)(struct kref *))
{
	struct dentry *parent;
	int ret = -ESTALE;

	/* Are we still linked,
	 * or has debugfs_remove() already been called? */
	parent = file->f_dentry->d_parent;
	/* not sure if this can happen: */
	if (!parent || !parent->d_inode)
		goto out;
	/* serialize with d_delete() */
	mutex_lock(&parent->d_inode->i_mutex);
	/* Make sure the object is still alive */
	if (debugfs_positive(file->f_dentry)
	&& kref_get_unless_zero(kref))
		ret = 0;
	mutex_unlock(&parent->d_inode->i_mutex);
	if (!ret) {
		ret = single_open(file, show, data);
		if (ret)
			/* open failed: drop the reference we just took */
			kref_put(kref, release);
	}
out:
	return ret;
}
450
Lars Ellenbergdb1866ff2014-05-02 13:20:05 +0200451static int in_flight_summary_open(struct inode *inode, struct file *file)
452{
Lars Ellenberg4a521cc2014-05-05 12:05:54 +0000453 struct drbd_resource *resource = inode->i_private;
454 return drbd_single_open(file, in_flight_summary_show, resource,
455 &resource->kref, drbd_destroy_resource);
456}
457
/* release(): drop the resource reference taken at open time,
 * then do the normal seq_file teardown. */
static int in_flight_summary_release(struct inode *inode, struct file *file)
{
	struct drbd_resource *resource = inode->i_private;
	kref_put(&resource->kref, drbd_destroy_resource);
	return single_release(inode, file);
}
464
/* read-only seq_file ops for <debugfs>/drbd/resources/$name/in_flight_summary */
static const struct file_operations in_flight_summary_fops = {
	.owner		= THIS_MODULE,
	.open		= in_flight_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= in_flight_summary_release,
};
472
/* Create the per-resource debugfs hierarchy:
 * resources/$name/{volumes/,connections/,in_flight_summary}.
 * On any failure, everything created so far is torn down again and an
 * error is logged; the caller does not need to clean up. */
void drbd_debugfs_resource_add(struct drbd_resource *resource)
{
	struct dentry *dentry;
	/* debugfs may not be available (init failed) */
	if (!drbd_debugfs_resources)
		return;

	dentry = debugfs_create_dir(resource->name, drbd_debugfs_resources);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res = dentry;

	dentry = debugfs_create_dir("volumes", resource->debugfs_res);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res_volumes = dentry;

	dentry = debugfs_create_dir("connections", resource->debugfs_res);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res_connections = dentry;

	dentry = debugfs_create_file("in_flight_summary", S_IRUSR|S_IRGRP,
			resource->debugfs_res, resource,
			&in_flight_summary_fops);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res_in_flight_summary = dentry;
	return;

fail:
	drbd_debugfs_resource_cleanup(resource);
	drbd_err(resource, "failed to create debugfs dentry\n");
}
506
507static void drbd_debugfs_remove(struct dentry **dp)
508{
509 debugfs_remove(*dp);
510 *dp = NULL;
511}
512
/* Tear down the per-resource debugfs hierarchy.
 * Children are removed before their parent directory. */
void drbd_debugfs_resource_cleanup(struct drbd_resource *resource)
{
	/* it is ok to call debugfs_remove(NULL) */
	drbd_debugfs_remove(&resource->debugfs_res_in_flight_summary);
	drbd_debugfs_remove(&resource->debugfs_res_connections);
	drbd_debugfs_remove(&resource->debugfs_res_volumes);
	drbd_debugfs_remove(&resource->debugfs_res);
}
521
Lars Ellenberg944410e2014-05-06 15:02:05 +0200522static void seq_print_one_timing_detail(struct seq_file *m,
523 const struct drbd_thread_timing_details *tdp,
524 unsigned long now)
525{
526 struct drbd_thread_timing_details td;
527 /* No locking...
528 * use temporary assignment to get at consistent data. */
529 do {
530 td = *tdp;
531 } while (td.cb_nr != tdp->cb_nr);
532 if (!td.cb_addr)
533 return;
534 seq_printf(m, "%u\t%d\t%s:%u\t%ps\n",
535 td.cb_nr,
536 jiffies_to_msecs(now - td.start_jif),
537 td.caller_fn, td.line,
538 td.cb_addr);
539}
540
541static void seq_print_timing_details(struct seq_file *m,
542 const char *title,
543 unsigned int cb_nr, struct drbd_thread_timing_details *tdp, unsigned long now)
544{
545 unsigned int start_idx;
546 unsigned int i;
547
548 seq_printf(m, "%s\n", title);
549 /* If not much is going on, this will result in natural ordering.
550 * If it is very busy, we will possibly skip events, or even see wrap
551 * arounds, which could only be avoided with locking.
552 */
553 start_idx = cb_nr % DRBD_THREAD_DETAILS_HIST;
554 for (i = start_idx; i < DRBD_THREAD_DETAILS_HIST; i++)
555 seq_print_one_timing_detail(m, tdp+i, now);
556 for (i = 0; i < start_idx; i++)
557 seq_print_one_timing_detail(m, tdp+i, now);
558}
559
560static int callback_history_show(struct seq_file *m, void *ignored)
561{
562 struct drbd_connection *connection = m->private;
563 unsigned long jif = jiffies;
564
565 seq_puts(m, "n\tage\tcallsite\tfn\n");
566 seq_print_timing_details(m, "worker", connection->w_cb_nr, connection->w_timing_details, jif);
567 seq_print_timing_details(m, "receiver", connection->r_cb_nr, connection->r_timing_details, jif);
568 return 0;
569}
570
571static int callback_history_open(struct inode *inode, struct file *file)
572{
573 struct drbd_connection *connection = inode->i_private;
574 return drbd_single_open(file, callback_history_show, connection,
575 &connection->kref, drbd_destroy_connection);
576}
577
/* release(): drop the connection reference taken at open time. */
static int callback_history_release(struct inode *inode, struct file *file)
{
	struct drbd_connection *connection = inode->i_private;
	kref_put(&connection->kref, drbd_destroy_connection);
	return single_release(inode, file);
}
584
/* read-only seq_file ops for the per-connection "callback_history" file */
static const struct file_operations connection_callback_history_fops = {
	.owner		= THIS_MODULE,
	.open		= callback_history_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= callback_history_release,
};
592
/* seq_file show() for the per-connection "oldest_requests" file:
 * the oldest request not yet handed to the network, the oldest still
 * waiting for an ack, and the oldest not yet done on the network —
 * each printed at most once (duplicates suppressed by pointer compare). */
static int connection_oldest_requests_show(struct seq_file *m, void *ignored)
{
	struct drbd_connection *connection = m->private;
	unsigned long now = jiffies;
	struct drbd_request *r1, *r2;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	spin_lock_irq(&connection->resource->req_lock);
	r1 = connection->req_next;
	if (r1)
		seq_print_minor_vnr_req(m, r1, now);
	r2 = connection->req_ack_pending;
	if (r2 && r2 != r1) {
		/* remember for the last dedup check below */
		r1 = r2;
		seq_print_minor_vnr_req(m, r1, now);
	}
	r2 = connection->req_not_net_done;
	if (r2 && r2 != r1)
		seq_print_minor_vnr_req(m, r2, now);
	spin_unlock_irq(&connection->resource->req_lock);
	return 0;
}
617
618static int connection_oldest_requests_open(struct inode *inode, struct file *file)
619{
620 struct drbd_connection *connection = inode->i_private;
621 return drbd_single_open(file, connection_oldest_requests_show, connection,
622 &connection->kref, drbd_destroy_connection);
623}
624
/* release(): drop the connection reference taken at open time. */
static int connection_oldest_requests_release(struct inode *inode, struct file *file)
{
	struct drbd_connection *connection = inode->i_private;
	kref_put(&connection->kref, drbd_destroy_connection);
	return single_release(inode, file);
}
631
/* read-only seq_file ops for the per-connection "oldest_requests" file */
static const struct file_operations connection_oldest_requests_fops = {
	.owner		= THIS_MODULE,
	.open		= connection_oldest_requests_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= connection_oldest_requests_release,
};
639
/* Create the per-connection debugfs directory and its files.
 * On failure, partial state is torn down and an error is logged. */
void drbd_debugfs_connection_add(struct drbd_connection *connection)
{
	struct dentry *conns_dir = connection->resource->debugfs_res_connections;
	struct dentry *dentry;
	/* resource-level debugfs may not exist (init failed) */
	if (!conns_dir)
		return;

	/* Once we enable mutliple peers,
	 * these connections will have descriptive names.
	 * For now, it is just the one connection to the (only) "peer". */
	dentry = debugfs_create_dir("peer", conns_dir);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	connection->debugfs_conn = dentry;

	dentry = debugfs_create_file("callback_history", S_IRUSR|S_IRGRP,
			connection->debugfs_conn, connection,
			&connection_callback_history_fops);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	connection->debugfs_conn_callback_history = dentry;
	/* NOTE(review): connection_oldest_requests_fops is defined above but
	 * no "oldest_requests" file is created here, while the cleanup below
	 * does remove debugfs_conn_oldest_requests — possibly an omission;
	 * confirm against the rest of the series. */
	return;

fail:
	drbd_debugfs_connection_cleanup(connection);
	drbd_err(connection, "failed to create debugfs dentry\n");
}
667
/* Tear down the per-connection debugfs entries; files before their
 * directory.  debugfs_remove(NULL) is a no-op, so unset entries are fine. */
void drbd_debugfs_connection_cleanup(struct drbd_connection *connection)
{
	drbd_debugfs_remove(&connection->debugfs_conn_callback_history);
	drbd_debugfs_remove(&connection->debugfs_conn_oldest_requests);
	drbd_debugfs_remove(&connection->debugfs_conn);
}
674
Lars Ellenberg54e6fc32014-05-08 13:39:35 +0200675static void resync_dump_detail(struct seq_file *m, struct lc_element *e)
676{
677 struct bm_extent *bme = lc_entry(e, struct bm_extent, lce);
678
679 seq_printf(m, "%5d %s %s %s\n", bme->rs_left,
680 test_bit(BME_NO_WRITES, &bme->flags) ? "NO_WRITES" : "---------",
681 test_bit(BME_LOCKED, &bme->flags) ? "LOCKED" : "------",
682 test_bit(BME_PRIORITY, &bme->flags) ? "PRIORITY" : "--------"
683 );
684}
685
686static int device_resync_extents_show(struct seq_file *m, void *ignored)
687{
688 struct drbd_device *device = m->private;
689 if (get_ldev_if_state(device, D_FAILED)) {
690 lc_seq_printf_stats(m, device->resync);
691 lc_seq_dump_details(m, device->resync, "rs_left flags", resync_dump_detail);
692 put_ldev(device);
693 }
694 return 0;
695}
696
697static int device_act_log_extents_show(struct seq_file *m, void *ignored)
698{
699 struct drbd_device *device = m->private;
700 if (get_ldev_if_state(device, D_FAILED)) {
701 lc_seq_printf_stats(m, device->act_log);
702 lc_seq_dump_details(m, device->act_log, "", NULL);
703 put_ldev(device);
704 }
705 return 0;
706}
707
/* seq_file show() for per-volume "oldest_requests": for writes and then
 * reads, print the oldest request still awaiting master completion and
 * the oldest still awaiting local completion (deduplicated). */
static int device_oldest_requests_show(struct seq_file *m, void *ignored)
{
	struct drbd_device *device = m->private;
	struct drbd_resource *resource = device->resource;
	unsigned long now = jiffies;
	struct drbd_request *r1, *r2;
	int i;

	seq_puts(m, RQ_HDR);
	spin_lock_irq(&resource->req_lock);
	/* WRITE, then READ */
	for (i = 1; i >= 0; --i) {
		r1 = list_first_entry_or_null(&device->pending_master_completion[i],
			struct drbd_request, req_pending_master_completion);
		r2 = list_first_entry_or_null(&device->pending_completion[i],
			struct drbd_request, req_pending_local);
		if (r1)
			seq_print_one_request(m, r1, now);
		/* avoid printing the same request twice */
		if (r2 && r2 != r1)
			seq_print_one_request(m, r2, now);
	}
	spin_unlock_irq(&resource->req_lock);
	return 0;
}
732
/* Generate open/release functions and a read-only file_operations for a
 * per-device debugfs attribute "name".  Expects a matching
 * device_<name>_show() to exist.  open takes a device kref (via
 * drbd_single_open()), release drops it again. */
#define drbd_debugfs_device_attr(name)						\
static int device_ ## name ## _open(struct inode *inode, struct file *file)	\
{										\
	struct drbd_device *device = inode->i_private;				\
	return drbd_single_open(file, device_ ## name ## _show, device,		\
				&device->kref, drbd_destroy_device);		\
}										\
static int device_ ## name ## _release(struct inode *inode, struct file *file)	\
{										\
	struct drbd_device *device = inode->i_private;				\
	kref_put(&device->kref, drbd_destroy_device);				\
	return single_release(inode, file);					\
}										\
static const struct file_operations device_ ## name ## _fops = {		\
	.owner		= THIS_MODULE,						\
	.open		= device_ ## name ## _open,				\
	.read		= seq_read,						\
	.llseek		= seq_lseek,						\
	.release	= device_ ## name ## _release,				\
};
753
/* instantiate open/release/fops for the three per-device attributes */
drbd_debugfs_device_attr(oldest_requests)
drbd_debugfs_device_attr(act_log_extents)
drbd_debugfs_device_attr(resync_extents)
757
/* Create the per-volume debugfs directory (named by vnr), the
 * minors/<minor> symlink pointing at it, and the three attribute files.
 * On any failure, partial state is torn down and an error is logged. */
void drbd_debugfs_device_add(struct drbd_device *device)
{
	struct dentry *vols_dir = device->resource->debugfs_res_volumes;
	char minor_buf[8]; /* MINORMASK, MINORBITS == 20; */
	char vnr_buf[8];   /* volume number vnr is even 16 bit only; */
	char *slink_name = NULL;

	struct dentry *dentry;
	/* debugfs may not be available (init failed) */
	if (!vols_dir || !drbd_debugfs_minors)
		return;

	snprintf(vnr_buf, sizeof(vnr_buf), "%u", device->vnr);
	dentry = debugfs_create_dir(vnr_buf, vols_dir);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	device->debugfs_vol = dentry;

	snprintf(minor_buf, sizeof(minor_buf), "%u", device->minor);
	slink_name = kasprintf(GFP_KERNEL, "../resources/%s/volumes/%u",
			device->resource->name, device->vnr);
	if (!slink_name)
		goto fail;
	dentry = debugfs_create_symlink(minor_buf, drbd_debugfs_minors, slink_name);
	/* debugfs copied the target string; ours can go */
	kfree(slink_name);
	slink_name = NULL;
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	device->debugfs_minor = dentry;

/* helper: create one attribute file, bail out to fail on error */
#define DCF(name)	do {					\
	dentry = debugfs_create_file(#name, S_IRUSR|S_IRGRP,	\
			device->debugfs_vol, device,		\
			&device_ ## name ## _fops);		\
	if (IS_ERR_OR_NULL(dentry))				\
		goto fail;					\
	device->debugfs_vol_ ## name = dentry;			\
	} while (0)

	DCF(oldest_requests);
	DCF(act_log_extents);
	DCF(resync_extents);
	return;

fail:
	drbd_debugfs_device_cleanup(device);
	drbd_err(device, "failed to create debugfs entries\n");
}
805
/* Tear down all per-volume debugfs entries; files and symlink before
 * the directory.  debugfs_remove(NULL) is a no-op, so unset entries are
 * harmless.
 * NOTE(review): debugfs_vol_data_gen_id is removed here but no matching
 * file is created in drbd_debugfs_device_add() above — presumably added
 * by a later change; verify against the full series. */
void drbd_debugfs_device_cleanup(struct drbd_device *device)
{
	drbd_debugfs_remove(&device->debugfs_minor);
	drbd_debugfs_remove(&device->debugfs_vol_oldest_requests);
	drbd_debugfs_remove(&device->debugfs_vol_act_log_extents);
	drbd_debugfs_remove(&device->debugfs_vol_resync_extents);
	drbd_debugfs_remove(&device->debugfs_vol_data_gen_id);
	drbd_debugfs_remove(&device->debugfs_vol);
}
815
/* Create the per-peer-device debugfs directory (named by vnr) under the
 * connection's directory.  On failure, clean up and log an error. */
void drbd_debugfs_peer_device_add(struct drbd_peer_device *peer_device)
{
	struct dentry *conn_dir = peer_device->connection->debugfs_conn;
	struct dentry *dentry;
	char vnr_buf[8];

	/* connection-level debugfs may not exist (creation failed) */
	if (!conn_dir)
		return;

	snprintf(vnr_buf, sizeof(vnr_buf), "%u", peer_device->device->vnr);
	dentry = debugfs_create_dir(vnr_buf, conn_dir);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	peer_device->debugfs_peer_dev = dentry;
	return;

fail:
	drbd_debugfs_peer_device_cleanup(peer_device);
	drbd_err(peer_device, "failed to create debugfs entries\n");
}
836
/* Remove the per-peer-device debugfs directory (no-op if never created). */
void drbd_debugfs_peer_device_cleanup(struct drbd_peer_device *peer_device)
{
	drbd_debugfs_remove(&peer_device->debugfs_peer_dev);
}
841
/* not __exit, may be indirectly called
 * from the module-load-failure path as well. */
/* Remove the top-level debugfs directories; children before the root.
 * Safe to call even if drbd_debugfs_init() failed partway (the helpers
 * tolerate NULL). */
void drbd_debugfs_cleanup(void)
{
	drbd_debugfs_remove(&drbd_debugfs_resources);
	drbd_debugfs_remove(&drbd_debugfs_minors);
	drbd_debugfs_remove(&drbd_debugfs_root);
}
850
/* Create the top-level debugfs layout: drbd/{resources,minors}.
 * Returns 0 on success, or a negative errno; on failure everything
 * created so far is removed again. */
int __init drbd_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_dir("drbd", NULL);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_root = dentry;

	dentry = debugfs_create_dir("resources", drbd_debugfs_root);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_resources = dentry;

	dentry = debugfs_create_dir("minors", drbd_debugfs_root);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_minors = dentry;
	return 0;

fail:
	drbd_debugfs_cleanup();
	/* only reached with dentry NULL or an ERR_PTR:
	 * NULL (debugfs disabled) maps to -EINVAL */
	if (dentry)
		return PTR_ERR(dentry);
	else
		return -EINVAL;
}