#define pr_fmt(fmt) "drbd debugfs: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/jiffies.h>
#include <linux/list.h>

#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_debugfs.h"


/**********************************************************************
 * Whenever you change the file format, remember to bump the version. *
 **********************************************************************/

static struct dentry *drbd_debugfs_root;
static struct dentry *drbd_debugfs_version;
static struct dentry *drbd_debugfs_resources;
static struct dentry *drbd_debugfs_minors;

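/* Print the age of an event in milliseconds, or just a dash if the
 * corresponding timestamp is not valid (yet). */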
static void seq_print_age_or_dash(struct seq_file *m, bool valid, unsigned long dt)
{
	if (valid)
		seq_printf(m, "\t%d", jiffies_to_msecs(dt));
	else
		seq_printf(m, "\t-");
}

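/* Helpers to build "name|name|..." lists of state bits.  *sep starts out as
 * a blank; after the first name has been printed it is switched to '|', so
 * subsequent names get separated properly. */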
static void __seq_print_rq_state_bit(struct seq_file *m,
	bool is_set, char *sep, const char *set_name, const char *unset_name)
{
	if (is_set && set_name) {
		seq_putc(m, *sep);
		seq_puts(m, set_name);
		*sep = '|';
	} else if (!is_set && unset_name) {
		seq_putc(m, *sep);
		seq_puts(m, unset_name);
		*sep = '|';
	}
}

static void seq_print_rq_state_bit(struct seq_file *m,
	bool is_set, char *sep, const char *set_name)
{
	__seq_print_rq_state_bit(m, is_set, sep, set_name, NULL);
}

/* pretty print enum drbd_req_state_bits req->rq_state */
static void seq_print_request_state(struct seq_file *m, struct drbd_request *req)
{
	unsigned int s = req->rq_state;
	char sep = ' ';
	seq_printf(m, "\t0x%08x", s);
	seq_printf(m, "\tmaster: %s", req->master_bio ? "pending" : "completed");

	/* RQ_WRITE ignored, already reported */
	seq_puts(m, "\tlocal:");
	seq_print_rq_state_bit(m, s & RQ_IN_ACT_LOG, &sep, "in-AL");
	seq_print_rq_state_bit(m, s & RQ_POSTPONED, &sep, "postponed");
	seq_print_rq_state_bit(m, s & RQ_COMPLETION_SUSP, &sep, "suspended");
	sep = ' ';
	seq_print_rq_state_bit(m, s & RQ_LOCAL_PENDING, &sep, "pending");
	seq_print_rq_state_bit(m, s & RQ_LOCAL_COMPLETED, &sep, "completed");
	seq_print_rq_state_bit(m, s & RQ_LOCAL_ABORTED, &sep, "aborted");
	seq_print_rq_state_bit(m, s & RQ_LOCAL_OK, &sep, "ok");
	if (sep == ' ')
		seq_puts(m, " -");

	/* for_each_connection ... */
	seq_printf(m, "\tnet:");
	sep = ' ';
	seq_print_rq_state_bit(m, s & RQ_NET_PENDING, &sep, "pending");
	seq_print_rq_state_bit(m, s & RQ_NET_QUEUED, &sep, "queued");
	seq_print_rq_state_bit(m, s & RQ_NET_SENT, &sep, "sent");
	seq_print_rq_state_bit(m, s & RQ_NET_DONE, &sep, "done");
	seq_print_rq_state_bit(m, s & RQ_NET_SIS, &sep, "sis");
	seq_print_rq_state_bit(m, s & RQ_NET_OK, &sep, "ok");
	if (sep == ' ')
		seq_puts(m, " -");

	seq_printf(m, " :");
	sep = ' ';
	seq_print_rq_state_bit(m, s & RQ_EXP_RECEIVE_ACK, &sep, "B");
	seq_print_rq_state_bit(m, s & RQ_EXP_WRITE_ACK, &sep, "C");
	seq_print_rq_state_bit(m, s & RQ_EXP_BARR_ACK, &sep, "barr");
	if (sep == ' ')
		seq_puts(m, " -");
	seq_printf(m, "\n");
}

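/* Print one request per line.  All ages are in milliseconds, relative to
 * "now".  The RQ_HDR_* fragments below are concatenated into RQ_HDR and must
 * be kept in sync with the columns actually printed here. */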
static void seq_print_one_request(struct seq_file *m, struct drbd_request *req, unsigned long now)
{
	/* change anything here, fixup header below! */
	unsigned int s = req->rq_state;

#define RQ_HDR_1 "epoch\tsector\tsize\trw"
	seq_printf(m, "0x%x\t%llu\t%u\t%s",
		req->epoch,
		(unsigned long long)req->i.sector, req->i.size >> 9,
		(s & RQ_WRITE) ? "W" : "R");

#define RQ_HDR_2 "\tstart\tin AL\tsubmit"
	seq_printf(m, "\t%d", jiffies_to_msecs(now - req->start_jif));
	seq_print_age_or_dash(m, s & RQ_IN_ACT_LOG, now - req->in_actlog_jif);
	seq_print_age_or_dash(m, s & RQ_LOCAL_PENDING, now - req->pre_submit_jif);

#define RQ_HDR_3 "\tsent\tacked\tdone"
	seq_print_age_or_dash(m, s & RQ_NET_SENT, now - req->pre_send_jif);
	seq_print_age_or_dash(m, (s & RQ_NET_SENT) && !(s & RQ_NET_PENDING), now - req->acked_jif);
	seq_print_age_or_dash(m, s & RQ_NET_DONE, now - req->net_done_jif);

#define RQ_HDR_4 "\tstate\n"
	seq_print_request_state(m, req);
}
#define RQ_HDR RQ_HDR_1 RQ_HDR_2 RQ_HDR_3 RQ_HDR_4

static void seq_print_minor_vnr_req(struct seq_file *m, struct drbd_request *req, unsigned long now)
{
	seq_printf(m, "%u\t%u\t", req->device->minor, req->device->vnr);
	seq_print_one_request(m, req, now);
}

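/* For each volume of this resource, report the meta data IO currently in
 * use (if any): when it was started, when it was submitted, and what it is
 * intended for. */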
static void seq_print_resource_pending_meta_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
	struct drbd_device *device;
	unsigned int i;

	seq_puts(m, "minor\tvnr\tstart\tsubmit\tintent\n");
	rcu_read_lock();
	idr_for_each_entry(&resource->devices, device, i) {
		struct drbd_md_io tmp;
		/* In theory this is racy,
		 * in the sense that there could have been a
		 * drbd_md_put_buffer(); drbd_md_get_buffer();
		 * between accessing these members here. */
		tmp = device->md_io;
		if (atomic_read(&tmp.in_use)) {
			seq_printf(m, "%u\t%u\t%d\t",
				device->minor, device->vnr,
				jiffies_to_msecs(now - tmp.start_jif));
			if (time_before(tmp.submit_jif, tmp.start_jif))
				seq_puts(m, "-\t");
			else
				seq_printf(m, "%d\t", jiffies_to_msecs(now - tmp.submit_jif));
			seq_printf(m, "%s\n", tmp.current_use);
		}
	}
	rcu_read_unlock();
}

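/* For each volume, report how many application requests are still waiting
 * for an activity log transaction, and the age of the oldest such request,
 * if it can be determined. */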
static void seq_print_waiting_for_AL(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
	struct drbd_device *device;
	unsigned int i;

	seq_puts(m, "minor\tvnr\tage\t#waiting\n");
	rcu_read_lock();
	idr_for_each_entry(&resource->devices, device, i) {
		unsigned long jif;
		struct drbd_request *req;
		int n = atomic_read(&device->ap_actlog_cnt);
		if (n) {
			spin_lock_irq(&device->resource->req_lock);
			req = list_first_entry_or_null(&device->pending_master_completion[1],
				struct drbd_request, req_pending_master_completion);
			/* if the oldest request does not wait for the activity log
			 * it is not interesting for us here */
			if (req && !(req->rq_state & RQ_IN_ACT_LOG))
				jif = req->start_jif;
			else
				req = NULL;
			spin_unlock_irq(&device->resource->req_lock);
		}
		if (n) {
			seq_printf(m, "%u\t%u\t", device->minor, device->vnr);
			if (req)
				seq_printf(m, "%u\t", jiffies_to_msecs(now - jif));
			else
				seq_puts(m, "-\t");
			seq_printf(m, "%u\n", n);
		}
	}
	rcu_read_unlock();
}

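/* Report the oldest pending bitmap IO of one device, if any is still in
 * flight: direction (read/write), its age, and the current in-flight count. */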
static void seq_print_device_bitmap_io(struct seq_file *m, struct drbd_device *device, unsigned long now)
{
	struct drbd_bm_aio_ctx *ctx;
	unsigned long start_jif;
	unsigned int in_flight;
	unsigned int flags;
	spin_lock_irq(&device->resource->req_lock);
	ctx = list_first_entry_or_null(&device->pending_bitmap_io, struct drbd_bm_aio_ctx, list);
	if (ctx && ctx->done)
		ctx = NULL;
	if (ctx) {
		start_jif = ctx->start_jif;
		in_flight = atomic_read(&ctx->in_flight);
		flags = ctx->flags;
	}
	spin_unlock_irq(&device->resource->req_lock);
	if (ctx) {
		seq_printf(m, "%u\t%u\t%c\t%u\t%u\n",
			device->minor, device->vnr,
			(flags & BM_AIO_READ) ? 'R' : 'W',
			jiffies_to_msecs(now - start_jif),
			in_flight);
	}
}

static void seq_print_resource_pending_bitmap_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
	struct drbd_device *device;
	unsigned int i;

	seq_puts(m, "minor\tvnr\trw\tage\t#in-flight\n");
	rcu_read_lock();
	idr_for_each_entry(&resource->devices, device, i) {
		seq_print_device_bitmap_io(m, device, now);
	}
	rcu_read_unlock();
}

/* pretty print enum peer_req->flags */
static void seq_print_peer_request_flags(struct seq_file *m, struct drbd_peer_request *peer_req)
{
	unsigned long f = peer_req->flags;
	char sep = ' ';

	__seq_print_rq_state_bit(m, f & EE_SUBMITTED, &sep, "submitted", "preparing");
	__seq_print_rq_state_bit(m, f & EE_APPLICATION, &sep, "application", "internal");
	seq_print_rq_state_bit(m, f & EE_CALL_AL_COMPLETE_IO, &sep, "in-AL");
	seq_print_rq_state_bit(m, f & EE_SEND_WRITE_ACK, &sep, "C");
	seq_print_rq_state_bit(m, f & EE_MAY_SET_IN_SYNC, &sep, "set-in-sync");

	if (f & EE_IS_TRIM) {
		seq_putc(m, sep);
		sep = '|';
		if (f & EE_IS_TRIM_USE_ZEROOUT)
			seq_puts(m, "zero-out");
		else
			seq_puts(m, "trim");
	}
	seq_putc(m, '\n');
}

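/* Print the oldest entries of one peer request list: at most one request
 * that is still preparing (not yet submitted), then the first submitted
 * one, then stop. */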
static void seq_print_peer_request(struct seq_file *m,
	struct drbd_device *device, struct list_head *lh,
	unsigned long now)
{
	bool reported_preparing = false;
	struct drbd_peer_request *peer_req;
	list_for_each_entry(peer_req, lh, w.list) {
		if (reported_preparing && !(peer_req->flags & EE_SUBMITTED))
			continue;

		if (device)
			seq_printf(m, "%u\t%u\t", device->minor, device->vnr);

		seq_printf(m, "%llu\t%u\t%c\t%u\t",
			(unsigned long long)peer_req->i.sector, peer_req->i.size >> 9,
			(peer_req->flags & EE_WRITE) ? 'W' : 'R',
			jiffies_to_msecs(now - peer_req->submit_jif));
		seq_print_peer_request_flags(m, peer_req);
		if (peer_req->flags & EE_SUBMITTED)
			break;
		else
			reported_preparing = true;
	}
}

static void seq_print_device_peer_requests(struct seq_file *m,
	struct drbd_device *device, unsigned long now)
{
	seq_puts(m, "minor\tvnr\tsector\tsize\trw\tage\tflags\n");
	spin_lock_irq(&device->resource->req_lock);
	seq_print_peer_request(m, device, &device->active_ee, now);
	seq_print_peer_request(m, device, &device->read_ee, now);
	seq_print_peer_request(m, device, &device->sync_ee, now);
	spin_unlock_irq(&device->resource->req_lock);
	if (test_bit(FLUSH_PENDING, &device->flags)) {
		seq_printf(m, "%u\t%u\t-\t-\tF\t%u\tflush\n",
			device->minor, device->vnr,
			jiffies_to_msecs(now - device->flush_jif));
	}
}

static void seq_print_resource_pending_peer_requests(struct seq_file *m,
	struct drbd_resource *resource, unsigned long now)
{
	struct drbd_device *device;
	unsigned int i;

	rcu_read_lock();
	idr_for_each_entry(&resource->devices, device, i) {
		seq_print_device_peer_requests(m, device, now);
	}
	rcu_read_unlock();
}

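/* Walk the transfer log and print a condensed summary.  Only requests that
 * show a not-yet-seen combination of "interesting" state bits are printed,
 * so the output stays short even for a long transfer log; the request lock
 * is dropped every 512 entries to keep the irq-off time bounded. */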
static void seq_print_resource_transfer_log_summary(struct seq_file *m,
	struct drbd_resource *resource,
	struct drbd_connection *connection,
	unsigned long now)
{
	struct drbd_request *req;
	unsigned int count = 0;
	unsigned int show_state = 0;

	seq_puts(m, "n\tdevice\tvnr\t" RQ_HDR);
	spin_lock_irq(&resource->req_lock);
	list_for_each_entry(req, &connection->transfer_log, tl_requests) {
		unsigned int tmp = 0;
		unsigned int s;
		++count;

		/* don't disable irq "forever" */
		if (!(count & 0x1ff)) {
			struct drbd_request *req_next;
			kref_get(&req->kref);
			spin_unlock_irq(&resource->req_lock);
			cond_resched();
			spin_lock_irq(&resource->req_lock);
			req_next = list_next_entry(req, tl_requests);
			if (kref_put(&req->kref, drbd_req_destroy))
				req = req_next;
			if (&req->tl_requests == &connection->transfer_log)
				break;
		}

		s = req->rq_state;

		/* This is meant to summarize timing issues, to be able to tell
		 * local disk problems from network problems.
		 * Skip requests, if we have shown an even older request with
		 * similar aspects already. */
		if (req->master_bio == NULL)
			tmp |= 1;
		if ((s & RQ_LOCAL_MASK) && (s & RQ_LOCAL_PENDING))
			tmp |= 2;
		if (s & RQ_NET_MASK) {
			if (!(s & RQ_NET_SENT))
				tmp |= 4;
			if (s & RQ_NET_PENDING)
				tmp |= 8;
			if (!(s & RQ_NET_DONE))
				tmp |= 16;
		}
		if ((tmp & show_state) == tmp)
			continue;
		show_state |= tmp;
		seq_printf(m, "%u\t", count);
		seq_print_minor_vnr_req(m, req, now);
		if (show_state == 0x1f)
			break;
	}
	spin_unlock_irq(&resource->req_lock);
}

/* TODO: transfer_log and friends should be moved to resource */
static int in_flight_summary_show(struct seq_file *m, void *pos)
{
	struct drbd_resource *resource = m->private;
	struct drbd_connection *connection;
	unsigned long jif = jiffies;

	connection = first_connection(resource);
	/* This does not happen, actually.
	 * But be robust and prepare for future code changes. */
	if (!connection || !kref_get_unless_zero(&connection->kref))
		return -ESTALE;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	seq_puts(m, "oldest bitmap IO\n");
	seq_print_resource_pending_bitmap_io(m, resource, jif);
	seq_putc(m, '\n');

	seq_puts(m, "meta data IO\n");
	seq_print_resource_pending_meta_io(m, resource, jif);
	seq_putc(m, '\n');

	seq_puts(m, "socket buffer stats\n");
	/* for each connection ... once we have more than one */
	rcu_read_lock();
	if (connection->data.socket) {
		/* open coded SIOCINQ, the "relevant" part */
		struct tcp_sock *tp = tcp_sk(connection->data.socket->sk);
		int answ = tp->rcv_nxt - tp->copied_seq;
		seq_printf(m, "unread receive buffer: %u Byte\n", answ);
		/* open coded SIOCOUTQ, the "relevant" part */
		answ = tp->write_seq - tp->snd_una;
		seq_printf(m, "unacked send buffer: %u Byte\n", answ);
	}
	rcu_read_unlock();
	seq_putc(m, '\n');

	seq_puts(m, "oldest peer requests\n");
	seq_print_resource_pending_peer_requests(m, resource, jif);
	seq_putc(m, '\n');

	seq_puts(m, "application requests waiting for activity log\n");
	seq_print_waiting_for_AL(m, resource, jif);
	seq_putc(m, '\n');

	seq_puts(m, "oldest application requests\n");
	seq_print_resource_transfer_log_summary(m, resource, connection, jif);
	seq_putc(m, '\n');

	jif = jiffies - jif;
	if (jif)
		seq_printf(m, "generated in %d ms\n", jiffies_to_msecs(jif));
	kref_put(&connection->kref, drbd_destroy_connection);
	return 0;
}

/* make sure at *open* time that the respective object won't go away. */
static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, void *),
		void *data, struct kref *kref,
		void (*release)(struct kref *))
{
	struct dentry *parent;
	int ret = -ESTALE;

	/* Are we still linked,
	 * or has debugfs_remove() already been called? */
	parent = file->f_path.dentry->d_parent;
	/* not sure if this can happen: */
	if (!parent || d_really_is_negative(parent))
		goto out;
	/* serialize with d_delete() */
	inode_lock(d_inode(parent));
	/* Make sure the object is still alive */
	if (simple_positive(file->f_path.dentry)
	&& kref_get_unless_zero(kref))
		ret = 0;
	inode_unlock(d_inode(parent));
	if (!ret) {
		ret = single_open(file, show, data);
		if (ret)
			kref_put(kref, release);
	}
out:
	return ret;
}

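/* in_flight_summary is a per-resource file: pin the resource while it is
 * open, and drop the reference again on release. */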
static int in_flight_summary_open(struct inode *inode, struct file *file)
{
	struct drbd_resource *resource = inode->i_private;
	return drbd_single_open(file, in_flight_summary_show, resource,
				&resource->kref, drbd_destroy_resource);
}

static int in_flight_summary_release(struct inode *inode, struct file *file)
{
	struct drbd_resource *resource = inode->i_private;
	kref_put(&resource->kref, drbd_destroy_resource);
	return single_release(inode, file);
}

static const struct file_operations in_flight_summary_fops = {
	.owner = THIS_MODULE,
	.open = in_flight_summary_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = in_flight_summary_release,
};

void drbd_debugfs_resource_add(struct drbd_resource *resource)
{
	struct dentry *dentry;
	if (!drbd_debugfs_resources)
		return;

	dentry = debugfs_create_dir(resource->name, drbd_debugfs_resources);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res = dentry;

	dentry = debugfs_create_dir("volumes", resource->debugfs_res);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res_volumes = dentry;

	dentry = debugfs_create_dir("connections", resource->debugfs_res);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res_connections = dentry;

	dentry = debugfs_create_file("in_flight_summary", S_IRUSR|S_IRGRP,
			resource->debugfs_res, resource,
			&in_flight_summary_fops);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	resource->debugfs_res_in_flight_summary = dentry;
	return;

fail:
	drbd_debugfs_resource_cleanup(resource);
	drbd_err(resource, "failed to create debugfs dentry\n");
}

static void drbd_debugfs_remove(struct dentry **dp)
{
	debugfs_remove(*dp);
	*dp = NULL;
}

void drbd_debugfs_resource_cleanup(struct drbd_resource *resource)
{
	/* it is ok to call debugfs_remove(NULL) */
	drbd_debugfs_remove(&resource->debugfs_res_in_flight_summary);
	drbd_debugfs_remove(&resource->debugfs_res_connections);
	drbd_debugfs_remove(&resource->debugfs_res_volumes);
	drbd_debugfs_remove(&resource->debugfs_res);
}

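/* The worker and receiver threads record their most recent callbacks in a
 * fixed-size ring of drbd_thread_timing_details.  Dump one such ring,
 * starting with the (approximately) oldest entry. */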
static void seq_print_one_timing_detail(struct seq_file *m,
	const struct drbd_thread_timing_details *tdp,
	unsigned long now)
{
	struct drbd_thread_timing_details td;
	/* No locking...
	 * use temporary assignment to get at consistent data. */
	do {
		td = *tdp;
	} while (td.cb_nr != tdp->cb_nr);
	if (!td.cb_addr)
		return;
	seq_printf(m, "%u\t%d\t%s:%u\t%ps\n",
			td.cb_nr,
			jiffies_to_msecs(now - td.start_jif),
			td.caller_fn, td.line,
			td.cb_addr);
}

static void seq_print_timing_details(struct seq_file *m,
		const char *title,
		unsigned int cb_nr, struct drbd_thread_timing_details *tdp, unsigned long now)
{
	unsigned int start_idx;
	unsigned int i;

	seq_printf(m, "%s\n", title);
	/* If not much is going on, this will result in natural ordering.
	 * If it is very busy, we will possibly skip events, or even see wrap
	 * arounds, which could only be avoided with locking.
	 */
	start_idx = cb_nr % DRBD_THREAD_DETAILS_HIST;
	for (i = start_idx; i < DRBD_THREAD_DETAILS_HIST; i++)
		seq_print_one_timing_detail(m, tdp+i, now);
	for (i = 0; i < start_idx; i++)
		seq_print_one_timing_detail(m, tdp+i, now);
}

static int callback_history_show(struct seq_file *m, void *ignored)
{
	struct drbd_connection *connection = m->private;
	unsigned long jif = jiffies;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	seq_puts(m, "n\tage\tcallsite\tfn\n");
	seq_print_timing_details(m, "worker", connection->w_cb_nr, connection->w_timing_details, jif);
	seq_print_timing_details(m, "receiver", connection->r_cb_nr, connection->r_timing_details, jif);
	return 0;
}

static int callback_history_open(struct inode *inode, struct file *file)
{
	struct drbd_connection *connection = inode->i_private;
	return drbd_single_open(file, callback_history_show, connection,
				&connection->kref, drbd_destroy_connection);
}

static int callback_history_release(struct inode *inode, struct file *file)
{
	struct drbd_connection *connection = inode->i_private;
	kref_put(&connection->kref, drbd_destroy_connection);
	return single_release(inode, file);
}

static const struct file_operations connection_callback_history_fops = {
	.owner = THIS_MODULE,
	.open = callback_history_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = callback_history_release,
};

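/* Show the oldest requests this connection is still waiting for, as tracked
 * in req_next, req_ack_pending and req_not_net_done; a request that appears
 * in more than one of these is printed only once. */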
static int connection_oldest_requests_show(struct seq_file *m, void *ignored)
{
	struct drbd_connection *connection = m->private;
	unsigned long now = jiffies;
	struct drbd_request *r1, *r2;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	spin_lock_irq(&connection->resource->req_lock);
	r1 = connection->req_next;
	if (r1)
		seq_print_minor_vnr_req(m, r1, now);
	r2 = connection->req_ack_pending;
	if (r2 && r2 != r1) {
		r1 = r2;
		seq_print_minor_vnr_req(m, r1, now);
	}
	r2 = connection->req_not_net_done;
	if (r2 && r2 != r1)
		seq_print_minor_vnr_req(m, r2, now);
	spin_unlock_irq(&connection->resource->req_lock);
	return 0;
}

static int connection_oldest_requests_open(struct inode *inode, struct file *file)
{
	struct drbd_connection *connection = inode->i_private;
	return drbd_single_open(file, connection_oldest_requests_show, connection,
				&connection->kref, drbd_destroy_connection);
}

static int connection_oldest_requests_release(struct inode *inode, struct file *file)
{
	struct drbd_connection *connection = inode->i_private;
	kref_put(&connection->kref, drbd_destroy_connection);
	return single_release(inode, file);
}

static const struct file_operations connection_oldest_requests_fops = {
	.owner = THIS_MODULE,
	.open = connection_oldest_requests_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = connection_oldest_requests_release,
};

void drbd_debugfs_connection_add(struct drbd_connection *connection)
{
	struct dentry *conns_dir = connection->resource->debugfs_res_connections;
	struct dentry *dentry;
	if (!conns_dir)
		return;

	/* Once we enable multiple peers,
	 * these connections will have descriptive names.
	 * For now, it is just the one connection to the (only) "peer". */
	dentry = debugfs_create_dir("peer", conns_dir);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	connection->debugfs_conn = dentry;

	dentry = debugfs_create_file("callback_history", S_IRUSR|S_IRGRP,
			connection->debugfs_conn, connection,
			&connection_callback_history_fops);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	connection->debugfs_conn_callback_history = dentry;

	dentry = debugfs_create_file("oldest_requests", S_IRUSR|S_IRGRP,
			connection->debugfs_conn, connection,
			&connection_oldest_requests_fops);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	connection->debugfs_conn_oldest_requests = dentry;
	return;

fail:
	drbd_debugfs_connection_cleanup(connection);
	drbd_err(connection, "failed to create debugfs dentry\n");
}

void drbd_debugfs_connection_cleanup(struct drbd_connection *connection)
{
	drbd_debugfs_remove(&connection->debugfs_conn_callback_history);
	drbd_debugfs_remove(&connection->debugfs_conn_oldest_requests);
	drbd_debugfs_remove(&connection->debugfs_conn);
}

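/* lc_seq_dump_details() callback: print the per-extent detail columns
 * (rs_left and the BME_* flags) for one resync extent. */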
static void resync_dump_detail(struct seq_file *m, struct lc_element *e)
{
	struct bm_extent *bme = lc_entry(e, struct bm_extent, lce);

	seq_printf(m, "%5d %s %s %s", bme->rs_left,
		test_bit(BME_NO_WRITES, &bme->flags) ? "NO_WRITES" : "---------",
		test_bit(BME_LOCKED, &bme->flags) ? "LOCKED" : "------",
		test_bit(BME_PRIORITY, &bme->flags) ? "PRIORITY" : "--------"
		);
}

static int device_resync_extents_show(struct seq_file *m, void *ignored)
{
	struct drbd_device *device = m->private;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	if (get_ldev_if_state(device, D_FAILED)) {
		lc_seq_printf_stats(m, device->resync);
		lc_seq_dump_details(m, device->resync, "rs_left flags", resync_dump_detail);
		put_ldev(device);
	}
	return 0;
}

static int device_act_log_extents_show(struct seq_file *m, void *ignored)
{
	struct drbd_device *device = m->private;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	if (get_ldev_if_state(device, D_FAILED)) {
		lc_seq_printf_stats(m, device->act_log);
		lc_seq_dump_details(m, device->act_log, "", NULL);
		put_ldev(device);
	}
	return 0;
}

static int device_oldest_requests_show(struct seq_file *m, void *ignored)
{
	struct drbd_device *device = m->private;
	struct drbd_resource *resource = device->resource;
	unsigned long now = jiffies;
	struct drbd_request *r1, *r2;
	int i;

	/* BUMP me if you change the file format/content/presentation */
	seq_printf(m, "v: %u\n\n", 0);

	seq_puts(m, RQ_HDR);
	spin_lock_irq(&resource->req_lock);
	/* WRITE, then READ */
	for (i = 1; i >= 0; --i) {
		r1 = list_first_entry_or_null(&device->pending_master_completion[i],
			struct drbd_request, req_pending_master_completion);
		r2 = list_first_entry_or_null(&device->pending_completion[i],
			struct drbd_request, req_pending_local);
		if (r1)
			seq_print_one_request(m, r1, now);
		if (r2 && r2 != r1)
			seq_print_one_request(m, r2, now);
	}
	spin_unlock_irq(&resource->req_lock);
	return 0;
}

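/* Dump the data generation UUID set from the on-disk meta data
 * (current UUID plus bitmap/history slots), one value per line. */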
static int device_data_gen_id_show(struct seq_file *m, void *ignored)
{
	struct drbd_device *device = m->private;
	struct drbd_md *md;
	enum drbd_uuid_index idx;

	if (!get_ldev_if_state(device, D_FAILED))
		return -ENODEV;

	md = &device->ldev->md;
	spin_lock_irq(&md->uuid_lock);
	for (idx = UI_CURRENT; idx <= UI_HISTORY_END; idx++) {
		seq_printf(m, "0x%016llX\n", md->uuid[idx]);
	}
	spin_unlock_irq(&md->uuid_lock);
	put_ldev(device);
	return 0;
}

static int device_ed_gen_id_show(struct seq_file *m, void *ignored)
{
	struct drbd_device *device = m->private;
	seq_printf(m, "0x%016llX\n", (unsigned long long)device->ed_uuid);
	return 0;
}

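/* Generate the open/release and file_operations boilerplate for one
 * per-device debugfs attribute: open pins the device via drbd_single_open(),
 * release drops that reference again. */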
#define drbd_debugfs_device_attr(name) \
static int device_ ## name ## _open(struct inode *inode, struct file *file) \
{ \
	struct drbd_device *device = inode->i_private; \
	return drbd_single_open(file, device_ ## name ## _show, device, \
				&device->kref, drbd_destroy_device); \
} \
static int device_ ## name ## _release(struct inode *inode, struct file *file) \
{ \
	struct drbd_device *device = inode->i_private; \
	kref_put(&device->kref, drbd_destroy_device); \
	return single_release(inode, file); \
} \
static const struct file_operations device_ ## name ## _fops = { \
	.owner = THIS_MODULE, \
	.open = device_ ## name ## _open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = device_ ## name ## _release, \
};

drbd_debugfs_device_attr(oldest_requests)
drbd_debugfs_device_attr(act_log_extents)
drbd_debugfs_device_attr(resync_extents)
drbd_debugfs_device_attr(data_gen_id)
drbd_debugfs_device_attr(ed_gen_id)

void drbd_debugfs_device_add(struct drbd_device *device)
{
	struct dentry *vols_dir = device->resource->debugfs_res_volumes;
	char minor_buf[8]; /* MINORMASK, MINORBITS == 20; */
	char vnr_buf[8];   /* volume number vnr is even 16 bit only; */
	char *slink_name = NULL;

	struct dentry *dentry;
	if (!vols_dir || !drbd_debugfs_minors)
		return;

	snprintf(vnr_buf, sizeof(vnr_buf), "%u", device->vnr);
	dentry = debugfs_create_dir(vnr_buf, vols_dir);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	device->debugfs_vol = dentry;

	snprintf(minor_buf, sizeof(minor_buf), "%u", device->minor);
	slink_name = kasprintf(GFP_KERNEL, "../resources/%s/volumes/%u",
			device->resource->name, device->vnr);
	if (!slink_name)
		goto fail;
	dentry = debugfs_create_symlink(minor_buf, drbd_debugfs_minors, slink_name);
	kfree(slink_name);
	slink_name = NULL;
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	device->debugfs_minor = dentry;

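/* DCF(): create one read-only debugfs file for this volume and remember its
 * dentry; bail out to the cleanup path on error. */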
#define DCF(name)	do { \
	dentry = debugfs_create_file(#name, S_IRUSR|S_IRGRP, \
			device->debugfs_vol, device, \
			&device_ ## name ## _fops); \
	if (IS_ERR_OR_NULL(dentry)) \
		goto fail; \
	device->debugfs_vol_ ## name = dentry; \
	} while (0)

	DCF(oldest_requests);
	DCF(act_log_extents);
	DCF(resync_extents);
	DCF(data_gen_id);
	DCF(ed_gen_id);
#undef DCF
	return;

fail:
	drbd_debugfs_device_cleanup(device);
	drbd_err(device, "failed to create debugfs entries\n");
}

void drbd_debugfs_device_cleanup(struct drbd_device *device)
{
	drbd_debugfs_remove(&device->debugfs_minor);
	drbd_debugfs_remove(&device->debugfs_vol_oldest_requests);
	drbd_debugfs_remove(&device->debugfs_vol_act_log_extents);
	drbd_debugfs_remove(&device->debugfs_vol_resync_extents);
	drbd_debugfs_remove(&device->debugfs_vol_data_gen_id);
	drbd_debugfs_remove(&device->debugfs_vol_ed_gen_id);
	drbd_debugfs_remove(&device->debugfs_vol);
}

void drbd_debugfs_peer_device_add(struct drbd_peer_device *peer_device)
{
	struct dentry *conn_dir = peer_device->connection->debugfs_conn;
	struct dentry *dentry;
	char vnr_buf[8];

	if (!conn_dir)
		return;

	snprintf(vnr_buf, sizeof(vnr_buf), "%u", peer_device->device->vnr);
	dentry = debugfs_create_dir(vnr_buf, conn_dir);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	peer_device->debugfs_peer_dev = dentry;
	return;

fail:
	drbd_debugfs_peer_device_cleanup(peer_device);
	drbd_err(peer_device, "failed to create debugfs entries\n");
}

void drbd_debugfs_peer_device_cleanup(struct drbd_peer_device *peer_device)
{
	drbd_debugfs_remove(&peer_device->debugfs_peer_dev);
}

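/* The "version" file is global and not tied to any object that could go
 * away, so plain single_open() is sufficient.  It exposes the build tag and
 * the module and protocol version numbers. */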
static int drbd_version_show(struct seq_file *m, void *ignored)
{
	seq_printf(m, "# %s\n", drbd_buildtag());
	seq_printf(m, "VERSION=%s\n", REL_VERSION);
	seq_printf(m, "API_VERSION=%u\n", API_VERSION);
	seq_printf(m, "PRO_VERSION_MIN=%u\n", PRO_VERSION_MIN);
	seq_printf(m, "PRO_VERSION_MAX=%u\n", PRO_VERSION_MAX);
	return 0;
}

static int drbd_version_open(struct inode *inode, struct file *file)
{
	return single_open(file, drbd_version_show, NULL);
}

static struct file_operations drbd_version_fops = {
	.owner = THIS_MODULE,
	.open = drbd_version_open,
	.llseek = seq_lseek,
	.read = seq_read,
	.release = single_release,
};

/* not __exit, may be indirectly called
 * from the module-load-failure path as well. */
void drbd_debugfs_cleanup(void)
{
	drbd_debugfs_remove(&drbd_debugfs_resources);
	drbd_debugfs_remove(&drbd_debugfs_minors);
	drbd_debugfs_remove(&drbd_debugfs_version);
	drbd_debugfs_remove(&drbd_debugfs_root);
}

int __init drbd_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_dir("drbd", NULL);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_root = dentry;

	dentry = debugfs_create_file("version", 0444, drbd_debugfs_root, NULL, &drbd_version_fops);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_version = dentry;

	dentry = debugfs_create_dir("resources", drbd_debugfs_root);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_resources = dentry;

	dentry = debugfs_create_dir("minors", drbd_debugfs_root);
	if (IS_ERR_OR_NULL(dentry))
		goto fail;
	drbd_debugfs_minors = dentry;
	return 0;

fail:
	drbd_debugfs_cleanup();
	if (dentry)
		return PTR_ERR(dentry);
	else
		return -EINVAL;
}