/*
 * Copyright (c) 2009 by Martin Fuzzey
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of imx21-hcd.c */

#ifdef CONFIG_DYNAMIC_DEBUG
#define DEBUG
#endif

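/*
 * When DEBUG is not defined, every debug hook below collapses to an empty
 * inline stub, so imx21-hcd.c can call these functions unconditionally with
 * no run-time or code-size cost in non-debug builds.
 */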
#ifndef DEBUG

static inline void create_debug_files(struct imx21 *imx21) { }
static inline void remove_debug_files(struct imx21 *imx21) { }
static inline void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) {}
static inline void debug_urb_completed(struct imx21 *imx21, struct urb *urb,
	int status) {}
static inline void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) {}
static inline void debug_urb_queued_for_etd(struct imx21 *imx21,
	struct urb *urb) {}
static inline void debug_urb_queued_for_dmem(struct imx21 *imx21,
	struct urb *urb) {}
static inline void debug_etd_allocated(struct imx21 *imx21) {}
static inline void debug_etd_freed(struct imx21 *imx21) {}
static inline void debug_dmem_allocated(struct imx21 *imx21, int size) {}
static inline void debug_dmem_freed(struct imx21 *imx21, int size) {}
static inline void debug_isoc_submitted(struct imx21 *imx21,
	int frame, struct td *td) {}
static inline void debug_isoc_completed(struct imx21 *imx21,
	int frame, struct td *td, int cc, int len) {}

#else

#include <linux/debugfs.h>
#include <linux/seq_file.h>

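/*
 * Label tables used by format_etd_dword0() to decode the DIRECT, SPEED and
 * FORMAT bit fields of ETD dword 0 into human-readable strings.
 */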
static const char *dir_labels[] = {
	"TD 0",
	"OUT",
	"IN",
	"TD 1"
};

static const char *speed_labels[] = {
	"Full",
	"Low"
};

static const char *format_labels[] = {
	"Control",
	"ISO",
	"Bulk",
	"Interrupt"
};

static inline struct debug_stats *stats_for_urb(struct imx21 *imx21,
	struct urb *urb)
{
	return usb_pipeisoc(urb->pipe) ?
		&imx21->isoc_stats : &imx21->nonisoc_stats;
}

static void debug_urb_submitted(struct imx21 *imx21, struct urb *urb)
{
	stats_for_urb(imx21, urb)->submitted++;
}

static void debug_urb_completed(struct imx21 *imx21, struct urb *urb, int st)
{
	if (st)
		stats_for_urb(imx21, urb)->completed_failed++;
	else
		stats_for_urb(imx21, urb)->completed_ok++;
}

static void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb)
{
	stats_for_urb(imx21, urb)->unlinked++;
}

static void debug_urb_queued_for_etd(struct imx21 *imx21, struct urb *urb)
{
	stats_for_urb(imx21, urb)->queue_etd++;
}

static void debug_urb_queued_for_dmem(struct imx21 *imx21, struct urb *urb)
{
	stats_for_urb(imx21, urb)->queue_dmem++;
}

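/*
 * ETD and DMEM usage counters: "value" tracks the current allocation and
 * "maximum" records the high-water mark reported by the debugfs "status"
 * file.
 */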
static inline void debug_etd_allocated(struct imx21 *imx21)
{
	imx21->etd_usage.maximum = max(
			++(imx21->etd_usage.value),
			imx21->etd_usage.maximum);
}

static inline void debug_etd_freed(struct imx21 *imx21)
{
	imx21->etd_usage.value--;
}

static inline void debug_dmem_allocated(struct imx21 *imx21, int size)
{
	imx21->dmem_usage.value += size;
	imx21->dmem_usage.maximum = max(
			imx21->dmem_usage.value,
			imx21->dmem_usage.maximum);
}

static inline void debug_dmem_freed(struct imx21 *imx21, int size)
{
	imx21->dmem_usage.value -= size;
}

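/*
 * Isochronous TD tracing: submissions are recorded in a small circular
 * buffer. On completion the matching entry is updated and, if the completion
 * code indicates an error, copied into a separate ring of failed transfers so
 * that failures remain visible in the debugfs "isoc" file for longer.
 */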
static void debug_isoc_submitted(struct imx21 *imx21,
	int frame, struct td *td)
{
	struct debug_isoc_trace *trace = &imx21->isoc_trace[
		imx21->isoc_trace_index++];

	imx21->isoc_trace_index %= ARRAY_SIZE(imx21->isoc_trace);
	trace->schedule_frame = td->frame;
	trace->submit_frame = frame;
	trace->request_len = td->len;
	trace->td = td;
}

static inline void debug_isoc_completed(struct imx21 *imx21,
	int frame, struct td *td, int cc, int len)
{
	struct debug_isoc_trace *trace, *trace_failed;
	int i;
	int found = 0;

	trace = imx21->isoc_trace;
	for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) {
		if (trace->td == td) {
			trace->done_frame = frame;
			trace->done_len = len;
			trace->cc = cc;
			trace->td = NULL;
			found = 1;
			break;
		}
	}

	if (found && cc) {
		trace_failed = &imx21->isoc_trace_failed[
					imx21->isoc_trace_index_failed++];

		imx21->isoc_trace_index_failed %= ARRAY_SIZE(
						imx21->isoc_trace_failed);
		*trace_failed = *trace;
	}
}

static char *format_ep(struct usb_host_endpoint *ep, char *buf, int bufsize)
{
	if (ep)
		snprintf(buf, bufsize, "ep_%02x (type:%02X kaddr:%p)",
			ep->desc.bEndpointAddress,
			usb_endpoint_type(&ep->desc),
			ep);
	else
		snprintf(buf, bufsize, "none");
	return buf;
}

static char *format_etd_dword0(u32 value, char *buf, int bufsize)
{
	snprintf(buf, bufsize,
		"addr=%d ep=%d dir=%s speed=%s format=%s halted=%d",
		value & 0x7F,
		(value >> DW0_ENDPNT) & 0x0F,
		dir_labels[(value >> DW0_DIRECT) & 0x03],
		speed_labels[(value >> DW0_SPEED) & 0x01],
		format_labels[(value >> DW0_FORMAT) & 0x03],
		(value >> DW0_HALTED) & 0x01);
	return buf;
}

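/*
 * seq_file show callbacks backing the debugfs files. Each one takes the
 * driver spinlock while it walks the driver and hardware state so the
 * snapshot it prints is internally consistent.
 */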
static int debug_status_show(struct seq_file *s, void *v)
{
	struct imx21 *imx21 = s->private;
	int etds_allocated = 0;
	int etds_sw_busy = 0;
	int etds_hw_busy = 0;
	int dmem_blocks = 0;
	int queued_for_etd = 0;
	int queued_for_dmem = 0;
	unsigned int dmem_bytes = 0;
	int i;
	struct etd_priv *etd;
	u32 etd_enable_mask;
	unsigned long flags;
	struct imx21_dmem_area *dmem;
	struct ep_priv *ep_priv;

	spin_lock_irqsave(&imx21->lock, flags);

	etd_enable_mask = readl(imx21->regs + USBH_ETDENSET);
	for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
		if (etd->alloc)
			etds_allocated++;
		if (etd->urb)
			etds_sw_busy++;
		if (etd_enable_mask & (1 << i))
			etds_hw_busy++;
	}

	list_for_each_entry(dmem, &imx21->dmem_list, list) {
		dmem_bytes += dmem->size;
		dmem_blocks++;
	}

	list_for_each_entry(ep_priv, &imx21->queue_for_etd, queue)
		queued_for_etd++;

	list_for_each_entry(etd, &imx21->queue_for_dmem, queue)
		queued_for_dmem++;

	spin_unlock_irqrestore(&imx21->lock, flags);

	seq_printf(s,
		"Frame: %d\n"
		"ETDs allocated: %d/%d (max=%d)\n"
		"ETDs in use sw: %d\n"
		"ETDs in use hw: %d\n"
		"DMEM allocated: %d/%d (max=%d)\n"
		"DMEM blocks: %d\n"
		"Queued waiting for ETD: %d\n"
		"Queued waiting for DMEM: %d\n",
		readl(imx21->regs + USBH_FRMNUB) & 0xFFFF,
		etds_allocated, USB_NUM_ETD, imx21->etd_usage.maximum,
		etds_sw_busy,
		etds_hw_busy,
		dmem_bytes, DMEM_SIZE, imx21->dmem_usage.maximum,
		dmem_blocks,
		queued_for_etd,
		queued_for_dmem);

	return 0;
}

static int debug_dmem_show(struct seq_file *s, void *v)
{
	struct imx21 *imx21 = s->private;
	struct imx21_dmem_area *dmem;
	unsigned long flags;
	char ep_text[40];

	spin_lock_irqsave(&imx21->lock, flags);

	list_for_each_entry(dmem, &imx21->dmem_list, list)
		seq_printf(s,
			"%04X: size=0x%X ep=%s\n",
			dmem->offset, dmem->size,
			format_ep(dmem->ep, ep_text, sizeof(ep_text)));

	spin_unlock_irqrestore(&imx21->lock, flags);

	return 0;
}

static int debug_etd_show(struct seq_file *s, void *v)
{
	struct imx21 *imx21 = s->private;
	struct etd_priv *etd;
	char buf[60];
	u32 dword;
	int i, j;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
		int state = -1;
		struct urb_priv *urb_priv;

		if (etd->urb) {
			urb_priv = etd->urb->hcpriv;
			if (urb_priv)
				state = urb_priv->state;
		}

		seq_printf(s,
			"etd_num: %d\n"
			"ep: %s\n"
			"alloc: %d\n"
			"len: %d\n"
			"busy sw: %d\n"
			"busy hw: %d\n"
			"urb state: %d\n"
			"current urb: %p\n",
			i,
			format_ep(etd->ep, buf, sizeof(buf)),
			etd->alloc,
			etd->len,
			etd->urb != NULL,
			(readl(imx21->regs + USBH_ETDENSET) & (1 << i)) > 0,
			state,
			etd->urb);

		for (j = 0; j < 4; j++) {
			dword = etd_readl(imx21, i, j);
			switch (j) {
			case 0:
				format_etd_dword0(dword, buf, sizeof(buf));
				break;
			case 2:
				snprintf(buf, sizeof(buf),
					"cc=0X%02X", dword >> DW2_COMPCODE);
				break;
			default:
				*buf = 0;
				break;
			}
			seq_printf(s,
				"dword %d: submitted=%08X cur=%08X [%s]\n",
				j,
				etd->submitted_dwords[j],
				dword,
				buf);
		}
		seq_printf(s, "\n");
	}

	spin_unlock_irqrestore(&imx21->lock, flags);

	return 0;
}

static void debug_statistics_show_one(struct seq_file *s,
	const char *name, struct debug_stats *stats)
{
	seq_printf(s, "%s:\n"
		"submitted URBs: %lu\n"
		"completed OK: %lu\n"
		"completed failed: %lu\n"
		"unlinked: %lu\n"
		"queued for ETD: %lu\n"
		"queued for DMEM: %lu\n\n",
		name,
		stats->submitted,
		stats->completed_ok,
		stats->completed_failed,
		stats->unlinked,
		stats->queue_etd,
		stats->queue_dmem);
}

static int debug_statistics_show(struct seq_file *s, void *v)
{
	struct imx21 *imx21 = s->private;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	debug_statistics_show_one(s, "nonisoc", &imx21->nonisoc_stats);
	debug_statistics_show_one(s, "isoc", &imx21->isoc_stats);
	seq_printf(s, "unblock kludge triggers: %lu\n", imx21->debug_unblocks);
	spin_unlock_irqrestore(&imx21->lock, flags);

	return 0;
}

static void debug_isoc_show_one(struct seq_file *s,
	const char *name, int index, struct debug_isoc_trace *trace)
{
	seq_printf(s, "%s %d:\n"
		"cc=0X%02X\n"
		"scheduled frame %d (%d)\n"
		"submitted frame %d (%d)\n"
		"completed frame %d (%d)\n"
		"requested length=%d\n"
		"completed length=%d\n\n",
		name, index,
		trace->cc,
		trace->schedule_frame, trace->schedule_frame & 0xFFFF,
		trace->submit_frame, trace->submit_frame & 0xFFFF,
		trace->done_frame, trace->done_frame & 0xFFFF,
		trace->request_len,
		trace->done_len);
}

static int debug_isoc_show(struct seq_file *s, void *v)
{
	struct imx21 *imx21 = s->private;
	struct debug_isoc_trace *trace;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&imx21->lock, flags);

	trace = imx21->isoc_trace_failed;
	for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace_failed); i++, trace++)
		debug_isoc_show_one(s, "isoc failed", i, trace);

	trace = imx21->isoc_trace;
	for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++)
		debug_isoc_show_one(s, "isoc", i, trace);

	spin_unlock_irqrestore(&imx21->lock, flags);

	return 0;
}

static int debug_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_status_show, inode->i_private);
}

static int debug_dmem_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_dmem_show, inode->i_private);
}

static int debug_etd_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_etd_show, inode->i_private);
}

static int debug_statistics_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_statistics_show, inode->i_private);
}

static int debug_isoc_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_isoc_show, inode->i_private);
}

static const struct file_operations debug_status_fops = {
	.open = debug_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static const struct file_operations debug_dmem_fops = {
	.open = debug_dmem_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static const struct file_operations debug_etd_fops = {
	.open = debug_etd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static const struct file_operations debug_statistics_fops = {
	.open = debug_statistics_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static const struct file_operations debug_isoc_fops = {
	.open = debug_isoc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

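/*
 * Create one debugfs directory per controller, named after the device and
 * containing the read-only "status", "dmem", "etd", "statistics" and "isoc"
 * files. With debugfs mounted at its usual location this typically ends up
 * as e.g. /sys/kernel/debug/<device name>/status (the exact path depends on
 * where debugfs is mounted).
 */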
static void create_debug_files(struct imx21 *imx21)
{
	imx21->debug_root = debugfs_create_dir(dev_name(imx21->dev), NULL);
	if (!imx21->debug_root)
		goto failed_create_rootdir;

	if (!debugfs_create_file("status", S_IRUGO,
			imx21->debug_root, imx21, &debug_status_fops))
		goto failed_create;

	if (!debugfs_create_file("dmem", S_IRUGO,
			imx21->debug_root, imx21, &debug_dmem_fops))
		goto failed_create;

	if (!debugfs_create_file("etd", S_IRUGO,
			imx21->debug_root, imx21, &debug_etd_fops))
		goto failed_create;

	if (!debugfs_create_file("statistics", S_IRUGO,
			imx21->debug_root, imx21, &debug_statistics_fops))
		goto failed_create;

	if (!debugfs_create_file("isoc", S_IRUGO,
			imx21->debug_root, imx21, &debug_isoc_fops))
		goto failed_create;

	return;

failed_create:
	debugfs_remove_recursive(imx21->debug_root);

failed_create_rootdir:
	imx21->debug_root = NULL;
}

static void remove_debug_files(struct imx21 *imx21)
{
	if (imx21->debug_root) {
		debugfs_remove_recursive(imx21->debug_root);
		imx21->debug_root = NULL;
	}
}

#endif