/*
 * MTD Oops/Panic logger
 *
 * Copyright © 2007 Nokia Corporation. All rights reserved.
 *
 * Author: Richard Purdie <rpurdie@openedhand.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/kmsg_dump.h>

/* Maximum MTD partition size */
#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)

#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
#define MTDOOPS_HEADER_SIZE 8

static unsigned long record_size = 4096;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
		"record size for MTD OOPS pages in bytes (default 4096)");

static char mtddev[80];
module_param_string(mtddev, mtddev, 80, 0400);
MODULE_PARM_DESC(mtddev,
		"name or index number of the MTD device to use");

static int dump_oops = 1;
module_param(dump_oops, int, 0600);
MODULE_PARM_DESC(dump_oops,
		"set to 1 to dump oopses, 0 to only dump panics (default 1)");

static struct mtdoops_context {
	struct kmsg_dumper dump;

	int mtd_index;
	struct work_struct work_erase;
	struct work_struct work_write;
	struct mtd_info *mtd;
	int oops_pages;
	int nextpage;
	int nextcount;
	unsigned long *oops_page_used;

	void *oops_buf;
} oops_cxt;

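/*
 * Helpers for the oops_page_used bitmap. A record page is marked used
 * once data has been written to it and only becomes unused again when
 * its eraseblock is erased.
 */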
static void mark_page_used(struct mtdoops_context *cxt, int page)
{
	set_bit(page, cxt->oops_page_used);
}

static void mark_page_unused(struct mtdoops_context *cxt, int page)
{
	clear_bit(page, cxt->oops_page_used);
}

static int page_is_used(struct mtdoops_context *cxt, int page)
{
	return test_bit(page, cxt->oops_page_used);
}

static void mtdoops_erase_callback(struct erase_info *done)
{
	wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
	wake_up(wait_q);
}

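/*
 * Erase the eraseblock containing @offset, waiting synchronously for the
 * erase to complete, then mark every record page in that block as unused.
 */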
static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
	u32 start_page = start_page_offset / record_size;
	u32 erase_pages = mtd->erasesize / record_size;
	struct erase_info erase;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	int ret;
	int page;

	init_waitqueue_head(&wait_q);
	erase.mtd = mtd;
	erase.callback = mtdoops_erase_callback;
	erase.addr = offset;
	erase.len = mtd->erasesize;
	erase.priv = (u_long)&wait_q;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&wait_q, &wait);

	ret = mtd_erase(mtd, &erase);
	if (ret) {
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wait_q, &wait);
		printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
		       (unsigned long long)erase.addr,
		       (unsigned long long)erase.len, mtddev);
		return ret;
	}

	schedule();  /* Wait for erase to finish. */
	remove_wait_queue(&wait_q, &wait);

	/* Mark pages as unused */
	for (page = start_page; page < start_page + erase_pages; page++)
		mark_page_unused(cxt, page);

	return 0;
}

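/*
 * Advance to the next record slot, wrapping around at the end of the
 * partition. If the new slot still holds an old record, kick off the
 * erase work; otherwise it is immediately ready for the next dump.
 */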
static void mtdoops_inc_counter(struct mtdoops_context *cxt)
{
	cxt->nextpage++;
	if (cxt->nextpage >= cxt->oops_pages)
		cxt->nextpage = 0;
	cxt->nextcount++;
	if (cxt->nextcount == 0xffffffff)
		cxt->nextcount = 0;

	if (page_is_used(cxt, cxt->nextpage)) {
		schedule_work(&cxt->work_erase);
		return;
	}

	printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
	       cxt->nextpage, cxt->nextcount);
}

/* Scheduled work - when we can't proceed without erasing a block */
static void mtdoops_workfunc_erase(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_erase);
	struct mtd_info *mtd = cxt->mtd;
	int i = 0, j, ret, mod;

	/* We were unregistered */
	if (!mtd)
		return;

	mod = (cxt->nextpage * record_size) % mtd->erasesize;
	if (mod != 0) {
		cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
	}

	while (mtd_can_have_bb(mtd)) {
		ret = mtd_block_isbad(mtd, cxt->nextpage * record_size);
		if (!ret)
			break;
		if (ret < 0) {
			printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
			return;
		}
badblock:
		printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
		       cxt->nextpage * record_size);
		i++;
		cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
		if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
			printk(KERN_ERR "mtdoops: all blocks bad!\n");
			return;
		}
	}

	for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
		ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);

	if (ret >= 0) {
		printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
		       cxt->nextpage, cxt->nextcount);
		return;
	}

	if (mtd_can_have_bb(mtd) && ret == -EIO) {
		ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
		if (ret < 0) {
			printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
			return;
		}
	}
	goto badblock;
}

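/*
 * Write one record to the current page: an 8-byte header holding the
 * sequence counter and MTDOOPS_KERNMSG_MAGIC, followed by the captured
 * log text. The panic path goes through mtd_panic_write() so that no
 * locking or sleeping is needed; the oops path uses a normal mtd_write()
 * from the workqueue.
 */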
static void mtdoops_write(struct mtdoops_context *cxt, int panic)
{
	struct mtd_info *mtd = cxt->mtd;
	size_t retlen;
	u32 *hdr;
	int ret;

	/* Add mtdoops header to the buffer */
	hdr = cxt->oops_buf;
	hdr[0] = cxt->nextcount;
	hdr[1] = MTDOOPS_KERNMSG_MAGIC;

	if (panic) {
		ret = mtd_panic_write(mtd, cxt->nextpage * record_size,
				      record_size, &retlen, cxt->oops_buf);
		if (ret == -EOPNOTSUPP) {
			printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
			return;
		}
	} else
		ret = mtd_write(mtd, cxt->nextpage * record_size,
				record_size, &retlen, cxt->oops_buf);

	if (retlen != record_size || ret < 0)
		printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
		       cxt->nextpage * record_size, retlen, record_size, ret);
	mark_page_used(cxt, cxt->nextpage);
	memset(cxt->oops_buf, 0xff, record_size);

	mtdoops_inc_counter(cxt);
}

static void mtdoops_workfunc_write(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_write);

	mtdoops_write(cxt, 0);
}

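/*
 * Scan every record header on the device at attach time to find the most
 * recently written record. The comparisons below treat the 32-bit
 * sequence counter as wrapping, so a small counter following a very large
 * one is still recognised as the newest entry.
 */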
static void find_next_position(struct mtdoops_context *cxt)
{
	struct mtd_info *mtd = cxt->mtd;
	int ret, page, maxpos = 0;
	u32 count[2], maxcount = 0xffffffff;
	size_t retlen;

	for (page = 0; page < cxt->oops_pages; page++) {
		if (mtd_can_have_bb(mtd) &&
		    mtd_block_isbad(mtd, page * record_size))
			continue;
		/* Assume the page is used */
		mark_page_used(cxt, page);
		ret = mtd_read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
			       &retlen, (u_char *)&count[0]);
		if (retlen != MTDOOPS_HEADER_SIZE ||
				(ret < 0 && !mtd_is_bitflip(ret))) {
			printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
			       page * record_size, retlen,
			       MTDOOPS_HEADER_SIZE, ret);
			continue;
		}

		if (count[0] == 0xffffffff && count[1] == 0xffffffff)
			mark_page_unused(cxt, page);
		if (count[0] == 0xffffffff)
			continue;
		if (maxcount == 0xffffffff) {
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] > maxcount && count[0] < 0xc0000000) {
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] > maxcount && count[0] > 0xc0000000
					&& maxcount > 0x80000000) {
			maxcount = count[0];
			maxpos = page;
		}
	}
	if (maxcount == 0xffffffff) {
		cxt->nextpage = 0;
		cxt->nextcount = 1;
		schedule_work(&cxt->work_erase);
		return;
	}

	cxt->nextpage = maxpos;
	cxt->nextcount = maxcount;

	mtdoops_inc_counter(cxt);
}

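/*
 * kmsg_dump callback: copy as much of the tail of the two kmsg buffers as
 * fits after the 8-byte header into oops_buf. Panic records are written
 * out immediately via mtdoops_write(); oops records are deferred to the
 * write workqueue.
 */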
static void mtdoops_do_dump(struct kmsg_dumper *dumper,
		enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
		const char *s2, unsigned long l2)
{
	struct mtdoops_context *cxt = container_of(dumper,
			struct mtdoops_context, dump);
	unsigned long s1_start, s2_start;
	unsigned long l1_cpy, l2_cpy;
	char *dst;

	if (reason != KMSG_DUMP_OOPS &&
	    reason != KMSG_DUMP_PANIC)
		return;

	/* Only dump oopses if dump_oops is set */
	if (reason == KMSG_DUMP_OOPS && !dump_oops)
		return;

	dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
	l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE);
	l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);

	s2_start = l2 - l2_cpy;
	s1_start = l1 - l1_cpy;

	memcpy(dst, s1 + s1_start, l1_cpy);
	memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);

	/* Panics must be written immediately */
	if (reason != KMSG_DUMP_OOPS)
		mtdoops_write(cxt, 1);

	/* For other cases, schedule work to write it "nicely" */
	schedule_work(&cxt->work_write);
}

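/*
 * MTD notifier: called for every MTD device that is added. Only the
 * device selected by the mtddev parameter (by name or index) is accepted,
 * and it must hold at least two eraseblocks, have an eraseblock no
 * smaller than record_size and not exceed MTDOOPS_MAX_MTD_SIZE.
 */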
static void mtdoops_notify_add(struct mtd_info *mtd)
{
	struct mtdoops_context *cxt = &oops_cxt;
	u64 mtdoops_pages = div_u64(mtd->size, record_size);
	int err;

	if (!strcmp(mtd->name, mtddev))
		cxt->mtd_index = mtd->index;

	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
		return;

	if (mtd->size < mtd->erasesize * 2) {
		printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
		       mtd->index);
		return;
	}
	if (mtd->erasesize < record_size) {
		printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
		       mtd->index);
		return;
	}
	if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
		printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n",
		       mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
		return;
	}

	/* oops_page_used is a bit field */
	cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
			BITS_PER_LONG) * sizeof(unsigned long));
	if (!cxt->oops_page_used) {
		printk(KERN_ERR "mtdoops: could not allocate page array\n");
		return;
	}

	cxt->dump.dump = mtdoops_do_dump;
	err = kmsg_dump_register(&cxt->dump);
	if (err) {
		printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err);
		vfree(cxt->oops_page_used);
		cxt->oops_page_used = NULL;
		return;
	}

	cxt->mtd = mtd;
	cxt->oops_pages = (int)mtd->size / record_size;
	find_next_position(cxt);
	printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
}

static void mtdoops_notify_remove(struct mtd_info *mtd)
{
	struct mtdoops_context *cxt = &oops_cxt;

	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
		return;

	if (kmsg_dump_unregister(&cxt->dump) < 0)
		printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");

	cxt->mtd = NULL;
	flush_work_sync(&cxt->work_erase);
	flush_work_sync(&cxt->work_write);
}


static struct mtd_notifier mtdoops_notifier = {
	.add	= mtdoops_notify_add,
	.remove	= mtdoops_notify_remove,
};

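/*
 * Module init: validate the parameters, allocate the record buffer and
 * register with the MTD layer. The actual attach happens from the add
 * notifier once the requested MTD device shows up.
 */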
static int __init mtdoops_init(void)
{
	struct mtdoops_context *cxt = &oops_cxt;
	int mtd_index;
	char *endp;

	if (strlen(mtddev) == 0) {
		printk(KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n");
		return -EINVAL;
	}
	if ((record_size & 4095) != 0) {
		printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n");
		return -EINVAL;
	}
	if (record_size < 4096) {
		printk(KERN_ERR "mtdoops: record_size must be at least 4096 bytes\n");
		return -EINVAL;
	}

	/* Setup the MTD device to use */
	cxt->mtd_index = -1;
	mtd_index = simple_strtoul(mtddev, &endp, 0);
	if (*endp == '\0')
		cxt->mtd_index = mtd_index;

	cxt->oops_buf = vmalloc(record_size);
	if (!cxt->oops_buf) {
		printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
		return -ENOMEM;
	}
	memset(cxt->oops_buf, 0xff, record_size);

	INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
	INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);

	register_mtd_user(&mtdoops_notifier);
	return 0;
}

static void __exit mtdoops_exit(void)
{
	struct mtdoops_context *cxt = &oops_cxt;

	unregister_mtd_user(&mtdoops_notifier);
	vfree(cxt->oops_buf);
	vfree(cxt->oops_page_used);
}


module_init(mtdoops_init);
module_exit(mtdoops_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");