/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#define NR_PMB_ENTRIES	16

static void __pmb_unmap(struct pmb_entry *);

static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static unsigned long pmb_map;

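/*
 * Each PMB entry is programmed through a pair of memory-mapped array
 * registers; the helpers below compute the address-array and data-array
 * register addresses for a given entry slot.
 */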
static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

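/*
 * Scan pmb_map for a free slot. The test_and_set_bit() retry covers the
 * race where another CPU claims the slot between the bitmap scan and the
 * atomic set.
 */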
static int pmb_alloc_entry(void)
{
	unsigned int pos;

repeat:
	pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

	/*
	 * find_first_zero_bit() returns NR_PMB_ENTRIES when the map is
	 * full, so the bound must be inclusive.
	 */
	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, &pmb_map))
		goto repeat;

	return pos;
}

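/*
 * Allocate a software PMB entry, either in the first free slot or in a
 * caller-specified slot (anything other than PMB_NO_ENTRY, used when
 * synchronizing with mappings already present in hardware).
 */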
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	int pos;

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (pos < 0)
			return ERR_PTR(pos);
	} else {
		if (test_and_set_bit(entry, &pmb_map))
			return ERR_PTR(-ENOSPC);
		pos = entry;
	}

	pmbe = &pmb_entry_list[pos];

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;
}

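/*
 * Return an entry to the pool: wipe the software copy and release its
 * slot in pmb_map.
 */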
static void pmb_free(struct pmb_entry *pmbe)
{
	int pos = pmbe->entry;

	pmbe->vpn	= 0;
	pmbe->ppn	= 0;
	pmbe->flags	= 0;
	pmbe->entry	= 0;

	clear_bit(pos, &pmb_map);
}

/*
 * Must be in P2 for __set_pmb_entry()
 */
static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
			    unsigned long flags, int pos)
{
	__raw_writel(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	__raw_writel(ppn | flags | PMB_V, mk_pmb_data(pos));
}

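/*
 * Wrappers that hop to the uncached (P2) mirror around the raw register
 * writes: the PMB must not be rewritten while executing through a mapping
 * that it itself provides.
 */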
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	jump_to_uncached();
	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
	back_to_cached();
}

static void clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	if (unlikely(entry >= NR_PMB_ENTRIES))
		return;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	__raw_writel(__raw_readl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	__raw_writel(__raw_readl(addr) & ~PMB_V, addr);

	back_to_cached();
}

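/*
 * Kept sorted from largest to smallest: pmb_remap() walks this table in
 * order, greedily mapping with the biggest size that still fits.
 */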
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = 0x20000000, .flag = PMB_SZ_512M, },
	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
};

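/*
 * Map [phys, phys + size) at vaddr with the caching attributes encoded in
 * prot, using as many PMB entries as required. Returns the number of bytes
 * actually mapped (which may be less than 'size' if the remainder drops
 * below the smallest PMB page size), or a negative error code.
 */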
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;
	u64 flags;

	flags = pgprot_val(prot);

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		set_pmb_entry(pmbe);

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
	}

	if (size >= 0x1000000)
		goto again;

	return wanted - size;

out:
	if (pmbp)
		__pmb_unmap(pmbp);

	return err;
}

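/*
 * Tear down the mapping that starts at virtual address 'addr', following
 * the link chain built up by pmb_remap().
 */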
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry *pmbe = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, &pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == addr)
				break;
			/* Reset, so a failed search leaves pmbe NULL */
			pmbe = NULL;
		}
	}

	if (unlikely(!pmbe))
		return;

	__pmb_unmap(pmbe);
}

static void __pmb_unmap(struct pmb_entry *pmbe)
{
	BUG_ON(!test_bit(pmbe->entry, &pmb_map));

	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}

static inline void
pmb_log_mapping(unsigned long data_val, unsigned long vpn, unsigned long ppn)
{
	unsigned int size;
	const char *sz_str;

	size = data_val & PMB_SZ_MASK;

	sz_str = (size == PMB_SZ_16M)  ? " 16MB":
		 (size == PMB_SZ_64M)  ? " 64MB":
		 (size == PMB_SZ_128M) ? "128MB":
					 "512MB";

	pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
		vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
		(data_val & PMB_C) ? "" : "un");
}

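/*
 * Only PPNs that fall inside the kernel's own physical memory window are
 * worth preserving when synchronizing the boot mappings.
 */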
static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

static int pmb_synchronize_mappings(void)
{
	unsigned int applied = 0;
	int i;

	pr_info("PMB: boot mappings:\n");

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			__raw_writel(addr_val & ~PMB_V, addr);
			__raw_writel(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
#if defined(CONFIG_CACHE_WRITETHROUGH)
			data_val |= PMB_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
			data_val &= ~PMB_WT;
#else
			data_val &= ~(PMB_C | PMB_WT);
#endif
			__raw_writel(data_val, data);
		}

		flags = data_val & (PMB_SZ_MASK | PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		pmb_log_mapping(data_val, vpn, ppn);

		applied++;
	}

	return (applied == 0);
}

int pmb_init(void)
{
	int ret;

	jump_to_uncached();

	/*
	 * Sync our software copy of the PMB mappings with those in
	 * hardware. The mappings in the hardware PMB were either set up
	 * by the bootloader or very early on by the kernel.
	 */
	ret = pmb_synchronize_mappings();
	if (unlikely(ret == 0)) {
		back_to_cached();
		return 0;
	}

	__raw_writel(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);

	back_to_cached();

	return 0;
}

bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

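/*
 * debugfs interface: dump the live hardware PMB state, one line per
 * entry, in the format shown by the sample comment in the loop below.
 */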
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

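/*
 * The hardware PMB loses its contents across hibernation; on the resume
 * path, reprogram every live slot from the software copy kept in
 * pmb_entry_list.
 */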
#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;
		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, &pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}
	}
	prev_state = state;
	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif