/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting PCD or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
	  && addr >= __pa(high_memory);
#elif defined(__x86_64__)
	/*
	 * This is broken because it can generate memory type aliases,
	 * which can cause cache corruption.  But it is only available
	 * to root and we have to be bug-to-bug compatible with i386.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	/*
	 * Same behaviour as i386: PAT is always set to cached and the
	 * MTRRs control the caching behaviour.
	 * Hopefully a full PAT implementation will fix that soon.
	 */
	return 0;
#elif defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_SYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}

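/*
 * Architectures may supply their own valid_phys_addr_range(); this default
 * accepts only offsets below high_memory and clamps *count so the access
 * cannot run past it.
 */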
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
{
	unsigned long end_mem;

	end_mem = __pa(high_memory);
	if (addr >= end_mem)
		return 0;

	if (*count > end_mem - addr)
		*count = end_mem - addr;

	return 1;
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly to
 * the memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, &count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

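	/*
	 * Copy out page by page: "-p & (PAGE_SIZE - 1)" is the distance to
	 * the next page boundary, so an unaligned start only shortens the
	 * first chunk; when p is already aligned a full page is used.
	 */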
	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		if (copy_to_user(buf, ptr, sz))
			return -EFAULT;
		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}

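/*
 * Write to *physical* memory.  If copy_from_user() faults partway through,
 * the number of bytes already written is returned; -EFAULT is returned only
 * when nothing could be written at all.
 */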
static ssize_t write_mem(struct file * file, const char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, &count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			ssize_t ret;

			ret = written + (sz - copied);
			if (ret)
				return ret;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

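/*
 * mmap of /dev/mem: hand the physical range straight to remap_pfn_range(),
 * making the mapping uncached where the architecture requires it.
 */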
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
#if defined(__HAVE_PHYS_MEM_ACCESS_PROT)
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

	vma->vm_page_prot = phys_mem_access_prot(file, offset,
						 vma->vm_end - vma->vm_start,
						 vma->vm_page_prot);
#elif defined(pgprot_noncached)
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	int uncached;

	uncached = uncached_access(file, offset);
	if (uncached)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif

	/* remap_pfn_range() will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    vma->vm_end-vma->vm_start,
			    vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}

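/*
 * mmap of /dev/kmem: the file offset is a kernel-virtual address, so
 * translate it to a physical page frame number and fall through to
 * mmap_mem().
 */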
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count) tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

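	/*
	 * Anything left targets vmalloc/ioremap space above high_memory:
	 * pull it in page by page through a bounce buffer with vread().
	 */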
	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}

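/*
 * Low-memory half of a /dev/kmem write: "p" is the kernel-virtual
 * destination, "realp" the same offset kept for the page-zero and
 * alignment checks.
 */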
static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			ssize_t ret;

			ret = written + (sz - copied);
			if (ret)
				return ret;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {

		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void*)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

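	/*
	 * The remainder targets vmalloc space: stage each chunk in a bounce
	 * page and hand it to vwrite().
	 */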
	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					ssize_t ret;

					free_page((unsigned long)kbuf);
					ret = wrote + virtr + (len - written);
					return ret ? ret : -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}

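/*
 * /dev/port: byte-at-a-time access to the I/O port space (ports 0-65535)
 * via inb()/outb(), with the file position used as the port number.
 */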
#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
static ssize_t read_port(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i),tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user * tmp = buf;

	if (!access_ok(VERIFY_READ,buf,count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp))
			return -EFAULT;
		outb(c,i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif

static ssize_t read_null(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

#ifdef CONFIG_MMU
/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
{
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long addr=(unsigned long)buf;

	mm = current->mm;
	/* Oops, this was forgotten before. -ben */
	down_read(&mm->mmap_sem);

	/* For private mappings, just map in zero pages. */
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		unsigned long count;

		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
			goto out_up;
		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
			break;
		count = vma->vm_end - addr;
		if (count > size)
			count = size;

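		/*
		 * Unmap whatever is mapped there and back the range with the
		 * zero page instead, so no data actually has to be copied.
		 */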
		zap_page_range(vma, addr, count, NULL);
		zeromap_page_range(vma, addr, count, PAGE_COPY);

		size -= count;
		buf += count;
		addr += count;
		if (size == 0)
			goto out_up;
	}

	up_read(&mm->mmap_sem);

	/* The shared case is hard. Let's do the conventional zeroing. */
	do {
		unsigned long unwritten = clear_user(buf, PAGE_SIZE);
		if (unwritten)
			return size + unwritten - PAGE_SIZE;
		cond_resched();
		buf += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size);

	return size;
out_up:
	up_read(&mm->mmap_sem);
	return size;
}

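/*
 * read_zero(): short reads are cleared with clear_user() directly; reads of
 * four pages or more zero up to the next page boundary first so the bulk
 * can go through read_zero_pagealigned().
 */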
static ssize_t read_zero(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long left, unwritten, written = 0;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	left = count;

	/* do we want to be clever? Arbitrary cut-off */
	if (count >= PAGE_SIZE*4) {
		unsigned long partial;

		/* How much left of the page? */
		partial = (PAGE_SIZE-1) & -(unsigned long) buf;
		unwritten = clear_user(buf, partial);
		written = partial - unwritten;
		if (unwritten)
			goto out;
		left -= partial;
		buf += partial;
		unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
		written += (left & PAGE_MASK) - unwritten;
		if (unwritten)
			goto out;
		buf += left & PAGE_MASK;
		left &= ~PAGE_MASK;
	}
	unwritten = clear_user(buf, left);
	written += left - unwritten;
out:
	return written ? written : -EFAULT;
}

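/*
 * mmap of /dev/zero: shared mappings become shmem-backed objects via
 * shmem_zero_setup(); private mappings are simply backed by the zero page.
 */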
static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
#else /* CONFIG_MMU */
static ssize_t read_zero(struct file * file, char * buf,
			 size_t count, loff_t *ppos)
{
	size_t todo = count;

	while (todo) {
		size_t chunk = todo;

		if (chunk > 4096)
			chunk = 4096;	/* Just for latency reasons */
		if (clear_user(buf, chunk))
			return -EFAULT;
		buf += chunk;
		todo -= chunk;
		cond_resched();
	}
	return count;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

static ssize_t write_full(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */

static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
	loff_t ret;

	down(&file->f_dentry->d_inode->i_sem);
	switch (orig) {
		case 0:
			file->f_pos = offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		case 1:
			file->f_pos += offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		default:
			ret = -EINVAL;
	}
	up(&file->f_dentry->d_inode->i_sem);
	return ret;
}


static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

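/*
 * The simpler devices share implementations: /dev/zero and /dev/full reuse
 * the /dev/null lseek, writing /dev/zero behaves like writing /dev/null,
 * reading /dev/full behaves like reading /dev/zero, and /dev/mem, /dev/kmem
 * and /dev/oldmem all reuse the CAP_SYS_RAWIO check from open_port().
 */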
#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
};

static struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
};

static struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
};

#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
static struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

static struct backing_dev_info zero_bdi = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

static struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static struct file_operations oldmem_fops = {
	.read		= read_oldmem,
	.open		= open_oldmem,
};
#endif

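/*
 * Writes to /dev/kmsg are copied into a temporary kernel buffer and injected
 * into the kernel log via printk().
 */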
static ssize_t kmsg_write(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	int ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
	}
	kfree(tmp);
	return ret;
}

static struct file_operations kmsg_fops = {
	.write		= kmsg_write,
};

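/*
 * memory_open() dispatches on the minor number to the file_operations of
 * the individual device; the minors match the devlist[] table below
 * (1 = mem, 2 = kmem, 3 = null, 4 = port, 5 = zero, 7 = full, 8 = random,
 *  9 = urandom, 11 = kmsg, 12 = oldmem).
 */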
static int memory_open(struct inode * inode, struct file * filp)
{
	switch (iminor(inode)) {
		case 1:
			filp->f_op = &mem_fops;
			break;
		case 2:
			filp->f_op = &kmem_fops;
			break;
		case 3:
			filp->f_op = &null_fops;
			break;
#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
		case 4:
			filp->f_op = &port_fops;
			break;
#endif
		case 5:
			filp->f_mapping->backing_dev_info = &zero_bdi;
			filp->f_op = &zero_fops;
			break;
		case 7:
			filp->f_op = &full_fops;
			break;
		case 8:
			filp->f_op = &random_fops;
			break;
		case 9:
			filp->f_op = &urandom_fops;
			break;
		case 11:
			filp->f_op = &kmsg_fops;
			break;
#ifdef CONFIG_CRASH_DUMP
		case 12:
			filp->f_op = &oldmem_fops;
			break;
#endif
		default:
			return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		return filp->f_op->open(inode,filp);
	return 0;
}

static struct file_operations memory_fops = {
	.open		= memory_open,	/* just a selector for the real open */
};


static const struct {
	unsigned int		minor;
	char			*name;
	umode_t			mode;
	struct file_operations	*fops;
} devlist[] = { /* list of minor devices */
	{1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
	{2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
	{3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
	{4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
	{7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
	{8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
	{11, "kmsg",   S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
	{12, "oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};

static struct class *mem_class;

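/*
 * Register the MEM_MAJOR character device and create a class device and a
 * devfs node for every entry in devlist[].
 */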
static int __init chr_dev_init(void)
{
	int i;

	if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++) {
		class_device_create(mem_class, MKDEV(MEM_MAJOR, devlist[i].minor),
					NULL, devlist[i].name);
		devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
				S_IFCHR | devlist[i].mode, devlist[i].name);
	}

	return 0;
}

fs_initcall(chr_dev_init);