/*
 * linux/drivers/video/fb_defio.c
 *
 * Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

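/*
 * Look up the struct page backing the given byte offset into the
 * framebuffer. Handles both vmalloc()-ed framebuffers and physically
 * contiguous ones described by fix.smem_start.
 */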
struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
	void *screen_base = (void __force *) info->screen_base;
	struct page *page;

	if (is_vmalloc_addr(screen_base + offs))
		page = vmalloc_to_page(screen_base + offs);
	else
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	return page;
}

/* find and return the page backing the faulting framebuffer offset */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

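	/*
	 * Record the file's mapping and the page index so that page_mkclean()
	 * in the deferred work can later find and write-protect every PTE
	 * mapping this page via the rmap.
	 */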
	if (vma->vm_file)
		page->mapping = vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff;

	vmf->page = page;
	return 0;
}

int fb_deferred_io_fsync(struct file *file, int datasync)
{
	struct fb_info *info = file->private_data;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	/* Kill off the delayed work */
	cancel_delayed_work_sync(&info->deferred_work);

	/* Run it immediately */
	return schedule_delayed_work(&info->deferred_work, 0);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

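/*
 * From userspace (illustrative sketch; fd and len are hypothetical names):
 * a client that has mmap()ed the framebuffer can force the deferred work to
 * run right away instead of waiting for fbdefio->delay, e.g.
 *
 *	fb = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	memset(fb, 0, len);	- first write to each page hits page_mkwrite
 *	fsync(fd);		- cancels the pending work and runs it now
 */
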
/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct fb_info *info = vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *cur;

	/* this is a callback we get when userspace first tries to
	write to the page. we schedule a workqueue. that workqueue
	will eventually mkclean the touched pages and execute the
	deferred framebuffer IO. then if userspace touches a page
	again, we repeat the same scheme */

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid page_mkclean() being called
	 * before the PTE is updated, which would leave the page ignored
	 * by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(page);

	/* we loop through the pagelist before adding in order
	to keep the pagelist sorted */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		/* this check is to catch the case where a new
		process could start writing to the same page
		through a new pte. this new access can cause a
		mkwrite even when the original process's pte is
		marked writable */
		if (unlikely(cur == page))
			goto page_already_added;
		else if (cur->index > page->index)
			break;
	}

	list_add_tail(&page->lru, &cur->lru);

page_already_added:
	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

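/*
 * The framebuffer pages do not belong to a real filesystem, so provide a
 * minimal set_page_dirty() here; otherwise the generic set_page_dirty()
 * path would fall back to __set_page_dirty_buffers(), which assumes a
 * filesystem/blockdev mapping and is not safe on these driver-owned pages.
 */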
static int fb_deferred_io_set_page_dirty(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

static const struct address_space_operations fb_deferred_io_aops = {
	.set_page_dirty = fb_deferred_io_set_page_dirty,
};

static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
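	/*
	 * Only mark the mapping VM_IO for real device memory; a vmalloc-ed
	 * (FBINFO_VIRTFB) framebuffer is ordinary system RAM.
	 */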
	if (!(info->flags & FBINFO_VIRTFB))
		vma->vm_flags |= VM_IO;
	vma->vm_private_data = info;
	return 0;
}

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
						deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}

void fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	BUG_ON(!fbdefio);
	mutex_init(&fbdefio->lock);
	info->fbops->fb_mmap = fb_deferred_io_mmap;
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagelist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);

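/*
 * Illustrative sketch (not part of the original file): how a driver is
 * expected to hook into deferred IO. All mydrv_* names are hypothetical.
 * The driver fills in a struct fb_deferred_io, points info->fbdefio at it
 * and calls fb_deferred_io_init() before register_framebuffer(); the
 * deferred_io callback then receives the sorted list of dirty pages each
 * time the delayed work runs.
 */
static void mydrv_deferred_io(struct fb_info *info, struct list_head *pagelist)
{
	struct page *page;

	/* each entry is one dirty page, kept sorted by page->index */
	list_for_each_entry(page, pagelist, lru) {
		unsigned long offset = page->index << PAGE_SHIFT;

		/* a real driver would push PAGE_SIZE bytes at 'offset' of
		   info->screen_base out to the device here */
		(void)offset;
	}
}

static struct fb_deferred_io mydrv_defio = {
	.delay		= HZ / 4,	/* batch writes for 250 ms */
	.deferred_io	= mydrv_deferred_io,
};

static int mydrv_probe_fragment(struct fb_info *info)
{
	/* wire up deferred IO before register_framebuffer() */
	info->fbdefio = &mydrv_defio;
	fb_deferred_io_init(info);
	return 0;
}
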
void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page;
	int i;

	BUG_ON(!fbdefio);
	cancel_delayed_work(&info->deferred_work);
	flush_scheduled_work();

	/* clear out the mapping that we set up */
	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = fb_deferred_io_page(info, i);
		page->mapping = NULL;
	}

	info->fbops->fb_mmap = NULL;
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);

MODULE_LICENSE("GPL");