/*
 * linux/drivers/video/fb_defio.c
 *
 * Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

/* this is to find and return the vmalloc-ed fb pages */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vma->vm_private_data;
	/* info->screen_base is virtual memory */
	void *screen_base = (void __force *) info->screen_base;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = vmalloc_to_page(screen_base + offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

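	/*
	 * Note: setting page->mapping and page->index below gives the rmap
	 * code enough information to find this page's ptes again, so that
	 * page_mkclean() in the deferred work can write-protect the page.
	 */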
	if (vma->vm_file)
		page->mapping = vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff;

	vmf->page = page;
	return 0;
}

int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct fb_info *info = file->private_data;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	/* Kill off the delayed work */
	cancel_rearming_delayed_work(&info->deferred_work);

	/* Run it immediately */
	return schedule_delayed_work(&info->deferred_work, 0);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
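
/*
 * Usage note (illustrative, not part of this file): when deferred IO is
 * compiled in, the fbdev core is expected to route the fsync() file
 * operation to fb_deferred_io_fsync(), so userspace can force pending
 * updates out immediately instead of waiting for fbdefio->delay:
 *
 *	fd = open("/dev/fb0", O_RDWR);
 *	mem = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	... draw into mem ...
 *	fsync(fd);
 */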

/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
				  struct page *page)
{
	struct fb_info *info = vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *cur;

	/* This is the callback we get when userspace first tries to
	write to a page. We schedule the delayed work; that work will
	eventually mkclean the touched pages and execute the deferred
	framebuffer IO. If userspace then touches a page again, the
	same scheme repeats. */

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	/* we loop through the pagelist before adding in order
	to keep the pagelist sorted */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		/* This check catches the case where a new process
		starts writing to the same page through a new pte:
		that access can trigger mkwrite even though the
		original process's pte is already marked writable. */
		if (unlikely(cur == page))
			goto page_already_added;
		else if (cur->index > page->index)
			break;
	}

	list_add_tail(&page->lru, &cur->lru);

page_already_added:
	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return 0;
}

static struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

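/*
 * The framebuffer pages are vmalloc'ed and tracked by the defio pagelist
 * rather than by the page cache, so a minimal set_page_dirty that only
 * sets the dirty bit is used instead of the default implementation, which
 * expects buffer-head backed pages.
 */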
static int fb_deferred_io_set_page_dirty(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

static const struct address_space_operations fb_deferred_io_aops = {
	.set_page_dirty = fb_deferred_io_set_page_dirty,
};

static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vma->vm_flags |= (VM_IO | VM_RESERVED | VM_DONTEXPAND);
	vma->vm_private_data = info;
	return 0;
}

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
						deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}
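
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * driver's deferred_io callback receives the sorted pagelist built above
 * and pushes each touched page to the device, e.g.:
 *
 *	static void example_fb_dpy_deferred_io(struct fb_info *info,
 *					       struct list_head *pagelist)
 *	{
 *		struct page *page;
 *
 *		list_for_each_entry(page, pagelist, lru)
 *			example_fb_dpy_update_page(info, page->index);
 *	}
 *
 * example_fb_dpy_update_page() stands in for whatever bus transfer the
 * hardware needs; page->index identifies which PAGE_SIZE chunk of the
 * framebuffer was touched.
 */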

void fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	BUG_ON(!fbdefio);
	mutex_init(&fbdefio->lock);
	info->fbops->fb_mmap = fb_deferred_io_mmap;
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagelist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
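
/*
 * Illustrative setup sketch (hypothetical driver): the driver points
 * info->fbdefio at a struct fb_deferred_io carrying its delay and its
 * deferred_io callback (such as the one sketched above), then calls
 * fb_deferred_io_init() before register_framebuffer():
 *
 *	static struct fb_deferred_io example_fb_dpy_defio = {
 *		.delay		= HZ / 4,
 *		.deferred_io	= example_fb_dpy_deferred_io,
 *	};
 *
 *	...
 *	info->fbdefio = &example_fb_dpy_defio;
 *	fb_deferred_io_init(info);
 *	retval = register_framebuffer(info);
 */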

void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	void *screen_base = (void __force *) info->screen_base;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page;
	int i;

	BUG_ON(!fbdefio);
	cancel_delayed_work(&info->deferred_work);
	flush_scheduled_work();

	/* clear out the mapping that we set up */
	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = vmalloc_to_page(screen_base + i);
		page->mapping = NULL;
	}

	info->fbops->fb_mmap = NULL;
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
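
/*
 * Teardown note (illustrative): drivers that used fb_deferred_io_init()
 * typically call fb_deferred_io_cleanup() on their remove path, before the
 * vmalloc'ed framebuffer memory is freed, so that no deferred work or
 * page->mapping references outlive the buffer.
 */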

MODULE_LICENSE("GPL");