/*
 *  linux/drivers/video/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

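/*
 * Look up the page backing byte offset @offs into the framebuffer.
 * The framebuffer may live in vmalloc'ed memory or in a physically
 * contiguous region starting at info->fix.smem_start; both cases are
 * handled here.
 */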
struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
	void *screen_base = (void __force *) info->screen_base;
	struct page *page;

	if (is_vmalloc_addr(screen_base + offs))
		page = vmalloc_to_page(screen_base + offs);
	else
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	return page;
}

/* vm_ops->fault handler: find and return the page backing the
 * faulting offset into the framebuffer */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

	if (vma->vm_file)
		page->mapping = vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff;

	vmf->page = page;
	return 0;
}

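/* fsync on the fbdev file flushes any pending deferred IO so the
 * display is up to date on return; the fbdev core installs this as the
 * file's ->fsync handler when deferred IO is compiled in */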
int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fb_info *info = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	mutex_lock(&inode->i_mutex);
	/* Kill off the delayed work */
	cancel_delayed_work_sync(&info->deferred_work);

	/* Run it immediately.  Note that schedule_delayed_work() returns
	 * a bool (whether the work was newly queued), not an errno, so
	 * it must not be propagated as a return value. */
	schedule_delayed_work(&info->deferred_work, 0);
	mutex_unlock(&inode->i_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct fb_info *info = vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *cur;

	/* This is a callback we get when userspace first tries to
	write to the page.  We schedule a workqueue.  That workqueue
	will eventually mkclean the touched pages and execute the
	deferred framebuffer IO.  Then if userspace touches a page
	again, we repeat the same scheme. */

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid page_mkclean() being called
	 * before the PTE is updated, which would leave the page ignored
	 * by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(page);

	/* we loop through the pagelist before adding in order
	to keep the pagelist sorted */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		/* this check is to catch the case where a new
		process could start writing to the same page
		through a new pte.  This new access can cause a
		mkwrite even when the original process's pte is
		already marked writable. */
		if (unlikely(cur == page))
			goto page_already_added;
		else if (cur->index > page->index)
			break;
	}

	list_add_tail(&page->lru, &cur->lru);

page_already_added:
	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;
}
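
/*
 * To summarize the scheme implemented above: a userspace write to a
 * clean page triggers ->page_mkwrite, which queues the page on
 * fbdefio->pagelist and schedules deferred_work after fbdefio->delay.
 * When the work runs, it write-protects the PTEs again via
 * page_mkclean() and hands the list to the driver's ->deferred_io
 * callback, which pushes the pages out to the hardware.  The next
 * write to one of those pages then restarts the cycle.
 */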

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

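/*
 * These pages are not regular pagecache pages backed by a filesystem,
 * so the default set_page_dirty implementation, which expects
 * buffer_heads, must not run on them; provide a minimal handler that
 * only sets the dirty bit.
 */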
static int fb_deferred_io_set_page_dirty(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

static const struct address_space_operations fb_deferred_io_aops = {
	.set_page_dirty = fb_deferred_io_set_page_dirty,
};

static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
	/* virtual framebuffers (FBINFO_VIRTFB) live in ordinary system
	 * RAM, so only mark the mapping VM_IO for real device memory */
	if (!(info->flags & FBINFO_VIRTFB))
		vma->vm_flags |= VM_IO;
	vma->vm_private_data = info;
	return 0;
}

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
						deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}

void fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	BUG_ON(!fbdefio);
	mutex_init(&fbdefio->lock);
	info->fbops->fb_mmap = fb_deferred_io_mmap;
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagelist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
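
/*
 * A minimal usage sketch (the "foofb" names below are illustrative,
 * not part of this API): a driver enables deferred IO by pointing
 * info->fbdefio at a struct fb_deferred_io and calling
 * fb_deferred_io_init() before register_framebuffer():
 *
 *	static void foofb_deferred_io(struct fb_info *info,
 *				      struct list_head *pagelist)
 *	{
 *		struct page *page;
 *
 *		// walk the sorted list of touched pages and copy each
 *		// one out to the device (foofb_write_page is a
 *		// hypothetical driver helper)
 *		list_for_each_entry(page, pagelist, lru)
 *			foofb_write_page(info, page->index);
 *	}
 *
 *	static struct fb_deferred_io foofb_defio = {
 *		.delay		= HZ / 10,	// batch writes for 100 ms
 *		.deferred_io	= foofb_deferred_io,
 *	};
 *
 *	info->fbdefio = &foofb_defio;
 *	fb_deferred_io_init(info);
 *	register_framebuffer(info);
 */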
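/* called by the fbdev core when the device file is opened; install our
 * address_space operations so that dirtying of the mapped pages goes
 * through fb_deferred_io_set_page_dirty() above */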
void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);
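/* drivers must call this before tearing down the framebuffer, so that
 * no page is left pointing at a stale page->mapping and the mmap hook
 * and delayed work are disarmed */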
void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page;
	int i;

	BUG_ON(!fbdefio);
	cancel_delayed_work_sync(&info->deferred_work);

	/* clear out the mapping that we setup */
	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = fb_deferred_io_page(info, i);
		page->mapping = NULL;
	}

	info->fbops->fb_mmap = NULL;
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);

MODULE_LICENSE("GPL");