/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}

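/**
 * Allocates storage for both the page pointers and the per-page DMA
 * addresses that back a DMA-capable ttm.
 */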
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
	ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
					    sizeof(*ttm->dma_address));
}

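/*
 * On x86, switch the kernel linear mapping of a single page to the
 * requested caching state. Highmem pages have no permanent kernel
 * mapping and are left untouched. On other architectures the stub
 * below is a no-op.
 */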
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	/* Roll back pages already converted to the previous caching state. */
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

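/**
 * ttm_tt_set_placement_caching - map placement flags to a ttm caching state
 *
 * Translates the TTM_PL_FLAG_WC and TTM_PL_FLAG_UNCACHED placement flags
 * into the corresponding caching state (defaulting to cached) and applies
 * it to the ttm.
 */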
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

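/**
 * ttm_tt_destroy - unbind, unpopulate and free a ttm
 *
 * Unbinds the ttm if it is still bound, asks the driver to unpopulate it,
 * drops any non-persistent swap storage and finally calls the backend
 * destroy hook.
 */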
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (unlikely(ttm == NULL))
		return;

	if (ttm->state == tt_bound) {
		ttm_tt_unbind(ttm);
	}

	if (ttm->state == tt_unbound) {
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

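/**
 * ttm_tt_init - initialize a ttm and allocate its page directory
 *
 * Sets up the ttm fields from @bdev, @size and @page_flags, leaves the ttm
 * unpopulated and allocates the array of page pointers. Returns -ENOMEM if
 * the allocation fails.
 */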
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

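/**
 * ttm_tt_fini - free the page directory of a ttm
 */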
void ttm_tt_fini(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

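/**
 * ttm_dma_tt_init - initialize a DMA-capable ttm
 *
 * Like ttm_tt_init(), but also initializes the pages_list and allocates the
 * per-page DMA address array. Returns -ENOMEM if either allocation fails.
 */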
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (!ttm->pages || !ttm_dma->dma_address) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

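/**
 * ttm_dma_tt_fini - free the page directory and DMA address array of a ttm
 */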
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	drm_free_large(ttm_dma->dma_address);
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

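/**
 * ttm_tt_unbind - unbind a bound ttm via the backend unbind hook
 */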
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

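/**
 * ttm_tt_bind - populate a ttm and bind it to a memory region
 *
 * Has the driver populate the ttm's pages and then calls the backend bind
 * hook for @bo_mem. Returns 0 on success or if the ttm is already bound,
 * otherwise the error code from populate or bind.
 */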
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

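/**
 * ttm_tt_swapin - copy a ttm's pages back from its shmem swap storage
 *
 * Copies each page from the swap storage into the corresponding ttm page,
 * then drops non-persistent swap storage and clears TTM_PAGE_FLAG_SWAPPED.
 * Fails with -ENOMEM if a destination page is missing.
 */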
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page);
		to_virtual = kmap_atomic(to_page);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual);
		kunmap_atomic(from_virtual);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}

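/**
 * ttm_tt_swapout - copy a ttm's pages out to shmem swap storage
 *
 * Copies each populated page into @persistent_swap_storage, or into a newly
 * created shmem file when none is given, then has the driver unpopulate the
 * ttm and marks it TTM_PAGE_FLAG_SWAPPED (and PERSISTENT_SWAP when a
 * persistent file was supplied).
 */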
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistent_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		preempt_disable();
		from_virtual = kmap_atomic(from_page);
		to_virtual = kmap_atomic(to_page);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual);
		kunmap_atomic(from_virtual);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}