/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "drmP.h"
#include "ttm/ttm_bo_driver.h"

/**
 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
 * the number of used descriptors.
 */

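/**
 * vmw_gmr_build_descriptors - build a chained list of SVGA guest memory
 * descriptor pages for an array of pages.
 *
 * @desc_pages: List head to which the allocated descriptor pages are added.
 * @pages:      Array of pages backing the GMR.
 * @num_pages:  Number of entries in @pages.
 *
 * Runs of physically contiguous pages are coalesced into a single
 * descriptor. When a descriptor page fills up, a new page is allocated and
 * the last slot of the previous page becomes a continuation descriptor
 * (num_pages == 0) whose ppn points at the new page. The list is
 * terminated by a descriptor with both ppn and num_pages set to zero.
 *
 * Returns 0 on success or -ENOMEM if a descriptor page could not be
 * allocated; pages already allocated are freed again on error.
 */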
static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
				     struct page *pages[],
				     unsigned long num_pages)
{
	struct page *page, *next;
	struct svga_guest_mem_descriptor *page_virtual = NULL;
	struct svga_guest_mem_descriptor *desc_virtual = NULL;
	unsigned int desc_per_page;
	unsigned long prev_pfn;
	unsigned long pfn;
	int ret;

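	/*
	 * Reserve the last descriptor slot on each page for the
	 * continuation descriptor that chains to the next descriptor page.
	 */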
	desc_per_page = PAGE_SIZE /
		sizeof(struct svga_guest_mem_descriptor) - 1;

	while (likely(num_pages != 0)) {
		page = alloc_page(__GFP_HIGHMEM);
		if (unlikely(page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		list_add_tail(&page->lru, desc_pages);

		/*
		 * Point previous page terminating descriptor to this
		 * page before unmapping it.
		 */

		if (likely(page_virtual != NULL)) {
			desc_virtual->ppn = page_to_pfn(page);
			kunmap_atomic(page_virtual, KM_USER0);
		}

		page_virtual = kmap_atomic(page, KM_USER0);
		desc_virtual = page_virtual - 1;
		prev_pfn = ~(0UL);

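		/*
		 * Fill this descriptor page: a pfn contiguous with the
		 * previous one extends the current descriptor, otherwise a
		 * new descriptor is started. Stop when only the reserved
		 * continuation slot remains.
		 */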
		while (likely(num_pages != 0)) {
			pfn = page_to_pfn(*pages);

			if (pfn != prev_pfn + 1) {

				if (desc_virtual - page_virtual ==
				    desc_per_page - 1)
					break;

				(++desc_virtual)->ppn = cpu_to_le32(pfn);
				desc_virtual->num_pages = cpu_to_le32(1);
			} else {
				uint32_t tmp =
					le32_to_cpu(desc_virtual->num_pages);
				desc_virtual->num_pages = cpu_to_le32(tmp + 1);
			}
			prev_pfn = pfn;
			--num_pages;
			++pages;
		}

		(++desc_virtual)->ppn = cpu_to_le32(0);
		desc_virtual->num_pages = cpu_to_le32(0);
	}

	if (likely(page_virtual != NULL))
		kunmap_atomic(page_virtual, KM_USER0);

	return 0;
out_err:
	list_for_each_entry_safe(page, next, desc_pages, lru) {
		list_del_init(&page->lru);
		__free_page(page);
	}
	return ret;
}

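/**
 * vmw_gmr_free_descriptors - free all descriptor pages on the given list.
 *
 * @desc_pages: List of descriptor pages built by vmw_gmr_build_descriptors().
 */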
static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, desc_pages, lru) {
		list_del_init(&page->lru);
		__free_page(page);
	}
}

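/**
 * vmw_gmr_fire_descriptors - hand a descriptor list to the device.
 *
 * @dev_priv:   Pointer to the device private structure.
 * @gmr_id:     Id of the GMR to set up.
 * @desc_pages: List of descriptor pages. The pfn of the first page is
 *              written to the SVGA_REG_GMR_DESCRIPTOR register, under the
 *              hardware mutex, to point the device at the descriptor chain.
 */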
static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
				     int gmr_id, struct list_head *desc_pages)
{
	struct page *page;

	if (unlikely(list_empty(desc_pages)))
		return;

	page = list_entry(desc_pages->next, struct page, lru);

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
	mb();

	mutex_unlock(&dev_priv->hw_mutex);
}

/**
 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
 * the number of used descriptors.
 */

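/**
 * vmw_gmr_count_descriptors - count the descriptors needed for a page array.
 *
 * @pages:     Array of pages backing the GMR.
 * @num_pages: Number of entries in @pages.
 *
 * Each run of physically contiguous pages needs one descriptor, mirroring
 * the coalescing done by vmw_gmr_build_descriptors().
 */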
static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
					       unsigned long num_pages)
{
	unsigned long prev_pfn = ~(0UL);
	unsigned long pfn;
	unsigned long descriptors = 0;

	while (num_pages--) {
		pfn = page_to_pfn(*pages++);
		if (prev_pfn + 1 != pfn)
			++descriptors;
		prev_pfn = pfn;
	}

	return descriptors;
}

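/**
 * vmw_gmr_bind - bind an array of pages to a guest memory region.
 *
 * @dev_priv:  Pointer to the device private structure.
 * @pages:     Array of pages backing the GMR.
 * @num_pages: Number of entries in @pages.
 * @gmr_id:    Id of the GMR to bind the pages to.
 *
 * Returns -EINVAL if the device lacks the GMR capability or if the page
 * array would need more descriptors than the device supports. The
 * descriptor pages are freed again as soon as they have been handed to
 * the device.
 */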
int vmw_gmr_bind(struct vmw_private *dev_priv,
		 struct page *pages[],
		 unsigned long num_pages,
		 int gmr_id)
{
	struct list_head desc_pages;
	int ret;

	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
		return -EINVAL;

	if (vmw_gmr_count_descriptors(pages, num_pages) >
	    dev_priv->max_gmr_descriptors)
		return -EINVAL;

	INIT_LIST_HEAD(&desc_pages);

	ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
	if (unlikely(ret != 0))
		return ret;

	vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
	vmw_gmr_free_descriptors(&desc_pages);

	return 0;
}

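/**
 * vmw_gmr_unbind - unbind a guest memory region.
 *
 * @dev_priv: Pointer to the device private structure.
 * @gmr_id:   Id of the GMR to unbind.
 *
 * Writes a zero descriptor pointer for @gmr_id so the device drops the
 * binding.
 */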
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
	mb();
	mutex_unlock(&dev_priv->hw_mutex);
}