/******************************************************************************
 * grant_table.h
 *
 * Two sets of functionality:
 * 1. Granting foreign access to our memory reservation.
 * 2. Accessing others' memory reservations via grant references.
 * (i.e., mechanisms for both sender and recipient of grant references)
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __ASM_GNTTAB_H__
#define __ASM_GNTTAB_H__

#include <asm/page.h>

#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>

#include <asm/xen/hypervisor.h>

#include <xen/features.h>
#include <xen/page.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/kernel.h>

/*
 * Technically there's no reliably invalid grant reference or grant handle,
 * so pick the value that is the most unlikely one to be observed valid.
 */
#define INVALID_GRANT_REF          ((grant_ref_t)-1)
#define INVALID_GRANT_HANDLE       ((grant_handle_t)-1)

/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
#define NR_GRANT_FRAMES 4

struct gnttab_free_callback {
	struct gnttab_free_callback *next;
	void (*fn)(void *);
	void *arg;
	u16 count;
};

struct gntab_unmap_queue_data;

typedef void (*gnttab_unmap_refs_done)(int result, struct gntab_unmap_queue_data *data);

struct gntab_unmap_queue_data
{
	struct delayed_work	gnttab_work;
	void *data;
	gnttab_unmap_refs_done	done;
	struct gnttab_unmap_grant_ref *unmap_ops;
	struct gnttab_unmap_grant_ref *kunmap_ops;
	struct page **pages;
	unsigned int count;
	unsigned int age;
};

int gnttab_init(void);
int gnttab_suspend(void);
int gnttab_resume(void);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly);

/*
 * End access through the given grant reference, iff the grant entry is no
 * longer in use.  Return 1 if the grant entry was freed, 0 if it is still in
 * use.
 */
int gnttab_end_foreign_access_ref(grant_ref_t ref);

/*
 * Eventually end access through the given grant reference, and once that
 * access has been ended, free the given page too.  Access will be ended
 * immediately iff the grant entry is not in use, otherwise it will happen
 * some time later.  page may be NULL, in which case no freeing will occur.
 * Note that the granted page might still be accessed (read or write) by the
 * other side after gnttab_end_foreign_access() returns, so even if page was
 * specified as NULL it is not allowed to just reuse the page for other
 * purposes immediately. gnttab_end_foreign_access() will take an additional
 * reference to the granted page in this case, which is dropped only after
 * the grant is no longer in use.
 * This requires that multi-page allocations for areas subject to
 * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing
 * via free_pages_exact()) in order to avoid high-order pages.
 */
void gnttab_end_foreign_access(grant_ref_t ref, struct page *page);
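
/*
 * Illustrative sketch (not part of this header): a typical frontend-style
 * flow that grants one local page to a backend domain and later revokes
 * the grant again.  "backend_domid" and the way the reference is shared
 * with the other end (e.g. via xenstore) are hypothetical placeholders,
 * and error handling is abbreviated.
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	int ref;
 *
 *	ref = gnttab_grant_foreign_access(backend_domid,
 *					  xen_page_to_gfn(page), 0);
 *	if (ref < 0)
 *		goto err;	// no free grant entries
 *
 *	// ... advertise ref to the backend, e.g. through xenstore ...
 *
 *	// Revoke the grant (possibly deferred) and free the page once the
 *	// other side no longer holds a mapping of it.
 *	gnttab_end_foreign_access(ref, page);
 */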

/*
 * End access through the given grant reference, iff the grant entry is
 * no longer in use.  If foreign access is successfully ended, the grant
 * reference is also deallocated.
 * Return 1 if the grant entry was freed, 0 if it is still in use.
 */
int gnttab_try_end_foreign_access(grant_ref_t ref);

/*
 * operations on reserved batches of grant references
 */
int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);

int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first);

void gnttab_free_grant_reference(grant_ref_t ref);

void gnttab_free_grant_references(grant_ref_t head);

void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count);

int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);

int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release);

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count);
void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);

void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly);

/* Give access to the first 4K of the page */
static inline void gnttab_page_grant_foreign_access_ref_one(
	grant_ref_t ref, domid_t domid,
	struct page *page, int readonly)
{
	gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page),
					readonly);
}
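
/*
 * Illustrative sketch (not part of this header): pre-reserving a batch of
 * grant references and handing one out per request, a pattern similar to
 * what ring-based frontends use.  "RING_SIZE", "backend_domid" and
 * "req->page" are hypothetical placeholders.
 *
 *	grant_ref_t gref_head;
 *	int ref;
 *
 *	if (gnttab_alloc_grant_references(RING_SIZE, &gref_head) < 0)
 *		return -ENOSPC;
 *
 *	// Per request: claim one reference from the reserved batch and
 *	// point it at the page to be shared (read-only here).
 *	ref = gnttab_claim_grant_reference(&gref_head);
 *	if (ref < 0)
 *		goto err;	// batch exhausted
 *	gnttab_grant_foreign_access_ref(ref, backend_domid,
 *					xen_page_to_gfn(req->page), 1);
 *
 *	// On teardown, return everything still held in the batch.
 *	gnttab_free_grant_references(gref_head);
 */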

static inline void
gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
		  uint32_t flags, grant_ref_t ref, domid_t domid)
{
	if (flags & GNTMAP_contains_pte)
		map->host_addr = addr;
	else if (xen_feature(XENFEAT_auto_translated_physmap))
		map->host_addr = __pa(addr);
	else
		map->host_addr = addr;

	map->flags = flags;
	map->ref = ref;
	map->dom = domid;
	map->status = 1; /* arbitrary positive value */
}

static inline void
gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
		    uint32_t flags, grant_handle_t handle)
{
	if (flags & GNTMAP_contains_pte)
		unmap->host_addr = addr;
	else if (xen_feature(XENFEAT_auto_translated_physmap))
		unmap->host_addr = __pa(addr);
	else
		unmap->host_addr = addr;

	unmap->handle = handle;
	unmap->dev_bus_addr = 0;
}
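
/*
 * Illustrative sketch (not part of this header): mapping a single foreign
 * grant into a kernel virtual address and unmapping it again.  "vaddr" is
 * assumed to be a suitable, previously allocated VA, and "gref" and
 * "otherend_id" are assumed to come from the remote domain; error
 * handling is abbreviated.
 *
 *	struct gnttab_map_grant_ref map;
 *	struct gnttab_unmap_grant_ref unmap;
 *
 *	gnttab_set_map_op(&map, (phys_addr_t)(unsigned long)vaddr,
 *			  GNTMAP_host_map, gref, otherend_id);
 *	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &map, 1) ||
 *	    map.status != GNTST_okay)
 *		goto err;
 *
 *	// ... use the foreign page through vaddr ...
 *
 *	gnttab_set_unmap_op(&unmap, (phys_addr_t)(unsigned long)vaddr,
 *			    GNTMAP_host_map, map.handle);
 *	HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap, 1);
 */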

int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
			   unsigned long max_nr_gframes,
			   void **__shared);
int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
			   unsigned long max_nr_gframes,
			   grant_status_t **__shared);
void arch_gnttab_unmap(void *shared, unsigned long nr_gframes);

struct grant_frames {
	xen_pfn_t *pfn;
	unsigned int count;
	void *vaddr;
};
extern struct grant_frames xen_auto_xlat_grant_frames;
unsigned int gnttab_max_grant_frames(void);
int gnttab_setup_auto_xlat_frames(phys_addr_t addr);
void gnttab_free_auto_xlat_frames(void);

#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))

int gnttab_alloc_pages(int nr_pages, struct page **pages);
void gnttab_free_pages(int nr_pages, struct page **pages);

struct gnttab_page_cache {
	spinlock_t		lock;
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
	struct page		*pages;
#else
	struct list_head	pages;
#endif
	unsigned int		num_pages;
};

void gnttab_page_cache_init(struct gnttab_page_cache *cache);
int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page);
void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
			   unsigned int num);
void gnttab_page_cache_shrink(struct gnttab_page_cache *cache,
			      unsigned int num);
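
/*
 * Illustrative sketch (not part of this header): using the page cache to
 * recycle pages whose grants may still be referenced by the other end.
 * The trim threshold of 16 pages is an arbitrary example value, and error
 * handling is abbreviated.
 *
 *	struct gnttab_page_cache cache;
 *	struct page *page;
 *
 *	gnttab_page_cache_init(&cache);
 *
 *	// Prefer a cached page; fall back to freshly allocated grant pages.
 *	if (gnttab_page_cache_get(&cache, &page))
 *		if (gnttab_alloc_pages(1, &page))
 *			goto err;
 *
 *	// ... map a foreign grant into the page, use it, unmap it ...
 *
 *	// Return the page to the cache instead of freeing it.
 *	gnttab_page_cache_put(&cache, &page, 1);
 *
 *	// Periodically trim the cache down to a bounded number of spares.
 *	gnttab_page_cache_shrink(&cache, 16);
 */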

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
struct gnttab_dma_alloc_args {
	/* Device for which DMA memory will be/was allocated. */
	struct device *dev;
	/* If set, the DMA buffer is coherent; otherwise it is write-combine. */
	bool coherent;

	int nr_pages;
	struct page **pages;
	xen_pfn_t *frames;
	void *vaddr;
	dma_addr_t dev_bus_addr;
};

int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args);
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args);
#endif

int gnttab_pages_set_private(int nr_pages, struct page **pages);
void gnttab_pages_clear_private(int nr_pages, struct page **pages);

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count);
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item);
int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);
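
/*
 * Illustrative sketch (not part of this header): batch-mapping foreign
 * grants into local pages and tearing the mappings down again.  "NR" is a
 * hypothetical compile-time constant, and "grefs[]", "otherend_id" and
 * "pages[]" (obtained via gnttab_alloc_pages()) are placeholders.  The
 * kmap_ops/kunmap_ops arguments are only needed for PV guests and are
 * passed as NULL here; error handling is abbreviated.
 *
 *	struct gnttab_map_grant_ref map_ops[NR];
 *	struct gnttab_unmap_grant_ref unmap_ops[NR];
 *	unsigned int i;
 *
 *	for (i = 0; i < NR; i++)
 *		gnttab_set_map_op(&map_ops[i],
 *				  (phys_addr_t)pfn_to_kaddr(page_to_pfn(pages[i])),
 *				  GNTMAP_host_map, grefs[i], otherend_id);
 *
 *	if (gnttab_map_refs(map_ops, NULL, pages, NR))
 *		goto err;
 *
 *	// pages[] now alias the foreign frames; map_ops[i].handle holds the
 *	// handle needed for the later unmap.
 *
 *	for (i = 0; i < NR; i++)
 *		gnttab_set_unmap_op(&unmap_ops[i],
 *				    (phys_addr_t)pfn_to_kaddr(page_to_pfn(pages[i])),
 *				    GNTMAP_host_map, map_ops[i].handle);
 *	gnttab_unmap_refs(unmap_ops, NULL, pages, NR);
 */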


/* Perform a batch of grant map/copy operations. Retry every batch slot
 * for which the hypervisor returns GNTST_eagain. This is typically due
 * to paged-out target frames.
 *
 * Will retry for 1, 2, ... 255 ms, i.e. 256 times during 32 seconds.
 *
 * The return value in each and every status field of the batch is
 * guaranteed not to be GNTST_eagain.
 */
void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
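
/*
 * Illustrative sketch (not part of this header): copying data out of a
 * remote domain's granted page with gnttab_batch_copy(), broadly similar
 * to how a backend pulls data from a transmit grant.  "gref",
 * "otherend_id" and "page" are hypothetical placeholders.
 *
 *	struct gnttab_copy op = {
 *		.flags		= GNTCOPY_source_gref,
 *		.len		= XEN_PAGE_SIZE,
 *		.source.u.ref	= gref,
 *		.source.domid	= otherend_id,
 *		.source.offset	= 0,
 *		.dest.u.gmfn	= xen_page_to_gfn(page),
 *		.dest.domid	= DOMID_SELF,
 *		.dest.offset	= 0,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);
 *	if (op.status != GNTST_okay)
 *		goto err;	// guaranteed not to be GNTST_eagain
 */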


struct xen_page_foreign {
	domid_t domid;
	grant_ref_t gref;
};

static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
{
	if (!PageForeign(page))
		return NULL;
#if BITS_PER_LONG < 64
	return (struct xen_page_foreign *)page->private;
#else
	BUILD_BUG_ON(sizeof(struct xen_page_foreign) > BITS_PER_LONG);
	return (struct xen_page_foreign *)&page->private;
#endif
}

/* Split a Linux page into grant-sized chunks and call fn on each chunk
 *
 * Parameters of fn:
 *	gfn: guest frame number
 *	offset: offset in the grant
 *	len: length of the data in the grant
 *	data: opaque data passed through by the caller
 */
typedef void (*xen_grant_fn_t)(unsigned long gfn, unsigned int offset,
			       unsigned int len, void *data);

void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data);
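
/*
 * Illustrative sketch (not part of this header): granting every
 * XEN_PAGE_SIZE chunk of a (possibly larger) Linux page backing a data
 * buffer.  "grant_one_chunk", "struct fill_ctx" and "ctx" are
 * hypothetical; the callback is invoked once per grant-sized chunk.
 *
 *	struct fill_ctx {
 *		domid_t otherend_id;
 *		grant_ref_t *gref_head;
 *	};
 *
 *	static void grant_one_chunk(unsigned long gfn, unsigned int offset,
 *				    unsigned int len, void *data)
 *	{
 *		struct fill_ctx *ctx = data;
 *		int ref = gnttab_claim_grant_reference(ctx->gref_head);
 *
 *		// Read-only grant of this chunk to the other end.
 *		gnttab_grant_foreign_access_ref(ref, ctx->otherend_id, gfn, 1);
 *		// ... record ref/offset/len in the ring request ...
 *	}
 *
 *	gnttab_foreach_grant_in_range(page, offset, len,
 *				      grant_one_chunk, &ctx);
 */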

/* Helper that calls fn only on the first "grant chunk" of the range */
static inline void gnttab_for_one_grant(struct page *page, unsigned int offset,
					unsigned len, xen_grant_fn_t fn,
					void *data)
{
	/* The first request is limited to the size of one grant */
	len = min_t(unsigned int, XEN_PAGE_SIZE - (offset & ~XEN_PAGE_MASK),
		    len);

	gnttab_foreach_grant_in_range(page, offset, len, fn, data);
}

/* Get @nr_grefs grants from an array of pages and call fn for each grant */
void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data);

/* Get the number of grants in a specified region
 *
 * start: Offset from the beginning of the first page
 * len: total length of data (can cross multiple pages)
 */
static inline unsigned int gnttab_count_grant(unsigned int start,
					      unsigned int len)
{
	return XEN_PFN_UP(xen_offset_in_page(start) + len);
}
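
/*
 * Worked example (assuming XEN_PAGE_SIZE is 4 KiB): a buffer starting
 * 0x800 bytes into a Xen page with a total length of 0x2000 bytes spans
 * 0x2800 bytes from the start of its first Xen page, so
 * gnttab_count_grant(0x800, 0x2000) returns XEN_PFN_UP(0x2800) = 3 grants.
 */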

#endif /* __ASM_GNTTAB_H__ */