/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>

static ssize_t show_iommu_group(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned int groupid;

	if (iommu_device_group(dev, &groupid))
		return 0;

	return sprintf(buf, "%u", groupid);
}
static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);

static int add_iommu_group(struct device *dev, void *data)
{
	unsigned int groupid;

	if (iommu_device_group(dev, &groupid) == 0)
		return device_create_file(dev, &dev_attr_iommu_group);

	return 0;
}

static int remove_iommu_group(struct device *dev)
{
	unsigned int groupid;

	if (iommu_device_group(dev, &groupid) == 0)
		device_remove_file(dev, &dev_attr_iommu_group);

	return 0;
}

static int iommu_device_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE)
		return add_iommu_group(dev, NULL);
	else if (action == BUS_NOTIFY_DEL_DEVICE)
		return remove_iommu_group(dev);

	return 0;
}

static struct notifier_block iommu_device_nb = {
	.notifier_call = iommu_device_notifier,
};

static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
	bus_register_notifier(bus, &iommu_device_nb);
	bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: the bus type to set the callbacks for
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	iommu_bus_init(bus, ops);

	return 0;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);

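/*
 * Usage sketch (not part of this file): an IOMMU driver typically fills a
 * struct iommu_ops with the callbacks used throughout this file and
 * registers it for the bus it manages during its own initialization.  The
 * names my_iommu_ops, my_iommu_init and the my_* callbacks below are
 * hypothetical:
 *
 *	static struct iommu_ops my_iommu_ops = {
 *		.domain_init	= my_domain_init,
 *		.domain_destroy	= my_domain_destroy,
 *		.attach_dev	= my_attach_dev,
 *		.detach_dev	= my_detach_dev,
 *		.map		= my_map,
 *		.unmap		= my_unmap,
 *		.iova_to_phys	= my_iova_to_phys,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M,
 *	};
 *
 *	static int __init my_iommu_init(void)
 *	{
 *		return bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 *	}
 *
 * bus_set_iommu() returns -EBUSY if another driver already claimed the bus.
 */
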
bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 *
 * This function should be used by IOMMU users that want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
					iommu_fault_handler_t handler)
{
	BUG_ON(!domain);

	domain->handler = handler;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

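/*
 * Sketch of a handler a domain user might install (hypothetical names; the
 * handler signature is assumed to be the one IOMMU drivers invoke via
 * report_iommu_fault(): domain, faulting device, faulting iova and fault
 * flags).  Per the comment above, returning 0 signals the fault was
 * handled, a negative error code signals it was not:
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags)
 *	{
 *		dev_err(dev, "iommu fault at 0x%lx, flags 0x%x\n", iova, flags);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler);
 */
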
struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	struct iommu_domain *domain;
	int ret;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->ops = bus->iommu_ops;

	ret = domain->ops->domain_init(domain);
	if (ret)
		goto out_free;

	return domain;

out_free:
	kfree(domain);

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	if (likely(domain->ops->domain_destroy != NULL))
		domain->ops->domain_destroy(domain);

	kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	return domain->ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
			       unsigned long iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

int iommu_domain_has_cap(struct iommu_domain *domain,
			 unsigned long cap)
{
	if (unlikely(domain->ops->domain_has_cap == NULL))
		return 0;

	return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);

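/*
 * Example (hypothetical caller): a domain user can probe optional hardware
 * features before relying on them, e.g. IOMMU_CAP_CACHE_COHERENCY from
 * <linux/iommu.h>, which is assumed here to indicate that the IOMMU can
 * enforce cache-coherent DMA:
 *
 *	if (!iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
 *		pr_info("domain is not cache-coherent\n");
 */
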
int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
			"0x%x\n", iova, (unsigned long)paddr,
			(unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
				(unsigned long)paddr, (unsigned long)size);

	while (size) {
		unsigned long pgsize, addr_merge = iova | paddr;
		unsigned int pgsize_idx;

		/* Max page size that still fits into 'size' */
		pgsize_idx = __fls(size);

		/* need to consider alignment requirements ? */
		if (likely(addr_merge)) {
			/* Max page size allowed by both iova and paddr */
			unsigned int align_pgsize_idx = __ffs(addr_merge);

			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
		}

		/* build a mask of acceptable page sizes */
		pgsize = (1UL << (pgsize_idx + 1)) - 1;

		/* throw away page sizes not supported by the hardware */
		pgsize &= domain->ops->pgsize_bitmap;

		/* make sure we're still sane */
		BUG_ON(!pgsize);

		/* pick the biggest page */
		pgsize_idx = __fls(pgsize);
		pgsize = 1UL << pgsize_idx;

		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
					(unsigned long)paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);

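/*
 * Worked example of the page-size selection above (hypothetical numbers):
 * assume pgsize_bitmap = SZ_4K | SZ_2M = 0x201000 and a call
 * iommu_map(domain, 0x200000, 0x800000, 0x201000, prot).
 *
 * Iteration 1: addr_merge = 0x200000 | 0x800000 = 0xa00000, so
 * __ffs(addr_merge) = 21 and __fls(size) = __fls(0x201000) = 21, giving
 * pgsize_idx = 21.  The candidate mask (1UL << 22) - 1 = 0x3fffff ANDed
 * with the bitmap leaves 0x201000, whose highest set bit selects a 2 MiB
 * page.  iova, paddr and size then advance by 0x200000.
 *
 * Iteration 2: iova = 0x400000, paddr = 0xa00000, size = 0x1000.  Now
 * __fls(size) = 12 limits the mask to 0x1fff, which ANDed with the bitmap
 * leaves only 0x1000, so the remaining 4 KiB is mapped with a 4 KiB page.
 */
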
size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;

	if (unlikely(domain->ops->unmap == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
					iova, (unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
							(unsigned long)size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t left = size - unmapped;

		unmapped_page = domain->ops->unmap(domain, iova, left);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
					(unsigned long)unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

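/*
 * Putting the pieces together, a minimal sketch of a consumer of this API
 * (hypothetical caller; 'dev', 'iova' and 'paddr' are assumed to be set up
 * elsewhere): allocate a domain for the device's bus, attach the device,
 * map and later unmap a range, then tear everything down:
 *
 *	struct iommu_domain *domain;
 *	int ret;
 *
 *	if (!iommu_present(dev->bus))
 *		return -ENODEV;
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (!domain)
 *		return -ENOMEM;
 *
 *	ret = iommu_attach_device(domain, dev);
 *	if (ret)
 *		goto err_free;
 *
 *	ret = iommu_map(domain, iova, paddr, SZ_2M, IOMMU_READ | IOMMU_WRITE);
 *	if (ret)
 *		goto err_detach;
 *
 *	(DMA through 'iova'; iommu_iova_to_phys() can be used for lookups)
 *
 *	iommu_unmap(domain, iova, SZ_2M);
 *	err_detach:
 *		iommu_detach_device(domain, dev);
 *	err_free:
 *		iommu_domain_free(domain);
 */
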
int iommu_device_group(struct device *dev, unsigned int *groupid)
{
	if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
		return dev->bus->iommu_ops->device_group(dev, groupid);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_device_group);