// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt)     "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/numa.h>
#include <linux/limits.h>
#include <asm/irq_remapping.h>

#include "iommu.h"
#include "../irq_remapping.h"
#include "perf.h"
#include "trace.h"
#include "perfmon.h"

typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
	bool			print_entry;
};
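
/*
 * A dmar_res_callback is a small dispatch table indexed by ACPI DMAR
 * structure type. Minimal usage sketch (mirroring parse_dmar_table()
 * further down; the handler named here is the real one in this file):
 *
 *	struct dmar_res_callback cb = {
 *		.print_entry = true,
 *		.ignore_unhandled = true,
 *		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
 *	};
 *	dmar_walk_dmar_table(dmar, &cb);
 */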

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
static DEFINE_IDA(dmar_seq_ids);

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add the INCLUDE_ALL unit at the tail, so a scan of the list
	 * will find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}

void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}
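
/*
 * dmar_alloc_dev_scope() only counts the interesting scope entries and
 * sizes the array; callers walk the same entries again later to fill it
 * in (see dmar_insert_dev_scope()). Sketch of the calling convention,
 * as used by dmar_parse_one_drhd() below:
 *
 *	devices = dmar_alloc_dev_scope(start, end, &cnt);
 *	if (cnt && !devices)
 *		return -ENOMEM;	// entries exist but allocation failed
 */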

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	/*
	 * Ignore devices that have a domain number higher than what can
	 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
	 */
	if (pci_domain_nr(dev->bus) > U16_MAX)
		return NULL;

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = struct_size(info, path, level);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}
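
/*
 * Note on path[] ordering: the add-device loop above walks from the
 * device up towards the root bus while decrementing 'level', so path[0]
 * ends up being the hop closest to the root and path[info->level - 1]
 * the device itself; info->bus records the root bus number.
 */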

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}

static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		goto fallback;
	if (info->level != count)
		goto fallback;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			goto fallback;
	}

	return true;

fallback:

	if (count != 1)
		return false;

	i = info->level - 1;
	if (bus              == info->path[i].bus &&
	    path[0].device   == info->path[i].device &&
	    path[0].function == info->path[i].function) {
		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
			bus, path[0].device, path[0].function);
		return true;
	}

	return false;
}
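
/*
 * Example of the FW_BUG fallback above: some BIOSes emit an RMRR device
 * scope as a single (bus, device, function) entry naming the device
 * directly instead of the full bridge path from the root bus. If that
 * lone entry matches the final hop of the path we discovered for the
 * device, we accept it with a warning rather than dropping the RMRR.
 */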

/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		/*
		 * We expect devices with endpoint scope to have normal PCI
		 * headers, and devices with bridge scope to have bridge PCI
		 * headers.  However PCI NTB devices may be listed in the
		 * DMAR table with bridge scope, even though they have a
		 * normal PCI header.  NTB devices are identified by class
		 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
		 * for this special case.
		 */
		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
		      info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		if (WARN_ON(i >= devices_cnt))
			return -EINVAL;
	}

	return 0;
}

int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}

static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	if (ret >= 0)
		intel_irq_remap_add_device(info);

	return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
			dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}

static inline void vf_inherit_msi_domain(struct pci_dev *pdev)
{
	struct pci_dev *physfn = pci_physfn(pdev);

	dev_set_msi_domain(&pdev->dev, dev_get_msi_domain(&physfn->dev));
}

static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/*
	 * Only care about add/remove events for physical functions.
	 * For VFs we actually do the lookup based on the corresponding
	 * PF in device_to_iommu() anyway.
	 */
	if (pdev->is_virtfn) {
		/*
		 * Ensure that the VF device inherits the irq domain of the
		 * PF device. Ideally the device would inherit the domain
		 * from the bus, but DMAR can have multiple units per bus
		 * which makes this impossible. The VF 'bus' could inherit
		 * from the PF device, but that's yet another x86'sism to
		 * inflict on everybody else.
		 */
		if (action == BUS_NOTIFY_ADD_DEVICE)
			vf_inherit_msi_domain(pdev);
		return NOTIFY_DONE;
	}

	if (action != BUS_NOTIFY_ADD_DEVICE &&
	    action != BUS_NOTIFY_REMOVED_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = 1,
};

static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
	struct dmar_drhd_unit *dmaru;

	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
				dmar_rcu_check())
		if (dmaru->segment == drhd->segment &&
		    dmaru->reg_base_addr == drhd->address)
			return dmaru;

	return NULL;
}

/*
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = dmar_find_dmaru(drhd);
	if (dmaru)
		goto out;

	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	/*
	 * If header is allocated from slab by ACPI _DSM method, we need to
	 * copy the content because the memory buffer will be freed on return.
	 */
	dmaru->hdr = (void *)(dmaru + 1);
	memcpy(dmaru->hdr, header, header->length);
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	/* The size of the register set is 2 ^ N 4 KB pages. */
	dmaru->reg_size = 1UL << (drhd->size + 12);
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);

out:
	if (arg)
		(*(int *)arg)++;

	return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		pr_warn(FW_BUG
			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			   dmi_get_system_info(DMI_BIOS_VENDOR),
			   dmi_get_system_info(DMI_BIOS_VERSION),
			   dmi_get_system_info(DMI_PRODUCT_VERSION));
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}

#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = pxm_to_node(rhsa->proximity_domain);

			if (node != NUMA_NO_NODE && !node_online(node))
				node = NUMA_NO_NODE;
			drhd->iommu->node = node;
			return 0;
		}
	}
	pr_warn(FW_BUG
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);

	return 0;
}
#else
#define	dmar_parse_one_rhsa		dmar_res_noop
#endif

static void
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;
	struct acpi_dmar_satc *satc;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
		/*
		 * We don't print this here because we need to sanity-check
		 * it first. So print it in dmar_parse_one_andd() instead.
		 */
		break;
	case ACPI_DMAR_TYPE_SATC:
		satc = container_of(header, struct acpi_dmar_satc, header);
		pr_info("SATC flags: 0x%x\n", satc->flags);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* If we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return ACPI_SUCCESS(status) ? 0 : -ENOENT;
}

static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "Record passes table end\n");
			return -EINVAL;
		}

		if (cb->print_entry)
			dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			int ret;

			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
			if (ret)
				return ret;
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			return -EINVAL;
		}
	}

	return 0;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int drhd_count = 0;
	int ret;
	struct dmar_res_callback cb = {
		.print_entry = true,
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
		.cb[ACPI_DMAR_TYPE_SATC] = &dmar_parse_one_satc,
	};

	/*
	 * Do it again, as the earlier dmar_tbl mapping could have been
	 * done through the fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy SINIT saved in SinitMleData in the TXT heap (which is DMA
	 * protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}

static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}

static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}

static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			adev = acpi_fetch_acpi_dev(h);
			if (!adev) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}

int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				pci_dev_put(dev);
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}
	}

	return dmar_dev_scope_status;
}

void __init dmar_register_bus_notifier(void)
{
	bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	pr_warn_once(FW_BUG
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
}

static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	void __iomem *addr;
	u64 cap, ecap;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	if (arg)
		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
	else
		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
	if (!addr) {
		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
		return -EINVAL;
	}

	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);

	if (arg)
		iounmap(addr);
	else
		early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
		return -EINVAL;
	}

	return 0;
}

void __init detect_intel_iommu(void)
{
	int ret;
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (!ret)
		ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
					   &validate_drhd_cb);
	if (!ret && !no_iommu && !iommu_detected &&
	    (!dmar_disabled || dmar_platform_optin())) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (!ret) {
		x86_init.iommu.iommu_init = intel_iommu_init;
		x86_platform.iommu_shutdown = intel_iommu_shutdown;
	}
#endif

	if (dmar_tbl) {
		acpi_put_table(dmar_tbl);
		dmar_tbl = NULL;
	}
	up_write(&dmar_global_lock);
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @drhd: DMA remapping hardware definition structure
 *
 * Memory map the iommu's registers.  Start with the size the DRHD
 * advertises, and possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, struct dmar_drhd_unit *drhd)
{
	u64 phys_addr = drhd->reg_base_addr;
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = drhd->reg_size;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("Can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("Can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("Can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("Can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}

	if (cap_ecmds(iommu->cap)) {
		int i;

		for (i = 0; i < DMA_MAX_NUM_ECMDCAP; i++) {
			iommu->ecmdcap[i] = dmar_readq(iommu->reg + DMAR_ECCAP_REG +
						       i * DMA_ECMD_REG_STEP);
		}
	}

	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
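
/*
 * Note the two-step mapping in map_iommu() above: the window advertised
 * by the DRHD structure is mapped first, which is guaranteed to cover
 * the CAP/ECAP registers. Those capability registers then tell us where
 * the fault recording and IOTLB invalidation registers actually end; if
 * that lies beyond the advertised window, the region is released and
 * remapped at the larger, VTD_PAGE_ALIGN()ed size.
 */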

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	int agaw = -1;
	int msagaw = -1;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = ida_alloc_range(&dmar_seq_ids, 0,
					DMAR_UNITS_SUPPORTED - 1, GFP_KERNEL);
	if (iommu->seq_id < 0) {
		pr_err("Failed to allocate seq_id\n");
		err = iommu->seq_id;
		goto error;
	}
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd);
	if (err) {
		pr_err("Failed to map %s\n", iommu->name);
		goto error_free_seq_id;
	}

	err = -EINVAL;
	if (!cap_sagaw(iommu->cap) &&
	    (!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) {
		pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
			iommu->name);
		drhd->ignored = 1;
	}

	if (!drhd->ignored) {
		agaw = iommu_calculate_agaw(iommu);
		if (agaw < 0) {
			pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
			       iommu->seq_id);
			drhd->ignored = 1;
		}
	}
	if (!drhd->ignored) {
		msagaw = iommu_calculate_max_sagaw(iommu);
		if (msagaw < 0) {
			pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
			       iommu->seq_id);
			drhd->ignored = 1;
			agaw = -1;
		}
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	iommu->node = NUMA_NO_NODE;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->name,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	if (alloc_iommu_pmu(iommu))
		pr_debug("Cannot alloc PMU for iommu (seq_id = %d)\n", iommu->seq_id);

	raw_spin_lock_init(&iommu->register_lock);

	/*
	 * A value of N in the PSS field of the ECAP register indicates
	 * that the hardware supports a PASID field of N+1 bits.
	 */
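	/*
	 * For example, PSS == 19 indicates 20-bit PASIDs, giving
	 * max_pasids = 2UL << 19 == 1 << 20.
	 */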
	if (pasid_supported(iommu))
		iommu->iommu.max_pasids = 2UL << ecap_pss(iommu->ecap);

	/*
	 * This is only for hotplug; at boot time intel_iommu_enabled won't
	 * be set yet. When intel_iommu_init() runs, it registers the units
	 * present at boot time, then sets intel_iommu_enabled.
	 */
	if (intel_iommu_enabled && !drhd->ignored) {
		err = iommu_device_sysfs_add(&iommu->iommu, NULL,
					     intel_iommu_groups,
					     "%s", iommu->name);
		if (err)
			goto err_unmap;

		err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
		if (err)
			goto err_sysfs;

		iommu_pmu_register(iommu);
	}

	drhd->iommu = iommu;
	iommu->drhd = drhd;

	return 0;

err_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);
err_unmap:
	free_iommu_pmu(iommu);
	unmap_iommu(iommu);
error_free_seq_id:
	ida_free(&dmar_seq_ids, iommu->seq_id);
error:
	kfree(iommu);
	return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
	if (intel_iommu_enabled && !iommu->drhd->ignored) {
		iommu_pmu_unregister(iommu);
		iommu_device_unregister(&iommu->iommu);
		iommu_device_sysfs_remove(&iommu->iommu);
	}

	free_iommu_pmu(iommu);

	if (iommu->irq) {
		if (iommu->pr_irq) {
			free_irq(iommu->pr_irq, iommu);
			dmar_free_hwirq(iommu->pr_irq);
			iommu->pr_irq = 0;
		}
		free_irq(iommu->irq, iommu);
		dmar_free_hwirq(iommu->irq);
		iommu->irq = 0;
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	ida_free(&dmar_seq_ids, iommu->seq_id);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors that have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
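
/*
 * reclaim_free_desc() only advances free_tail across a contiguous run
 * of completed (QI_DONE or QI_ABORT) slots; a still-in-flight descriptor
 * stops reclamation until it completes, so free_head/free_tail always
 * delimit a single contiguous free region of the ring.
 */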

static const char *qi_type_string(u8 type)
{
	switch (type) {
	case QI_CC_TYPE:
		return "Context-cache Invalidation";
	case QI_IOTLB_TYPE:
		return "IOTLB Invalidation";
	case QI_DIOTLB_TYPE:
		return "Device-TLB Invalidation";
	case QI_IEC_TYPE:
		return "Interrupt Entry Cache Invalidation";
	case QI_IWD_TYPE:
		return "Invalidation Wait";
	case QI_EIOTLB_TYPE:
		return "PASID-based IOTLB Invalidation";
	case QI_PC_TYPE:
		return "PASID-cache Invalidation";
	case QI_DEIOTLB_TYPE:
		return "PASID-based Device-TLB Invalidation";
	case QI_PGRP_RESP_TYPE:
		return "Page Group Response";
	default:
		return "UNKNOWN";
	}
}

static void qi_dump_fault(struct intel_iommu *iommu, u32 fault)
{
	unsigned int head = dmar_readl(iommu->reg + DMAR_IQH_REG);
	u64 iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG);
	struct qi_desc *desc = iommu->qi->desc + head;

	if (fault & DMA_FSTS_IQE)
		pr_err("VT-d detected Invalidation Queue Error: Reason %llx",
		       DMAR_IQER_REG_IQEI(iqe_err));
	if (fault & DMA_FSTS_ITE)
		pr_err("VT-d detected Invalidation Time-out Error: SID %llx",
		       DMAR_IQER_REG_ITESID(iqe_err));
	if (fault & DMA_FSTS_ICE)
		pr_err("VT-d detected Invalidation Completion Error: SID %llx",
		       DMAR_IQER_REG_ICESID(iqe_err));

	pr_err("QI HEAD: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
	       qi_type_string(desc->qw0 & 0xf),
	       (unsigned long long)desc->qw0,
	       (unsigned long long)desc->qw1);

	head = ((head >> qi_shift(iommu)) + QI_LENGTH - 1) % QI_LENGTH;
	head <<= qi_shift(iommu);
	desc = iommu->qi->desc + head;

	pr_err("QI PRIOR: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
	       qi_type_string(desc->qw0 & 0xf),
	       (unsigned long long)desc->qw0,
	       (unsigned long long)desc->qw1);
}

static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int shift = qi_shift(iommu);

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault & (DMA_FSTS_IQE | DMA_FSTS_ITE | DMA_FSTS_ICE))
		qi_dump_fault(iommu, fault);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> shift) == index) {
			struct qi_desc *desc = qi->desc + head;

			/*
			 * desc->qw2 and desc->qw3 are either reserved or
			 * used by software as private data. We won't print
			 * out these two qw's for security considerations.
			 */
			memcpy(desc, qi->desc + (wait_index << shift),
			       1 << shift);
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			pr_info("Invalidation Queue Error (IQE) cleared\n");
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
		pr_info("Invalidation Time-out Error (ITE) cleared\n");

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE) {
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
		pr_info("Invalidation Completion Error (ICE) cleared\n");
	}

	return 0;
}
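
/*
 * Summary of the recovery contract above: on IQE the offending
 * descriptor (at the head) is overwritten with this submission's always
 * valid wait descriptor and -EINVAL is returned; on ITE the in-flight
 * wait descriptors are marked QI_ABORT and -EAGAIN is returned, which
 * qi_submit_sync() answers by restarting the whole submission.
 */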

/*
 * Function to submit invalidation descriptors of all types to the queued
 * invalidation interface (QI). Multiple descriptors can be submitted at
 * a time, and a wait descriptor is appended to each submission to ensure
 * that hardware has completed the invalidation before returning. Wait
 * descriptors can be part of the submission, but they will not be polled
 * for completion.
 */
int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
		   unsigned int count, unsigned long options)
{
	struct q_inval *qi = iommu->qi;
	s64 devtlb_start_ktime = 0;
	s64 iotlb_start_ktime = 0;
	s64 iec_start_ktime = 0;
	struct qi_desc wait_desc;
	int wait_index, index;
	unsigned long flags;
	int offset, shift;
	int rc, i;
	u64 type;

	if (!qi)
		return 0;

	type = desc->qw0 & GENMASK_ULL(3, 0);

	if ((type == QI_IOTLB_TYPE || type == QI_EIOTLB_TYPE) &&
	    dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IOTLB))
		iotlb_start_ktime = ktime_to_ns(ktime_get());

	if ((type == QI_DIOTLB_TYPE || type == QI_DEIOTLB_TYPE) &&
	    dmar_latency_enabled(iommu, DMAR_LATENCY_INV_DEVTLB))
		devtlb_start_ktime = ktime_to_ns(ktime_get());

	if (type == QI_IEC_TYPE &&
	    dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IEC))
		iec_start_ktime = ktime_to_ns(ktime_get());

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	/*
	 * Check if we have enough empty slots in the queue to submit,
	 * the calculation is based on:
	 * # of desc + 1 wait desc + 1 space between head and tail
	 */
	while (qi->free_cnt < count + 2) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + count) % QI_LENGTH;
	shift = qi_shift(iommu);

	for (i = 0; i < count; i++) {
		offset = ((index + i) % QI_LENGTH) << shift;
		memcpy(qi->desc + offset, &desc[i], 1 << shift);
		qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE;
		trace_qi_submit(iommu, desc[i].qw0, desc[i].qw1,
				desc[i].qw2, desc[i].qw3);
	}
	qi->desc_status[wait_index] = QI_IN_USE;

	wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	if (options & QI_OPT_WAIT_DRAIN)
		wait_desc.qw0 |= QI_IWD_PRQ_DRAIN;
	wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
	wait_desc.qw2 = 0;
	wait_desc.qw3 = 0;

	offset = wait_index << shift;
	memcpy(qi->desc + offset, &wait_desc, 1 << shift);

	qi->free_head = (qi->free_head + count + 1) % QI_LENGTH;
	qi->free_cnt -= count + 1;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent the
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context could wait indefinitely for free slots in the
		 * queue.
		 */
		rc = qi_check_fault(iommu, index, wait_index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	for (i = 0; i < count; i++)
		qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	if (iotlb_start_ktime)
		dmar_latency_update(iommu, DMAR_LATENCY_INV_IOTLB,
				ktime_to_ns(ktime_get()) - iotlb_start_ktime);

	if (devtlb_start_ktime)
		dmar_latency_update(iommu, DMAR_LATENCY_INV_DEVTLB,
				ktime_to_ns(ktime_get()) - devtlb_start_ktime);

	if (iec_start_ktime)
		dmar_latency_update(iommu, DMAR_LATENCY_INV_IEC,
				ktime_to_ns(ktime_get()) - iec_start_ktime);

	return rc;
}
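
/*
 * Minimal usage sketch for qi_submit_sync(): build one descriptor on
 * the stack and submit it synchronously, as the wrappers below do:
 *
 *	struct qi_desc desc = { .qw0 = QI_IEC_TYPE };
 *
 *	qi_submit_sync(iommu, &desc, 1, 0);
 */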

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.qw0 = QI_IEC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	/* should never fail */
	qi_submit_sync(iommu, &desc, 1, 0);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			u16 qdep, u64 addr, unsigned mask)
{
	struct qi_desc desc;

	/*
	 * VT-d spec, section 4.3:
	 *
	 * Software is recommended to not submit any Device-TLB invalidation
	 * requests while address remapping hardware is disabled.
	 */
	if (!(iommu->gcmd & DMA_GCMD_TE))
		return;

	if (mask) {
		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.qw1 = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}

/* PASID-based IOTLB invalidation */
void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
		     unsigned long npages, bool ih)
{
	struct qi_desc desc = {.qw2 = 0, .qw3 = 0};

	/*
	 * npages == -1 means a PASID-selective invalidation; otherwise a
	 * positive value selects Page-selective-within-PASID invalidation.
	 * 0 is not a valid input.
	 */
	if (WARN_ON(!npages)) {
		pr_err("Invalid input npages = %ld\n", npages);
		return;
	}

	if (npages == -1) {
		desc.qw0 = QI_EIOTLB_PASID(pasid) |
				QI_EIOTLB_DID(did) |
				QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
				QI_EIOTLB_TYPE;
		desc.qw1 = 0;
	} else {
		int mask = ilog2(__roundup_pow_of_two(npages));
		unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));

		if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
			addr = ALIGN_DOWN(addr, align);

		desc.qw0 = QI_EIOTLB_PASID(pasid) |
				QI_EIOTLB_DID(did) |
				QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
				QI_EIOTLB_TYPE;
		desc.qw1 = QI_EIOTLB_ADDR(addr) |
				QI_EIOTLB_IH(ih) |
				QI_EIOTLB_AM(mask);
	}

	qi_submit_sync(iommu, &desc, 1, 0);
}

/* PASID-based device IOTLB Invalidate */
void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			      u32 pasid, u16 qdep, u64 addr, unsigned int size_order)
{
	unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
	struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};

	/*
	 * VT-d spec, section 4.3:
	 *
	 * Software is recommended to not submit any Device-TLB invalidation
	 * requests while address remapping hardware is disabled.
	 */
	if (!(iommu->gcmd & DMA_GCMD_TE))
		return;

	desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
		QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
		QI_DEV_IOTLB_PFSID(pfsid);

	/*
	 * If the S bit is 0, we only flush a single page. If the S bit is
	 * set, the least significant zero bit indicates the invalidation
	 * address range, see VT-d spec 6.5.2.6.
	 * e.g. address bit 12[0] indicates 8KB, 13[0] indicates 16KB.
	 * size order = 0 is PAGE_SIZE 4KB
	 * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
	 * ECAP.
	 */
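	/*
	 * Worked example for the encoding below, assuming size_order == 2
	 * (four 4KB pages): GENMASK_ULL(13, 12) first sets address bits
	 * 13:12 to 1, then ~mask clears bit 13, leaving bit 12 set and
	 * bit 13 as the least significant zero bit, which per the rule
	 * above encodes a 16KB invalidation range.
	 */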
	if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
		pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
				    addr, size_order);

	/* Take page address */
	desc.qw1 = QI_DEV_EIOTLB_ADDR(addr);

	if (size_order) {
		/*
		 * Existing 0s in the address below size_order would otherwise
		 * be taken as the least significant zero bit; set them to 1s
		 * to avoid encoding a smaller size than desired.
		 */
		desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
					VTD_PAGE_SHIFT);
		/* Clear the size_order bit to indicate the size */
		desc.qw1 &= ~mask;
		/* Set the S bit to indicate flushing more than 1 page */
		desc.qw1 |= QI_DEV_EIOTLB_SIZE;
	}

	qi_submit_sync(iommu, &desc, 1, 0);
}

void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
			  u64 granu, u32 pasid)
{
	struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};

	desc.qw0 = QI_PC_PASID(pasid) | QI_PC_DID(did) |
			QI_PC_GRAN(granu) | QI_PC_TYPE;
	qi_submit_sync(iommu, &desc, 1, 0);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give the hardware a chance to complete the pending invalidation
	 * requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;
	u64 val = virt_to_phys(qi->desc);

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	/*
	 * Set DW=1 and QS=1 in IQA_REG when Scalable Mode capability
	 * is present.
	 */
	if (ecap_smts(iommu->ecap))
		val |= BIT_ULL(11) | BIT_ULL(0);
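	/*
	 * Per the VT-d spec, BIT_ULL(11) is the DW (descriptor width) bit
	 * selecting 256-bit descriptors and BIT_ULL(0) sets the QS (queue
	 * size) field to 1, i.e. a 2^1 == 2 page queue, matching the two
	 * pages dmar_enable_qi() allocates when scalable mode is present.
	 */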
1709 
1710 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
1711 
1712 	/* write zero to the tail reg */
1713 	writel(0, iommu->reg + DMAR_IQT_REG);
1714 
1715 	dmar_writeq(iommu->reg + DMAR_IQA_REG, val);
1716 
1717 	iommu->gcmd |= DMA_GCMD_QIE;
1718 	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1719 
1720 	/* Make sure hardware complete it */
1721 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1722 
1723 	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1724 }
1725 
1726 /*
1727  * Enable Queued Invalidation interface. This is a must to support
1728  * interrupt-remapping. Also used by DMA-remapping, which replaces
1729  * register based IOTLB invalidation.
1730  */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * Queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	/*
	 * Need two pages to accommodate 256 descriptors of 256 bits each
	 * if the remapping hardware supports scalable mode translation.
	 */
	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				     !!ecap_smts(iommu->ecap));
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
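
/*
 * Example (illustrative only, not a call site in this file): a caller
 * bringing up queued invalidation and warning on failure.
 *
 *	if (dmar_enable_qi(iommu))
 *		pr_warn("%s: failed to enable queued invalidation\n",
 *			iommu->name);
 */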

/* IOMMU interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char * const dma_remap_fault_reasons[] = {
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char * const dma_remap_sm_fault_reasons[] = {
	"SM: Invalid Root Table Address",
	"SM: TTM 0 for request with PASID",
	"SM: TTM 0 for page group request",
	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */
	"SM: Error attempting to access Root Entry",
	"SM: Present bit in Root Entry is clear",
	"SM: Non-zero reserved field set in Root Entry",
	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */
	"SM: Error attempting to access Context Entry",
	"SM: Present bit in Context Entry is clear",
	"SM: Non-zero reserved field set in the Context Entry",
	"SM: Invalid Context Entry",
	"SM: DTE field in Context Entry is clear",
	"SM: PASID Enable field in Context Entry is clear",
	"SM: PASID is larger than the max in Context Entry",
	"SM: PRE field in Context-Entry is clear",
	"SM: RID_PASID field error in Context-Entry",
	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */
	"SM: Error attempting to access the PASID Directory Entry",
	"SM: Present bit in Directory Entry is clear",
	"SM: Non-zero reserved field set in PASID Directory Entry",
	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */
	"SM: Error attempting to access PASID Table Entry",
	"SM: Present bit in PASID Table Entry is clear",
	"SM: Non-zero reserved field set in PASID Table Entry",
	"SM: Invalid Scalable-Mode PASID Table Entry",
	"SM: ERE field is clear in PASID Table Entry",
	"SM: SRE field is clear in PASID Table Entry",
	"Unknown", "Unknown", /* 0x5E-0x5F */
	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x67 */
	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x6F */
	"SM: Error attempting to access first-level paging entry",
	"SM: Present bit in first-level paging entry is clear",
	"SM: Non-zero reserved field set in first-level paging entry",
	"SM: Error attempting to access FL-PML4 entry",
	"SM: First-level entry address beyond MGAW in Nested translation",
	"SM: Read permission error in FL-PML4 entry in Nested translation",
	"SM: Read permission error in first-level paging entry in Nested translation",
	"SM: Write permission error in first-level paging entry in Nested translation",
	"SM: Error attempting to access second-level paging entry",
	"SM: Read/Write permission error in second-level paging entry",
	"SM: Non-zero reserved field set in second-level paging entry",
	"SM: Invalid second-level page table pointer",
	"SM: A/D bit update needed in second-level entry when set up in no snoop",
	"Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */
	"SM: Address in first-level translation is not canonical",
	"SM: U/S set 0 for first-level translation with user privilege",
	"SM: No execute permission for request with PASID and ER=1",
	"SM: Address beyond the DMA hardware max",
	"SM: Second-level entry address beyond the max",
	"SM: No write permission for Write/AtomicOp request",
	"SM: No read permission for Read/AtomicOp request",
	"SM: Invalid address-interrupt address",
	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x8F */
	"SM: A/D bit update needed in first-level entry when set up in no snoop",
};

static const char * const irq_remap_fault_reasons[] = {
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

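/*
 * The tables above are indexed by the hardware fault reason relative to
 * a per-table base: 0x00 for dma_remap_fault_reasons[], 0x20 for
 * irq_remap_fault_reasons[] and 0x30 for dma_remap_sm_fault_reasons[],
 * as decoded in dmar_get_fault_reason() below.
 */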
static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason >= 0x30 && (fault_reason - 0x30 <
			ARRAY_SIZE(dma_remap_sm_fault_reasons))) {
		*fault_type = DMA_REMAP;
		return dma_remap_sm_fault_reasons[fault_reason - 0x30];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
{
	if (iommu->irq == irq)
		return DMAR_FECTL_REG;
	else if (iommu->pr_irq == irq)
		return DMAR_PECTL_REG;
	else if (iommu->perf_irq == irq)
		return DMAR_PERFINTRCTL_REG;
	else
		BUG();
}

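/*
 * Each register block selected by dmar_msi_reg() shares one layout,
 * relied on by the accessors below: the control register at the base
 * offset, then data (+4), address low (+8) and address high (+12).
 */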
void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* Unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + reg);
	/* Read back the register to flush the posted write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* Mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + reg);
	/* Read back the register to flush the posted write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + reg + 4);
	writel(msg->address_lo, iommu->reg + reg + 8);
	writel(msg->address_hi, iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + reg + 4);
	msg->address_lo = readl(iommu->reg + reg + 8);
	msg->address_hi = readl(iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u32 pasid, u16 source_id,
		unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP) {
		pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);

		return 0;
	}

	if (pasid == IOMMU_PASID_INVALID)
		pr_err("[%s NO_PASID] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
		       type ? "DMA Read" : "DMA Write",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr,
		       fault_reason, reason);
	else
		pr_err("[%s PASID 0x%x] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
		       type ? "DMA Read" : "DMA Write", pasid,
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr,
		       fault_reason, reason);

	dmar_fault_dump_ptes(iommu, source_id, addr, pasid);

	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
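/*
 * Each primary fault record is 16 bytes. As decoded below, the low
 * qword carries the faulting page address, the dword at offset 8 the
 * source-id and PASID-present bit, and the dword at offset 12 the
 * fault (F) bit, fault reason, request type and PASID value.
 */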
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;
	static DEFINE_RATELIMIT_STATE(rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status && __ratelimit(&rs))
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		/* Disable printing, simply clear the fault when ratelimited */
		bool ratelimited = !__ratelimit(&rs);
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		u32 pasid;
		int type;
		u32 data;
		bool pasid_present;

		/* Highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		if (!ratelimited) {
			fault_reason = dma_frcd_fault_reason(data);
			type = dma_frcd_type(data);

			pasid = dma_frcd_pasid_value(data);
			data = readl(iommu->reg + reg +
				     fault_index * PRIMARY_FAULT_REG_LEN + 8);
			source_id = dma_frcd_source_id(data);

			pasid_present = dma_frcd_pasid_present(data);
			guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
			guest_addr = dma_frcd_page_addr(guest_addr);
		}

		/* Clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		if (!ratelimited)
			/* Use IOMMU_PASID_INVALID if the PASID is not present */
			dmar_fault_do_one(iommu, type, fault_reason,
					  pasid_present ? pasid : IOMMU_PASID_INVALID,
					  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
	       iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

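/*
 * Allocate and request the fault-reporting interrupt for an IOMMU.
 * Safe to call more than once: returns 0 immediately when iommu->irq
 * has already been set up.
 */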
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
	if (irq > 0) {
		iommu->irq = irq;
	} else {
		pr_err("No free IRQ vectors\n");
		return -EINVAL;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("Can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
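	/* Bit 0 of the DMAR table flags indicates interrupt remapping support */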
	return dmar->flags & 0x1;
}

/* Check whether DMAR units are in use */
static inline bool dmar_in_use(void)
{
	return irq_remapping_enabled || intel_iommu_enabled;
}

static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	if (dmar_in_use())
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);

/*
 * DMAR Hotplug Support
 * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
 * "Remapping Hardware Unit Hot Plug".
 */
static guid_t dmar_hp_guid =
	GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
		  0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);

/*
 * Currently there's only one revision and BIOS will not check the revision id,
 * so use 0 for safety.
 */
#define	DMAR_DSM_REV_ID			0
#define	DMAR_DSM_FUNC_DRHD		1
#define	DMAR_DSM_FUNC_ATSR		2
#define	DMAR_DSM_FUNC_RHSA		3
#define	DMAR_DSM_FUNC_SATC		4

static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
	return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
}

static int dmar_walk_dsm_resource(acpi_handle handle, int func,
				  dmar_res_handler_t handler, void *arg)
{
	int ret = -ENODEV;
	union acpi_object *obj;
	struct acpi_dmar_header *start;
	struct dmar_res_callback callback;
	static int res_type[] = {
		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
		[DMAR_DSM_FUNC_SATC] = ACPI_DMAR_TYPE_SATC,
	};

	if (!dmar_detect_dsm(handle, func))
		return 0;

	obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
				      func, NULL, ACPI_TYPE_BUFFER);
	if (!obj)
		return -ENODEV;

	memset(&callback, 0, sizeof(callback));
	callback.cb[res_type[func]] = handler;
	callback.arg[res_type[func]] = arg;
	start = (struct acpi_dmar_header *)obj->buffer.pointer;
	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);

	ACPI_FREE(obj);

	return ret;
}
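
/*
 * Example (illustrative only; mirrors the hotplug paths below): walking
 * the DRHD structures returned by a unit's _DSM method.
 *
 *	int drhd_count = 0;
 *
 *	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
 *				     &dmar_parse_one_drhd,
 *				     (void *)&drhd_count);
 */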

static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
{
	int ret;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return -ENODEV;

	ret = dmar_ir_hotplug(dmaru, true);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, true);

	return ret;
}

static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
{
	int i, ret;
	struct device *dev;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return 0;

	/*
	 * All PCI devices managed by this unit should have been destroyed.
	 */
	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
		for_each_active_dev_scope(dmaru->devices,
					  dmaru->devices_cnt, i, dev)
			return -EBUSY;
	}

	ret = dmar_ir_hotplug(dmaru, false);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, false);

	return ret;
}

static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (dmaru) {
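		/*
		 * Readers traverse dmar_drhd_units under RCU, so wait
		 * for a grace period after unlinking the entry before
		 * freeing it.
		 */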
		list_del_rcu(&dmaru->list);
		synchronize_rcu();
		dmar_free_drhd(dmaru);
	}

	return 0;
}

static int dmar_hotplug_insert(acpi_handle handle)
{
	int ret;
	int drhd_count = 0;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_validate_one_drhd, (void *)1);
	if (ret)
		goto out;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_parse_one_drhd, (void *)&drhd_count);
	if (ret == 0 && drhd_count == 0) {
		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
		goto out;
	} else if (ret) {
		goto release_drhd;
	}

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
				     &dmar_parse_one_rhsa, NULL);
	if (ret)
		goto release_drhd;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_parse_one_atsr, NULL);
	if (ret)
		goto release_atsr;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_add_drhd, NULL);
	if (!ret)
		return 0;

	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_remove_drhd, NULL);
release_atsr:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
			       &dmar_release_one_atsr, NULL);
release_drhd:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_release_drhd, NULL);
out:
	return ret;
}

static int dmar_hotplug_remove(acpi_handle handle)
{
	int ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_check_one_atsr, NULL);
	if (ret)
		return ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_remove_drhd, NULL);
	if (ret == 0) {
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
					       &dmar_release_one_atsr, NULL));
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
					       &dmar_hp_release_drhd, NULL));
	} else {
		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				       &dmar_hp_add_drhd, NULL);
	}

	return ret;
}

static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
				       void *context, void **retval)
{
	acpi_handle *phdl = retval;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		*phdl = handle;
		return AE_CTRL_TERMINATE;
	}

	return AE_OK;
}

static int dmar_device_hotplug(acpi_handle handle, bool insert)
{
	int ret;
	acpi_handle tmp = NULL;
	acpi_status status;

	if (!dmar_in_use())
		return 0;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		tmp = handle;
	} else {
		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
					     ACPI_UINT32_MAX,
					     dmar_get_dsm_handle,
					     NULL, NULL, &tmp);
		if (ACPI_FAILURE(status)) {
			pr_warn("Failed to locate _DSM method.\n");
			return -ENXIO;
		}
	}
	if (tmp == NULL)
		return 0;

	down_write(&dmar_global_lock);
	if (insert)
		ret = dmar_hotplug_insert(tmp);
	else
		ret = dmar_hotplug_remove(tmp);
	up_write(&dmar_global_lock);

	return ret;
}

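/*
 * Public wrappers around dmar_device_hotplug() for DMAR unit hot-add
 * and hot-remove.
 */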
int dmar_device_add(acpi_handle handle)
{
	return dmar_device_hotplug(handle, true);
}

int dmar_device_remove(acpi_handle handle)
{
	return dmar_device_hotplug(handle, false);
}

/*
 * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in DMAR table
 *
 * Returns true if the platform has %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in
 * the ACPI DMAR table. This means that the platform boot firmware has made
 * sure no device can issue DMA outside of RMRR regions.
 */
bool dmar_platform_optin(void)
{
	struct acpi_table_dmar *dmar;
	acpi_status status;
	bool ret;

	status = acpi_get_table(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar);
	if (ACPI_FAILURE(status))
		return false;

	ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN);
	acpi_put_table((struct acpi_table_header *)dmar);

	return ret;
}
EXPORT_SYMBOL_GPL(dmar_platform_optin);