/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Support Intel uncore PerfMon discovery mechanism.
 * Copyright(c) 2021 Intel Corporation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "uncore.h"
#include "uncore_discovery.h"

static struct rb_root discovery_tables = RB_ROOT;
static int num_discovered_types[UNCORE_ACCESS_MAX];

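/*
 * Check whether a dedicated discovery table PCI device is present,
 * identified by the PMON discovery DVSEC capability ID.
 */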
static bool has_generic_discovery_table(void)
{
	struct pci_dev *dev;
	int dvsec;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, UNCORE_DISCOVERY_TABLE_DEVICE, NULL);
	if (!dev)
		return false;

	/* A discovery table device has the unique capability ID. */
	dvsec = pci_find_next_ext_capability(dev, 0, UNCORE_EXT_CAP_ID_DISCOVERY);
	pci_dev_put(dev);
	if (dvsec)
		return true;

	return false;
}

static int logical_die_id;

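/*
 * Map a discovery table device to a logical die id, using the NUMA
 * node of its bus to find an online CPU on the same die.
 */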
static int get_device_die_id(struct pci_dev *dev)
{
	int cpu, node = pcibus_to_node(dev->bus);

	/*
	 * If the NUMA info is not available, assume that the logical die ids
	 * are assigned contiguously, in the order in which the discovery
	 * table devices are detected.
	 */
	if (node < 0)
		return logical_die_id++;

	for_each_cpu(cpu, cpumask_of_node(node)) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->initialized && cpu_to_node(cpu) == node)
			return c->logical_die_id;
	}

	/*
	 * All CPUs of a node may be offline. In that case, the PCI- and
	 * MMIO-type uncore blocks enumerated by this device are unavailable.
	 */
	return -1;
}

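/*
 * Discovered uncore types are kept in an rb-tree keyed by box type,
 * so that units of the same type found on different dies and devices
 * can be merged into a single intel_uncore_discovery_type.
 */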
#define __node_2_type(cur)	\
	rb_entry((cur), struct intel_uncore_discovery_type, node)

static inline int __type_cmp(const void *key, const struct rb_node *b)
{
	struct intel_uncore_discovery_type *type_b = __node_2_type(b);
	const u16 *type_id = key;

	if (type_b->type > *type_id)
		return -1;
	else if (type_b->type < *type_id)
		return 1;

	return 0;
}

static inline struct intel_uncore_discovery_type *
search_uncore_discovery_type(u16 type_id)
{
	struct rb_node *node = rb_find(&type_id, &discovery_tables, __type_cmp);

	return (node) ? __node_2_type(node) : NULL;
}

static inline bool __type_less(struct rb_node *a, const struct rb_node *b)
{
	return (__node_2_type(a)->type < __node_2_type(b)->type);
}

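/*
 * Allocate a new type node for a unit whose box type has not been
 * seen before, along with its per-die array of box control addresses.
 */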
static struct intel_uncore_discovery_type *
add_uncore_discovery_type(struct uncore_unit_discovery *unit)
{
	struct intel_uncore_discovery_type *type;

	if (unit->access_type >= UNCORE_ACCESS_MAX) {
		pr_warn("Unsupported access type %d\n", unit->access_type);
		return NULL;
	}

	type = kzalloc(sizeof(struct intel_uncore_discovery_type), GFP_KERNEL);
	if (!type)
		return NULL;

	type->box_ctrl_die = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL);
	if (!type->box_ctrl_die)
		goto free_type;

	type->access_type = unit->access_type;
	num_discovered_types[type->access_type]++;
	type->type = unit->box_type;

	rb_add(&type->node, &discovery_tables, __type_less);

	return type;

free_type:
	kfree(type);

	return NULL;
}

static struct intel_uncore_discovery_type *
get_uncore_discovery_type(struct uncore_unit_discovery *unit)
{
	struct intel_uncore_discovery_type *type;

	type = search_uncore_discovery_type(unit->box_type);
	if (type)
		return type;

	return add_uncore_discovery_type(unit);
}

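/*
 * Record one discovered unit. The per-type ids[] and box_offset[]
 * arrays are grown by one entry per box: new arrays are allocated,
 * the old contents are copied over, and the old arrays are freed.
 * Once the first device has been fully parsed, only the first box
 * control address of each type is recorded for subsequent dies.
 */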
static void
uncore_insert_box_info(struct uncore_unit_discovery *unit,
		       int die, bool parsed)
{
	struct intel_uncore_discovery_type *type;
	unsigned int *box_offset, *ids;
	int i;

	if (WARN_ON_ONCE(!unit->ctl || !unit->ctl_offset || !unit->ctr_offset))
		return;

	if (parsed) {
		type = search_uncore_discovery_type(unit->box_type);
		if (WARN_ON_ONCE(!type))
			return;
		/* Store the first box of each die */
		if (!type->box_ctrl_die[die])
			type->box_ctrl_die[die] = unit->ctl;
		return;
	}

	type = get_uncore_discovery_type(unit);
	if (!type)
		return;

	box_offset = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL);
	if (!box_offset)
		return;

	ids = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL);
	if (!ids)
		goto free_box_offset;

	/* Store generic information for the first box */
	if (!type->num_boxes) {
		type->box_ctrl = unit->ctl;
		type->box_ctrl_die[die] = unit->ctl;
		type->num_counters = unit->num_regs;
		type->counter_width = unit->bit_width;
		type->ctl_offset = unit->ctl_offset;
		type->ctr_offset = unit->ctr_offset;
		*ids = unit->box_id;
		goto end;
	}

	for (i = 0; i < type->num_boxes; i++) {
		ids[i] = type->ids[i];
		box_offset[i] = type->box_offset[i];

		if (WARN_ON_ONCE(unit->box_id == ids[i]))
			goto free_ids;
	}
	ids[i] = unit->box_id;
	box_offset[i] = unit->ctl - type->box_ctrl;
	kfree(type->ids);
	kfree(type->box_offset);
end:
	type->ids = ids;
	type->box_offset = box_offset;
	type->num_boxes++;
	return;

free_ids:
	kfree(ids);

free_box_offset:
	kfree(box_offset);
}

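/*
 * Map the discovery table pointed to by the BAR and walk it. The
 * global state is mapped first to learn the number of units and the
 * table stride; the mapping is then redone with the full table size
 * before the per-unit entries are parsed.
 */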
static int parse_discovery_table(struct pci_dev *dev, int die,
				 u32 bar_offset, bool *parsed)
{
	struct uncore_global_discovery global;
	struct uncore_unit_discovery unit;
	void __iomem *io_addr;
	resource_size_t addr;
	unsigned long size;
	u32 val;
	int i;

	pci_read_config_dword(dev, bar_offset, &val);

	if (val & ~PCI_BASE_ADDRESS_MEM_MASK & ~PCI_BASE_ADDRESS_MEM_TYPE_64)
		return -EINVAL;

	addr = (resource_size_t)(val & PCI_BASE_ADDRESS_MEM_MASK);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
		u32 val2;

		pci_read_config_dword(dev, bar_offset + 4, &val2);
		addr |= ((resource_size_t)val2) << 32;
	}
#endif
	size = UNCORE_DISCOVERY_GLOBAL_MAP_SIZE;
	io_addr = ioremap(addr, size);
	if (!io_addr)
		return -ENOMEM;

	/* Read Global Discovery State */
	memcpy_fromio(&global, io_addr, sizeof(struct uncore_global_discovery));
	if (uncore_discovery_invalid_unit(global)) {
		pr_info("Invalid Global Discovery State: 0x%llx 0x%llx 0x%llx\n",
			global.table1, global.ctl, global.table3);
		iounmap(io_addr);
		return -EINVAL;
	}
	iounmap(io_addr);

	size = (1 + global.max_units) * global.stride * 8;
	io_addr = ioremap(addr, size);
	if (!io_addr)
		return -ENOMEM;

	/* Parse the Unit Discovery State of each unit */
	for (i = 0; i < global.max_units; i++) {
		memcpy_fromio(&unit, io_addr + (i + 1) * (global.stride * 8),
			      sizeof(struct uncore_unit_discovery));

		if (uncore_discovery_invalid_unit(unit))
			continue;

		if (unit.access_type >= UNCORE_ACCESS_MAX)
			continue;

		uncore_insert_box_info(&unit, die, *parsed);
	}

	*parsed = true;
	iounmap(io_addr);
	return 0;
}

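/*
 * Walk all candidate PCI devices, follow each PMON discovery DVSEC to
 * the BAR that holds its discovery table, and parse the table for the
 * device's die. Returns true only if at least one table was parsed.
 */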
bool intel_uncore_has_discovery_tables(void)
{
	u32 device, val, entry_id, bar_offset;
	int die, dvsec = 0;
	struct pci_dev *dev = NULL;
	bool parsed = false, ret = true;

	if (has_generic_discovery_table())
		device = UNCORE_DISCOVERY_TABLE_DEVICE;
	else
		device = PCI_ANY_ID;

	/*
	 * Start a new search and iterate through the list of
	 * the discovery table devices.
	 */
	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {
		while ((dvsec = pci_find_next_ext_capability(dev, dvsec, UNCORE_EXT_CAP_ID_DISCOVERY))) {
			pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC_OFFSET, &val);
			entry_id = val & UNCORE_DISCOVERY_DVSEC_ID_MASK;
			if (entry_id != UNCORE_DISCOVERY_DVSEC_ID_PMON)
				continue;

			pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC2_OFFSET, &val);

			if (val & ~UNCORE_DISCOVERY_DVSEC2_BIR_MASK) {
				ret = false;
				goto err;
			}
			bar_offset = UNCORE_DISCOVERY_BIR_BASE +
				     (val & UNCORE_DISCOVERY_DVSEC2_BIR_MASK) * UNCORE_DISCOVERY_BIR_STEP;

			die = get_device_die_id(dev);
			if (die < 0)
				continue;

			parse_discovery_table(dev, die, bar_offset, &parsed);
		}
	}

	/* None of the discovery tables are available */
	if (!parsed)
		ret = false;
err:
	pci_dev_put(dev);

	return ret;
}

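/*
 * Free all discovery state: every type node in the rb-tree together
 * with its per-die box control array.
 */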
void intel_uncore_clear_discovery_tables(void)
{
	struct intel_uncore_discovery_type *type, *next;

	rbtree_postorder_for_each_entry_safe(type, next, &discovery_tables, node) {
		kfree(type->box_ctrl_die);
		kfree(type);
	}
}

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh, thresh, "config:24-31");

static struct attribute *generic_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh.attr,
	NULL,
};

static const struct attribute_group generic_uncore_format_group = {
	.name = "format",
	.attrs = generic_uncore_formats_attr,
};

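/*
 * Generic box control via MSRs: init_box writes the initial control
 * value, disable_box freezes the box's counters, and enable_box
 * unfreezes them by clearing the control register.
 */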
void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
}

void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
}

void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(uncore_msr_box_ctl(box), 0);
}

static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void intel_generic_uncore_msr_disable_event(struct intel_uncore_box *box,
					     struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, 0);
}

static struct intel_uncore_ops generic_uncore_msr_ops = {
	.init_box		= intel_generic_uncore_msr_init_box,
	.disable_box		= intel_generic_uncore_msr_disable_box,
	.enable_box		= intel_generic_uncore_msr_enable_box,
	.disable_event		= intel_generic_uncore_msr_disable_event,
	.enable_event		= intel_generic_uncore_msr_enable_event,
	.read_counter		= uncore_msr_read_counter,
};

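/*
 * Generic box control via PCI config space. The event control
 * registers of a discovered PCI box are 8 bytes apart, hence the
 * UNCORE_BOX_FLAG_CTL_OFFS8 flag set at init time.
 */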
void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_INT);
}

void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_FRZ);
}

void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}

static void intel_generic_uncore_pci_enable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, 0);
}

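/*
 * PCI config reads are 32 bits wide, so the 64-bit counter value is
 * assembled from two consecutive dword reads into the low and high
 * halves of 'count'.
 */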
u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
					  struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static struct intel_uncore_ops generic_uncore_pci_ops = {
	.init_box	= intel_generic_uncore_pci_init_box,
	.disable_box	= intel_generic_uncore_pci_disable_box,
	.enable_box	= intel_generic_uncore_pci_enable_box,
	.disable_event	= intel_generic_uncore_pci_disable_event,
	.enable_event	= intel_generic_uncore_pci_enable_event,
	.read_counter	= intel_generic_uncore_pci_read_counter,
};

#define UNCORE_GENERIC_MMIO_SIZE		0x4000

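/*
 * The MMIO box control address is the per-die base recorded during
 * discovery plus the per-PMU offset of this box within the die.
 */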
static u64 generic_uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
	struct intel_uncore_type *type = box->pmu->type;

	if (!type->box_ctls || !type->box_ctls[box->dieid] || !type->mmio_offsets)
		return 0;

	return type->box_ctls[box->dieid] + type->mmio_offsets[box->pmu->pmu_idx];
}

void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	u64 box_ctl = generic_uncore_mmio_box_ctl(box);
	struct intel_uncore_type *type = box->pmu->type;
	resource_size_t addr;

	if (!box_ctl) {
		pr_warn("Uncore type %d box %d: Invalid box control address.\n",
			type->type_id, type->box_ids[box->pmu->pmu_idx]);
		return;
	}

	addr = box_ctl;
	box->io_addr = ioremap(addr, UNCORE_GENERIC_MMIO_SIZE);
	if (!box->io_addr) {
		pr_warn("Uncore type %d box %d: ioremap error for 0x%llx.\n",
			type->type_id, type->box_ids[box->pmu->pmu_idx],
			(unsigned long long)addr);
		return;
	}

	writel(GENERIC_PMON_BOX_CTL_INT, box->io_addr);
}

void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(GENERIC_PMON_BOX_CTL_FRZ, box->io_addr);
}

void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(0, box->io_addr);
}

void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	writel(hwc->config, box->io_addr + hwc->config_base);
}

void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
					     struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	writel(0, box->io_addr + hwc->config_base);
}

static struct intel_uncore_ops generic_uncore_mmio_ops = {
	.init_box	= intel_generic_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= intel_generic_uncore_mmio_disable_box,
	.enable_box	= intel_generic_uncore_mmio_enable_box,
	.disable_event	= intel_generic_uncore_mmio_disable_event,
	.enable_event	= intel_generic_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

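/*
 * Fill a generic intel_uncore_type from the discovered parameters.
 * The register addresses are interpreted per access method: absolute
 * MSR addresses, an offset into PCI config space, or an offset from
 * the per-die MMIO base.
 */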
static bool uncore_update_uncore_type(enum uncore_access_type type_id,
				      struct intel_uncore_type *uncore,
				      struct intel_uncore_discovery_type *type)
{
	uncore->type_id = type->type;
	uncore->num_boxes = type->num_boxes;
	uncore->num_counters = type->num_counters;
	uncore->perf_ctr_bits = type->counter_width;
	uncore->box_ids = type->ids;

	switch (type_id) {
	case UNCORE_ACCESS_MSR:
		uncore->ops = &generic_uncore_msr_ops;
		uncore->perf_ctr = (unsigned int)type->box_ctrl + type->ctr_offset;
		uncore->event_ctl = (unsigned int)type->box_ctrl + type->ctl_offset;
		uncore->box_ctl = (unsigned int)type->box_ctrl;
		uncore->msr_offsets = type->box_offset;
		break;
	case UNCORE_ACCESS_PCI:
		uncore->ops = &generic_uncore_pci_ops;
		uncore->perf_ctr = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctr_offset;
		uncore->event_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctl_offset;
		uncore->box_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl);
		uncore->box_ctls = type->box_ctrl_die;
		uncore->pci_offsets = type->box_offset;
		break;
	case UNCORE_ACCESS_MMIO:
		uncore->ops = &generic_uncore_mmio_ops;
		uncore->perf_ctr = (unsigned int)type->ctr_offset;
		uncore->event_ctl = (unsigned int)type->ctl_offset;
		uncore->box_ctl = (unsigned int)type->box_ctrl;
		uncore->box_ctls = type->box_ctrl_die;
		uncore->mmio_offsets = type->box_offset;
		uncore->mmio_map_size = UNCORE_GENERIC_MMIO_SIZE;
		break;
	default:
		return false;
	}

	return true;
}

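/*
 * Build a NULL-terminated array of generic uncore types for one
 * access method. num_extra reserves additional empty slots beyond
 * the discovered types; the trailing NULL terminates the array.
 */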
struct intel_uncore_type **
intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra)
{
	struct intel_uncore_discovery_type *type;
	struct intel_uncore_type **uncores;
	struct intel_uncore_type *uncore;
	struct rb_node *node;
	int i = 0;

	uncores = kcalloc(num_discovered_types[type_id] + num_extra + 1,
			  sizeof(struct intel_uncore_type *), GFP_KERNEL);
	if (!uncores)
		return empty_uncore;

	for (node = rb_first(&discovery_tables); node; node = rb_next(node)) {
		type = rb_entry(node, struct intel_uncore_discovery_type, node);
		if (type->access_type != type_id)
			continue;

		uncore = kzalloc(sizeof(struct intel_uncore_type), GFP_KERNEL);
		if (!uncore)
			break;

		uncore->event_mask = GENERIC_PMON_RAW_EVENT_MASK;
		uncore->format_group = &generic_uncore_format_group;

		if (!uncore_update_uncore_type(type_id, uncore, type)) {
			kfree(uncore);
			continue;
		}
		uncores[i++] = uncore;
	}

	return uncores;
}

void intel_uncore_generic_uncore_cpu_init(void)
{
	uncore_msr_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MSR, 0);
}

int intel_uncore_generic_uncore_pci_init(void)
{
	uncore_pci_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_PCI, 0);

	return 0;
}

void intel_uncore_generic_uncore_mmio_init(void)
{
	uncore_mmio_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MMIO, 0);
}