// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2021 Intel Corporation. All rights reserved.

#include <linux/platform_device.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <cxlmem.h>
#include "mock.h"

#define NR_CXL_HOST_BRIDGES 2
#define NR_CXL_ROOT_PORTS 2
#define NR_CXL_SWITCH_PORTS 2
#define NR_CXL_PORT_DECODERS 2

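/*
 * Emulated topology: NR_CXL_HOST_BRIDGES host bridges, each with
 * NR_CXL_ROOT_PORTS root ports, each root port fronting a switch with
 * NR_CXL_SWITCH_PORTS downstream ports, and one memory expander per
 * switch downstream port (eight memdevs total with the defaults above).
 */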
static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
static struct platform_device
	*cxl_root_port[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS];
static struct platform_device
	*cxl_switch_uport[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS];
static struct platform_device
	*cxl_switch_dport[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS *
			  NR_CXL_SWITCH_PORTS];
struct platform_device
	*cxl_mem[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS];

static struct acpi_device acpi0017_mock;
static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES] = {
	[0] = {
		.handle = &host_bridge[0],
	},
	[1] = {
		.handle = &host_bridge[1],
	},
};

static bool is_mock_dev(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
		if (dev == &cxl_mem[i]->dev)
			return true;
	if (dev == &cxl_acpi->dev)
		return true;
	return false;
}

static bool is_mock_adev(struct acpi_device *adev)
{
	int i;

	if (adev == &acpi0017_mock)
		return true;

	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
		if (adev == &host_bridge[i])
			return true;

	return false;
}

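/*
 * Mock CEDT: one CHBS per emulated host bridge plus four CFMWS windows,
 * a 1-way and a 2-way interleave for volatile capacity (cfmws0/cfmws1)
 * and the same pair for persistent capacity (cfmws2/cfmws3).
 */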
static struct {
	struct acpi_table_cedt cedt;
	struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES];
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws0;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws1;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws2;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws3;
} __packed mock_cedt = {
	.cedt = {
		.header = {
			.signature = "CEDT",
			.length = sizeof(mock_cedt),
			.revision = 1,
		},
	},
	.chbs[0] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 0,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[1] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 1,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.cfmws0 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws0),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 0,
			.window_size = SZ_256M,
		},
		.target = { 0 },
	},
	.cfmws1 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws1),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 1,
			.window_size = SZ_256M * 2,
		},
		.target = { 0, 1, },
	},
	.cfmws2 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws2),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 2,
			.window_size = SZ_256M,
		},
		.target = { 0 },
	},
	.cfmws3 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws3),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 3,
			.window_size = SZ_256M * 2,
		},
		.target = { 0, 1, },
	},
};

struct acpi_cedt_cfmws *mock_cfmws[4] = {
	[0] = &mock_cedt.cfmws0.cfmws,
	[1] = &mock_cedt.cfmws1.cfmws,
	[2] = &mock_cedt.cfmws2.cfmws,
	[3] = &mock_cedt.cfmws3.cfmws,
};

struct cxl_mock_res {
	struct list_head list;
	struct range range;
};

static LIST_HEAD(mock_res);
static DEFINE_MUTEX(mock_res_lock);
static struct gen_pool *cxl_mock_pool;

static void depopulate_all_mock_resources(void)
{
	struct cxl_mock_res *res, *_res;

	mutex_lock(&mock_res_lock);
	list_for_each_entry_safe(res, _res, &mock_res, list) {
		gen_pool_free(cxl_mock_pool, res->range.start,
			      range_len(&res->range));
		list_del(&res->list);
		kfree(res);
	}
	mutex_unlock(&mock_res_lock);
}

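/*
 * Carve a SZ_256M-aligned range out of cxl_mock_pool and track it on
 * mock_res so depopulate_all_mock_resources() can return it later.
 */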
static struct cxl_mock_res *alloc_mock_res(resource_size_t size)
{
	struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
	struct genpool_data_align data = {
		.align = SZ_256M,
	};
	unsigned long phys;

	if (!res)
		return NULL;

	INIT_LIST_HEAD(&res->list);
	phys = gen_pool_alloc_algo(cxl_mock_pool, size,
				   gen_pool_first_fit_align, &data);
	if (!phys)
		return NULL;

	res->range = (struct range) {
		.start = phys,
		.end = phys + size - 1,
	};
	mutex_lock(&mock_res_lock);
	list_add(&res->list, &mock_res);
	mutex_unlock(&mock_res_lock);

	return res;
}

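/*
 * Assign host physical addresses to the mock CHBS register blocks and
 * CFMWS windows from the emulated address pool.
 */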
static int populate_cedt(void)
{
	struct cxl_mock_res *res;
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
		struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
		resource_size_t size;

		if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
			size = ACPI_CEDT_CHBS_LENGTH_CXL20;
		else
			size = ACPI_CEDT_CHBS_LENGTH_CXL11;

		res = alloc_mock_res(size);
		if (!res)
			return -ENOMEM;
		chbs->base = res->range.start;
		chbs->length = size;
	}

	for (i = 0; i < ARRAY_SIZE(mock_cfmws); i++) {
		struct acpi_cedt_cfmws *window = mock_cfmws[i];

		res = alloc_mock_res(window->window_size);
		if (!res)
			return -ENOMEM;
		window->base_hpa = res->range.start;
	}

	return 0;
}

/*
 * WARNING: this hack assumes that 'struct cxl_cfmws_context' and
 * 'struct cxl_chbs_context' share the property that their first member
 * is the device being probed by the cxl_acpi driver, so either can be
 * treated as a 'struct cxl_cedt_context' here.
 */
struct cxl_cedt_context {
	struct device *dev;
};

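/*
 * Replacement for acpi_table_parse_cedt(): when the caller is the
 * cxl_test 'cxl_acpi' device, walk the mock CHBS/CFMWS entries above;
 * otherwise fall back to parsing the platform's real CEDT.
 */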
static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
				      acpi_tbl_entry_handler_arg handler_arg,
				      void *arg)
{
	struct cxl_cedt_context *ctx = arg;
	struct device *dev = ctx->dev;
	union acpi_subtable_headers *h;
	unsigned long end;
	int i;

	if (dev != &cxl_acpi->dev)
		return acpi_table_parse_cedt(id, handler_arg, arg);

	if (id == ACPI_CEDT_TYPE_CHBS)
		for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
			h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
			end = (unsigned long)&mock_cedt.chbs[i + 1];
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CFMWS)
		for (i = 0; i < ARRAY_SIZE(mock_cfmws); i++) {
			h = (union acpi_subtable_headers *) mock_cfmws[i];
			end = (unsigned long) h + mock_cfmws[i]->header.length;
			handler_arg(h, arg, end);
		}

	return 0;
}

static bool is_mock_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
		if (dev == &cxl_host_bridge[i]->dev)
			return true;
	return false;
}

static bool is_mock_port(struct device *dev)
{
	int i;

	if (is_mock_bridge(dev))
		return true;

	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
		if (dev == &cxl_root_port[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++)
		if (dev == &cxl_switch_uport[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
		if (dev == &cxl_switch_dport[i]->dev)
			return true;

	if (is_cxl_memdev(dev))
		return is_mock_dev(dev->parent);

	return false;
}

static int host_bridge_index(struct acpi_device *adev)
{
	return adev - host_bridge;
}

static struct acpi_device *find_host_bridge(acpi_handle handle)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
		if (handle == host_bridge[i].handle)
			return &host_bridge[i];
	return NULL;
}

static acpi_status
mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
			   struct acpi_object_list *arguments,
			   unsigned long long *data)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
		return acpi_evaluate_integer(handle, pathname, arguments, data);

	*data = host_bridge_index(adev);
	return AE_OK;
}

static struct pci_bus mock_pci_bus[NR_CXL_HOST_BRIDGES];
static struct acpi_pci_root mock_pci_root[NR_CXL_HOST_BRIDGES] = {
	[0] = {
		.bus = &mock_pci_bus[0],
	},
	[1] = {
		.bus = &mock_pci_bus[1],
	},
};

static bool is_mock_bus(struct pci_bus *bus)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
		if (bus == &mock_pci_bus[i])
			return true;
	return false;
}

static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev)
		return acpi_pci_find_root(handle);
	return &mock_pci_root[host_bridge_index(adev)];
}

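/*
 * Minimal stand-in for devm_cxl_setup_hdm(): there is no HDM register
 * block to map for a mock port, so just allocate the context and bind
 * it to the port.
 */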
static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port)
{
	struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);

	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);

	cxlhdm->port = port;
	return cxlhdm;
}

static int mock_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	dev_err(&port->dev, "unexpected passthrough decoder for cxl_test\n");
	return -EOPNOTSUPP;
}

struct target_map_ctx {
	int *target_map;
	int index;
	int target_count;
};

static int map_targets(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct target_map_ctx *ctx = data;

	ctx->target_map[ctx->index++] = pdev->id;

	if (ctx->index > ctx->target_count) {
		dev_WARN_ONCE(dev, 1, "too many targets found?\n");
		return -ENXIO;
	}

	return 0;
}

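/*
 * Emulate devm_cxl_enumerate_decoders(): publish NR_CXL_PORT_DECODERS
 * decoders per port, either switch decoders with a target list built
 * from the uport's child platform devices, or endpoint decoders for
 * endpoint ports.
 */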
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
	struct cxl_port *port = cxlhdm->port;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	int target_count, i;

	if (is_cxl_endpoint(port))
		target_count = 0;
	else if (is_cxl_root(parent_port))
		target_count = NR_CXL_ROOT_PORTS;
	else
		target_count = NR_CXL_SWITCH_PORTS;

	for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		struct target_map_ctx ctx = {
			.target_map = target_map,
			.target_count = target_count,
		};
		struct cxl_decoder *cxld;
		int rc;

		if (target_count)
			cxld = cxl_switch_decoder_alloc(port, target_count);
		else
			cxld = cxl_endpoint_decoder_alloc(port);
		if (IS_ERR(cxld)) {
			dev_warn(&port->dev,
				 "Failed to allocate the decoder\n");
			return PTR_ERR(cxld);
		}

		cxld->decoder_range = (struct range) {
			.start = 0,
			.end = -1,
		};

		cxld->interleave_ways = min_not_zero(target_count, 1);
		cxld->interleave_granularity = SZ_4K;
		cxld->target_type = CXL_DECODER_EXPANDER;

		if (target_count) {
			rc = device_for_each_child(port->uport, &ctx,
						   map_targets);
			if (rc) {
				put_device(&cxld->dev);
				return rc;
			}
		}

		rc = cxl_decoder_add_locked(cxld, target_map);
		if (rc) {
			put_device(&cxld->dev);
			dev_err(&port->dev, "Failed to add decoder\n");
			return rc;
		}

		rc = cxl_decoder_autoremove(&port->dev, cxld);
		if (rc)
			return rc;
		dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
	}

	return 0;
}

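/*
 * Emulate devm_cxl_port_enumerate_dports(): register the mock root
 * ports (depth 1) or switch downstream ports (depth 2) whose parent
 * platform device matches this port's uport.
 */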
static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct device *dev = &port->dev;
	struct platform_device **array;
	int i, array_size;

	if (port->depth == 1) {
		array_size = ARRAY_SIZE(cxl_root_port);
		array = cxl_root_port;
	} else if (port->depth == 2) {
		array_size = ARRAY_SIZE(cxl_switch_dport);
		array = cxl_switch_dport;
	} else {
		dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
			      port->depth);
		return -ENXIO;
	}

	for (i = 0; i < array_size; i++) {
		struct platform_device *pdev = array[i];
		struct cxl_dport *dport;

		if (pdev->dev.parent != port->uport)
			continue;

		dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
					   CXL_RESOURCE_NONE);

		if (IS_ERR(dport)) {
			dev_err(dev, "failed to add dport: %s (%ld)\n",
				dev_name(&pdev->dev), PTR_ERR(dport));
			return PTR_ERR(dport);
		}

		dev_dbg(dev, "add dport%d: %s\n", pdev->id,
			dev_name(&pdev->dev));
	}

	return 0;
}

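/*
 * Ops handed to the cxl_test harness via register_cxl_mock_ops() so
 * that ACPI and CXL core calls against mock devices are redirected to
 * the emulations above.
 */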
static struct cxl_mock_ops cxl_mock_ops = {
	.is_mock_adev = is_mock_adev,
	.is_mock_bridge = is_mock_bridge,
	.is_mock_bus = is_mock_bus,
	.is_mock_port = is_mock_port,
	.is_mock_dev = is_mock_dev,
	.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
	.acpi_evaluate_integer = mock_acpi_evaluate_integer,
	.acpi_pci_find_root = mock_acpi_pci_find_root,
	.devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
	.devm_cxl_setup_hdm = mock_cxl_setup_hdm,
	.devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder,
	.devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders,
	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};

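/*
 * Wire a mock acpi_device to a platform device by hand, standing in for
 * the ACPI companion/fwnode linkage that firmware enumeration would
 * normally establish.
 */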
static void mock_companion(struct acpi_device *adev, struct device *dev)
{
	device_initialize(&adev->dev);
	fwnode_init(&adev->fwnode, NULL);
	dev->fwnode = &adev->fwnode;
	adev->fwnode.dev = dev;
}

#ifndef SZ_64G
#define SZ_64G (SZ_32G * 2)
#endif

#ifndef SZ_512G
#define SZ_512G (SZ_64G * 8)
#endif

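/*
 * Build a "cxl_mem" platform device with two SZ_256M resources from the
 * mock pool: one volatile region and one marked
 * IORES_DESC_PERSISTENT_MEMORY.
 */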
static struct platform_device *alloc_memdev(int id)
{
	struct resource res[] = {
		[0] = {
			.flags = IORESOURCE_MEM,
		},
		[1] = {
			.flags = IORESOURCE_MEM,
			.desc = IORES_DESC_PERSISTENT_MEMORY,
		},
	};
	struct platform_device *pdev;
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(res); i++) {
		struct cxl_mock_res *r = alloc_mock_res(SZ_256M);

		if (!r)
			return NULL;
		res[i].start = r->range.start;
		res[i].end = r->range.end;
	}

	pdev = platform_device_alloc("cxl_mem", id);
	if (!pdev)
		return NULL;

	rc = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
	if (rc)
		goto err;

	return pdev;

err:
	platform_device_put(pdev);
	return NULL;
}

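/*
 * Assemble the mock topology in order: host bridges, root ports, switch
 * uports, switch dports, memdevs, and finally the "cxl_acpi" root
 * device. The emulated physical address space is a 64G gen_pool chunk
 * starting at 512G with 2M minimum allocation granularity. Error paths
 * unwind in reverse order of creation.
 */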
static __init int cxl_test_init(void)
{
	int rc, i;

	register_cxl_mock_ops(&cxl_mock_ops);

	cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
	if (!cxl_mock_pool) {
		rc = -ENOMEM;
		goto err_gen_pool_create;
	}

	rc = gen_pool_add(cxl_mock_pool, SZ_512G, SZ_64G, NUMA_NO_NODE);
	if (rc)
		goto err_gen_pool_add;

	rc = populate_cedt();
	if (rc)
		goto err_populate;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
		struct acpi_device *adev = &host_bridge[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge", i);
		if (!pdev)
			goto err_bridge;

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}

		cxl_host_bridge[i] = pdev;
		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
				       "physical_node");
		if (rc)
			goto err_bridge;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
		struct platform_device *bridge =
			cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_root_port", i);
		if (!pdev)
			goto err_port;
		pdev->dev.parent = &bridge->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_port;
		}
		cxl_root_port[i] = pdev;
	}

	BUILD_BUG_ON(ARRAY_SIZE(cxl_switch_uport) != ARRAY_SIZE(cxl_root_port));
	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++) {
		struct platform_device *root_port = cxl_root_port[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_uport", i);
		if (!pdev)
			goto err_port;
		pdev->dev.parent = &root_port->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_uport;
		}
		cxl_switch_uport[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) {
		struct platform_device *uport =
			cxl_switch_uport[i % ARRAY_SIZE(cxl_switch_uport)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_dport", i);
		if (!pdev)
			goto err_port;
		pdev->dev.parent = &uport->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_dport;
		}
		cxl_switch_dport[i] = pdev;
	}

	BUILD_BUG_ON(ARRAY_SIZE(cxl_mem) != ARRAY_SIZE(cxl_switch_dport));
	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
		struct platform_device *dport = cxl_switch_dport[i];
		struct platform_device *pdev;

		pdev = alloc_memdev(i);
		if (!pdev)
			goto err_mem;
		pdev->dev.parent = &dport->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_mem;
		}
		cxl_mem[i] = pdev;
	}

	cxl_acpi = platform_device_alloc("cxl_acpi", 0);
	if (!cxl_acpi)
		goto err_mem;

	mock_companion(&acpi0017_mock, &cxl_acpi->dev);
	acpi0017_mock.dev.bus = &platform_bus_type;

	rc = platform_device_add(cxl_acpi);
	if (rc)
		goto err_add;

	return 0;

err_add:
	platform_device_put(cxl_acpi);
err_mem:
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);
err_dport:
	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_dport[i]);
err_uport:
	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_uport[i]);
err_port:
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
err_bridge:
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_host_bridge[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_host_bridge[i]);
	}
err_populate:
	depopulate_all_mock_resources();
err_gen_pool_add:
	gen_pool_destroy(cxl_mock_pool);
err_gen_pool_create:
	unregister_cxl_mock_ops(&cxl_mock_ops);
	return rc;
}

static __exit void cxl_test_exit(void)
{
	int i;

	platform_device_unregister(cxl_acpi);
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);
	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_dport[i]);
	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_uport[i]);
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_host_bridge[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_host_bridge[i]);
	}
	depopulate_all_mock_resources();
	gen_pool_destroy(cxl_mock_pool);
	unregister_cxl_mock_ops(&cxl_mock_ops);
}

module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(ACPI);
MODULE_IMPORT_NS(CXL);