1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2022 Intel Corporation. All rights reserved. */
3 #include <linux/memregion.h>
4 #include <linux/genalloc.h>
5 #include <linux/device.h>
6 #include <linux/module.h>
7 #include <linux/slab.h>
8 #include <linux/uuid.h>
9 #include <linux/sort.h>
10 #include <linux/idr.h>
11 #include <cxlmem.h>
12 #include <cxl.h>
13 #include "core.h"
14
15 /**
16 * DOC: cxl core region
17 *
18 * CXL Regions represent mapped memory capacity in system physical address
19 * space. Whereas the CXL Root Decoders identify the bounds of potential CXL
20 * Memory ranges, Regions represent the capacity actively mapped by the HDM
21 * Decoder Capability structures throughout the Host Bridges, Switches, and
22 * Endpoints in the topology.
23 *
24 * Region configuration has ordering constraints. UUID may be set at any time
25 * but is only visible for persistent regions.
26 * 1. Interleave granularity
27 * 2. Interleave size
28 * 3. Decoder targets
29 */
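/*
 * A minimal sketch of the sysfs flow that follows the ordering above. The
 * decoder and region names are illustrative only (not taken from this file),
 * and assume a pmem-capable root decoder "decoder0.0" plus an endpoint
 * decoder "decoder2.0" that already has a matching DPA allocation:
 *
 *   region=$(cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region)
 *   echo $region > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 *   cd /sys/bus/cxl/devices/$region
 *   uuidgen > uuid
 *   echo 256 > interleave_granularity
 *   echo 1 > interleave_ways
 *   echo $((256 << 20)) > size
 *   echo decoder2.0 > target0
 *   echo 1 > commit
 */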
30
31 static struct cxl_region *to_cxl_region(struct device *dev);
32
33 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
34 char *buf)
35 {
36 struct cxl_region *cxlr = to_cxl_region(dev);
37 struct cxl_region_params *p = &cxlr->params;
38 ssize_t rc;
39
40 rc = down_read_interruptible(&cxl_region_rwsem);
41 if (rc)
42 return rc;
43 if (cxlr->mode != CXL_DECODER_PMEM)
44 rc = sysfs_emit(buf, "\n");
45 else
46 rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
47 up_read(&cxl_region_rwsem);
48
49 return rc;
50 }
51
52 static int is_dup(struct device *match, void *data)
53 {
54 struct cxl_region_params *p;
55 struct cxl_region *cxlr;
56 uuid_t *uuid = data;
57
58 if (!is_cxl_region(match))
59 return 0;
60
61 lockdep_assert_held(&cxl_region_rwsem);
62 cxlr = to_cxl_region(match);
63 p = &cxlr->params;
64
65 if (uuid_equal(&p->uuid, uuid)) {
66 dev_dbg(match, "already has uuid: %pUb\n", uuid);
67 return -EBUSY;
68 }
69
70 return 0;
71 }
72
73 static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
74 const char *buf, size_t len)
75 {
76 struct cxl_region *cxlr = to_cxl_region(dev);
77 struct cxl_region_params *p = &cxlr->params;
78 uuid_t temp;
79 ssize_t rc;
80
81 if (len != UUID_STRING_LEN + 1)
82 return -EINVAL;
83
84 rc = uuid_parse(buf, &temp);
85 if (rc)
86 return rc;
87
88 if (uuid_is_null(&temp))
89 return -EINVAL;
90
91 rc = down_write_killable(&cxl_region_rwsem);
92 if (rc)
93 return rc;
94
95 if (uuid_equal(&p->uuid, &temp))
96 goto out;
97
98 rc = -EBUSY;
99 if (p->state >= CXL_CONFIG_ACTIVE)
100 goto out;
101
102 rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
103 if (rc < 0)
104 goto out;
105
106 uuid_copy(&p->uuid, &temp);
107 out:
108 up_write(&cxl_region_rwsem);
109
110 if (rc)
111 return rc;
112 return len;
113 }
114 static DEVICE_ATTR_RW(uuid);
115
116 static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
117 struct cxl_region *cxlr)
118 {
119 return xa_load(&port->regions, (unsigned long)cxlr);
120 }
121
122 static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
123 {
124 if (!cpu_cache_has_invalidate_memregion()) {
125 if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
126 dev_warn_once(
127 &cxlr->dev,
128 "Bypassing cpu_cache_invalidate_memregion() for testing!\n");
129 return 0;
130 } else {
131 dev_err(&cxlr->dev,
132 "Failed to synchronize CPU cache state\n");
133 return -ENXIO;
134 }
135 }
136
137 cpu_cache_invalidate_memregion(IORES_DESC_CXL);
138 return 0;
139 }
140
141 static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
142 {
143 struct cxl_region_params *p = &cxlr->params;
144 int i, rc = 0;
145
146 /*
147 * Before region teardown attempt to flush, and if the flush
148 * fails cancel the region teardown for data consistency
149 * concerns
150 */
151 rc = cxl_region_invalidate_memregion(cxlr);
152 if (rc)
153 return rc;
154
155 for (i = count - 1; i >= 0; i--) {
156 struct cxl_endpoint_decoder *cxled = p->targets[i];
157 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
158 struct cxl_port *iter = cxled_to_port(cxled);
159 struct cxl_dev_state *cxlds = cxlmd->cxlds;
160 struct cxl_ep *ep;
161
162 if (cxlds->rcd)
163 goto endpoint_reset;
164
165 while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
166 iter = to_cxl_port(iter->dev.parent);
167
168 for (ep = cxl_ep_load(iter, cxlmd); iter;
169 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
170 struct cxl_region_ref *cxl_rr;
171 struct cxl_decoder *cxld;
172
173 cxl_rr = cxl_rr_load(iter, cxlr);
174 cxld = cxl_rr->decoder;
175 if (cxld->reset)
176 rc = cxld->reset(cxld);
177 if (rc)
178 return rc;
179 set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
180 }
181
182 endpoint_reset:
183 rc = cxled->cxld.reset(&cxled->cxld);
184 if (rc)
185 return rc;
186 set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
187 }
188
189 /* all decoders associated with this region have been torn down */
190 clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
191
192 return 0;
193 }
194
195 static int commit_decoder(struct cxl_decoder *cxld)
196 {
197 struct cxl_switch_decoder *cxlsd = NULL;
198
199 if (cxld->commit)
200 return cxld->commit(cxld);
201
202 if (is_switch_decoder(&cxld->dev))
203 cxlsd = to_cxl_switch_decoder(&cxld->dev);
204
205 if (dev_WARN_ONCE(&cxld->dev, !cxlsd || cxlsd->nr_targets > 1,
206 "->commit() is required\n"))
207 return -ENXIO;
208 return 0;
209 }
210
211 static int cxl_region_decode_commit(struct cxl_region *cxlr)
212 {
213 struct cxl_region_params *p = &cxlr->params;
214 int i, rc = 0;
215
216 for (i = 0; i < p->nr_targets; i++) {
217 struct cxl_endpoint_decoder *cxled = p->targets[i];
218 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
219 struct cxl_region_ref *cxl_rr;
220 struct cxl_decoder *cxld;
221 struct cxl_port *iter;
222 struct cxl_ep *ep;
223
224 /* commit bottom up */
225 for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
226 iter = to_cxl_port(iter->dev.parent)) {
227 cxl_rr = cxl_rr_load(iter, cxlr);
228 cxld = cxl_rr->decoder;
229 rc = commit_decoder(cxld);
230 if (rc)
231 break;
232 }
233
234 if (rc) {
235 /* programming @iter failed, teardown */
236 for (ep = cxl_ep_load(iter, cxlmd); ep && iter;
237 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
238 cxl_rr = cxl_rr_load(iter, cxlr);
239 cxld = cxl_rr->decoder;
240 if (cxld->reset)
241 cxld->reset(cxld);
242 }
243
244 cxled->cxld.reset(&cxled->cxld);
245 goto err;
246 }
247 }
248
249 return 0;
250
251 err:
252 /* undo the targets that were successfully committed */
253 cxl_region_decode_reset(cxlr, i);
254 return rc;
255 }
256
257 static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
258 const char *buf, size_t len)
259 {
260 struct cxl_region *cxlr = to_cxl_region(dev);
261 struct cxl_region_params *p = &cxlr->params;
262 bool commit;
263 ssize_t rc;
264
265 rc = kstrtobool(buf, &commit);
266 if (rc)
267 return rc;
268
269 rc = down_write_killable(&cxl_region_rwsem);
270 if (rc)
271 return rc;
272
273 /* Already in the requested state? */
274 if (commit && p->state >= CXL_CONFIG_COMMIT)
275 goto out;
276 if (!commit && p->state < CXL_CONFIG_COMMIT)
277 goto out;
278
279 /* Not ready to commit? */
280 if (commit && p->state < CXL_CONFIG_ACTIVE) {
281 rc = -ENXIO;
282 goto out;
283 }
284
285 /*
286 * Invalidate caches before region setup to drop any speculative
287 * consumption of this address space
288 */
289 rc = cxl_region_invalidate_memregion(cxlr);
290 if (rc)
291 goto out;
292
293 if (commit) {
294 rc = cxl_region_decode_commit(cxlr);
295 if (rc == 0)
296 p->state = CXL_CONFIG_COMMIT;
297 } else {
298 p->state = CXL_CONFIG_RESET_PENDING;
299 up_write(&cxl_region_rwsem);
300 device_release_driver(&cxlr->dev);
301 down_write(&cxl_region_rwsem);
302
303 /*
304 * The lock was dropped, so need to revalidate that the reset is
305 * still pending.
306 */
307 if (p->state == CXL_CONFIG_RESET_PENDING) {
308 rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
309 /*
310 * Revert to committed since there may still be active
311 * decoders associated with this region, or move forward
312 * to active to mark the reset successful
313 */
314 if (rc)
315 p->state = CXL_CONFIG_COMMIT;
316 else
317 p->state = CXL_CONFIG_ACTIVE;
318 }
319 }
320
321 out:
322 up_write(&cxl_region_rwsem);
323
324 if (rc)
325 return rc;
326 return len;
327 }
328
329 static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
330 char *buf)
331 {
332 struct cxl_region *cxlr = to_cxl_region(dev);
333 struct cxl_region_params *p = &cxlr->params;
334 ssize_t rc;
335
336 rc = down_read_interruptible(&cxl_region_rwsem);
337 if (rc)
338 return rc;
339 rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
340 up_read(&cxl_region_rwsem);
341
342 return rc;
343 }
344 static DEVICE_ATTR_RW(commit);
345
346 static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
347 int n)
348 {
349 struct device *dev = kobj_to_dev(kobj);
350 struct cxl_region *cxlr = to_cxl_region(dev);
351
352 /*
353 * Support tooling that expects to find a 'uuid' attribute for all
354 * regions regardless of mode.
355 */
356 if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
357 return 0444;
358 return a->mode;
359 }
360
361 static ssize_t interleave_ways_show(struct device *dev,
362 struct device_attribute *attr, char *buf)
363 {
364 struct cxl_region *cxlr = to_cxl_region(dev);
365 struct cxl_region_params *p = &cxlr->params;
366 ssize_t rc;
367
368 rc = down_read_interruptible(&cxl_region_rwsem);
369 if (rc)
370 return rc;
371 rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
372 up_read(&cxl_region_rwsem);
373
374 return rc;
375 }
376
377 static const struct attribute_group *get_cxl_region_target_group(void);
378
379 static ssize_t interleave_ways_store(struct device *dev,
380 struct device_attribute *attr,
381 const char *buf, size_t len)
382 {
383 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
384 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
385 struct cxl_region *cxlr = to_cxl_region(dev);
386 struct cxl_region_params *p = &cxlr->params;
387 unsigned int val, save;
388 int rc;
389 u8 iw;
390
391 rc = kstrtouint(buf, 0, &val);
392 if (rc)
393 return rc;
394
395 rc = ways_to_eiw(val, &iw);
396 if (rc)
397 return rc;
398
399 /*
400 * Even for x3, x6, and x12 interleaves the region interleave must be a
401 * power of 2 multiple of the host bridge interleave.
402 */
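/* For example: a x3 host-bridge interleave admits x3, x6, and x12 regions, but not x9. */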
403 if (!is_power_of_2(val / cxld->interleave_ways) ||
404 (val % cxld->interleave_ways)) {
405 dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
406 return -EINVAL;
407 }
408
409 rc = down_write_killable(&cxl_region_rwsem);
410 if (rc)
411 return rc;
412 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
413 rc = -EBUSY;
414 goto out;
415 }
416
417 save = p->interleave_ways;
418 p->interleave_ways = val;
419 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
420 if (rc)
421 p->interleave_ways = save;
422 out:
423 up_write(&cxl_region_rwsem);
424 if (rc)
425 return rc;
426 return len;
427 }
428 static DEVICE_ATTR_RW(interleave_ways);
429
430 static ssize_t interleave_granularity_show(struct device *dev,
431 struct device_attribute *attr,
432 char *buf)
433 {
434 struct cxl_region *cxlr = to_cxl_region(dev);
435 struct cxl_region_params *p = &cxlr->params;
436 ssize_t rc;
437
438 rc = down_read_interruptible(&cxl_region_rwsem);
439 if (rc)
440 return rc;
441 rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
442 up_read(&cxl_region_rwsem);
443
444 return rc;
445 }
446
447 static ssize_t interleave_granularity_store(struct device *dev,
448 struct device_attribute *attr,
449 const char *buf, size_t len)
450 {
451 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
452 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
453 struct cxl_region *cxlr = to_cxl_region(dev);
454 struct cxl_region_params *p = &cxlr->params;
455 int rc, val;
456 u16 ig;
457
458 rc = kstrtoint(buf, 0, &val);
459 if (rc)
460 return rc;
461
462 rc = granularity_to_eig(val, &ig);
463 if (rc)
464 return rc;
465
466 /*
467 * When the host-bridge is interleaved, disallow region granularity !=
468 * root granularity. Regions with a granularity less than the root
469 * interleave result in needing multiple endpoints to support a single
470 * slot in the interleave (possible to support in the future). Regions
471 * with a granularity greater than the root interleave result in invalid
472 * DPA translations (invalid to support).
473 */
474 if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
475 return -EINVAL;
476
477 rc = down_write_killable(&cxl_region_rwsem);
478 if (rc)
479 return rc;
480 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
481 rc = -EBUSY;
482 goto out;
483 }
484
485 p->interleave_granularity = val;
486 out:
487 up_write(&cxl_region_rwsem);
488 if (rc)
489 return rc;
490 return len;
491 }
492 static DEVICE_ATTR_RW(interleave_granularity);
493
494 static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
495 char *buf)
496 {
497 struct cxl_region *cxlr = to_cxl_region(dev);
498 struct cxl_region_params *p = &cxlr->params;
499 u64 resource = -1ULL;
500 ssize_t rc;
501
502 rc = down_read_interruptible(&cxl_region_rwsem);
503 if (rc)
504 return rc;
505 if (p->res)
506 resource = p->res->start;
507 rc = sysfs_emit(buf, "%#llx\n", resource);
508 up_read(&cxl_region_rwsem);
509
510 return rc;
511 }
512 static DEVICE_ATTR_RO(resource);
513
514 static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
515 char *buf)
516 {
517 struct cxl_region *cxlr = to_cxl_region(dev);
518
519 return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode));
520 }
521 static DEVICE_ATTR_RO(mode);
522
523 static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
524 {
525 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
526 struct cxl_region_params *p = &cxlr->params;
527 struct resource *res;
528 u64 remainder = 0;
529
530 lockdep_assert_held_write(&cxl_region_rwsem);
531
532 /* Nothing to do... */
533 if (p->res && resource_size(p->res) == size)
534 return 0;
535
536 /* To change size the old size must be freed first */
537 if (p->res)
538 return -EBUSY;
539
540 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
541 return -EBUSY;
542
543 /* ways, granularity and uuid (if PMEM) need to be set before HPA */
544 if (!p->interleave_ways || !p->interleave_granularity ||
545 (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
546 return -ENXIO;
547
548 div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
549 if (remainder)
550 return -EINVAL;
551
552 res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
553 dev_name(&cxlr->dev));
554 if (IS_ERR(res)) {
555 dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n",
556 PTR_ERR(res));
557 return PTR_ERR(res);
558 }
559
560 p->res = res;
561 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
562
563 return 0;
564 }
565
566 static void cxl_region_iomem_release(struct cxl_region *cxlr)
567 {
568 struct cxl_region_params *p = &cxlr->params;
569
570 if (device_is_registered(&cxlr->dev))
571 lockdep_assert_held_write(&cxl_region_rwsem);
572 if (p->res) {
573 /*
574 * Autodiscovered regions may not have been able to insert their
575 * resource.
576 */
577 if (p->res->parent)
578 remove_resource(p->res);
579 kfree(p->res);
580 p->res = NULL;
581 }
582 }
583
584 static int free_hpa(struct cxl_region *cxlr)
585 {
586 struct cxl_region_params *p = &cxlr->params;
587
588 lockdep_assert_held_write(&cxl_region_rwsem);
589
590 if (!p->res)
591 return 0;
592
593 if (p->state >= CXL_CONFIG_ACTIVE)
594 return -EBUSY;
595
596 cxl_region_iomem_release(cxlr);
597 p->state = CXL_CONFIG_IDLE;
598 return 0;
599 }
600
601 static ssize_t size_store(struct device *dev, struct device_attribute *attr,
602 const char *buf, size_t len)
603 {
604 struct cxl_region *cxlr = to_cxl_region(dev);
605 u64 val;
606 int rc;
607
608 rc = kstrtou64(buf, 0, &val);
609 if (rc)
610 return rc;
611
612 rc = down_write_killable(&cxl_region_rwsem);
613 if (rc)
614 return rc;
615
616 if (val)
617 rc = alloc_hpa(cxlr, val);
618 else
619 rc = free_hpa(cxlr);
620 up_write(&cxl_region_rwsem);
621
622 if (rc)
623 return rc;
624
625 return len;
626 }
627
628 static ssize_t size_show(struct device *dev, struct device_attribute *attr,
629 char *buf)
630 {
631 struct cxl_region *cxlr = to_cxl_region(dev);
632 struct cxl_region_params *p = &cxlr->params;
633 u64 size = 0;
634 ssize_t rc;
635
636 rc = down_read_interruptible(&cxl_region_rwsem);
637 if (rc)
638 return rc;
639 if (p->res)
640 size = resource_size(p->res);
641 rc = sysfs_emit(buf, "%#llx\n", size);
642 up_read(&cxl_region_rwsem);
643
644 return rc;
645 }
646 static DEVICE_ATTR_RW(size);
647
648 static struct attribute *cxl_region_attrs[] = {
649 &dev_attr_uuid.attr,
650 &dev_attr_commit.attr,
651 &dev_attr_interleave_ways.attr,
652 &dev_attr_interleave_granularity.attr,
653 &dev_attr_resource.attr,
654 &dev_attr_size.attr,
655 &dev_attr_mode.attr,
656 NULL,
657 };
658
659 static const struct attribute_group cxl_region_group = {
660 .attrs = cxl_region_attrs,
661 .is_visible = cxl_region_visible,
662 };
663
664 static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
665 {
666 struct cxl_region_params *p = &cxlr->params;
667 struct cxl_endpoint_decoder *cxled;
668 int rc;
669
670 rc = down_read_interruptible(&cxl_region_rwsem);
671 if (rc)
672 return rc;
673
674 if (pos >= p->interleave_ways) {
675 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
676 p->interleave_ways);
677 rc = -ENXIO;
678 goto out;
679 }
680
681 cxled = p->targets[pos];
682 if (!cxled)
683 rc = sysfs_emit(buf, "\n");
684 else
685 rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
686 out:
687 up_read(&cxl_region_rwsem);
688
689 return rc;
690 }
691
692 static int match_free_decoder(struct device *dev, void *data)
693 {
694 struct cxl_decoder *cxld;
695 int *id = data;
696
697 if (!is_switch_decoder(dev))
698 return 0;
699
700 cxld = to_cxl_decoder(dev);
701
702 /* enforce ordered allocation */
703 if (cxld->id != *id)
704 return 0;
705
706 if (!cxld->region)
707 return 1;
708
709 (*id)++;
710
711 return 0;
712 }
713
714 static int match_auto_decoder(struct device *dev, void *data)
715 {
716 struct cxl_region_params *p = data;
717 struct cxl_decoder *cxld;
718 struct range *r;
719
720 if (!is_switch_decoder(dev))
721 return 0;
722
723 cxld = to_cxl_decoder(dev);
724 r = &cxld->hpa_range;
725
726 if (p->res && p->res->start == r->start && p->res->end == r->end)
727 return 1;
728
729 return 0;
730 }
731
732 static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
733 struct cxl_region *cxlr)
734 {
735 struct device *dev;
736 int id = 0;
737
738 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
739 dev = device_find_child(&port->dev, &cxlr->params,
740 match_auto_decoder);
741 else
742 dev = device_find_child(&port->dev, &id, match_free_decoder);
743 if (!dev)
744 return NULL;
745 /*
746 * This decoder stays pinned (registered) as long as the endpoint decoder is
747 * registered, and endpoint decoder unregistration holds the
748 * cxl_region_rwsem over unregister events, so no need to hold on to
749 * this extra reference.
750 */
751 put_device(dev);
752 return to_cxl_decoder(dev);
753 }
754
755 static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
756 struct cxl_region *cxlr)
757 {
758 struct cxl_region_params *p = &cxlr->params;
759 struct cxl_region_ref *cxl_rr, *iter;
760 unsigned long index;
761 int rc;
762
763 xa_for_each(&port->regions, index, iter) {
764 struct cxl_region_params *ip = &iter->region->params;
765
766 if (!ip->res)
767 continue;
768
769 if (ip->res->start > p->res->start) {
770 dev_dbg(&cxlr->dev,
771 "%s: HPA order violation %s:%pr vs %pr\n",
772 dev_name(&port->dev),
773 dev_name(&iter->region->dev), ip->res, p->res);
774 return ERR_PTR(-EBUSY);
775 }
776 }
777
778 cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
779 if (!cxl_rr)
780 return ERR_PTR(-ENOMEM);
781 cxl_rr->port = port;
782 cxl_rr->region = cxlr;
783 cxl_rr->nr_targets = 1;
784 xa_init(&cxl_rr->endpoints);
785
786 rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
787 if (rc) {
788 dev_dbg(&cxlr->dev,
789 "%s: failed to track region reference: %d\n",
790 dev_name(&port->dev), rc);
791 kfree(cxl_rr);
792 return ERR_PTR(rc);
793 }
794
795 return cxl_rr;
796 }
797
798 static void cxl_rr_free_decoder(struct cxl_region_ref *cxl_rr)
799 {
800 struct cxl_region *cxlr = cxl_rr->region;
801 struct cxl_decoder *cxld = cxl_rr->decoder;
802
803 if (!cxld)
804 return;
805
806 dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
807 if (cxld->region == cxlr) {
808 cxld->region = NULL;
809 put_device(&cxlr->dev);
810 }
811 }
812
813 static void free_region_ref(struct cxl_region_ref *cxl_rr)
814 {
815 struct cxl_port *port = cxl_rr->port;
816 struct cxl_region *cxlr = cxl_rr->region;
817
818 cxl_rr_free_decoder(cxl_rr);
819 xa_erase(&port->regions, (unsigned long)cxlr);
820 xa_destroy(&cxl_rr->endpoints);
821 kfree(cxl_rr);
822 }
823
824 static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
825 struct cxl_endpoint_decoder *cxled)
826 {
827 int rc;
828 struct cxl_port *port = cxl_rr->port;
829 struct cxl_region *cxlr = cxl_rr->region;
830 struct cxl_decoder *cxld = cxl_rr->decoder;
831 struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));
832
833 if (ep) {
834 rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
835 GFP_KERNEL);
836 if (rc)
837 return rc;
838 }
839 cxl_rr->nr_eps++;
840
841 if (!cxld->region) {
842 cxld->region = cxlr;
843 get_device(&cxlr->dev);
844 }
845
846 return 0;
847 }
848
849 static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
850 struct cxl_endpoint_decoder *cxled,
851 struct cxl_region_ref *cxl_rr)
852 {
853 struct cxl_decoder *cxld;
854
855 if (port == cxled_to_port(cxled))
856 cxld = &cxled->cxld;
857 else
858 cxld = cxl_region_find_decoder(port, cxlr);
859 if (!cxld) {
860 dev_dbg(&cxlr->dev, "%s: no decoder available\n",
861 dev_name(&port->dev));
862 return -EBUSY;
863 }
864
865 if (cxld->region) {
866 dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
867 dev_name(&port->dev), dev_name(&cxld->dev),
868 dev_name(&cxld->region->dev));
869 return -EBUSY;
870 }
871
872 /*
873 * Endpoints should already match the region type, but backstop that
874 * assumption with an assertion. Switch-decoders change mapping-type
875 * based on what is mapped when they are assigned to a region.
876 */
877 dev_WARN_ONCE(&cxlr->dev,
878 port == cxled_to_port(cxled) &&
879 cxld->target_type != cxlr->type,
880 "%s:%s mismatch decoder type %d -> %d\n",
881 dev_name(&cxled_to_memdev(cxled)->dev),
882 dev_name(&cxld->dev), cxld->target_type, cxlr->type);
883 cxld->target_type = cxlr->type;
884 cxl_rr->decoder = cxld;
885 return 0;
886 }
887
888 /**
889 * cxl_port_attach_region() - track a region's interest in a port by endpoint
890 * @port: port to add a new region reference 'struct cxl_region_ref'
891 * @cxlr: region to attach to @port
892 * @cxled: endpoint decoder used to create or further pin a region reference
893 * @pos: interleave position of @cxled in @cxlr
894 *
895 * The attach event is an opportunity to validate CXL decode setup
896 * constraints and record metadata needed for programming HDM decoders,
897 * in particular decoder target lists.
898 *
899 * The steps are:
900 *
901 * - validate that there are no other regions with a higher HPA already
902 * associated with @port
903 * - establish a region reference if one is not already present
904 *
905 * - additionally allocate a decoder instance that will host @cxlr on
906 * @port
907 *
908 * - pin the region reference by the endpoint
909 * - account for how many entries in @port's target list are needed to
910 * cover all of the added endpoints.
911 */
912 static int cxl_port_attach_region(struct cxl_port *port,
913 struct cxl_region *cxlr,
914 struct cxl_endpoint_decoder *cxled, int pos)
915 {
916 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
917 struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
918 struct cxl_region_ref *cxl_rr;
919 bool nr_targets_inc = false;
920 struct cxl_decoder *cxld;
921 unsigned long index;
922 int rc = -EBUSY;
923
924 lockdep_assert_held_write(&cxl_region_rwsem);
925
926 cxl_rr = cxl_rr_load(port, cxlr);
927 if (cxl_rr) {
928 struct cxl_ep *ep_iter;
929 int found = 0;
930
931 /*
932 * Walk the existing endpoints that have been attached to
933 * @cxlr at @port and see if they share the same 'next' port
934 * in the downstream direction, i.e. endpoints that share a common
935 * upstream switch.
936 */
937 xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
938 if (ep_iter == ep)
939 continue;
940 if (ep_iter->next == ep->next) {
941 found++;
942 break;
943 }
944 }
945
946 /*
947 * New target port, or @port is an endpoint port that always
948 * accounts its own local decode as a target.
949 */
950 if (!found || !ep->next) {
951 cxl_rr->nr_targets++;
952 nr_targets_inc = true;
953 }
954 } else {
955 cxl_rr = alloc_region_ref(port, cxlr);
956 if (IS_ERR(cxl_rr)) {
957 dev_dbg(&cxlr->dev,
958 "%s: failed to allocate region reference\n",
959 dev_name(&port->dev));
960 return PTR_ERR(cxl_rr);
961 }
962 nr_targets_inc = true;
963
964 rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr);
965 if (rc)
966 goto out_erase;
967 }
968 cxld = cxl_rr->decoder;
969
970 rc = cxl_rr_ep_add(cxl_rr, cxled);
971 if (rc) {
972 dev_dbg(&cxlr->dev,
973 "%s: failed to track endpoint %s:%s reference\n",
974 dev_name(&port->dev), dev_name(&cxlmd->dev),
975 dev_name(&cxld->dev));
976 goto out_erase;
977 }
978
979 dev_dbg(&cxlr->dev,
980 "%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
981 dev_name(port->uport_dev), dev_name(&port->dev),
982 dev_name(&cxld->dev), dev_name(&cxlmd->dev),
983 dev_name(&cxled->cxld.dev), pos,
984 ep ? ep->next ? dev_name(ep->next->uport_dev) :
985 dev_name(&cxlmd->dev) :
986 "none",
987 cxl_rr->nr_eps, cxl_rr->nr_targets);
988
989 return 0;
990 out_erase:
991 if (nr_targets_inc)
992 cxl_rr->nr_targets--;
993 if (cxl_rr->nr_eps == 0)
994 free_region_ref(cxl_rr);
995 return rc;
996 }
997
998 static void cxl_port_detach_region(struct cxl_port *port,
999 struct cxl_region *cxlr,
1000 struct cxl_endpoint_decoder *cxled)
1001 {
1002 struct cxl_region_ref *cxl_rr;
1003 struct cxl_ep *ep = NULL;
1004
1005 lockdep_assert_held_write(&cxl_region_rwsem);
1006
1007 cxl_rr = cxl_rr_load(port, cxlr);
1008 if (!cxl_rr)
1009 return;
1010
1011 /*
1012 * Endpoint ports do not carry cxl_ep references, and they
1013 * never target more than one endpoint by definition
1014 */
1015 if (cxl_rr->decoder == &cxled->cxld)
1016 cxl_rr->nr_eps--;
1017 else
1018 ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
1019 if (ep) {
1020 struct cxl_ep *ep_iter;
1021 unsigned long index;
1022 int found = 0;
1023
1024 cxl_rr->nr_eps--;
1025 xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
1026 if (ep_iter->next == ep->next) {
1027 found++;
1028 break;
1029 }
1030 }
1031 if (!found)
1032 cxl_rr->nr_targets--;
1033 }
1034
1035 if (cxl_rr->nr_eps == 0)
1036 free_region_ref(cxl_rr);
1037 }
1038
1039 static int check_last_peer(struct cxl_endpoint_decoder *cxled,
1040 struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
1041 int distance)
1042 {
1043 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1044 struct cxl_region *cxlr = cxl_rr->region;
1045 struct cxl_region_params *p = &cxlr->params;
1046 struct cxl_endpoint_decoder *cxled_peer;
1047 struct cxl_port *port = cxl_rr->port;
1048 struct cxl_memdev *cxlmd_peer;
1049 struct cxl_ep *ep_peer;
1050 int pos = cxled->pos;
1051
1052 /*
1053 * If this position wants to share a dport with the last endpoint mapped
1054 * then that endpoint, at index 'position - distance', must also be
1055 * mapped by this dport.
1056 */
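/*
 * For example: a 4-way region that fans out over 2 dports at this port has
 * distance = 4 / 2 = 2, so the endpoint at position 3 must be reached
 * through the same dport as the endpoint at position 1.
 */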
1057 if (pos < distance) {
1058 dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
1059 dev_name(port->uport_dev), dev_name(&port->dev),
1060 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
1061 return -ENXIO;
1062 }
1063 cxled_peer = p->targets[pos - distance];
1064 cxlmd_peer = cxled_to_memdev(cxled_peer);
1065 ep_peer = cxl_ep_load(port, cxlmd_peer);
1066 if (ep->dport != ep_peer->dport) {
1067 dev_dbg(&cxlr->dev,
1068 "%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
1069 dev_name(port->uport_dev), dev_name(&port->dev),
1070 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
1071 dev_name(&cxlmd_peer->dev),
1072 dev_name(&cxled_peer->cxld.dev));
1073 return -ENXIO;
1074 }
1075
1076 return 0;
1077 }
1078
1079 static int cxl_port_setup_targets(struct cxl_port *port,
1080 struct cxl_region *cxlr,
1081 struct cxl_endpoint_decoder *cxled)
1082 {
1083 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
1084 int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
1085 struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
1086 struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
1087 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1088 struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
1089 struct cxl_region_params *p = &cxlr->params;
1090 struct cxl_decoder *cxld = cxl_rr->decoder;
1091 struct cxl_switch_decoder *cxlsd;
1092 u16 eig, peig;
1093 u8 eiw, peiw;
1094
1095 /*
1096 * While root level decoders support x3, x6, x12, switch level
1097 * decoders only support powers of 2 up to x16.
1098 */
1099 if (!is_power_of_2(cxl_rr->nr_targets)) {
1100 dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
1101 dev_name(port->uport_dev), dev_name(&port->dev),
1102 cxl_rr->nr_targets);
1103 return -EINVAL;
1104 }
1105
1106 cxlsd = to_cxl_switch_decoder(&cxld->dev);
1107 if (cxl_rr->nr_targets_set) {
1108 int i, distance;
1109
1110 /*
1111 * Passthrough decoders impose no distance requirements between
1112 * peers
1113 */
1114 if (cxl_rr->nr_targets == 1)
1115 distance = 0;
1116 else
1117 distance = p->nr_targets / cxl_rr->nr_targets;
1118 for (i = 0; i < cxl_rr->nr_targets_set; i++)
1119 if (ep->dport == cxlsd->target[i]) {
1120 rc = check_last_peer(cxled, ep, cxl_rr,
1121 distance);
1122 if (rc)
1123 return rc;
1124 goto out_target_set;
1125 }
1126 goto add_target;
1127 }
1128
1129 if (is_cxl_root(parent_port)) {
1130 /*
1131 * Root decoder IG is always set to value in CFMWS which
1132 * may be different than this region's IG. We can use the
1133 * region's IG here since interleave_granularity_store()
1134 * does not allow interleaved host-bridges with
1135 * root IG != region IG.
1136 */
1137 parent_ig = p->interleave_granularity;
1138 parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
1139 /*
1140 * For purposes of address bit routing, use power-of-2 math for
1141 * switch ports.
1142 */
1143 if (!is_power_of_2(parent_iw))
1144 parent_iw /= 3;
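/* e.g. a x3 parent interleave is treated as x1 here, x6 as x2, x12 as x4 */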
1145 } else {
1146 struct cxl_region_ref *parent_rr;
1147 struct cxl_decoder *parent_cxld;
1148
1149 parent_rr = cxl_rr_load(parent_port, cxlr);
1150 parent_cxld = parent_rr->decoder;
1151 parent_ig = parent_cxld->interleave_granularity;
1152 parent_iw = parent_cxld->interleave_ways;
1153 }
1154
1155 rc = granularity_to_eig(parent_ig, &peig);
1156 if (rc) {
1157 dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
1158 dev_name(parent_port->uport_dev),
1159 dev_name(&parent_port->dev), parent_ig);
1160 return rc;
1161 }
1162
1163 rc = ways_to_eiw(parent_iw, &peiw);
1164 if (rc) {
1165 dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
1166 dev_name(parent_port->uport_dev),
1167 dev_name(&parent_port->dev), parent_iw);
1168 return rc;
1169 }
1170
1171 iw = cxl_rr->nr_targets;
1172 rc = ways_to_eiw(iw, &eiw);
1173 if (rc) {
1174 dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
1175 dev_name(port->uport_dev), dev_name(&port->dev), iw);
1176 return rc;
1177 }
1178
1179 /*
1180 * Interleave granularity is a multiple of @parent_port granularity.
1181 * Multiplier is the parent port interleave ways.
1182 */
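/* e.g. parent_ig = 256 and parent_iw = 2 yield ig = 512 for this decoder */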
1183 rc = granularity_to_eig(parent_ig * parent_iw, &eig);
1184 if (rc) {
1185 dev_dbg(&cxlr->dev,
1186 "%s: invalid granularity calculation (%d * %d)\n",
1187 dev_name(&parent_port->dev), parent_ig, parent_iw);
1188 return rc;
1189 }
1190
1191 rc = eig_to_granularity(eig, &ig);
1192 if (rc) {
1193 dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
1194 dev_name(port->uport_dev), dev_name(&port->dev),
1195 256 << eig);
1196 return rc;
1197 }
1198
1199 if (iw > 8 || iw > cxlsd->nr_targets) {
1200 dev_dbg(&cxlr->dev,
1201 "%s:%s:%s: ways: %d overflows targets: %d\n",
1202 dev_name(port->uport_dev), dev_name(&port->dev),
1203 dev_name(&cxld->dev), iw, cxlsd->nr_targets);
1204 return -ENXIO;
1205 }
1206
1207 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
1208 if (cxld->interleave_ways != iw ||
1209 cxld->interleave_granularity != ig ||
1210 cxld->hpa_range.start != p->res->start ||
1211 cxld->hpa_range.end != p->res->end ||
1212 ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
1213 dev_err(&cxlr->dev,
1214 "%s:%s %s expected iw: %d ig: %d %pr\n",
1215 dev_name(port->uport_dev), dev_name(&port->dev),
1216 __func__, iw, ig, p->res);
1217 dev_err(&cxlr->dev,
1218 "%s:%s %s got iw: %d ig: %d state: %s %#llx:%#llx\n",
1219 dev_name(port->uport_dev), dev_name(&port->dev),
1220 __func__, cxld->interleave_ways,
1221 cxld->interleave_granularity,
1222 (cxld->flags & CXL_DECODER_F_ENABLE) ?
1223 "enabled" :
1224 "disabled",
1225 cxld->hpa_range.start, cxld->hpa_range.end);
1226 return -ENXIO;
1227 }
1228 } else {
1229 cxld->interleave_ways = iw;
1230 cxld->interleave_granularity = ig;
1231 cxld->hpa_range = (struct range) {
1232 .start = p->res->start,
1233 .end = p->res->end,
1234 };
1235 }
1236 dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport_dev),
1237 dev_name(&port->dev), iw, ig);
1238 add_target:
1239 if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
1240 dev_dbg(&cxlr->dev,
1241 "%s:%s: targets full trying to add %s:%s at %d\n",
1242 dev_name(port->uport_dev), dev_name(&port->dev),
1243 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
1244 return -ENXIO;
1245 }
1246 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
1247 if (cxlsd->target[cxl_rr->nr_targets_set] != ep->dport) {
1248 dev_dbg(&cxlr->dev, "%s:%s: %s expected %s at %d\n",
1249 dev_name(port->uport_dev), dev_name(&port->dev),
1250 dev_name(&cxlsd->cxld.dev),
1251 dev_name(ep->dport->dport_dev),
1252 cxl_rr->nr_targets_set);
1253 return -ENXIO;
1254 }
1255 } else
1256 cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
1257 inc = 1;
1258 out_target_set:
1259 cxl_rr->nr_targets_set += inc;
1260 dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
1261 dev_name(port->uport_dev), dev_name(&port->dev),
1262 cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport_dev),
1263 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
1264
1265 return 0;
1266 }
1267
1268 static void cxl_port_reset_targets(struct cxl_port *port,
1269 struct cxl_region *cxlr)
1270 {
1271 struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
1272 struct cxl_decoder *cxld;
1273
1274 /*
1275 * After the last endpoint has been detached the entire cxl_rr may now
1276 * be gone.
1277 */
1278 if (!cxl_rr)
1279 return;
1280 cxl_rr->nr_targets_set = 0;
1281
1282 cxld = cxl_rr->decoder;
1283 cxld->hpa_range = (struct range) {
1284 .start = 0,
1285 .end = -1,
1286 };
1287 }
1288
1289 static void cxl_region_teardown_targets(struct cxl_region *cxlr)
1290 {
1291 struct cxl_region_params *p = &cxlr->params;
1292 struct cxl_endpoint_decoder *cxled;
1293 struct cxl_dev_state *cxlds;
1294 struct cxl_memdev *cxlmd;
1295 struct cxl_port *iter;
1296 struct cxl_ep *ep;
1297 int i;
1298
1299 /*
1300 * In the auto-discovery case skip automatic teardown since the
1301 * address space is already active
1302 */
1303 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
1304 return;
1305
1306 for (i = 0; i < p->nr_targets; i++) {
1307 cxled = p->targets[i];
1308 cxlmd = cxled_to_memdev(cxled);
1309 cxlds = cxlmd->cxlds;
1310
1311 if (cxlds->rcd)
1312 continue;
1313
1314 iter = cxled_to_port(cxled);
1315 while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
1316 iter = to_cxl_port(iter->dev.parent);
1317
1318 for (ep = cxl_ep_load(iter, cxlmd); iter;
1319 iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
1320 cxl_port_reset_targets(iter, cxlr);
1321 }
1322 }
1323
1324 static int cxl_region_setup_targets(struct cxl_region *cxlr)
1325 {
1326 struct cxl_region_params *p = &cxlr->params;
1327 struct cxl_endpoint_decoder *cxled;
1328 struct cxl_dev_state *cxlds;
1329 int i, rc, rch = 0, vh = 0;
1330 struct cxl_memdev *cxlmd;
1331 struct cxl_port *iter;
1332 struct cxl_ep *ep;
1333
1334 for (i = 0; i < p->nr_targets; i++) {
1335 cxled = p->targets[i];
1336 cxlmd = cxled_to_memdev(cxled);
1337 cxlds = cxlmd->cxlds;
1338
1339 /* validate that all targets agree on topology */
1340 if (!cxlds->rcd) {
1341 vh++;
1342 } else {
1343 rch++;
1344 continue;
1345 }
1346
1347 iter = cxled_to_port(cxled);
1348 while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
1349 iter = to_cxl_port(iter->dev.parent);
1350
1351 /*
1352 * Descend the topology tree programming / validating
1353 * targets while looking for conflicts.
1354 */
1355 for (ep = cxl_ep_load(iter, cxlmd); iter;
1356 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
1357 rc = cxl_port_setup_targets(iter, cxlr, cxled);
1358 if (rc) {
1359 cxl_region_teardown_targets(cxlr);
1360 return rc;
1361 }
1362 }
1363 }
1364
1365 if (rch && vh) {
1366 dev_err(&cxlr->dev, "mismatched CXL topologies detected\n");
1367 cxl_region_teardown_targets(cxlr);
1368 return -ENXIO;
1369 }
1370
1371 return 0;
1372 }
1373
1374 static int cxl_region_validate_position(struct cxl_region *cxlr,
1375 struct cxl_endpoint_decoder *cxled,
1376 int pos)
1377 {
1378 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1379 struct cxl_region_params *p = &cxlr->params;
1380 int i;
1381
1382 if (pos < 0 || pos >= p->interleave_ways) {
1383 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
1384 p->interleave_ways);
1385 return -ENXIO;
1386 }
1387
1388 if (p->targets[pos] == cxled)
1389 return 0;
1390
1391 if (p->targets[pos]) {
1392 struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
1393 struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);
1394
1395 dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
1396 pos, dev_name(&cxlmd_target->dev),
1397 dev_name(&cxled_target->cxld.dev));
1398 return -EBUSY;
1399 }
1400
1401 for (i = 0; i < p->interleave_ways; i++) {
1402 struct cxl_endpoint_decoder *cxled_target;
1403 struct cxl_memdev *cxlmd_target;
1404
1405 cxled_target = p->targets[i];
1406 if (!cxled_target)
1407 continue;
1408
1409 cxlmd_target = cxled_to_memdev(cxled_target);
1410 if (cxlmd_target == cxlmd) {
1411 dev_dbg(&cxlr->dev,
1412 "%s already specified at position %d via: %s\n",
1413 dev_name(&cxlmd->dev), pos,
1414 dev_name(&cxled_target->cxld.dev));
1415 return -EBUSY;
1416 }
1417 }
1418
1419 return 0;
1420 }
1421
1422 static int cxl_region_attach_position(struct cxl_region *cxlr,
1423 struct cxl_root_decoder *cxlrd,
1424 struct cxl_endpoint_decoder *cxled,
1425 const struct cxl_dport *dport, int pos)
1426 {
1427 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1428 struct cxl_port *iter;
1429 int rc;
1430
1431 if (cxlrd->calc_hb(cxlrd, pos) != dport) {
1432 dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
1433 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1434 dev_name(&cxlrd->cxlsd.cxld.dev));
1435 return -ENXIO;
1436 }
1437
1438 for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
1439 iter = to_cxl_port(iter->dev.parent)) {
1440 rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
1441 if (rc)
1442 goto err;
1443 }
1444
1445 return 0;
1446
1447 err:
1448 for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
1449 iter = to_cxl_port(iter->dev.parent))
1450 cxl_port_detach_region(iter, cxlr, cxled);
1451 return rc;
1452 }
1453
1454 static int cxl_region_attach_auto(struct cxl_region *cxlr,
1455 struct cxl_endpoint_decoder *cxled, int pos)
1456 {
1457 struct cxl_region_params *p = &cxlr->params;
1458
1459 if (cxled->state != CXL_DECODER_STATE_AUTO) {
1460 dev_err(&cxlr->dev,
1461 "%s: unable to add decoder to autodetected region\n",
1462 dev_name(&cxled->cxld.dev));
1463 return -EINVAL;
1464 }
1465
1466 if (pos >= 0) {
1467 dev_dbg(&cxlr->dev, "%s: expected auto position, not %d\n",
1468 dev_name(&cxled->cxld.dev), pos);
1469 return -EINVAL;
1470 }
1471
1472 if (p->nr_targets >= p->interleave_ways) {
1473 dev_err(&cxlr->dev, "%s: no more target slots available\n",
1474 dev_name(&cxled->cxld.dev));
1475 return -ENXIO;
1476 }
1477
1478 /*
1479 * Temporarily record the endpoint decoder into the target array. Yes,
1480 * this means that userspace can view devices in the wrong position
1481 * before the region activates, and must be careful to understand when
1482 * it might be racing region autodiscovery.
1483 */
1484 pos = p->nr_targets;
1485 p->targets[pos] = cxled;
1486 cxled->pos = pos;
1487 p->nr_targets++;
1488
1489 return 0;
1490 }
1491
1492 static int cmp_interleave_pos(const void *a, const void *b)
1493 {
1494 struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
1495 struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
1496
1497 return cxled_a->pos - cxled_b->pos;
1498 }
1499
1500 static struct cxl_port *next_port(struct cxl_port *port)
1501 {
1502 if (!port->parent_dport)
1503 return NULL;
1504 return port->parent_dport->port;
1505 }
1506
1507 static int match_switch_decoder_by_range(struct device *dev, void *data)
1508 {
1509 struct cxl_switch_decoder *cxlsd;
1510 struct range *r1, *r2 = data;
1511
1512 if (!is_switch_decoder(dev))
1513 return 0;
1514
1515 cxlsd = to_cxl_switch_decoder(dev);
1516 r1 = &cxlsd->cxld.hpa_range;
1517
1518 if (is_root_decoder(dev))
1519 return range_contains(r1, r2);
1520 return (r1->start == r2->start && r1->end == r2->end);
1521 }
1522
1523 static int find_pos_and_ways(struct cxl_port *port, struct range *range,
1524 int *pos, int *ways)
1525 {
1526 struct cxl_switch_decoder *cxlsd;
1527 struct cxl_port *parent;
1528 struct device *dev;
1529 int rc = -ENXIO;
1530
1531 parent = next_port(port);
1532 if (!parent)
1533 return rc;
1534
1535 dev = device_find_child(&parent->dev, range,
1536 match_switch_decoder_by_range);
1537 if (!dev) {
1538 dev_err(port->uport_dev,
1539 "failed to find decoder mapping %#llx-%#llx\n",
1540 range->start, range->end);
1541 return rc;
1542 }
1543 cxlsd = to_cxl_switch_decoder(dev);
1544 *ways = cxlsd->cxld.interleave_ways;
1545
1546 for (int i = 0; i < *ways; i++) {
1547 if (cxlsd->target[i] == port->parent_dport) {
1548 *pos = i;
1549 rc = 0;
1550 break;
1551 }
1552 }
1553 put_device(dev);
1554
1555 return rc;
1556 }
1557
1558 /**
1559 * cxl_calc_interleave_pos() - calculate an endpoint position in a region
1560 * @cxled: endpoint decoder member of given region
1561 *
1562 * The endpoint position is calculated by traversing the topology from
1563 * the endpoint to the root decoder and iteratively applying this
1564 * calculation:
1565 *
1566 * position = position * parent_ways + parent_pos;
1567 *
1568 * ...where @position is inferred from switch and root decoder target lists.
1569 *
1570 * Return: position >= 0 on success
1571 * -ENXIO on failure
1572 */
1573 static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
1574 {
1575 struct cxl_port *iter, *port = cxled_to_port(cxled);
1576 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1577 struct range *range = &cxled->cxld.hpa_range;
1578 int parent_ways = 0, parent_pos = 0, pos = 0;
1579 int rc;
1580
1581 /*
1582 * Example: the expected interleave order of the 4-way region shown
1583 * below is: mem0, mem2, mem1, mem3
1584 *
1585 *              root_port
1586 *             /         \
1587 *   host_bridge_0       host_bridge_1
1588 *     |       |           |       |
1589 *    mem0    mem1        mem2    mem3
1590 *
1591 * In the example the calculator will iterate twice. The first iteration
1592 * uses the mem position in the host-bridge and the ways of the host-
1593 * bridge to generate the first, or local, position. The second
1594 * iteration uses the host-bridge position in the root_port and the ways
1595 * of the root_port to refine the position.
1596 *
1597 * A trace of the calculation per endpoint looks like this:
1598 * mem0: pos = 0 * 2 + 0 mem2: pos = 0 * 2 + 0
1599 * pos = 0 * 2 + 0 pos = 0 * 2 + 1
1600 * pos: 0 pos: 1
1601 *
1602 * mem1: pos = 0 * 2 + 1 mem3: pos = 0 * 2 + 1
1603 * pos = 1 * 2 + 0 pos = 1 * 2 + 1
1604 * pos: 2 pos = 3
1605 *
1606 * Note that while this example is simple, the method applies to more
1607 * complex topologies, including those with switches.
1608 */
1609
1610 /* Iterate from endpoint to root_port refining the position */
1611 for (iter = port; iter; iter = next_port(iter)) {
1612 if (is_cxl_root(iter))
1613 break;
1614
1615 rc = find_pos_and_ways(iter, range, &parent_pos, &parent_ways);
1616 if (rc)
1617 return rc;
1618
1619 pos = pos * parent_ways + parent_pos;
1620 }
1621
1622 dev_dbg(&cxlmd->dev,
1623 "decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n",
1624 dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent),
1625 dev_name(&port->dev), range->start, range->end, pos);
1626
1627 return pos;
1628 }
1629
1630 static int cxl_region_sort_targets(struct cxl_region *cxlr)
1631 {
1632 struct cxl_region_params *p = &cxlr->params;
1633 int i, rc = 0;
1634
1635 for (i = 0; i < p->nr_targets; i++) {
1636 struct cxl_endpoint_decoder *cxled = p->targets[i];
1637
1638 cxled->pos = cxl_calc_interleave_pos(cxled);
1639 /*
1640 * Record that sorting failed, but still continue to calc
1641 * cxled->pos so that follow-on code paths can reliably
1642 * do p->targets[cxled->pos] to self-reference their entry.
1643 */
1644 if (cxled->pos < 0)
1645 rc = -ENXIO;
1646 }
1647 /* Keep the cxlr target list in interleave position order */
1648 sort(p->targets, p->nr_targets, sizeof(p->targets[0]),
1649 cmp_interleave_pos, NULL);
1650
1651 dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful");
1652 return rc;
1653 }
1654
1655 static int cxl_region_attach(struct cxl_region *cxlr,
1656 struct cxl_endpoint_decoder *cxled, int pos)
1657 {
1658 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
1659 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1660 struct cxl_region_params *p = &cxlr->params;
1661 struct cxl_port *ep_port, *root_port;
1662 struct cxl_dport *dport;
1663 int rc = -ENXIO;
1664
1665 if (cxled->mode != cxlr->mode) {
1666 dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
1667 dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
1668 return -EINVAL;
1669 }
1670
1671 if (cxled->mode == CXL_DECODER_DEAD) {
1672 dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
1673 return -ENODEV;
1674 }
1675
1676 /* all full of members, or interleave config not established? */
1677 if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
1678 dev_dbg(&cxlr->dev, "region already active\n");
1679 return -EBUSY;
1680 } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
1681 dev_dbg(&cxlr->dev, "interleave config missing\n");
1682 return -ENXIO;
1683 }
1684
1685 if (p->nr_targets >= p->interleave_ways) {
1686 dev_dbg(&cxlr->dev, "region already has %d endpoints\n",
1687 p->nr_targets);
1688 return -EINVAL;
1689 }
1690
1691 ep_port = cxled_to_port(cxled);
1692 root_port = cxlrd_to_port(cxlrd);
1693 dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
1694 if (!dport) {
1695 dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n",
1696 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1697 dev_name(cxlr->dev.parent));
1698 return -ENXIO;
1699 }
1700
1701 if (cxled->cxld.target_type != cxlr->type) {
1702 dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
1703 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1704 cxled->cxld.target_type, cxlr->type);
1705 return -ENXIO;
1706 }
1707
1708 if (!cxled->dpa_res) {
1709 dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n",
1710 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev));
1711 return -ENXIO;
1712 }
1713
1714 if (resource_size(cxled->dpa_res) * p->interleave_ways !=
1715 resource_size(p->res)) {
1716 dev_dbg(&cxlr->dev,
1717 "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
1718 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1719 (u64)resource_size(cxled->dpa_res), p->interleave_ways,
1720 (u64)resource_size(p->res));
1721 return -EINVAL;
1722 }
1723
1724 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
1725 int i;
1726
1727 rc = cxl_region_attach_auto(cxlr, cxled, pos);
1728 if (rc)
1729 return rc;
1730
1731 /* await more targets to arrive... */
1732 if (p->nr_targets < p->interleave_ways)
1733 return 0;
1734
1735 /*
1736 * All targets are here, which implies all PCI enumeration that
1737 * affects this region has been completed. Walk the topology to
1738 * sort the devices into their relative region decode position.
1739 */
1740 rc = cxl_region_sort_targets(cxlr);
1741 if (rc)
1742 return rc;
1743
1744 for (i = 0; i < p->nr_targets; i++) {
1745 cxled = p->targets[i];
1746 ep_port = cxled_to_port(cxled);
1747 dport = cxl_find_dport_by_dev(root_port,
1748 ep_port->host_bridge);
1749 rc = cxl_region_attach_position(cxlr, cxlrd, cxled,
1750 dport, i);
1751 if (rc)
1752 return rc;
1753 }
1754
1755 rc = cxl_region_setup_targets(cxlr);
1756 if (rc)
1757 return rc;
1758
1759 /*
1760 * If target setup succeeds in the autodiscovery case
1761 * then the region is already committed.
1762 */
1763 p->state = CXL_CONFIG_COMMIT;
1764
1765 return 0;
1766 }
1767
1768 rc = cxl_region_validate_position(cxlr, cxled, pos);
1769 if (rc)
1770 return rc;
1771
1772 rc = cxl_region_attach_position(cxlr, cxlrd, cxled, dport, pos);
1773 if (rc)
1774 return rc;
1775
1776 p->targets[pos] = cxled;
1777 cxled->pos = pos;
1778 p->nr_targets++;
1779
1780 if (p->nr_targets == p->interleave_ways) {
1781 rc = cxl_region_setup_targets(cxlr);
1782 if (rc)
1783 return rc;
1784 p->state = CXL_CONFIG_ACTIVE;
1785 }
1786
1787 cxled->cxld.interleave_ways = p->interleave_ways;
1788 cxled->cxld.interleave_granularity = p->interleave_granularity;
1789 cxled->cxld.hpa_range = (struct range) {
1790 .start = p->res->start,
1791 .end = p->res->end,
1792 };
1793
1794 if (p->nr_targets != p->interleave_ways)
1795 return 0;
1796
1797 /*
1798 * Test the auto-discovery position calculator function
1799 * against this successfully created user-defined region.
1800 * A fail message here means that this interleave config
1801 * will fail when presented as CXL_REGION_F_AUTO.
1802 */
1803 for (int i = 0; i < p->nr_targets; i++) {
1804 struct cxl_endpoint_decoder *cxled = p->targets[i];
1805 int test_pos;
1806
1807 test_pos = cxl_calc_interleave_pos(cxled);
1808 dev_dbg(&cxled->cxld.dev,
1809 "Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n",
1810 (test_pos == cxled->pos) ? "success" : "fail",
1811 test_pos, cxled->pos);
1812 }
1813
1814 return 0;
1815 }
1816
1817 static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
1818 {
1819 struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
1820 struct cxl_region *cxlr = cxled->cxld.region;
1821 struct cxl_region_params *p;
1822 int rc = 0;
1823
1824 lockdep_assert_held_write(&cxl_region_rwsem);
1825
1826 if (!cxlr)
1827 return 0;
1828
1829 p = &cxlr->params;
1830 get_device(&cxlr->dev);
1831
1832 if (p->state > CXL_CONFIG_ACTIVE) {
1833 /*
1834 * TODO: tear down all impacted regions if a device is
1835 * removed out of order
1836 */
1837 rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
1838 if (rc)
1839 goto out;
1840 p->state = CXL_CONFIG_ACTIVE;
1841 }
1842
1843 for (iter = ep_port; !is_cxl_root(iter);
1844 iter = to_cxl_port(iter->dev.parent))
1845 cxl_port_detach_region(iter, cxlr, cxled);
1846
1847 if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
1848 p->targets[cxled->pos] != cxled) {
1849 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1850
1851 dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
1852 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1853 cxled->pos);
1854 goto out;
1855 }
1856
1857 if (p->state == CXL_CONFIG_ACTIVE) {
1858 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
1859 cxl_region_teardown_targets(cxlr);
1860 }
1861 p->targets[cxled->pos] = NULL;
1862 p->nr_targets--;
1863 cxled->cxld.hpa_range = (struct range) {
1864 .start = 0,
1865 .end = -1,
1866 };
1867
1868 /* notify the region driver that one of its targets has departed */
1869 up_write(&cxl_region_rwsem);
1870 device_release_driver(&cxlr->dev);
1871 down_write(&cxl_region_rwsem);
1872 out:
1873 put_device(&cxlr->dev);
1874 return rc;
1875 }
1876
1877 void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
1878 {
1879 down_write(&cxl_region_rwsem);
1880 cxled->mode = CXL_DECODER_DEAD;
1881 cxl_region_detach(cxled);
1882 up_write(&cxl_region_rwsem);
1883 }
1884
1885 static int attach_target(struct cxl_region *cxlr,
1886 struct cxl_endpoint_decoder *cxled, int pos,
1887 unsigned int state)
1888 {
1889 int rc = 0;
1890
1891 if (state == TASK_INTERRUPTIBLE)
1892 rc = down_write_killable(&cxl_region_rwsem);
1893 else
1894 down_write(&cxl_region_rwsem);
1895 if (rc)
1896 return rc;
1897
1898 down_read(&cxl_dpa_rwsem);
1899 rc = cxl_region_attach(cxlr, cxled, pos);
1900 up_read(&cxl_dpa_rwsem);
1901 up_write(&cxl_region_rwsem);
1902 return rc;
1903 }
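/*
 * Locking note for the attach path above: cxl_region_rwsem is taken for
 * write before cxl_dpa_rwsem is taken for read, so the endpoint decoder's
 * DPA allocation stays stable for the duration of cxl_region_attach().
 */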
1904
1905 static int detach_target(struct cxl_region *cxlr, int pos)
1906 {
1907 struct cxl_region_params *p = &cxlr->params;
1908 int rc;
1909
1910 rc = down_write_killable(&cxl_region_rwsem);
1911 if (rc)
1912 return rc;
1913
1914 if (pos >= p->interleave_ways) {
1915 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
1916 p->interleave_ways);
1917 rc = -ENXIO;
1918 goto out;
1919 }
1920
1921 if (!p->targets[pos]) {
1922 rc = 0;
1923 goto out;
1924 }
1925
1926 rc = cxl_region_detach(p->targets[pos]);
1927 out:
1928 up_write(&cxl_region_rwsem);
1929 return rc;
1930 }
1931
1932 static ssize_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
1933 size_t len)
1934 {
1935 int rc;
1936
1937 if (sysfs_streq(buf, "\n"))
1938 rc = detach_target(cxlr, pos);
1939 else {
1940 struct device *dev;
1941
1942 dev = bus_find_device_by_name(&cxl_bus_type, NULL, buf);
1943 if (!dev)
1944 return -ENODEV;
1945
1946 if (!is_endpoint_decoder(dev)) {
1947 rc = -EINVAL;
1948 goto out;
1949 }
1950
1951 rc = attach_target(cxlr, to_cxl_endpoint_decoder(dev), pos,
1952 TASK_INTERRUPTIBLE);
1953 out:
1954 put_device(dev);
1955 }
1956
1957 if (rc < 0)
1958 return rc;
1959 return len;
1960 }
1961
1962 #define TARGET_ATTR_RW(n) \
1963 static ssize_t target##n##_show( \
1964 struct device *dev, struct device_attribute *attr, char *buf) \
1965 { \
1966 return show_targetN(to_cxl_region(dev), buf, (n)); \
1967 } \
1968 static ssize_t target##n##_store(struct device *dev, \
1969 struct device_attribute *attr, \
1970 const char *buf, size_t len) \
1971 { \
1972 return store_targetN(to_cxl_region(dev), buf, (n), len); \
1973 } \
1974 static DEVICE_ATTR_RW(target##n)
1975
1976 TARGET_ATTR_RW(0);
1977 TARGET_ATTR_RW(1);
1978 TARGET_ATTR_RW(2);
1979 TARGET_ATTR_RW(3);
1980 TARGET_ATTR_RW(4);
1981 TARGET_ATTR_RW(5);
1982 TARGET_ATTR_RW(6);
1983 TARGET_ATTR_RW(7);
1984 TARGET_ATTR_RW(8);
1985 TARGET_ATTR_RW(9);
1986 TARGET_ATTR_RW(10);
1987 TARGET_ATTR_RW(11);
1988 TARGET_ATTR_RW(12);
1989 TARGET_ATTR_RW(13);
1990 TARGET_ATTR_RW(14);
1991 TARGET_ATTR_RW(15);
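/*
 * The TARGET_ATTR_RW() instantiations above provide the per-position sysfs
 * ABI for target programming. A hedged usage sketch (region and decoder
 * names are illustrative and platform dependent):
 *
 *   # echo decoder3.0 > /sys/bus/cxl/devices/region0/target0    (attach)
 *   # echo "" > /sys/bus/cxl/devices/region0/target0            (detach)
 *
 * The empty write reaches detach_target() via the sysfs_streq(buf, "\n")
 * check in store_targetN().
 */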
1992
1993 static struct attribute *target_attrs[] = {
1994 &dev_attr_target0.attr,
1995 &dev_attr_target1.attr,
1996 &dev_attr_target2.attr,
1997 &dev_attr_target3.attr,
1998 &dev_attr_target4.attr,
1999 &dev_attr_target5.attr,
2000 &dev_attr_target6.attr,
2001 &dev_attr_target7.attr,
2002 &dev_attr_target8.attr,
2003 &dev_attr_target9.attr,
2004 &dev_attr_target10.attr,
2005 &dev_attr_target11.attr,
2006 &dev_attr_target12.attr,
2007 &dev_attr_target13.attr,
2008 &dev_attr_target14.attr,
2009 &dev_attr_target15.attr,
2010 NULL,
2011 };
2012
2013 static umode_t cxl_region_target_visible(struct kobject *kobj,
2014 struct attribute *a, int n)
2015 {
2016 struct device *dev = kobj_to_dev(kobj);
2017 struct cxl_region *cxlr = to_cxl_region(dev);
2018 struct cxl_region_params *p = &cxlr->params;
2019
2020 if (n < p->interleave_ways)
2021 return a->mode;
2022 return 0;
2023 }
2024
2025 static const struct attribute_group cxl_region_target_group = {
2026 .attrs = target_attrs,
2027 .is_visible = cxl_region_target_visible,
2028 };
2029
2030 static const struct attribute_group *get_cxl_region_target_group(void)
2031 {
2032 return &cxl_region_target_group;
2033 }
2034
2035 static const struct attribute_group *region_groups[] = {
2036 &cxl_base_attribute_group,
2037 &cxl_region_group,
2038 &cxl_region_target_group,
2039 NULL,
2040 };
2041
2042 static void cxl_region_release(struct device *dev)
2043 {
2044 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
2045 struct cxl_region *cxlr = to_cxl_region(dev);
2046 int id = atomic_read(&cxlrd->region_id);
2047
2048 /*
2049 * Try to reuse the recently idled id rather than the cached
2050 * next id to prevent the region id space from increasing
2051 * unnecessarily.
2052 */
2053 if (cxlr->id < id)
2054 if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) {
2055 memregion_free(id);
2056 goto out;
2057 }
2058
2059 memregion_free(cxlr->id);
2060 out:
2061 put_device(dev->parent);
2062 kfree(cxlr);
2063 }
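/*
 * Worked example of the id-reuse logic in cxl_region_release(): if region1
 * is released while cxlrd->region_id caches 2 as the next id, the cmpxchg
 * rolls region_id back to 1 and id 2 is handed to memregion_free(), so the
 * next create_*_region allocation reuses the "region1" name.
 */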
2064
2065 const struct device_type cxl_region_type = {
2066 .name = "cxl_region",
2067 .release = cxl_region_release,
2068 .groups = region_groups
2069 };
2070
2071 bool is_cxl_region(struct device *dev)
2072 {
2073 return dev->type == &cxl_region_type;
2074 }
2075 EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);
2076
2077 static struct cxl_region *to_cxl_region(struct device *dev)
2078 {
2079 if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type,
2080 "not a cxl_region device\n"))
2081 return NULL;
2082
2083 return container_of(dev, struct cxl_region, dev);
2084 }
2085
2086 static void unregister_region(void *dev)
2087 {
2088 struct cxl_region *cxlr = to_cxl_region(dev);
2089 struct cxl_region_params *p = &cxlr->params;
2090 int i;
2091
2092 device_del(dev);
2093
2094 /*
2095 * Now that region sysfs is shut down, the parameter block is
2096 * read-only, so there is no need to hold the region rwsem to
2097 * access the region parameters.
2098 */
2099 for (i = 0; i < p->interleave_ways; i++)
2100 detach_target(cxlr, i);
2101
2102 cxl_region_iomem_release(cxlr);
2103 put_device(dev);
2104 }
2105
2106 static struct lock_class_key cxl_region_key;
2107
2108 static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id)
2109 {
2110 struct cxl_region *cxlr;
2111 struct device *dev;
2112
2113 cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL);
2114 if (!cxlr) {
2115 memregion_free(id);
2116 return ERR_PTR(-ENOMEM);
2117 }
2118
2119 dev = &cxlr->dev;
2120 device_initialize(dev);
2121 lockdep_set_class(&dev->mutex, &cxl_region_key);
2122 dev->parent = &cxlrd->cxlsd.cxld.dev;
2123 /*
2124 * Keep root decoder pinned through cxl_region_release to fixup
2125 * region id allocations
2126 */
2127 get_device(dev->parent);
2128 device_set_pm_not_required(dev);
2129 dev->bus = &cxl_bus_type;
2130 dev->type = &cxl_region_type;
2131 cxlr->id = id;
2132
2133 return cxlr;
2134 }
2135
2136 /**
2137 * devm_cxl_add_region - Adds a region to a decoder
2138 * @cxlrd: root decoder
2139 * @id: memregion id to create; released via memregion_free() on failure
2140 * @mode: mode for the endpoint decoders of this region
2141 * @type: select whether this is an expander or accelerator (type-2 or type-3)
2142 *
2143 * This is the second step of region initialization. Regions exist within an
2144 * address space which is mapped by a @cxlrd.
2145 *
2146 * Return: the new &struct cxl_region on success, else an ERR_PTR() encoded error
2147 * code. The region will be named "regionZ" where Z is the unique region number.
2148 */
2149 static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
2150 int id,
2151 enum cxl_decoder_mode mode,
2152 enum cxl_decoder_type type)
2153 {
2154 struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
2155 struct cxl_region *cxlr;
2156 struct device *dev;
2157 int rc;
2158
2159 switch (mode) {
2160 case CXL_DECODER_RAM:
2161 case CXL_DECODER_PMEM:
2162 break;
2163 default:
2164 dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
2165 return ERR_PTR(-EINVAL);
2166 }
2167
2168 cxlr = cxl_region_alloc(cxlrd, id);
2169 if (IS_ERR(cxlr))
2170 return cxlr;
2171 cxlr->mode = mode;
2172 cxlr->type = type;
2173
2174 dev = &cxlr->dev;
2175 rc = dev_set_name(dev, "region%d", id);
2176 if (rc)
2177 goto err;
2178
2179 rc = device_add(dev);
2180 if (rc)
2181 goto err;
2182
2183 rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr);
2184 if (rc)
2185 return ERR_PTR(rc);
2186
2187 dev_dbg(port->uport_dev, "%s: created %s\n",
2188 dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));
2189 return cxlr;
2190
2191 err:
2192 put_device(dev);
2193 return ERR_PTR(rc);
2194 }
2195
2196 static ssize_t __create_region_show(struct cxl_root_decoder *cxlrd, char *buf)
2197 {
2198 return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
2199 }
2200
2201 static ssize_t create_pmem_region_show(struct device *dev,
2202 struct device_attribute *attr, char *buf)
2203 {
2204 return __create_region_show(to_cxl_root_decoder(dev), buf);
2205 }
2206
2207 static ssize_t create_ram_region_show(struct device *dev,
2208 struct device_attribute *attr, char *buf)
2209 {
2210 return __create_region_show(to_cxl_root_decoder(dev), buf);
2211 }
2212
2213 static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
2214 enum cxl_decoder_mode mode, int id)
2215 {
2216 int rc;
2217
2218 rc = memregion_alloc(GFP_KERNEL);
2219 if (rc < 0)
2220 return ERR_PTR(rc);
2221
2222 if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
2223 memregion_free(rc);
2224 return ERR_PTR(-EBUSY);
2225 }
2226
2227 return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
2228 }
2229
2230 static ssize_t create_pmem_region_store(struct device *dev,
2231 struct device_attribute *attr,
2232 const char *buf, size_t len)
2233 {
2234 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
2235 struct cxl_region *cxlr;
2236 int rc, id;
2237
2238 rc = sscanf(buf, "region%d\n", &id);
2239 if (rc != 1)
2240 return -EINVAL;
2241
2242 cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id);
2243 if (IS_ERR(cxlr))
2244 return PTR_ERR(cxlr);
2245
2246 return len;
2247 }
2248 DEVICE_ATTR_RW(create_pmem_region);
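/*
 * Hedged usage sketch for create_pmem_region (the root decoder name is an
 * example; enumeration is platform specific):
 *
 *   # cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 *   region0
 *   # echo region0 > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 *
 * The read reports the cached next region id; writing that exact name back
 * goes through the atomic_cmpxchg() in __create_region(), so concurrent
 * writers of a stale name get -EBUSY.
 */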
2249
2250 static ssize_t create_ram_region_store(struct device *dev,
2251 struct device_attribute *attr,
2252 const char *buf, size_t len)
2253 {
2254 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
2255 struct cxl_region *cxlr;
2256 int rc, id;
2257
2258 rc = sscanf(buf, "region%d\n", &id);
2259 if (rc != 1)
2260 return -EINVAL;
2261
2262 cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id);
2263 if (IS_ERR(cxlr))
2264 return PTR_ERR(cxlr);
2265
2266 return len;
2267 }
2268 DEVICE_ATTR_RW(create_ram_region);
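/*
 * create_ram_region follows the same protocol as create_pmem_region above;
 * only the mode passed to __create_region() differs.
 */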
2269
2270 static ssize_t region_show(struct device *dev, struct device_attribute *attr,
2271 char *buf)
2272 {
2273 struct cxl_decoder *cxld = to_cxl_decoder(dev);
2274 ssize_t rc;
2275
2276 rc = down_read_interruptible(&cxl_region_rwsem);
2277 if (rc)
2278 return rc;
2279
2280 if (cxld->region)
2281 rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
2282 else
2283 rc = sysfs_emit(buf, "\n");
2284 up_read(&cxl_region_rwsem);
2285
2286 return rc;
2287 }
2288 DEVICE_ATTR_RO(region);
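/*
 * The read-only "region" attribute above appears on decoder devices. Reading
 * it (e.g. /sys/bus/cxl/devices/decoder3.0/region, name illustrative) prints
 * the region the decoder is a member of, or a blank line when unattached.
 */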
2289
2290 static struct cxl_region *
2291 cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
2292 {
2293 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
2294 struct device *region_dev;
2295
2296 region_dev = device_find_child_by_name(&cxld->dev, name);
2297 if (!region_dev)
2298 return ERR_PTR(-ENODEV);
2299
2300 return to_cxl_region(region_dev);
2301 }
2302
2303 static ssize_t delete_region_store(struct device *dev,
2304 struct device_attribute *attr,
2305 const char *buf, size_t len)
2306 {
2307 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
2308 struct cxl_port *port = to_cxl_port(dev->parent);
2309 struct cxl_region *cxlr;
2310
2311 cxlr = cxl_find_region_by_name(cxlrd, buf);
2312 if (IS_ERR(cxlr))
2313 return PTR_ERR(cxlr);
2314
2315 devm_release_action(port->uport_dev, unregister_region, cxlr);
2316 put_device(&cxlr->dev);
2317
2318 return len;
2319 }
2320 DEVICE_ATTR_WO(delete_region);
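/*
 * Hedged usage sketch for delete_region (names illustrative):
 *
 *   # echo region0 > /sys/bus/cxl/devices/decoder0.0/delete_region
 *
 * The write resolves the region by name under the root decoder and releases
 * it through the same unregister_region() devm action used at port teardown.
 */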
2321
2322 static void cxl_pmem_region_release(struct device *dev)
2323 {
2324 struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
2325 int i;
2326
2327 for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
2328 struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;
2329
2330 put_device(&cxlmd->dev);
2331 }
2332
2333 kfree(cxlr_pmem);
2334 }
2335
2336 static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
2337 &cxl_base_attribute_group,
2338 NULL,
2339 };
2340
2341 const struct device_type cxl_pmem_region_type = {
2342 .name = "cxl_pmem_region",
2343 .release = cxl_pmem_region_release,
2344 .groups = cxl_pmem_region_attribute_groups,
2345 };
2346
2347 bool is_cxl_pmem_region(struct device *dev)
2348 {
2349 return dev->type == &cxl_pmem_region_type;
2350 }
2351 EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);
2352
2353 struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
2354 {
2355 if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
2356 "not a cxl_pmem_region device\n"))
2357 return NULL;
2358 return container_of(dev, struct cxl_pmem_region, dev);
2359 }
2360 EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);
2361
2362 struct cxl_poison_context {
2363 struct cxl_port *port;
2364 enum cxl_decoder_mode mode;
2365 u64 offset;
2366 };
2367
2368 static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd,
2369 struct cxl_poison_context *ctx)
2370 {
2371 struct cxl_dev_state *cxlds = cxlmd->cxlds;
2372 u64 offset, length;
2373 int rc = 0;
2374
2375 /*
2376 * Collect poison for the remaining unmapped resources
2377 * after poison is collected by committed endpoints.
2378 *
2379 * Knowing that PMEM must always follow RAM, get poison
2380 * for unmapped resources based on the last decoder's mode:
2381 * ram: scan remains of ram range, then any pmem range
2382 * pmem: scan remains of pmem range
2383 */
2384
2385 if (ctx->mode == CXL_DECODER_RAM) {
2386 offset = ctx->offset;
2387 length = resource_size(&cxlds->ram_res) - offset;
2388 rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
2389 if (rc == -EFAULT)
2390 rc = 0;
2391 if (rc)
2392 return rc;
2393 }
2394 if (ctx->mode == CXL_DECODER_PMEM) {
2395 offset = ctx->offset;
2396 length = resource_size(&cxlds->dpa_res) - offset;
2397 if (!length)
2398 return 0;
2399 } else if (resource_size(&cxlds->pmem_res)) {
2400 offset = cxlds->pmem_res.start;
2401 length = resource_size(&cxlds->pmem_res);
2402 } else {
2403 return 0;
2404 }
2405
2406 return cxl_mem_get_poison(cxlmd, offset, length, NULL);
2407 }
2408
2409 static int poison_by_decoder(struct device *dev, void *arg)
2410 {
2411 struct cxl_poison_context *ctx = arg;
2412 struct cxl_endpoint_decoder *cxled;
2413 struct cxl_memdev *cxlmd;
2414 u64 offset, length;
2415 int rc = 0;
2416
2417 if (!is_endpoint_decoder(dev))
2418 return rc;
2419
2420 cxled = to_cxl_endpoint_decoder(dev);
2421 if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
2422 return rc;
2423
2424 /*
2425 * Regions are only created with single mode decoders: pmem or ram.
2426 * Linux does not support mixed mode decoders. This means that
2427 * reading poison per endpoint decoder adheres to the requirement
2428 * that poison reads of pmem and ram must be separated.
2429 * CXL 3.0 Spec 8.2.9.8.4.1
2430 */
2431 if (cxled->mode == CXL_DECODER_MIXED) {
2432 dev_dbg(dev, "poison list read unsupported in mixed mode\n");
2433 return rc;
2434 }
2435
2436 cxlmd = cxled_to_memdev(cxled);
2437 if (cxled->skip) {
2438 offset = cxled->dpa_res->start - cxled->skip;
2439 length = cxled->skip;
2440 rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
2441 if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
2442 rc = 0;
2443 if (rc)
2444 return rc;
2445 }
2446
2447 offset = cxled->dpa_res->start;
2448 length = cxled->dpa_res->end - offset + 1;
2449 rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region);
2450 if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
2451 rc = 0;
2452 if (rc)
2453 return rc;
2454
2455 /* Iterate until commit_end is reached */
2456 if (cxled->cxld.id == ctx->port->commit_end) {
2457 ctx->offset = cxled->dpa_res->end + 1;
2458 ctx->mode = cxled->mode;
2459 return 1;
2460 }
2461
2462 return 0;
2463 }
2464
2465 int cxl_get_poison_by_endpoint(struct cxl_port *port)
2466 {
2467 struct cxl_poison_context ctx;
2468 int rc = 0;
2469
2470 ctx = (struct cxl_poison_context) {
2471 .port = port
2472 };
2473
2474 rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder);
2475 if (rc == 1)
2476 rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev),
2477 &ctx);
2478
2479 return rc;
2480 }
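/*
 * Walk summary: device_for_each_child() visits the endpoint port's children;
 * poison_by_decoder() reads poison per committed endpoint decoder and returns
 * 1 once the decoder matching port->commit_end is reached, after which
 * cxl_get_poison_unmapped() scans any remaining unmapped DPA.
 */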
2481
2482 static struct lock_class_key cxl_pmem_region_key;
2483
2484 static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
2485 {
2486 struct cxl_region_params *p = &cxlr->params;
2487 struct cxl_nvdimm_bridge *cxl_nvb;
2488 struct cxl_pmem_region *cxlr_pmem;
2489 struct device *dev;
2490 int i;
2491
2492 down_read(&cxl_region_rwsem);
2493 if (p->state != CXL_CONFIG_COMMIT) {
2494 cxlr_pmem = ERR_PTR(-ENXIO);
2495 goto out;
2496 }
2497
2498 cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
2499 GFP_KERNEL);
2500 if (!cxlr_pmem) {
2501 cxlr_pmem = ERR_PTR(-ENOMEM);
2502 goto out;
2503 }
2504
2505 cxlr_pmem->hpa_range.start = p->res->start;
2506 cxlr_pmem->hpa_range.end = p->res->end;
2507
2508 /* Snapshot the region configuration underneath the cxl_region_rwsem */
2509 cxlr_pmem->nr_mappings = p->nr_targets;
2510 for (i = 0; i < p->nr_targets; i++) {
2511 struct cxl_endpoint_decoder *cxled = p->targets[i];
2512 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
2513 struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
2514
2515 /*
2516 * Regions never span CXL root devices, so by definition the
2517 * bridge for one device is the same for all.
2518 */
2519 if (i == 0) {
2520 cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
2521 if (!cxl_nvb) {
2522 cxlr_pmem = ERR_PTR(-ENODEV);
2523 goto out;
2524 }
2525 cxlr->cxl_nvb = cxl_nvb;
2526 }
2527 m->cxlmd = cxlmd;
2528 get_device(&cxlmd->dev);
2529 m->start = cxled->dpa_res->start;
2530 m->size = resource_size(cxled->dpa_res);
2531 m->position = i;
2532 }
2533
2534 dev = &cxlr_pmem->dev;
2535 cxlr_pmem->cxlr = cxlr;
2536 cxlr->cxlr_pmem = cxlr_pmem;
2537 device_initialize(dev);
2538 lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
2539 device_set_pm_not_required(dev);
2540 dev->parent = &cxlr->dev;
2541 dev->bus = &cxl_bus_type;
2542 dev->type = &cxl_pmem_region_type;
2543 out:
2544 up_read(&cxl_region_rwsem);
2545
2546 return cxlr_pmem;
2547 }
2548
2549 static void cxl_dax_region_release(struct device *dev)
2550 {
2551 struct cxl_dax_region *cxlr_dax = to_cxl_dax_region(dev);
2552
2553 kfree(cxlr_dax);
2554 }
2555
2556 static const struct attribute_group *cxl_dax_region_attribute_groups[] = {
2557 &cxl_base_attribute_group,
2558 NULL,
2559 };
2560
2561 const struct device_type cxl_dax_region_type = {
2562 .name = "cxl_dax_region",
2563 .release = cxl_dax_region_release,
2564 .groups = cxl_dax_region_attribute_groups,
2565 };
2566
2567 static bool is_cxl_dax_region(struct device *dev)
2568 {
2569 return dev->type == &cxl_dax_region_type;
2570 }
2571
2572 struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
2573 {
2574 if (dev_WARN_ONCE(dev, !is_cxl_dax_region(dev),
2575 "not a cxl_dax_region device\n"))
2576 return NULL;
2577 return container_of(dev, struct cxl_dax_region, dev);
2578 }
2579 EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, CXL);
2580
2581 static struct lock_class_key cxl_dax_region_key;
2582
2583 static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
2584 {
2585 struct cxl_region_params *p = &cxlr->params;
2586 struct cxl_dax_region *cxlr_dax;
2587 struct device *dev;
2588
2589 down_read(&cxl_region_rwsem);
2590 if (p->state != CXL_CONFIG_COMMIT) {
2591 cxlr_dax = ERR_PTR(-ENXIO);
2592 goto out;
2593 }
2594
2595 cxlr_dax = kzalloc(sizeof(*cxlr_dax), GFP_KERNEL);
2596 if (!cxlr_dax) {
2597 cxlr_dax = ERR_PTR(-ENOMEM);
2598 goto out;
2599 }
2600
2601 cxlr_dax->hpa_range.start = p->res->start;
2602 cxlr_dax->hpa_range.end = p->res->end;
2603
2604 dev = &cxlr_dax->dev;
2605 cxlr_dax->cxlr = cxlr;
2606 device_initialize(dev);
2607 lockdep_set_class(&dev->mutex, &cxl_dax_region_key);
2608 device_set_pm_not_required(dev);
2609 dev->parent = &cxlr->dev;
2610 dev->bus = &cxl_bus_type;
2611 dev->type = &cxl_dax_region_type;
2612 out:
2613 up_read(&cxl_region_rwsem);
2614
2615 return cxlr_dax;
2616 }
2617
2618 static void cxlr_pmem_unregister(void *_cxlr_pmem)
2619 {
2620 struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem;
2621 struct cxl_region *cxlr = cxlr_pmem->cxlr;
2622 struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
2623
2624 /*
2625 * Either the bridge is in ->remove() context under the device_lock(),
2626 * or cxlr_release_nvdimm() is cancelling the bridge's release action
2627 * for @cxlr_pmem and doing it itself (while manually holding the bridge
2628 * lock).
2629 */
2630 device_lock_assert(&cxl_nvb->dev);
2631 cxlr->cxlr_pmem = NULL;
2632 cxlr_pmem->cxlr = NULL;
2633 device_unregister(&cxlr_pmem->dev);
2634 }
2635
2636 static void cxlr_release_nvdimm(void *_cxlr)
2637 {
2638 struct cxl_region *cxlr = _cxlr;
2639 struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
2640
2641 device_lock(&cxl_nvb->dev);
2642 if (cxlr->cxlr_pmem)
2643 devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
2644 cxlr->cxlr_pmem);
2645 device_unlock(&cxl_nvb->dev);
2646 cxlr->cxl_nvb = NULL;
2647 put_device(&cxl_nvb->dev);
2648 }
2649
2650 /**
2651 * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
2652 * @cxlr: parent CXL region for this pmem region bridge device
2653 *
2654 * Return: 0 on success, negative error code on failure.
2655 */
2656 static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
2657 {
2658 struct cxl_pmem_region *cxlr_pmem;
2659 struct cxl_nvdimm_bridge *cxl_nvb;
2660 struct device *dev;
2661 int rc;
2662
2663 cxlr_pmem = cxl_pmem_region_alloc(cxlr);
2664 if (IS_ERR(cxlr_pmem))
2665 return PTR_ERR(cxlr_pmem);
2666 cxl_nvb = cxlr->cxl_nvb;
2667
2668 dev = &cxlr_pmem->dev;
2669 rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
2670 if (rc)
2671 goto err;
2672
2673 rc = device_add(dev);
2674 if (rc)
2675 goto err;
2676
2677 dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
2678 dev_name(dev));
2679
2680 device_lock(&cxl_nvb->dev);
2681 if (cxl_nvb->dev.driver)
2682 rc = devm_add_action_or_reset(&cxl_nvb->dev,
2683 cxlr_pmem_unregister, cxlr_pmem);
2684 else
2685 rc = -ENXIO;
2686 device_unlock(&cxl_nvb->dev);
2687
2688 if (rc)
2689 goto err_bridge;
2690
2691 /* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */
2692 return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr);
2693
2694 err:
2695 put_device(dev);
2696 err_bridge:
2697 put_device(&cxl_nvb->dev);
2698 cxlr->cxl_nvb = NULL;
2699 return rc;
2700 }
2701
2702 static void cxlr_dax_unregister(void *_cxlr_dax)
2703 {
2704 struct cxl_dax_region *cxlr_dax = _cxlr_dax;
2705
2706 device_unregister(&cxlr_dax->dev);
2707 }
2708
2709 static int devm_cxl_add_dax_region(struct cxl_region *cxlr)
2710 {
2711 struct cxl_dax_region *cxlr_dax;
2712 struct device *dev;
2713 int rc;
2714
2715 cxlr_dax = cxl_dax_region_alloc(cxlr);
2716 if (IS_ERR(cxlr_dax))
2717 return PTR_ERR(cxlr_dax);
2718
2719 dev = &cxlr_dax->dev;
2720 rc = dev_set_name(dev, "dax_region%d", cxlr->id);
2721 if (rc)
2722 goto err;
2723
2724 rc = device_add(dev);
2725 if (rc)
2726 goto err;
2727
2728 dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
2729 dev_name(dev));
2730
2731 return devm_add_action_or_reset(&cxlr->dev, cxlr_dax_unregister,
2732 cxlr_dax);
2733 err:
2734 put_device(dev);
2735 return rc;
2736 }
2737
2738 static int match_root_decoder_by_range(struct device *dev, void *data)
2739 {
2740 struct range *r1, *r2 = data;
2741 struct cxl_root_decoder *cxlrd;
2742
2743 if (!is_root_decoder(dev))
2744 return 0;
2745
2746 cxlrd = to_cxl_root_decoder(dev);
2747 r1 = &cxlrd->cxlsd.cxld.hpa_range;
2748 return range_contains(r1, r2);
2749 }
2750
2751 static int match_region_by_range(struct device *dev, void *data)
2752 {
2753 struct cxl_region_params *p;
2754 struct cxl_region *cxlr;
2755 struct range *r = data;
2756 int rc = 0;
2757
2758 if (!is_cxl_region(dev))
2759 return 0;
2760
2761 cxlr = to_cxl_region(dev);
2762 p = &cxlr->params;
2763
2764 down_read(&cxl_region_rwsem);
2765 if (p->res && p->res->start == r->start && p->res->end == r->end)
2766 rc = 1;
2767 up_read(&cxl_region_rwsem);
2768
2769 return rc;
2770 }
2771
2772 /* Establish an empty region covering the given HPA range */
2773 static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
2774 struct cxl_endpoint_decoder *cxled)
2775 {
2776 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
2777 struct cxl_port *port = cxlrd_to_port(cxlrd);
2778 struct range *hpa = &cxled->cxld.hpa_range;
2779 struct cxl_region_params *p;
2780 struct cxl_region *cxlr;
2781 struct resource *res;
2782 int rc;
2783
2784 do {
2785 cxlr = __create_region(cxlrd, cxled->mode,
2786 atomic_read(&cxlrd->region_id));
2787 } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);
2788
2789 if (IS_ERR(cxlr)) {
2790 dev_err(cxlmd->dev.parent,
2791 "%s:%s: %s failed assign region: %ld\n",
2792 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
2793 __func__, PTR_ERR(cxlr));
2794 return cxlr;
2795 }
2796
2797 down_write(&cxl_region_rwsem);
2798 p = &cxlr->params;
2799 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
2800 dev_err(cxlmd->dev.parent,
2801 "%s:%s: %s autodiscovery interrupted\n",
2802 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
2803 __func__);
2804 rc = -EBUSY;
2805 goto err;
2806 }
2807
2808 set_bit(CXL_REGION_F_AUTO, &cxlr->flags);
2809
2810 res = kmalloc(sizeof(*res), GFP_KERNEL);
2811 if (!res) {
2812 rc = -ENOMEM;
2813 goto err;
2814 }
2815
2816 *res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa),
2817 dev_name(&cxlr->dev));
2818 rc = insert_resource(cxlrd->res, res);
2819 if (rc) {
2820 /*
2821 * Platform-firmware may not have split resources like "System
2822 * RAM" on CXL window boundaries; see cxl_region_iomem_release()
2823 */
2824 dev_warn(cxlmd->dev.parent,
2825 "%s:%s: %s %s cannot insert resource\n",
2826 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
2827 __func__, dev_name(&cxlr->dev));
2828 }
2829
2830 p->res = res;
2831 p->interleave_ways = cxled->cxld.interleave_ways;
2832 p->interleave_granularity = cxled->cxld.interleave_granularity;
2833 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
2834
2835 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
2836 if (rc)
2837 goto err;
2838
2839 dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n",
2840 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__,
2841 dev_name(&cxlr->dev), p->res, p->interleave_ways,
2842 p->interleave_granularity);
2843
2844 /* ...to match put_device() in cxl_add_to_region() */
2845 get_device(&cxlr->dev);
2846 up_write(&cxl_region_rwsem);
2847
2848 return cxlr;
2849
2850 err:
2851 up_write(&cxl_region_rwsem);
2852 devm_release_action(port->uport_dev, unregister_region, cxlr);
2853 return ERR_PTR(rc);
2854 }
2855
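/*
 * Autodiscovery flow: for decoders that platform firmware committed before
 * the driver loaded, cxl_add_to_region() finds the root decoder whose window
 * contains the decoder's HPA range, reuses or constructs (construct_region())
 * the matching region, and attaches the decoder with attach_target(..., -1,
 * TASK_UNINTERRUPTIBLE). Once the region reaches CXL_CONFIG_COMMIT,
 * device_attach() binds the cxl_region driver.
 */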
2856 int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
2857 {
2858 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
2859 struct range *hpa = &cxled->cxld.hpa_range;
2860 struct cxl_decoder *cxld = &cxled->cxld;
2861 struct device *cxlrd_dev, *region_dev;
2862 struct cxl_root_decoder *cxlrd;
2863 struct cxl_region_params *p;
2864 struct cxl_region *cxlr;
2865 bool attach = false;
2866 int rc;
2867
2868 cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
2869 match_root_decoder_by_range);
2870 if (!cxlrd_dev) {
2871 dev_err(cxlmd->dev.parent,
2872 "%s:%s no CXL window for range %#llx:%#llx\n",
2873 dev_name(&cxlmd->dev), dev_name(&cxld->dev),
2874 cxld->hpa_range.start, cxld->hpa_range.end);
2875 return -ENXIO;
2876 }
2877
2878 cxlrd = to_cxl_root_decoder(cxlrd_dev);
2879
2880 /*
2881 * Ensure that if multiple threads race to construct_region() for @hpa
2882 * one does the construction and the others add to that.
2883 */
2884 mutex_lock(&cxlrd->range_lock);
2885 region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
2886 match_region_by_range);
2887 if (!region_dev) {
2888 cxlr = construct_region(cxlrd, cxled);
2889 region_dev = &cxlr->dev;
2890 } else
2891 cxlr = to_cxl_region(region_dev);
2892 mutex_unlock(&cxlrd->range_lock);
2893
2894 rc = PTR_ERR_OR_ZERO(cxlr);
2895 if (rc)
2896 goto out;
2897
2898 attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);
2899
2900 down_read(&cxl_region_rwsem);
2901 p = &cxlr->params;
2902 attach = p->state == CXL_CONFIG_COMMIT;
2903 up_read(&cxl_region_rwsem);
2904
2905 if (attach) {
2906 /*
2907 * If device_attach() fails, the range may still be active via
2908 * the platform-firmware memory map; otherwise the driver for
2909 * regions is local to this file, so driver matching can't fail.
2910 */
2911 if (device_attach(&cxlr->dev) < 0)
2912 dev_err(&cxlr->dev, "failed to enable, range: %pr\n",
2913 p->res);
2914 }
2915
2916 put_device(region_dev);
2917 out:
2918 put_device(cxlrd_dev);
2919 return rc;
2920 }
2921 EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL);
2922
2923 static int is_system_ram(struct resource *res, void *arg)
2924 {
2925 struct cxl_region *cxlr = arg;
2926 struct cxl_region_params *p = &cxlr->params;
2927
2928 dev_dbg(&cxlr->dev, "%pr has System RAM: %pr\n", p->res, res);
2929 return 1;
2930 }
2931
2932 static int cxl_region_probe(struct device *dev)
2933 {
2934 struct cxl_region *cxlr = to_cxl_region(dev);
2935 struct cxl_region_params *p = &cxlr->params;
2936 int rc;
2937
2938 rc = down_read_interruptible(&cxl_region_rwsem);
2939 if (rc) {
2940 dev_dbg(&cxlr->dev, "probe interrupted\n");
2941 return rc;
2942 }
2943
2944 if (p->state < CXL_CONFIG_COMMIT) {
2945 dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
2946 rc = -ENXIO;
2947 goto out;
2948 }
2949
2950 if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
2951 dev_err(&cxlr->dev,
2952 "failed to activate, re-commit region and retry\n");
2953 rc = -ENXIO;
2954 goto out;
2955 }
2956
2957 /*
2958 * From this point on any path that changes the region's state away from
2959 * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
2960 */
2961 out:
2962 up_read(&cxl_region_rwsem);
2963
2964 if (rc)
2965 return rc;
2966
2967 switch (cxlr->mode) {
2968 case CXL_DECODER_PMEM:
2969 return devm_cxl_add_pmem_region(cxlr);
2970 case CXL_DECODER_RAM:
2971 /*
2972 * The region cannot be managed by CXL if any portion of
2973 * it is already online as 'System RAM'
2974 */
2975 if (walk_iomem_res_desc(IORES_DESC_NONE,
2976 IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
2977 p->res->start, p->res->end, cxlr,
2978 is_system_ram) > 0)
2979 return 0;
2980 return devm_cxl_add_dax_region(cxlr);
2981 default:
2982 dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
2983 cxlr->mode);
2984 return -ENXIO;
2985 }
2986 }
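/*
 * Probe hand-off (descriptive): a committed pmem region gains a child
 * "pmem_regionN" bridge device, and a committed ram region gains a
 * "dax_regionN" device, both handed off to the pmem and dax support code
 * outside this file; a ram region already online as 'System RAM' is
 * intentionally left alone.
 */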
2987
2988 static struct cxl_driver cxl_region_driver = {
2989 .name = "cxl_region",
2990 .probe = cxl_region_probe,
2991 .id = CXL_DEVICE_REGION,
2992 };
2993
2994 int cxl_region_init(void)
2995 {
2996 return cxl_driver_register(&cxl_region_driver);
2997 }
2998
2999 void cxl_region_exit(void)
3000 {
3001 cxl_driver_unregister(&cxl_region_driver);
3002 }
3003
3004 MODULE_IMPORT_NS(CXL);
3005 MODULE_IMPORT_NS(DEVMEM);
3006 MODULE_ALIAS_CXL(CXL_DEVICE_REGION);
3007