// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */

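/*
 * Serializes device physical address (DPA) allocation and release across
 * all endpoint decoders of a memory device, and guards readers of the
 * resulting ->dpa_res / ->skip decoder state.
 */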
DECLARE_RWSEM(cxl_dpa_rwsem);

static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                           int *target_map)
{
        int rc;

        rc = cxl_decoder_add_locked(cxld, target_map);
        if (rc) {
                put_device(&cxld->dev);
                dev_err(&port->dev, "Failed to add decoder\n");
                return rc;
        }

        rc = cxl_decoder_autoremove(&port->dev, cxld);
        if (rc)
                return rc;

        dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

        return 0;
}

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single-ported host bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Disable the range until the
 * first CXL region is enumerated / activated.
 */
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
        struct cxl_switch_decoder *cxlsd;
        struct cxl_dport *dport = NULL;
        int single_port_map[1];
        unsigned long index;

        cxlsd = cxl_switch_decoder_alloc(port, 1);
        if (IS_ERR(cxlsd))
                return PTR_ERR(cxlsd);

        device_lock_assert(&port->dev);

        xa_for_each(&port->dports, index, dport)
                break;
        single_port_map[0] = dport->port_id;

        return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);

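/*
 * Cache the decoder count, target count, and supported interleave address
 * bits advertised by this port's HDM Decoder Capability register.
 */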
static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
        u32 hdm_cap;

        hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
        cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
        cxlhdm->target_count =
                FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
        if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
                cxlhdm->interleave_mask |= GENMASK(11, 8);
        if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
                cxlhdm->interleave_mask |= GENMASK(14, 12);
}

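/*
 * Probe the component register block and map only the HDM decoder
 * capability. A return of -ENODEV specifically means that no HDM decoder
 * capability was found.
 */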
static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
                                struct cxl_component_regs *regs)
{
        struct cxl_register_map map = {
                .host = &port->dev,
                .resource = port->component_reg_phys,
                .base = crb,
                .max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
        };

        cxl_probe_component_regs(&port->dev, crb, &map.component_map);
        if (!map.component_map.hdm_decoder.valid) {
                dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
                /* unique error code to indicate no HDM decoder capability */
                return -ENODEV;
        }

        return cxl_map_component_regs(&map, regs, BIT(CXL_CM_CAP_CAP_ID_HDM));
}

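/*
 * Decide whether this endpoint's decoders must be emulated from the cached
 * DVSEC range registers rather than driven through the HDM decoder
 * capability registers.
 */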
static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
{
        struct cxl_hdm *cxlhdm;
        void __iomem *hdm;
        u32 ctrl;
        int i;

        if (!info)
                return false;

        cxlhdm = dev_get_drvdata(&info->port->dev);
        hdm = cxlhdm->regs.hdm_decoder;

        if (!hdm)
                return true;

        /*
         * If HDM decoders are present and the driver is in control of
         * Mem_Enable, skip DVSEC based emulation.
         */
        if (!info->mem_enabled)
                return false;

        /*
         * If any decoders are committed already, there should not be any
         * emulated DVSEC decoders.
         */
        for (i = 0; i < cxlhdm->decoder_count; i++) {
                ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
                dev_dbg(&info->port->dev,
                        "decoder%d.%d: committed: %ld base: %#x_%.8x size: %#x_%.8x\n",
                        info->port->id, i,
                        FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl),
                        readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i)),
                        readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(i)),
                        readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i)),
                        readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i)));
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
                        return false;
        }

        return true;
}

/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 * @info: cached DVSEC range register info
 */
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
                                   struct cxl_endpoint_dvsec_info *info)
{
        struct device *dev = &port->dev;
        struct cxl_hdm *cxlhdm;
        void __iomem *crb;
        int rc;

        cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
        if (!cxlhdm)
                return ERR_PTR(-ENOMEM);
        cxlhdm->port = port;
        dev_set_drvdata(dev, cxlhdm);

        crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
        if (!crb && info && info->mem_enabled) {
                cxlhdm->decoder_count = info->ranges;
                return cxlhdm;
        } else if (!crb) {
                dev_err(dev, "No component registers mapped\n");
                return ERR_PTR(-ENXIO);
        }

        rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
        iounmap(crb);
        if (rc)
                return ERR_PTR(rc);

        parse_hdm_decoder_caps(cxlhdm);
        if (cxlhdm->decoder_count == 0) {
                dev_err(dev, "Spec violation. Caps invalid\n");
                return ERR_PTR(-ENXIO);
        }

        /*
         * Now that the hdm capability is parsed, decide if range
         * register emulation is needed and fixup cxlhdm accordingly.
         */
        if (should_emulate_decoders(info)) {
                dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
                        info->ranges > 1 ? "s" : "");
                cxlhdm->decoder_count = info->ranges;
        }

        return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);

static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
        unsigned long long start = r->start, end = r->end;

        seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
                   r->name);
}

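/*
 * Walk the two-level DPA resource tree (partitions and the endpoint decoder
 * allocations within them) and emit it through the given seq_file for
 * debugfs consumption.
 */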
void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
        struct resource *p1, *p2;

        down_read(&cxl_dpa_rwsem);
        for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
                __cxl_dpa_debug(file, p1, 0);
                for (p2 = p1->child; p2; p2 = p2->sibling)
                        __cxl_dpa_debug(file, p2, 1);
        }
        up_read(&cxl_dpa_rwsem);
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);

/*
 * Must be called in a context that synchronizes against this decoder's
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct resource *res = cxled->dpa_res;
        resource_size_t skip_start;

        lockdep_assert_held_write(&cxl_dpa_rwsem);

        /* save @skip_start, before @res is released */
        skip_start = res->start - cxled->skip;
        __release_region(&cxlds->dpa_res, res->start, resource_size(res));
        if (cxled->skip)
                __release_region(&cxlds->dpa_res, skip_start, cxled->skip);
        cxled->skip = 0;
        cxled->dpa_res = NULL;
        put_device(&cxled->cxld.dev);
        port->hdm_end--;
}

static void cxl_dpa_release(void *cxled)
{
        down_write(&cxl_dpa_rwsem);
        __cxl_dpa_release(cxled);
        up_write(&cxl_dpa_rwsem);
}

/*
 * Must be called from context that will not race port device
 * unregistration, like decoder sysfs attribute methods
 */
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
        struct cxl_port *port = cxled_to_port(cxled);

        lockdep_assert_held_write(&cxl_dpa_rwsem);
        devm_remove_action(&port->dev, cxl_dpa_release, cxled);
        __cxl_dpa_release(cxled);
}

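/*
 * Reserve @len bytes of DPA at @base for @cxled, along with any @skipped
 * hole that precedes it, in the device's DPA resource tree, and derive the
 * decoder mode from the partition (ram/pmem) that the reservation lands in.
 */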
static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
                             resource_size_t base, resource_size_t len,
                             resource_size_t skipped)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &port->dev;
        struct resource *res;

        lockdep_assert_held_write(&cxl_dpa_rwsem);

        if (!len) {
                dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
                         port->id, cxled->cxld.id);
                return -EINVAL;
        }

        if (cxled->dpa_res) {
                dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
                        port->id, cxled->cxld.id, cxled->dpa_res);
                return -EBUSY;
        }

        if (port->hdm_end + 1 != cxled->cxld.id) {
                /*
                 * Assumes alloc and commit order is always in hardware
                 * instance order per the expectations of 8.2.5.12.20
                 * Committing Decoder Programming, which requires decoder[m]
                 * to be committed before decoder[m+1] commit starts.
                 */
                dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
                        cxled->cxld.id, port->id, port->hdm_end + 1);
                return -EBUSY;
        }

        if (skipped) {
                res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
                                       dev_name(&cxled->cxld.dev), 0);
                if (!res) {
                        dev_dbg(dev,
                                "decoder%d.%d: failed to reserve skipped space\n",
                                port->id, cxled->cxld.id);
                        return -EBUSY;
                }
        }
        res = __request_region(&cxlds->dpa_res, base, len,
                               dev_name(&cxled->cxld.dev), 0);
        if (!res) {
                dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
                        port->id, cxled->cxld.id);
                if (skipped)
                        __release_region(&cxlds->dpa_res, base - skipped,
                                         skipped);
                return -EBUSY;
        }
        cxled->dpa_res = res;
        cxled->skip = skipped;

        if (resource_contains(&cxlds->pmem_res, res))
                cxled->mode = CXL_DECODER_PMEM;
        else if (resource_contains(&cxlds->ram_res, res))
                cxled->mode = CXL_DECODER_RAM;
        else {
                dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
                        cxled->cxld.id, cxled->dpa_res);
                cxled->mode = CXL_DECODER_MIXED;
        }

        port->hdm_end++;
        get_device(&cxled->cxld.dev);
        return 0;
}

int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
                         resource_size_t base, resource_size_t len,
                         resource_size_t skipped)
{
        struct cxl_port *port = cxled_to_port(cxled);
        int rc;

        down_write(&cxl_dpa_rwsem);
        rc = __cxl_dpa_reserve(cxled, base, len, skipped);
        up_write(&cxl_dpa_rwsem);

        if (rc)
                return rc;

        return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL);

resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
        resource_size_t size = 0;

        down_read(&cxl_dpa_rwsem);
        if (cxled->dpa_res)
                size = resource_size(cxled->dpa_res);
        up_read(&cxl_dpa_rwsem);

        return size;
}

resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
        resource_size_t base = -1;

        lockdep_assert_held(&cxl_dpa_rwsem);
        if (cxled->dpa_res)
                base = cxled->dpa_res->start;

        return base;
}

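/*
 * sysfs entry point for releasing a decoder's DPA allocation. Allocations
 * must be freed in the reverse of the order they were made, and only while
 * the decoder is neither enabled nor attached to a region.
 */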
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
        struct cxl_port *port = cxled_to_port(cxled);
        struct device *dev = &cxled->cxld.dev;
        int rc;

        down_write(&cxl_dpa_rwsem);
        if (!cxled->dpa_res) {
                rc = 0;
                goto out;
        }
        if (cxled->cxld.region) {
                dev_dbg(dev, "decoder assigned to: %s\n",
                        dev_name(&cxled->cxld.region->dev));
                rc = -EBUSY;
                goto out;
        }
        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
                dev_dbg(dev, "decoder enabled\n");
                rc = -EBUSY;
                goto out;
        }
        if (cxled->cxld.id != port->hdm_end) {
                dev_dbg(dev, "expected decoder%d.%d\n", port->id,
                        port->hdm_end);
                rc = -EBUSY;
                goto out;
        }
        devm_cxl_dpa_release(cxled);
        rc = 0;
out:
        up_write(&cxl_dpa_rwsem);
        return rc;
}

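/*
 * Record the partition (ram or pmem) that a subsequent DPA allocation for
 * @cxled will target, provided the decoder is not already enabled and the
 * device actually has capacity in that partition.
 */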
int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
                     enum cxl_decoder_mode mode)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &cxled->cxld.dev;
        int rc;

        switch (mode) {
        case CXL_DECODER_RAM:
        case CXL_DECODER_PMEM:
                break;
        default:
                dev_dbg(dev, "unsupported mode: %d\n", mode);
                return -EINVAL;
        }

        down_write(&cxl_dpa_rwsem);
        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
                rc = -EBUSY;
                goto out;
        }

        /*
         * Only allow modes that are supported by the current partition
         * configuration
         */
        if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
                dev_dbg(dev, "no available pmem capacity\n");
                rc = -ENXIO;
                goto out;
        }
        if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
                dev_dbg(dev, "no available ram capacity\n");
                rc = -ENXIO;
                goto out;
        }

        cxled->mode = mode;
        rc = 0;
out:
        up_write(&cxl_dpa_rwsem);

        return rc;
}

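/*
 * Allocate @size bytes of DPA for @cxled from the free space at the end of
 * the partition selected by ->mode. A ram allocation needs no skip; a pmem
 * allocation must mark any free volatile capacity below it as skipped so
 * the reservation still starts above all prior allocations.
 */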
int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        resource_size_t free_ram_start, free_pmem_start;
        struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &cxled->cxld.dev;
        resource_size_t start, avail, skip;
        struct resource *p, *last;
        int rc;

        down_write(&cxl_dpa_rwsem);
        if (cxled->cxld.region) {
                dev_dbg(dev, "decoder attached to %s\n",
                        dev_name(&cxled->cxld.region->dev));
                rc = -EBUSY;
                goto out;
        }

        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
                dev_dbg(dev, "decoder enabled\n");
                rc = -EBUSY;
                goto out;
        }

        for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
                last = p;
        if (last)
                free_ram_start = last->end + 1;
        else
                free_ram_start = cxlds->ram_res.start;

        for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
                last = p;
        if (last)
                free_pmem_start = last->end + 1;
        else
                free_pmem_start = cxlds->pmem_res.start;

        if (cxled->mode == CXL_DECODER_RAM) {
                start = free_ram_start;
                avail = cxlds->ram_res.end - start + 1;
                skip = 0;
        } else if (cxled->mode == CXL_DECODER_PMEM) {
                resource_size_t skip_start, skip_end;

                start = free_pmem_start;
                avail = cxlds->pmem_res.end - start + 1;
                skip_start = free_ram_start;

                /*
                 * If some pmem is already allocated, then that allocation
                 * already handled the skip.
                 */
                if (cxlds->pmem_res.child &&
                    skip_start == cxlds->pmem_res.child->start)
                        skip_end = skip_start - 1;
                else
                        skip_end = start - 1;
                skip = skip_end - skip_start + 1;
        } else {
                dev_dbg(dev, "mode not set\n");
                rc = -EINVAL;
                goto out;
        }

        if (size > avail) {
                dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
                        cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
                        &avail);
                rc = -ENOSPC;
                goto out;
        }

        rc = __cxl_dpa_reserve(cxled, start, size, skip);
out:
        up_write(&cxl_dpa_rwsem);

        if (rc)
                return rc;

        return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}

static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
        u16 eig;
        u8 eiw;

        /*
         * Input validation ensures these warns never fire, but otherwise
         * suppress uninitialized variable usage warnings.
         */
        if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
                      "invalid interleave_ways: %d\n", cxld->interleave_ways))
                return;
        if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
                      "invalid interleave_granularity: %d\n",
                      cxld->interleave_granularity))
                return;

        u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
        u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
        *ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}

static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
        u32p_replace_bits(ctrl,
                          !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM),
                          CXL_HDM_DECODER0_CTRL_HOSTONLY);
}

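/*
 * Pack the switch decoder's dport ids into the 64-bit target list register
 * image, 8 bits per interleave way, way 0 in the least significant byte.
 */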
static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
        struct cxl_dport **t = &cxlsd->target[0];
        int ways = cxlsd->cxld.interleave_ways;

        *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
        if (ways > 1)
                *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
        if (ways > 2)
                *tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
        if (ways > 3)
                *tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
        if (ways > 4)
                *tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
        if (ways > 5)
                *tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
        if (ways > 6)
                *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
        if (ways > 7)
                *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
}

/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * committed or error within 10ms, but just be generous with 20ms to account
 * for clock skew and other marginal behavior
 */
#define COMMIT_TIMEOUT_MS 20
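/*
 * Poll the decoder's control register until hardware reports Committed or
 * Error (clearing the commit request in the error case), giving up after
 * COMMIT_TIMEOUT_MS milliseconds.
 */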
static int cxld_await_commit(void __iomem *hdm, int id)
{
        u32 ctrl;
        int i;

        for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
                ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
                        ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
                        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
                        return -EIO;
                }
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
                        return 0;
                fsleep(1000);
        }

        return -ETIMEDOUT;
}

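/*
 * Program and commit a decoder: write base/size (plus the target list for
 * switch decoders or the DPA skip for endpoint decoders), set the commit
 * bit, and wait for hardware to acknowledge. Commits must occur in
 * decoder-instance order, and are refused while a sanitize operation is
 * in flight on the backing memdev.
 */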
static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        int id = cxld->id, rc;
        u64 base, size;
        u32 ctrl;

        if (cxld->flags & CXL_DECODER_F_ENABLE)
                return 0;

        if (cxl_num_decoders_committed(port) != id) {
                dev_dbg(&port->dev,
                        "%s: out of order commit, expected decoder%d.%d\n",
                        dev_name(&cxld->dev), port->id,
                        cxl_num_decoders_committed(port));
                return -EBUSY;
        }

        /*
         * For endpoint decoders hosted on CXL memory devices that
         * support the sanitize operation, make sure sanitize is not in-flight.
         */
        if (is_endpoint_decoder(&cxld->dev)) {
                struct cxl_endpoint_decoder *cxled =
                        to_cxl_endpoint_decoder(&cxld->dev);
                struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
                struct cxl_memdev_state *mds =
                        to_cxl_memdev_state(cxlmd->cxlds);

                if (mds && mds->security.sanitize_active) {
                        dev_dbg(&cxlmd->dev,
                                "attempted to commit %s during sanitize\n",
                                dev_name(&cxld->dev));
                        return -EBUSY;
                }
        }

        down_read(&cxl_dpa_rwsem);
        /* common decoder settings */
        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
        cxld_set_interleave(cxld, &ctrl);
        cxld_set_type(cxld, &ctrl);
        base = cxld->hpa_range.start;
        size = range_len(&cxld->hpa_range);

        writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
        writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
        writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
        writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

        if (is_switch_decoder(&cxld->dev)) {
                struct cxl_switch_decoder *cxlsd =
                        to_cxl_switch_decoder(&cxld->dev);
                void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
                void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
                u64 targets;

                cxlsd_set_targets(cxlsd, &targets);
                writel(upper_32_bits(targets), tl_hi);
                writel(lower_32_bits(targets), tl_lo);
        } else {
                struct cxl_endpoint_decoder *cxled =
                        to_cxl_endpoint_decoder(&cxld->dev);
                void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
                void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

                writel(upper_32_bits(cxled->skip), sk_hi);
                writel(lower_32_bits(cxled->skip), sk_lo);
        }

        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
        up_read(&cxl_dpa_rwsem);

        port->commit_end++;
        rc = cxld_await_commit(hdm, cxld->id);
        if (rc) {
                dev_dbg(&port->dev, "%s: error %d committing decoder\n",
                        dev_name(&cxld->dev), rc);
                cxld->reset(cxld);
                return rc;
        }
        cxld->flags |= CXL_DECODER_F_ENABLE;

        return 0;
}

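/*
 * Undo a commit: clear the commit bit, zero out the base and size
 * registers, and return the decoder to userspace (manual) control.
 * Resets must occur in the reverse of commit order.
 */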
static int cxl_decoder_reset(struct cxl_decoder *cxld)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        int id = cxld->id;
        u32 ctrl;

        if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
                return 0;

        if (port->commit_end != id) {
                dev_dbg(&port->dev,
                        "%s: out of order reset, expected decoder%d.%d\n",
                        dev_name(&cxld->dev), port->id, port->commit_end);
                return -EBUSY;
        }

        down_read(&cxl_dpa_rwsem);
        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
        ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

        writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
        up_read(&cxl_dpa_rwsem);

        port->commit_end--;
        cxld->flags &= ~CXL_DECODER_F_ENABLE;

        /* Userspace is now responsible for reconfiguring this decoder */
        if (is_endpoint_decoder(&cxld->dev)) {
                struct cxl_endpoint_decoder *cxled;

                cxled = to_cxl_endpoint_decoder(&cxld->dev);
                cxled->state = CXL_DECODER_STATE_MANUAL;
        }

        return 0;
}

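/*
 * Stand up an emulated endpoint decoder from the cached DVSEC range
 * registers: adopt the DVSEC HPA range, mark the decoder enabled and
 * locked, and reserve the corresponding DPA span.
 */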
static int cxl_setup_hdm_decoder_from_dvsec(
        struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
        int which, struct cxl_endpoint_dvsec_info *info)
{
        struct cxl_endpoint_decoder *cxled;
        u64 len;
        int rc;

        if (!is_cxl_endpoint(port))
                return -EOPNOTSUPP;

        cxled = to_cxl_endpoint_decoder(&cxld->dev);
        len = range_len(&info->dvsec_range[which]);
        if (!len)
                return -ENOENT;

        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
        cxld->commit = NULL;
        cxld->reset = NULL;
        cxld->hpa_range = info->dvsec_range[which];

        /*
         * Set the emulated decoder as locked pending additional support to
         * change the range registers at run time.
         */
        cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
        port->commit_end = cxld->id;

        rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
        if (rc) {
                dev_err(&port->dev,
                        "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
                        port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
                return rc;
        }
        *dpa_base += len;
        cxled->state = CXL_DECODER_STATE_AUTO;

        return 0;
}

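/*
 * Read back a decoder's live hardware state (base, size, control, targets
 * or skip) and populate @cxld / @target_map accordingly, falling back to
 * DVSEC emulation when should_emulate_decoders() says so. Committed
 * endpoint decoders also get their DPA span reserved here.
 */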
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                            int *target_map, void __iomem *hdm, int which,
                            u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
        struct cxl_endpoint_decoder *cxled = NULL;
        u64 size, base, skip, dpa_size, lo, hi;
        bool committed;
        u32 remainder;
        int i, rc;
        u32 ctrl;
        union {
                u64 value;
                unsigned char target_id[8];
        } target_list;

        if (should_emulate_decoders(info))
                return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
                                                        which, info);

        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
        lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
        hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which));
        base = (hi << 32) + lo;
        lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
        hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which));
        size = (hi << 32) + lo;
        committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
        cxld->commit = cxl_decoder_commit;
        cxld->reset = cxl_decoder_reset;

        if (!committed)
                size = 0;
        if (base == U64_MAX || size == U64_MAX) {
                dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
                         port->id, cxld->id);
                return -ENXIO;
        }

        if (info)
                cxled = to_cxl_endpoint_decoder(&cxld->dev);
        cxld->hpa_range = (struct range) {
                .start = base,
                .end = base + size - 1,
        };

        /* decoders are enabled if committed */
        if (committed) {
                cxld->flags |= CXL_DECODER_F_ENABLE;
                if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
                        cxld->flags |= CXL_DECODER_F_LOCK;
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl))
                        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
                else
                        cxld->target_type = CXL_DECODER_DEVMEM;

                guard(rwsem_write)(&cxl_region_rwsem);
                if (cxld->id != cxl_num_decoders_committed(port)) {
                        dev_warn(&port->dev,
                                 "decoder%d.%d: Committed out of order\n",
                                 port->id, cxld->id);
                        return -ENXIO;
                }

                if (size == 0) {
                        dev_warn(&port->dev,
                                 "decoder%d.%d: Committed with zero size\n",
                                 port->id, cxld->id);
                        return -ENXIO;
                }
                port->commit_end = cxld->id;
        } else {
                if (cxled) {
                        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
                        struct cxl_dev_state *cxlds = cxlmd->cxlds;

                        /*
                         * Default by devtype until a device arrives that needs
                         * more precision.
                         */
                        if (cxlds->type == CXL_DEVTYPE_CLASSMEM)
                                cxld->target_type = CXL_DECODER_HOSTONLYMEM;
                        else
                                cxld->target_type = CXL_DECODER_DEVMEM;
                } else {
                        /* To be overridden by region type at commit time */
                        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
                }

                if (!FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl) &&
                    cxld->target_type == CXL_DECODER_HOSTONLYMEM) {
                        ctrl |= CXL_HDM_DECODER0_CTRL_HOSTONLY;
                        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
                }
        }
        rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
                         &cxld->interleave_ways);
        if (rc) {
                dev_warn(&port->dev,
                         "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
                         port->id, cxld->id, ctrl);
                return rc;
        }
        rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
                                &cxld->interleave_granularity);
        if (rc)
                return rc;

        dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
                port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
                cxld->interleave_ways, cxld->interleave_granularity);

        if (!cxled) {
                lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
                hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
                target_list.value = (hi << 32) + lo;
                for (i = 0; i < cxld->interleave_ways; i++)
                        target_map[i] = target_list.target_id[i];

                return 0;
        }

        if (!committed)
                return 0;

        dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
        if (remainder) {
                dev_err(&port->dev,
                        "decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
                        port->id, cxld->id, size, cxld->interleave_ways);
                return -ENXIO;
        }
        lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
        hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
        skip = (hi << 32) + lo;
        rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
        if (rc) {
                dev_err(&port->dev,
                        "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
                        port->id, cxld->id, *dpa_base,
                        *dpa_base + dpa_size + skip - 1, rc);
                return rc;
        }
        *dpa_base += dpa_size + skip;

        cxled->state = CXL_DECODER_STATE_AUTO;

        return 0;
}

static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
{
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        int committed, i;
        u32 ctrl;

        if (!hdm)
                return;

        /*
         * Since the register resource was recently claimed via request_region(),
         * be careful about trusting the "not-committed" status until the commit
         * timeout has elapsed. The commit timeout is 10ms (CXL 2.0
         * 8.2.5.12.20), but double it to be tolerant of any clock skew between
         * host and target.
         */
        for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
                ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
                if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
                        committed++;
        }

        /* ensure that future checks of committed can be trusted */
        if (committed != cxlhdm->decoder_count)
                msleep(20);
}

/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: Structure to populate with HDM capabilities
 * @info: cached DVSEC range register info
 */
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
                                struct cxl_endpoint_dvsec_info *info)
{
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        struct cxl_port *port = cxlhdm->port;
        int i;
        u64 dpa_base = 0;

        cxl_settle_decoders(cxlhdm);

        for (i = 0; i < cxlhdm->decoder_count; i++) {
                int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
                int rc, target_count = cxlhdm->target_count;
                struct cxl_decoder *cxld;

                if (is_cxl_endpoint(port)) {
                        struct cxl_endpoint_decoder *cxled;

                        cxled = cxl_endpoint_decoder_alloc(port);
                        if (IS_ERR(cxled)) {
                                dev_warn(&port->dev,
                                         "Failed to allocate decoder%d.%d\n",
                                         port->id, i);
                                return PTR_ERR(cxled);
                        }
                        cxld = &cxled->cxld;
                } else {
                        struct cxl_switch_decoder *cxlsd;

                        cxlsd = cxl_switch_decoder_alloc(port, target_count);
                        if (IS_ERR(cxlsd)) {
                                dev_warn(&port->dev,
                                         "Failed to allocate decoder%d.%d\n",
                                         port->id, i);
                                return PTR_ERR(cxlsd);
                        }
                        cxld = &cxlsd->cxld;
                }

                rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
                                      &dpa_base, info);
                if (rc) {
                        dev_warn(&port->dev,
                                 "Failed to initialize decoder%d.%d\n",
                                 port->id, i);
                        put_device(&cxld->dev);
                        return rc;
                }
                rc = add_hdm_decoder(port, cxld, target_map);
                if (rc) {
                        dev_warn(&port->dev,
                                 "Failed to add decoder%d.%d\n", port->id, i);
                        return rc;
                }
        }

        return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);
