// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. */

#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <cxlmem.h>
#include "trace.h"
#include "core.h"

static DECLARE_RWSEM(cxl_memdev_rwsem);

/*
 * An entire PCI topology full of devices should be enough for any
 * config
 */
#define CXL_MEM_MAX_DEVS 65536

static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);

static void cxl_memdev_release(struct device *dev)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

	ida_free(&cxl_memdev_ida, cxlmd->id);
	kfree(cxlmd);
}

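/*
 * Place memdev character device nodes under a "cxl/" subdirectory, so
 * the node for mem0 surfaces as /dev/cxl/mem0 via devtmpfs.
 */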
static char *cxl_memdev_devnode(const struct device *dev, umode_t *mode, kuid_t *uid,
				kgid_t *gid)
{
	return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);

	if (!mds)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%.16s\n", mds->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t payload_max_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);

	if (!mds)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%zu\n", mds->payload_size);
}
static DEVICE_ATTR_RO(payload_max);

static ssize_t label_storage_size_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);

	if (!mds)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%zu\n", mds->lsa_size);
}
static DEVICE_ATTR_RO(label_storage_size);

static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	unsigned long long len = resource_size(&cxlds->ram_res);

	return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_ram_size =
	__ATTR(size, 0444, ram_size_show, NULL);

static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	unsigned long long len = resource_size(&cxlds->pmem_res);

	return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_pmem_size =
	__ATTR(size, 0444, pmem_size_show, NULL);

static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	return sysfs_emit(buf, "%#llx\n", cxlds->serial);
}
static DEVICE_ATTR_RO(serial);

static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

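/*
 * Report the security state with a fixed precedence: an active sanitize
 * takes priority, then "disabled" when no user passphrase is set, then
 * "frozen" for any of the frozen / passphrase-limit conditions, and
 * finally "locked" vs "unlocked".
 */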
static ssize_t security_state_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
	unsigned long state = mds->security.state;
	int rc = 0;

	/* sync with latest submission state */
	mutex_lock(&mds->mbox_mutex);
	if (mds->security.sanitize_active)
		rc = sysfs_emit(buf, "sanitize\n");
	mutex_unlock(&mds->mbox_mutex);
	if (rc)
		return rc;

	if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
		return sysfs_emit(buf, "disabled\n");
	if (state & CXL_PMEM_SEC_STATE_FROZEN ||
	    state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT ||
	    state & CXL_PMEM_SEC_STATE_USER_PLIMIT)
		return sysfs_emit(buf, "frozen\n");
	if (state & CXL_PMEM_SEC_STATE_LOCKED)
		return sysfs_emit(buf, "locked\n");
	else
		return sysfs_emit(buf, "unlocked\n");
}
static struct device_attribute dev_attr_security_state =
	__ATTR(state, 0444, security_state_show, NULL);

static ssize_t security_sanitize_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	bool sanitize;
	ssize_t rc;

	if (kstrtobool(buf, &sanitize) || !sanitize)
		return -EINVAL;

	rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SANITIZE);
	if (rc)
		return rc;

	return len;
}
static struct device_attribute dev_attr_security_sanitize =
	__ATTR(sanitize, 0200, NULL, security_sanitize_store);

static ssize_t security_erase_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t len)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	ssize_t rc;
	bool erase;

	if (kstrtobool(buf, &erase) || !erase)
		return -EINVAL;

	rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SECURE_ERASE);
	if (rc)
		return rc;

	return len;
}
static struct device_attribute dev_attr_security_erase =
	__ATTR(erase, 0200, NULL, security_erase_store);

static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	u64 offset, length;
	int rc = 0;

	/* CXL 3.0 Spec 8.2.9.8.4.1 Separate pmem and ram poison requests */
	if (resource_size(&cxlds->pmem_res)) {
		offset = cxlds->pmem_res.start;
		length = resource_size(&cxlds->pmem_res);
		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
		if (rc)
			return rc;
	}
	if (resource_size(&cxlds->ram_res)) {
		offset = cxlds->ram_res.start;
		length = resource_size(&cxlds->ram_res);
		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
		/*
		 * Invalid Physical Address is not an error for
		 * volatile addresses. Device support is optional.
		 */
		if (rc == -EFAULT)
			rc = 0;
	}
	return rc;
}

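/**
 * cxl_trigger_poison_list() - Read the poison list for a memdev
 * @cxlmd: The memory device to query
 *
 * With no committed decoders the whole device is scanned directly,
 * otherwise poison is collected per region via the endpoint port. The
 * region and DPA locks are held so the decoder configuration cannot
 * change mid-walk.
 *
 * Return: 0 on success, or a negative error code.
 */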
int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
{
	struct cxl_port *port;
	int rc;

	port = cxlmd->endpoint;
	if (!port || !is_cxl_endpoint(port))
		return -EINVAL;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	rc = down_read_interruptible(&cxl_dpa_rwsem);
	if (rc) {
		up_read(&cxl_region_rwsem);
		return rc;
	}

	if (cxl_num_decoders_committed(port) == 0) {
		/* No regions mapped to this memdev */
		rc = cxl_get_poison_by_memdev(cxlmd);
	} else {
		/* Regions mapped, collect poison by endpoint */
		rc = cxl_get_poison_by_endpoint(port);
	}
	up_read(&cxl_dpa_rwsem);
	up_read(&cxl_region_rwsem);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL);

struct cxl_dpa_to_region_context {
	struct cxl_region *cxlr;
	u64 dpa;
};

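/*
 * device_for_each_child() callback: returns 1 to stop the walk once an
 * endpoint decoder whose DPA window contains ctx->dpa is found.
 */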
static int __cxl_dpa_to_region(struct device *dev, void *arg)
{
	struct cxl_dpa_to_region_context *ctx = arg;
	struct cxl_endpoint_decoder *cxled;
	u64 dpa = ctx->dpa;

	if (!is_endpoint_decoder(dev))
		return 0;

	cxled = to_cxl_endpoint_decoder(dev);
	if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
		return 0;

	if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
		return 0;

	dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
		dev_name(&cxled->cxld.region->dev));

	ctx->cxlr = cxled->cxld.region;

	return 1;
}

static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
{
	struct cxl_dpa_to_region_context ctx;
	struct cxl_port *port;

	ctx = (struct cxl_dpa_to_region_context) {
		.dpa = dpa,
	};
	port = cxlmd->endpoint;
	if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
		device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);

	return ctx.cxlr;
}

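/*
 * Sanity check a DPA destined for poison inject/clear. These paths are
 * only reachable via debugfs; the address must fall within the device's
 * DPA resource and be 64-byte aligned, matching the granularity of a
 * poison record.
 */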
static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return 0;

	if (!resource_size(&cxlds->dpa_res)) {
		dev_dbg(cxlds->dev, "device has no dpa resource\n");
		return -EINVAL;
	}
	if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end) {
		dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n",
			dpa, &cxlds->dpa_res);
		return -EINVAL;
	}
	if (!IS_ALIGNED(dpa, 64)) {
		dev_dbg(cxlds->dev, "dpa:0x%llx is not 64-byte aligned\n", dpa);
		return -EINVAL;
	}

	return 0;
}

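/*
 * Inject poison at @dpa via the Inject Poison mailbox command. A
 * cxl_poison trace event logs the injection, and a warning is emitted
 * once if the address is mapped by an active region.
 */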
int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_mbox_inject_poison inject;
	struct cxl_poison_record record;
	struct cxl_mbox_cmd mbox_cmd;
	struct cxl_region *cxlr;
	int rc;

	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return 0;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	rc = down_read_interruptible(&cxl_dpa_rwsem);
	if (rc) {
		up_read(&cxl_region_rwsem);
		return rc;
	}

	rc = cxl_validate_poison_dpa(cxlmd, dpa);
	if (rc)
		goto out;

	inject.address = cpu_to_le64(dpa);
	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_INJECT_POISON,
		.size_in = sizeof(inject),
		.payload_in = &inject,
	};
	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc)
		goto out;

	cxlr = cxl_dpa_to_region(cxlmd, dpa);
	if (cxlr)
		dev_warn_once(mds->cxlds.dev,
			      "poison inject dpa:%#llx region: %s\n", dpa,
			      dev_name(&cxlr->dev));

	record = (struct cxl_poison_record) {
		.address = cpu_to_le64(dpa),
		.length = cpu_to_le32(1),
	};
	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
out:
	up_read(&cxl_dpa_rwsem);
	up_read(&cxl_region_rwsem);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL);

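/*
 * Clear poison at @dpa. Per CXL 3.0 8.2.9.8.4.3 the command also writes
 * 64 bytes of data to the cleared location; zeroes are used here. The
 * clear is logged via a cxl_poison trace event.
 */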
int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_mbox_clear_poison clear;
	struct cxl_poison_record record;
	struct cxl_mbox_cmd mbox_cmd;
	struct cxl_region *cxlr;
	int rc;

	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return 0;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	rc = down_read_interruptible(&cxl_dpa_rwsem);
	if (rc) {
		up_read(&cxl_region_rwsem);
		return rc;
	}

	rc = cxl_validate_poison_dpa(cxlmd, dpa);
	if (rc)
		goto out;

	/*
	 * In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
	 * is defined to accept 64 bytes of write-data, along with the
	 * address to clear. This driver uses zeroes as write-data.
	 */
	clear = (struct cxl_mbox_clear_poison) {
		.address = cpu_to_le64(dpa)
	};

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_CLEAR_POISON,
		.size_in = sizeof(clear),
		.payload_in = &clear,
	};

	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc)
		goto out;

	cxlr = cxl_dpa_to_region(cxlmd, dpa);
	if (cxlr)
		dev_warn_once(mds->cxlds.dev,
			      "poison clear dpa:%#llx region: %s\n", dpa,
			      dev_name(&cxlr->dev));

	record = (struct cxl_poison_record) {
		.address = cpu_to_le64(dpa),
		.length = cpu_to_le32(1),
	};
	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
out:
	up_read(&cxl_dpa_rwsem);
	up_read(&cxl_region_rwsem);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, CXL);

static struct attribute *cxl_memdev_attributes[] = {
	&dev_attr_serial.attr,
	&dev_attr_firmware_version.attr,
	&dev_attr_payload_max.attr,
	&dev_attr_label_storage_size.attr,
	&dev_attr_numa_node.attr,
	NULL,
};

static struct attribute *cxl_memdev_pmem_attributes[] = {
	&dev_attr_pmem_size.attr,
	NULL,
};

static struct attribute *cxl_memdev_ram_attributes[] = {
	&dev_attr_ram_size.attr,
	NULL,
};

static struct attribute *cxl_memdev_security_attributes[] = {
	&dev_attr_security_state.attr,
	&dev_attr_security_sanitize.attr,
	&dev_attr_security_erase.attr,
	NULL,
};

static umode_t cxl_memdev_visible(struct kobject *kobj, struct attribute *a,
				  int n)
{
	if (!IS_ENABLED(CONFIG_NUMA) && a == &dev_attr_numa_node.attr)
		return 0;
	return a->mode;
}

static struct attribute_group cxl_memdev_attribute_group = {
	.attrs = cxl_memdev_attributes,
	.is_visible = cxl_memdev_visible,
};

static struct attribute_group cxl_memdev_ram_attribute_group = {
	.name = "ram",
	.attrs = cxl_memdev_ram_attributes,
};

static struct attribute_group cxl_memdev_pmem_attribute_group = {
	.name = "pmem",
	.attrs = cxl_memdev_pmem_attributes,
};

static umode_t cxl_memdev_security_visible(struct kobject *kobj,
					   struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);

	if (a == &dev_attr_security_sanitize.attr &&
	    !test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
		return 0;

	if (a == &dev_attr_security_erase.attr &&
	    !test_bit(CXL_SEC_ENABLED_SECURE_ERASE, mds->security.enabled_cmds))
		return 0;

	return a->mode;
}

static struct attribute_group cxl_memdev_security_attribute_group = {
	.name = "security",
	.attrs = cxl_memdev_security_attributes,
	.is_visible = cxl_memdev_security_visible,
};

static const struct attribute_group *cxl_memdev_attribute_groups[] = {
	&cxl_memdev_attribute_group,
	&cxl_memdev_ram_attribute_group,
	&cxl_memdev_pmem_attribute_group,
	&cxl_memdev_security_attribute_group,
	NULL,
};

static const struct device_type cxl_memdev_type = {
	.name = "cxl_memdev",
	.release = cxl_memdev_release,
	.devnode = cxl_memdev_devnode,
	.groups = cxl_memdev_attribute_groups,
};

bool is_cxl_memdev(const struct device *dev)
{
	return dev->type == &cxl_memdev_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, CXL);

/**
 * set_exclusive_cxl_commands() - atomically disable user cxl commands
 * @mds: The device state to operate on
 * @cmds: bitmap of commands to mark exclusive
 *
 * Grab the cxl_memdev_rwsem in write mode to flush in-flight
 * invocations of the ioctl path and then disable future execution of
 * commands with the command ids set in @cmds.
 */
void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
				unsigned long *cmds)
{
	down_write(&cxl_memdev_rwsem);
	bitmap_or(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
		  CXL_MEM_COMMAND_ID_MAX);
	up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL);

/**
 * clear_exclusive_cxl_commands() - atomically enable user cxl commands
 * @mds: The device state to modify
 * @cmds: bitmap of commands to mark available for userspace
 */
void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
				  unsigned long *cmds)
{
	down_write(&cxl_memdev_rwsem);
	bitmap_andnot(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
		      CXL_MEM_COMMAND_ID_MAX);
	up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);

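/*
 * Fence the ioctl path: clearing cxlmd->cxlds under the write lock
 * guarantees that no new ioctl invocation can dereference device state
 * once the memdev is being torn down.
 */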
static void cxl_memdev_shutdown(struct device *dev)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

	down_write(&cxl_memdev_rwsem);
	cxlmd->cxlds = NULL;
	up_write(&cxl_memdev_rwsem);
}

static void cxl_memdev_unregister(void *_cxlmd)
{
	struct cxl_memdev *cxlmd = _cxlmd;
	struct device *dev = &cxlmd->dev;

	cdev_device_del(&cxlmd->cdev, dev);
	cxl_memdev_shutdown(dev);
	put_device(dev);
}

static void detach_memdev(struct work_struct *work)
{
	struct cxl_memdev *cxlmd;

	cxlmd = container_of(work, typeof(*cxlmd), detach_work);
	device_release_driver(&cxlmd->dev);
	put_device(&cxlmd->dev);
}

static struct lock_class_key cxl_memdev_key;

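/*
 * Allocate a memdev and initialize, but do not publish, its device and
 * cdev. Registration is completed by devm_cxl_add_memdev() via
 * cdev_device_add().
 */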
static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
					   const struct file_operations *fops)
{
	struct cxl_memdev *cxlmd;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
	if (!cxlmd)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc_max(&cxl_memdev_ida, CXL_MEM_MAX_DEVS - 1, GFP_KERNEL);
	if (rc < 0)
		goto err;
	cxlmd->id = rc;
	cxlmd->depth = -1;

	dev = &cxlmd->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_memdev_key);
	dev->parent = cxlds->dev;
	dev->bus = &cxl_bus_type;
	dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
	dev->type = &cxl_memdev_type;
	device_set_pm_not_required(dev);
	INIT_WORK(&cxlmd->detach_work, detach_memdev);

	cdev = &cxlmd->cdev;
	cdev_init(cdev, fops);
	return cxlmd;

err:
	kfree(cxlmd);
	return ERR_PTR(rc);
}

static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
			       unsigned long arg)
{
	switch (cmd) {
	case CXL_MEM_QUERY_COMMANDS:
		return cxl_query_cmd(cxlmd, (void __user *)arg);
	case CXL_MEM_SEND_COMMAND:
		return cxl_send_cmd(cxlmd, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

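/*
 * Take cxl_memdev_rwsem for read to synchronize against
 * cxl_memdev_shutdown(): a NULL ->cxlds means the device is gone and
 * the ioctl fails with -ENXIO. Only CXL_DEVTYPE_CLASSMEM devices
 * service the command set.
 */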
static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct cxl_memdev *cxlmd = file->private_data;
	struct cxl_dev_state *cxlds;
	int rc = -ENXIO;

	down_read(&cxl_memdev_rwsem);
	cxlds = cxlmd->cxlds;
	if (cxlds && cxlds->type == CXL_DEVTYPE_CLASSMEM)
		rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
	up_read(&cxl_memdev_rwsem);

	return rc;
}

static int cxl_memdev_open(struct inode *inode, struct file *file)
{
	struct cxl_memdev *cxlmd =
		container_of(inode->i_cdev, typeof(*cxlmd), cdev);

	get_device(&cxlmd->dev);
	file->private_data = cxlmd;

	return 0;
}

static int cxl_memdev_release_file(struct inode *inode, struct file *file)
{
	struct cxl_memdev *cxlmd =
		container_of(inode->i_cdev, typeof(*cxlmd), cdev);

	put_device(&cxlmd->dev);

	return 0;
}

/**
 * cxl_mem_get_fw_info - Get Firmware info
 * @mds: The device data for the operation
 *
 * Retrieve firmware info for the device specified.
 *
 * Return: 0 if no error, or the result of the mailbox command.
 *
 * See CXL-3.0 8.2.9.3.1 Get FW Info
 */
static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
{
	struct cxl_mbox_get_fw_info info;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_FW_INFO,
		.size_out = sizeof(info),
		.payload_out = &info,
	};

	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc < 0)
		return rc;

	mds->fw.num_slots = info.num_slots;
	mds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK,
				     info.slot_info);

	return 0;
}

/**
 * cxl_mem_activate_fw - Activate Firmware
 * @mds: The device data for the operation
 * @slot: slot number to activate
 *
 * Activate firmware in a given slot for the device specified.
 *
 * Return: 0 if no error, or the result of the mailbox command.
 *
 * See CXL-3.0 8.2.9.3.3 Activate FW
 */
static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
{
	struct cxl_mbox_activate_fw activate;
	struct cxl_mbox_cmd mbox_cmd;

	if (slot == 0 || slot > mds->fw.num_slots)
		return -EINVAL;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_ACTIVATE_FW,
		.size_in = sizeof(activate),
		.payload_in = &activate,
	};

	/* Only offline activation supported for now */
	activate.action = CXL_FW_ACTIVATE_OFFLINE;
	activate.slot = slot;

	return cxl_internal_send_cmd(mds, &mbox_cmd);
}

/**
 * cxl_mem_abort_fw_xfer - Abort an in-progress FW transfer
 * @mds: The device data for the operation
 *
 * Abort an in-progress firmware transfer for the device specified.
 *
 * Return: 0 if no error, or the result of the mailbox command.
 *
 * See CXL-3.0 8.2.9.3.2 Transfer FW
 */
static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds)
{
	struct cxl_mbox_transfer_fw *transfer;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	transfer = kzalloc(struct_size(transfer, data, 0), GFP_KERNEL);
	if (!transfer)
		return -ENOMEM;

	/* Set a 1s poll interval and a total wait time of 30s */
	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_TRANSFER_FW,
		.size_in = sizeof(*transfer),
		.payload_in = transfer,
		.poll_interval_ms = 1000,
		.poll_count = 30,
	};

	transfer->action = CXL_FW_TRANSFER_ACTION_ABORT;

	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	kfree(transfer);
	return rc;
}

static void cxl_fw_cleanup(struct fw_upload *fwl)
{
	struct cxl_memdev_state *mds = fwl->dd_handle;

	mds->fw.next_slot = 0;
}

static int cxl_fw_do_cancel(struct fw_upload *fwl)
{
	struct cxl_memdev_state *mds = fwl->dd_handle;
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	int rc;

	rc = cxl_mem_abort_fw_xfer(mds);
	if (rc < 0)
		dev_err(&cxlmd->dev, "Error aborting FW transfer: %d\n", rc);

	return FW_UPLOAD_ERR_CANCELED;
}

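/*
 * fw_upload ->prepare() hook: record whether the whole image fits in a
 * single Transfer FW mailbox payload (the "oneshot" case) and refresh
 * the slot info before any data is written.
 */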
static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data,
					 u32 size)
{
	struct cxl_memdev_state *mds = fwl->dd_handle;
	struct cxl_mbox_transfer_fw *transfer;

	if (!size)
		return FW_UPLOAD_ERR_INVALID_SIZE;

	mds->fw.oneshot = struct_size(transfer, data, size) <
			  mds->payload_size;

	if (cxl_mem_get_fw_info(mds))
		return FW_UPLOAD_ERR_HW_ERROR;

	/*
	 * So far no state has been changed, hence no other cleanup is
	 * necessary. Simply return the cancelled status.
	 */
	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
		return FW_UPLOAD_ERR_CANCELED;

	return FW_UPLOAD_ERR_NONE;
}

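/*
 * fw_upload ->write() hook: transfer one mailbox-sized slice per call.
 * A oneshot image goes out as a single FULL action; otherwise the first
 * slice is INITIATE, intermediate slices are CONTINUE, and the final
 * slice is END. The target slot is activated once the last slice lands.
 */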
static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
				       u32 offset, u32 size, u32 *written)
{
	struct cxl_memdev_state *mds = fwl->dd_handle;
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct cxl_mbox_transfer_fw *transfer;
	struct cxl_mbox_cmd mbox_cmd;
	u32 cur_size, remaining;
	size_t size_in;
	int rc;

	*written = 0;

	/* Offset has to be aligned to 128B (CXL-3.0 8.2.9.3.2 Table 8-57) */
	if (!IS_ALIGNED(offset, CXL_FW_TRANSFER_ALIGNMENT)) {
		dev_err(&cxlmd->dev,
			"misaligned offset for FW transfer slice (%u)\n",
			offset);
		return FW_UPLOAD_ERR_RW_ERROR;
	}

	/*
	 * Pick transfer size based on mds->payload_size. @size must be
	 * 128-byte aligned, ->payload_size is a power of 2 starting at
	 * 256 bytes, and sizeof(*transfer) is 128. These constraints
	 * imply that @cur_size will always be 128-byte aligned.
	 */
	cur_size = min_t(size_t, size, mds->payload_size - sizeof(*transfer));

	remaining = size - cur_size;
	size_in = struct_size(transfer, data, cur_size);

	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
		return cxl_fw_do_cancel(fwl);

	/*
	 * Slot numbers are 1-indexed
	 * cur_slot is the 0-indexed next_slot (i.e. 'cur_slot - 1 + 1')
	 * Check for rollover using modulo, and 1-index it by adding 1
	 */
	mds->fw.next_slot = (mds->fw.cur_slot % mds->fw.num_slots) + 1;

	/* Do the transfer via mailbox cmd */
	transfer = kzalloc(size_in, GFP_KERNEL);
	if (!transfer)
		return FW_UPLOAD_ERR_RW_ERROR;

	transfer->offset = cpu_to_le32(offset / CXL_FW_TRANSFER_ALIGNMENT);
	memcpy(transfer->data, data + offset, cur_size);
	if (mds->fw.oneshot) {
		transfer->action = CXL_FW_TRANSFER_ACTION_FULL;
		transfer->slot = mds->fw.next_slot;
	} else {
		if (offset == 0) {
			transfer->action = CXL_FW_TRANSFER_ACTION_INITIATE;
		} else if (remaining == 0) {
			transfer->action = CXL_FW_TRANSFER_ACTION_END;
			transfer->slot = mds->fw.next_slot;
		} else {
			transfer->action = CXL_FW_TRANSFER_ACTION_CONTINUE;
		}
	}

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_TRANSFER_FW,
		.size_in = size_in,
		.payload_in = transfer,
		.poll_interval_ms = 1000,
		.poll_count = 30,
	};

	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc < 0) {
		rc = FW_UPLOAD_ERR_RW_ERROR;
		goto out_free;
	}

	*written = cur_size;

	/* Activate FW if oneshot or if the last slice was written */
	if (mds->fw.oneshot || remaining == 0) {
		dev_dbg(&cxlmd->dev, "Activating firmware slot: %d\n",
			mds->fw.next_slot);
		rc = cxl_mem_activate_fw(mds, mds->fw.next_slot);
		if (rc < 0) {
			dev_err(&cxlmd->dev, "Error activating firmware: %d\n",
				rc);
			rc = FW_UPLOAD_ERR_HW_ERROR;
			goto out_free;
		}
	}

	rc = FW_UPLOAD_ERR_NONE;

out_free:
	kfree(transfer);
	return rc;
}

static enum fw_upload_err cxl_fw_poll_complete(struct fw_upload *fwl)
{
	struct cxl_memdev_state *mds = fwl->dd_handle;

	/*
	 * cxl_internal_send_cmd() handles background operations synchronously.
	 * No need to wait for completions here - any errors would've been
	 * reported and handled during the ->write() call(s).
	 * Just check if a cancel request was received, and return success.
	 */
	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
		return cxl_fw_do_cancel(fwl);

	return FW_UPLOAD_ERR_NONE;
}

static void cxl_fw_cancel(struct fw_upload *fwl)
{
	struct cxl_memdev_state *mds = fwl->dd_handle;

	set_bit(CXL_FW_CANCEL, mds->fw.state);
}

static const struct fw_upload_ops cxl_memdev_fw_ops = {
	.prepare = cxl_fw_prepare,
	.write = cxl_fw_write,
	.poll_complete = cxl_fw_poll_complete,
	.cancel = cxl_fw_cancel,
	.cleanup = cxl_fw_cleanup,
};

static void cxl_remove_fw_upload(void *fwl)
{
	firmware_upload_unregister(fwl);
}

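/**
 * devm_cxl_setup_fw_upload() - Register a firmware-upload interface
 * @host: devm context for unwinding the registration
 * @mds: The device state backing the upload
 *
 * Register with the firmware-upload subsystem only when the device
 * advertises the Get FW Info command; teardown is tied to @host.
 *
 * Return: 0 on success, or a negative error code.
 */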
int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct device *dev = &cxlds->cxlmd->dev;
	struct fw_upload *fwl;

	if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds))
		return 0;

	fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
				       &cxl_memdev_fw_ops, mds);
	if (IS_ERR(fwl))
		return PTR_ERR(fwl);
	return devm_add_action_or_reset(host, cxl_remove_fw_upload, fwl);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, CXL);

static const struct file_operations cxl_memdev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = cxl_memdev_ioctl,
	.open = cxl_memdev_open,
	.release = cxl_memdev_release_file,
	.compat_ioctl = compat_ptr_ioctl,
	.llseek = noop_llseek,
};

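/**
 * devm_cxl_add_memdev() - Publish a new CXL memdev
 * @host: devm context for unwinding the registration
 * @cxlds: The device state the memdev fronts
 *
 * Allocate a memdev, wire it to @cxlds, and publish it on the cxl bus
 * along with its character device.
 *
 * Return: the new memdev on success, or an ERR_PTR() on failure.
 */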
struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
				       struct cxl_dev_state *cxlds)
{
	struct cxl_memdev *cxlmd;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	cxlmd = cxl_memdev_alloc(cxlds, &cxl_memdev_fops);
	if (IS_ERR(cxlmd))
		return cxlmd;

	dev = &cxlmd->dev;
	rc = dev_set_name(dev, "mem%d", cxlmd->id);
	if (rc)
		goto err;

	/*
	 * Activate ioctl operations, no cxl_memdev_rwsem manipulation
	 * needed as this is ordered with cdev_add() publishing the device.
	 */
	cxlmd->cxlds = cxlds;
	cxlds->cxlmd = cxlmd;

	cdev = &cxlmd->cdev;
	rc = cdev_device_add(cdev, dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
	if (rc)
		return ERR_PTR(rc);
	return cxlmd;

err:
	/*
	 * The cdev was briefly live, shutdown any ioctl operations that
	 * saw that state.
	 */
	cxl_memdev_shutdown(dev);
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);

static void sanitize_teardown_notifier(void *data)
{
	struct cxl_memdev_state *mds = data;
	struct kernfs_node *state;

	/*
	 * Prevent new irq triggered invocations of the workqueue and
	 * flush inflight invocations.
	 */
	mutex_lock(&mds->mbox_mutex);
	state = mds->security.sanitize_node;
	mds->security.sanitize_node = NULL;
	mutex_unlock(&mds->mbox_mutex);

	cancel_delayed_work_sync(&mds->security.poll_dwork);
	sysfs_put(state);
}

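/**
 * devm_cxl_sanitize_setup_notifier() - Wire up sanitize completion notification
 * @host: devm context for unwinding the setup
 * @cxlmd: The memdev whose security/state attribute is notified
 *
 * Cache the kernfs node for the "security/state" attribute for use by
 * the sanitize completion path, and arrange for its teardown via @host.
 *
 * Return: 0 on success, or a negative error code.
 */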
int devm_cxl_sanitize_setup_notifier(struct device *host,
				     struct cxl_memdev *cxlmd)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
	struct kernfs_node *sec;

	if (!test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
		return 0;

	/*
	 * Note, the expectation is that @cxlmd would have failed to be
	 * created if these sysfs_get_dirent calls fail.
	 */
	sec = sysfs_get_dirent(cxlmd->dev.kobj.sd, "security");
	if (!sec)
		return -ENOENT;
	mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
	sysfs_put(sec);
	if (!mds->security.sanitize_node)
		return -ENOENT;

	return devm_add_action_or_reset(host, sanitize_teardown_notifier, mds);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, CXL);

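/*
 * Reserve a dynamic character device major with enough minors for
 * CXL_MEM_MAX_DEVS memdevs; individual nodes are created as devices
 * are added.
 */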
__init int cxl_memdev_init(void)
{
	dev_t devt;
	int rc;

	rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
	if (rc)
		return rc;

	cxl_mem_major = MAJOR(devt);

	return 0;
}

void cxl_memdev_exit(void)
{
	unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
}