// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/mutex.h>
#include <cxlmem.h>
#include <cxl.h>

#include "core.h"

static bool cxl_raw_allow_all;

/**
 * DOC: cxl mbox
 *
 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
 * implementation is used by the cxl_pci driver to initialize the device
 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
 */

#define cxl_for_each_cmd(cmd) \
        for ((cmd) = &cxl_mem_commands[0]; \
             ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)

#define CXL_CMD(_id, sin, sout, _flags) \
        [CXL_MEM_COMMAND_ID_##_id] = { \
                .info = { \
                        .id = CXL_MEM_COMMAND_ID_##_id, \
                        .size_in = sin, \
                        .size_out = sout, \
                }, \
                .opcode = CXL_MBOX_OP_##_id, \
                .flags = _flags, \
        }

#define CXL_VARIABLE_PAYLOAD ~0U
/*
 * This table defines the supported mailbox commands for the driver. This table
 * is made up of a UAPI structure. Sizes other than CXL_VARIABLE_PAYLOAD are
 * validated against the user's input. For example, if size_in is 0 and the
 * user passed in 1, it is an error.
 */
static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
        CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
        CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
#endif
        CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
        CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
        CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
        CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
        CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
        CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
        CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
        CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
        CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
        CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
        CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
        CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
        CXL_CMD(GET_POISON, 0x10, CXL_VARIABLE_PAYLOAD, 0),
        CXL_CMD(INJECT_POISON, 0x8, 0, 0),
        CXL_CMD(CLEAR_POISON, 0x48, 0, 0),
        CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
        CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
        CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0),
};
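
/*
 * For reference, the IDENTIFY entry above expands (mechanically, via
 * CXL_CMD()) to roughly:
 *
 *	[CXL_MEM_COMMAND_ID_IDENTIFY] = {
 *		.info = {
 *			.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *			.size_in = 0,
 *			.size_out = 0x43,
 *		},
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.flags = CXL_CMD_FLAG_FORCE_ENABLE,
 *	},
 */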

/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidate those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean; userspace cannot make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 */
static u16 cxl_disabled_raw_commands[] = {
        CXL_MBOX_OP_ACTIVATE_FW,
        CXL_MBOX_OP_SET_PARTITION_INFO,
        CXL_MBOX_OP_SET_LSA,
        CXL_MBOX_OP_SET_SHUTDOWN_STATE,
        CXL_MBOX_OP_SCAN_MEDIA,
        CXL_MBOX_OP_GET_SCAN_MEDIA,
};

/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped
 * behind the keys ABI, which allows for encrypted payloads in the UAPI.
 */
static u8 security_command_sets[] = {
        0x44, /* Sanitize */
        0x45, /* Persistent Memory Data-at-rest Security */
        0x46, /* Security Passthrough */
};

static bool cxl_is_security_command(u16 opcode)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
                if (security_command_sets[i] == (opcode >> 8))
                        return true;
        return false;
}

static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
        struct cxl_mem_command *c;

        cxl_for_each_cmd(c)
                if (c->opcode == opcode)
                        return c;

        return NULL;
}

static const char *cxl_mem_opcode_to_name(u16 opcode)
{
        struct cxl_mem_command *c;

        c = cxl_mem_find_command(opcode);
        if (!c)
                return NULL;

        return cxl_command_names[c->info.id].name;
}

/**
 * cxl_mbox_send_cmd() - Send a mailbox command to a device.
 * @cxlds: The device data for the operation
 * @opcode: Opcode for the mailbox command.
 * @in: The input payload for the mailbox command.
 * @in_size: The length of the input payload.
 * @out: Caller-allocated buffer for the output.
 * @out_size: Expected size of output.
 *
 * Context: Any context.
 * Return:
 *  * %>=0	- Number of bytes returned in @out.
 *  * %-E2BIG	- Payload is too large for hardware.
 *  * %-EBUSY	- Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT	- Hardware error occurred.
 *  * %-ENXIO	- Command completed, but device reported an error.
 *  * %-EIO	- Unexpected output size.
 *
 * Mailbox commands may execute successfully at the transport level yet the
 * device itself may still report an error. While this distinction can be
 * useful for commands from userspace, the kernel will only be able to use
 * results when both are successful.
 */
int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
                      size_t in_size, void *out, size_t out_size)
{
        const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
        struct cxl_mbox_cmd mbox_cmd = {
                .opcode = opcode,
                .payload_in = in,
                .size_in = in_size,
                .size_out = out_size,
                .payload_out = out,
        };
        int rc;

        if (out_size > cxlds->payload_size)
                return -E2BIG;

        rc = cxlds->mbox_send(cxlds, &mbox_cmd);
        if (rc)
                return rc;

        if (mbox_cmd.return_code != CXL_MBOX_CMD_RC_SUCCESS)
                return cxl_mbox_cmd_rc2errno(&mbox_cmd);

        /*
         * Variable sized commands can't be validated and so it's up to the
         * caller to do that if they wish.
         */
        if (cmd->info.size_out != CXL_VARIABLE_PAYLOAD) {
                if (mbox_cmd.size_out != out_size)
                        return -EIO;
        }
        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mbox_send_cmd, CXL);

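/*
 * Illustrative sketch only (not wired into the driver): issue
 * GET_HEALTH_INFO, whose fixed 0x12-byte output size comes from the
 * command table above. A real caller would layer the cxlmem.h output
 * structure over @buf rather than use a raw byte buffer.
 */
static __maybe_unused int cxl_example_get_health(struct cxl_dev_state *cxlds,
                                                 u8 buf[0x12])
{
        /* No input payload; expect exactly 0x12 bytes back */
        return cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_HEALTH_INFO, NULL, 0,
                                 buf, 0x12);
}
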
static bool cxl_mem_raw_command_allowed(u16 opcode)
{
        int i;

        if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
                return false;

        if (security_locked_down(LOCKDOWN_PCI_ACCESS))
                return false;

        if (cxl_raw_allow_all)
                return true;

        if (cxl_is_security_command(opcode))
                return false;

        for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
                if (cxl_disabled_raw_commands[i] == opcode)
                        return false;

        return true;
}

/**
 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 * @opcode: The mailbox command opcode.
 * @payload_in: Pointer to the input payload passed in from user space.
 *
 * Return:
 *  * true	- payload_in passes check for @opcode.
 *  * false	- payload_in contains invalid or unsupported values.
 *
 * The driver may inspect payload contents before sending a mailbox
 * command from user space to the device. The intent is to reject
 * commands with input payloads that are known to be unsafe. This
 * check is not intended to replace the user's careful selection of
 * mailbox command parameters and makes no guarantee that the user
 * command will succeed, nor that it is appropriate.
 *
 * The specific checks are determined by the opcode.
 */
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
{
        switch (opcode) {
        case CXL_MBOX_OP_SET_PARTITION_INFO: {
                struct cxl_mbox_set_partition_info *pi = payload_in;

                if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
                        return false;
                break;
        }
        default:
                break;
        }
        return true;
}

static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
                             struct cxl_dev_state *cxlds, u16 opcode,
                             size_t in_size, size_t out_size, u64 in_payload)
{
        *mbox = (struct cxl_mbox_cmd) {
                .opcode = opcode,
                .size_in = in_size,
        };

        if (in_size) {
                mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
                                                in_size);
                if (IS_ERR(mbox->payload_in))
                        return PTR_ERR(mbox->payload_in);

                if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
                        dev_dbg(cxlds->dev, "%s: input payload not allowed\n",
                                cxl_mem_opcode_to_name(opcode));
                        kvfree(mbox->payload_in);
                        return -EBUSY;
                }
        }

        /* Prepare to handle a full payload for variable sized output */
        if (out_size == CXL_VARIABLE_PAYLOAD)
                mbox->size_out = cxlds->payload_size;
        else
                mbox->size_out = out_size;

        if (mbox->size_out) {
                mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
                if (!mbox->payload_out) {
                        kvfree(mbox->payload_in);
                        return -ENOMEM;
                }
        }
        return 0;
}

static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
{
        kvfree(mbox->payload_in);
        kvfree(mbox->payload_out);
}

static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
                              const struct cxl_send_command *send_cmd,
                              struct cxl_dev_state *cxlds)
{
        if (send_cmd->raw.rsvd)
                return -EINVAL;

        /*
         * Unlike supported commands, the output size of RAW commands
         * gets passed along without further checking, so it must be
         * validated here.
         */
        if (send_cmd->out.size > cxlds->payload_size)
                return -EINVAL;

        if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
                return -EPERM;

        dev_WARN_ONCE(cxlds->dev, true, "raw command path used\n");

        *mem_cmd = (struct cxl_mem_command) {
                .info = {
                        .id = CXL_MEM_COMMAND_ID_RAW,
                        .size_in = send_cmd->in.size,
                        .size_out = send_cmd->out.size,
                },
                .opcode = send_cmd->raw.opcode
        };

        return 0;
}

static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
                          const struct cxl_send_command *send_cmd,
                          struct cxl_dev_state *cxlds)
{
        struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
        const struct cxl_command_info *info = &c->info;

        if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
                return -EINVAL;

        if (send_cmd->rsvd)
                return -EINVAL;

        if (send_cmd->in.rsvd || send_cmd->out.rsvd)
                return -EINVAL;

        /* Check that the command is enabled for hardware */
        if (!test_bit(info->id, cxlds->enabled_cmds))
                return -ENOTTY;

        /* Check that the command is not claimed for exclusive kernel use */
        if (test_bit(info->id, cxlds->exclusive_cmds))
                return -EBUSY;

        /* Check the input buffer is the expected size */
        if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
            (info->size_in != send_cmd->in.size))
                return -ENOMEM;

        /* Check the output buffer is at least large enough */
        if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
            (send_cmd->out.size < info->size_out))
                return -ENOMEM;

        *mem_cmd = (struct cxl_mem_command) {
                .info = {
                        .id = info->id,
                        .flags = info->flags,
                        .size_in = send_cmd->in.size,
                        .size_out = send_cmd->out.size,
                },
                .opcode = c->opcode
        };

        return 0;
}

/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 * @cxlds: The device data for the operation
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 *
 * Return:
 *  * %0	- @mbox_cmd is ready to send.
 *  * %-ENOTTY	- Invalid command specified.
 *  * %-EINVAL	- Reserved fields or invalid values were used.
 *  * %-ENOMEM	- Input or output buffer wasn't sized properly.
 *  * %-EPERM	- Attempted to use a protected command.
 *  * %-EBUSY	- Kernel has claimed exclusive access to this opcode.
 *
 * The result of this command is a fully validated command in @mbox_cmd that is
 * safe to send to the hardware.
 */
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
                                      struct cxl_dev_state *cxlds,
                                      const struct cxl_send_command *send_cmd)
{
        struct cxl_mem_command mem_cmd;
        int rc;

        if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
                return -ENOTTY;

        /*
         * The user can never specify an input payload larger than what hardware
         * supports, but output can be arbitrarily large (simply write out as
         * much data as the hardware provides).
         */
        if (send_cmd->in.size > cxlds->payload_size)
                return -EINVAL;

        /* Sanitize and construct a cxl_mem_command */
        if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
                rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxlds);
        else
                rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxlds);

        if (rc)
                return rc;

        /* Sanitize and construct a cxl_mbox_cmd */
        return cxl_mbox_cmd_ctor(mbox_cmd, cxlds, mem_cmd.opcode,
                                 mem_cmd.info.size_in, mem_cmd.info.size_out,
                                 send_cmd->in.payload);
}

int cxl_query_cmd(struct cxl_memdev *cxlmd,
                  struct cxl_mem_query_commands __user *q)
{
        struct device *dev = &cxlmd->dev;
        struct cxl_mem_command *cmd;
        u32 n_commands;
        int j = 0;

        dev_dbg(dev, "Query IOCTL\n");

        if (get_user(n_commands, &q->n_commands))
                return -EFAULT;

        /* Returns the total number of commands if 0 elements are requested. */
        if (n_commands == 0)
                return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);

        /*
         * Otherwise, return min(n_commands, total commands) cxl_command_info
         * structures.
         */
        cxl_for_each_cmd(cmd) {
                const struct cxl_command_info *info = &cmd->info;

                if (copy_to_user(&q->commands[j++], info, sizeof(*info)))
                        return -EFAULT;

                if (j == n_commands)
                        break;
        }

        return 0;
}

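/*
 * Userspace usage sketch for the query ioctl above (illustrative only;
 * assumes the cxl_mem.h UAPI definitions and elides error handling):
 *
 *	struct cxl_mem_query_commands probe = { .n_commands = 0 };
 *	struct cxl_mem_query_commands *q;
 *
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, &probe); // fills probe.n_commands
 *	q = calloc(1, sizeof(*q) + probe.n_commands * sizeof(q->commands[0]));
 *	q->n_commands = probe.n_commands;
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);      // copies the info array
 */
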
/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @cxlds: The device data for the operation
 * @mbox_cmd: The validated mailbox command.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0	- Mailbox transaction succeeded. This implies the mailbox
 *		  protocol completed successfully, not that the operation itself
 *		  was successful.
 *  * %-ENOMEM	- Couldn't allocate a bounce buffer.
 *  * %-EFAULT	- Something happened with copy_to/from_user.
 *  * %-EINTR	- Mailbox acquisition interrupted.
 *  * %-EXXX	- Transaction level failures.
 *
 * Dispatches a mailbox command on behalf of a userspace request.
 * The output payload is copied to userspace.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
                                        struct cxl_mbox_cmd *mbox_cmd,
                                        u64 out_payload, s32 *size_out,
                                        u32 *retval)
{
        struct device *dev = cxlds->dev;
        int rc;

        dev_dbg(dev,
                "Submitting %s command for user\n"
                "\topcode: %x\n"
                "\tsize: %zx\n",
                cxl_mem_opcode_to_name(mbox_cmd->opcode),
                mbox_cmd->opcode, mbox_cmd->size_in);

        rc = cxlds->mbox_send(cxlds, mbox_cmd);
        if (rc)
                goto out;

        /*
         * @size_out contains the max size that's allowed to be written back out
         * to userspace. While the device may have written more output than
         * this, the excess will have to be ignored.
         */
        if (mbox_cmd->size_out) {
                dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
                              "Invalid return size\n");
                if (copy_to_user(u64_to_user_ptr(out_payload),
                                 mbox_cmd->payload_out, mbox_cmd->size_out)) {
                        rc = -EFAULT;
                        goto out;
                }
        }

        *size_out = mbox_cmd->size_out;
        *retval = mbox_cmd->return_code;

out:
        cxl_mbox_cmd_dtor(mbox_cmd);
        return rc;
}

int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &cxlmd->dev;
        struct cxl_send_command send;
        struct cxl_mbox_cmd mbox_cmd;
        int rc;

        dev_dbg(dev, "Send IOCTL\n");

        if (copy_from_user(&send, s, sizeof(send)))
                return -EFAULT;

        rc = cxl_validate_cmd_from_user(&mbox_cmd, cxlds, &send);
        if (rc)
                return rc;

        rc = handle_mailbox_cmd_from_user(cxlds, &mbox_cmd, send.out.payload,
                                          &send.out.size, &send.retval);
        if (rc)
                return rc;

        if (copy_to_user(s, &send, sizeof(send)))
                return -EFAULT;

        return 0;
}
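
/*
 * Userspace usage sketch for the send ioctl above (illustrative only;
 * assumes the cxl_mem.h UAPI definitions and elides error handling): read
 * the 0x50-byte GET_FW_INFO output payload into a caller-provided buffer.
 *
 *	__u8 fw_info[0x50];
 *	struct cxl_send_command cmd = {
 *		.id = CXL_MEM_COMMAND_ID_GET_FW_INFO,
 *		.out.size = sizeof(fw_info),
 *		.out.payload = (__u64)(uintptr_t)fw_info,
 *	};
 *
 *	ioctl(fd, CXL_MEM_SEND_COMMAND, &cmd);
 *	// cmd.retval holds the device's return code on success
 */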
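/*
 * Read back an entire log in payload_size-bounded chunks, advancing the
 * GET_LOG offset until @size bytes have been transferred into @out.
 */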
static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8 *out)
{
        u32 remaining = size;
        u32 offset = 0;

        while (remaining) {
                u32 xfer_size = min_t(u32, remaining, cxlds->payload_size);
                struct cxl_mbox_get_log log = {
                        .uuid = *uuid,
                        .offset = cpu_to_le32(offset),
                        .length = cpu_to_le32(xfer_size)
                };
                int rc;

                rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LOG, &log, sizeof(log),
                                       out, xfer_size);
                if (rc < 0)
                        return rc;

                out += xfer_size;
                remaining -= xfer_size;
                offset += xfer_size;
        }

        return 0;
}


/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @cxlds: The device data for the operation
 * @size: Length of the Command Effects Log.
 * @cel: Buffer holding the Command Effects Log entries.
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
{
        struct cxl_cel_entry *cel_entry;
        const int cel_entries = size / sizeof(*cel_entry);
        int i;

        cel_entry = (struct cxl_cel_entry *) cel;

        for (i = 0; i < cel_entries; i++) {
                u16 opcode = le16_to_cpu(cel_entry[i].opcode);
                struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);

                if (!cmd) {
                        dev_dbg(cxlds->dev,
                                "Opcode 0x%04x unsupported by driver", opcode);
                        continue;
                }

                set_bit(cmd->info.id, cxlds->enabled_cmds);
        }
}


static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds)
{
        struct cxl_mbox_get_supported_logs *ret;
        int rc;

        ret = kvmalloc(cxlds->payload_size, GFP_KERNEL);
        if (!ret)
                return ERR_PTR(-ENOMEM);

        rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL, 0, ret,
                               cxlds->payload_size);
        if (rc < 0) {
                kvfree(ret);
                return ERR_PTR(rc);
        }

        return ret;
}


enum {
        CEL_UUID,
        VENDOR_DEBUG_UUID,
};

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
        [CEL_UUID] = DEFINE_CXL_CEL_UUID,
        [VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
};

/**
 * cxl_enumerate_cmds() - Enumerate commands for a device.
 * @cxlds: The device data for the operation
 *
 * Return: 0 if enumeration completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in the @cxlds.
 */
int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
{
        struct cxl_mbox_get_supported_logs *gsl;
        struct device *dev = cxlds->dev;
        struct cxl_mem_command *cmd;
        int i, rc;

        gsl = cxl_get_gsl(cxlds);
        if (IS_ERR(gsl))
                return PTR_ERR(gsl);

        rc = -ENOENT;
        for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
                u32 size = le32_to_cpu(gsl->entry[i].size);
                uuid_t uuid = gsl->entry[i].uuid;
                u8 *log;

                dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

                if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
                        continue;

                log = kvmalloc(size, GFP_KERNEL);
                if (!log) {
                        rc = -ENOMEM;
                        goto out;
                }

                rc = cxl_xfer_log(cxlds, &uuid, size, log);
                if (rc) {
                        kvfree(log);
                        goto out;
                }

                cxl_walk_cel(cxlds, size, log);
                kvfree(log);

                /* In case CEL was bogus, enable some default commands. */
                cxl_for_each_cmd(cmd)
                        if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
                                set_bit(cmd->info.id, cxlds->enabled_cmds);

                /* Found the required CEL */
                rc = 0;
        }

out:
        kvfree(gsl);
        return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);

/**
 * cxl_mem_get_partition_info - Get partition info
 * @cxlds: The device data for the operation
 *
 * Retrieve the current partition info for the device specified. The active
 * values are the current capacity in bytes. If not 0, the 'next' values are
 * the pending values, in bytes, which take effect on next cold reset.
 *
 * Return: 0 if no error; otherwise the result of the mailbox command.
 *
 * See CXL 2.0 8.2.9.5.2.1 Get Partition Info
 */
static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
{
        struct cxl_mbox_get_partition_info {
                __le64 active_volatile_cap;
                __le64 active_persistent_cap;
                __le64 next_volatile_cap;
                __le64 next_persistent_cap;
        } __packed pi;
        int rc;

        rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_PARTITION_INFO, NULL, 0,
                               &pi, sizeof(pi));
        if (rc)
                return rc;

        cxlds->active_volatile_bytes =
                le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
        cxlds->active_persistent_bytes =
                le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
        cxlds->next_volatile_bytes =
                le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
        cxlds->next_persistent_bytes =
                le64_to_cpu(pi.next_persistent_cap) * CXL_CAPACITY_MULTIPLIER;

        return 0;
}
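
/*
 * Worked example, assuming CXL_CAPACITY_MULTIPLIER encodes the spec's
 * 256MB capacity granularity: a returned active_volatile_cap of 0x10
 * yields 0x10 * 256MB = 4GB, i.e. active_volatile_bytes = 0x100000000.
 */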
/**
 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
 * @cxlds: The device data for the operation
 *
 * Return: 0 if identify was executed successfully.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
{
        /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
        struct cxl_mbox_identify id;
        int rc;

        rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
                               sizeof(id));
        if (rc < 0)
                return rc;

        cxlds->total_bytes =
                le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
        cxlds->volatile_only_bytes =
                le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
        cxlds->persistent_only_bytes =
                le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
        cxlds->partition_align_bytes =
                le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;

        dev_dbg(cxlds->dev,
                "Identify Memory Device\n"
                "  total_bytes = %#llx\n"
                "  volatile_only_bytes = %#llx\n"
                "  persistent_only_bytes = %#llx\n"
                "  partition_align_bytes = %#llx\n",
                cxlds->total_bytes, cxlds->volatile_only_bytes,
                cxlds->persistent_only_bytes, cxlds->partition_align_bytes);

        cxlds->lsa_size = le32_to_cpu(id.lsa_size);
        memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));

        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
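/*
 * Populate ram_range and pmem_range from the static IDENTIFY capacities
 * when the device does not support partitioning, otherwise from the live
 * Get Partition Info values. For example, 1GB of volatile-only capacity
 * followed by 1GB of persistent-only capacity yields ram_range
 * [0, 1GB - 1] and pmem_range [1GB, 2GB - 1].
 */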
int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
{
        int rc;

        if (cxlds->partition_align_bytes == 0) {
                cxlds->ram_range.start = 0;
                cxlds->ram_range.end = cxlds->volatile_only_bytes - 1;
                cxlds->pmem_range.start = cxlds->volatile_only_bytes;
                cxlds->pmem_range.end = cxlds->volatile_only_bytes +
                                        cxlds->persistent_only_bytes - 1;
                return 0;
        }

        rc = cxl_mem_get_partition_info(cxlds);
        if (rc) {
                dev_err(cxlds->dev, "Failed to query partition information\n");
                return rc;
        }

        dev_dbg(cxlds->dev,
                "Get Partition Info\n"
                "  active_volatile_bytes = %#llx\n"
                "  active_persistent_bytes = %#llx\n"
                "  next_volatile_bytes = %#llx\n"
                "  next_persistent_bytes = %#llx\n",
                cxlds->active_volatile_bytes, cxlds->active_persistent_bytes,
                cxlds->next_volatile_bytes, cxlds->next_persistent_bytes);

        cxlds->ram_range.start = 0;
        cxlds->ram_range.end = cxlds->active_volatile_bytes - 1;

        cxlds->pmem_range.start = cxlds->active_volatile_bytes;
        cxlds->pmem_range.end =
                cxlds->active_volatile_bytes + cxlds->active_persistent_bytes - 1;

        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);

struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
{
        struct cxl_dev_state *cxlds;

        cxlds = devm_kzalloc(dev, sizeof(*cxlds), GFP_KERNEL);
        if (!cxlds) {
                dev_err(dev, "No memory available\n");
                return ERR_PTR(-ENOMEM);
        }

        mutex_init(&cxlds->mbox_mutex);
        cxlds->dev = dev;

        return cxlds;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);

static struct dentry *cxl_debugfs;

void __init cxl_mbox_init(void)
{
        struct dentry *mbox_debugfs;

        cxl_debugfs = debugfs_create_dir("cxl", NULL);
        mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
        debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
                            &cxl_raw_allow_all);
}
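
/*
 * With debugfs mounted at the conventional /sys/kernel/debug, the knob
 * created above appears as /sys/kernel/debug/cxl/mbox/raw_allow_all;
 * writing Y there bypasses the per-opcode filtering in
 * cxl_mem_raw_command_allowed() (lockdown permitting).
 */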

void cxl_mbox_exit(void)
{
        debugfs_remove_recursive(cxl_debugfs);
}