1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Configfs interface for the NVMe target.
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5 */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/kstrtox.h>
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/stat.h>
12 #include <linux/ctype.h>
13 #include <linux/pci.h>
14 #include <linux/pci-p2pdma.h>
15 #ifdef CONFIG_NVME_TARGET_AUTH
16 #include <linux/nvme-auth.h>
17 #endif
18 #include <crypto/hash.h>
19 #include <crypto/kpp.h>
20 #include <linux/nospec.h>
21
22 #include "nvmet.h"
23
24 static const struct config_item_type nvmet_host_type;
25 static const struct config_item_type nvmet_subsys_type;
26
27 static LIST_HEAD(nvmet_ports_list);
28 struct list_head *nvmet_ports = &nvmet_ports_list;
29
/*
 * Generic mapping between a protocol-defined numeric constant and the
 * human-readable name exposed through configfs attributes.
 */
struct nvmet_type_name_map {
	u8		type;
	const char	*name;
};
34
35 static struct nvmet_type_name_map nvmet_transport[] = {
36 { NVMF_TRTYPE_RDMA, "rdma" },
37 { NVMF_TRTYPE_FC, "fc" },
38 { NVMF_TRTYPE_TCP, "tcp" },
39 { NVMF_TRTYPE_LOOP, "loop" },
40 };
41
/*
 * Address family names for addr_adrfam.  Index 0 ("pcie") is present
 * for completeness but is skipped by the show/store loops below, which
 * start at index 1.
 */
static const struct nvmet_type_name_map nvmet_addr_family[] = {
	{ NVMF_ADDR_FAMILY_PCI,		"pcie" },
	{ NVMF_ADDR_FAMILY_IP4,		"ipv4" },
	{ NVMF_ADDR_FAMILY_IP6,		"ipv6" },
	{ NVMF_ADDR_FAMILY_IB,		"ib" },
	{ NVMF_ADDR_FAMILY_FC,		"fc" },
	{ NVMF_ADDR_FAMILY_LOOP,	"loop" },
};
50
nvmet_is_port_enabled(struct nvmet_port * p,const char * caller)51 static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
52 {
53 if (p->enabled)
54 pr_err("Disable port '%u' before changing attribute in %s\n",
55 le16_to_cpu(p->disc_addr.portid), caller);
56 return p->enabled;
57 }
58
59 /*
60 * nvmet_port Generic ConfigFS definitions.
61 * Used in any place in the ConfigFS tree that refers to an address.
62 */
nvmet_addr_adrfam_show(struct config_item * item,char * page)63 static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
64 {
65 u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
66 int i;
67
68 for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
69 if (nvmet_addr_family[i].type == adrfam)
70 return snprintf(page, PAGE_SIZE, "%s\n",
71 nvmet_addr_family[i].name);
72 }
73
74 return snprintf(page, PAGE_SIZE, "\n");
75 }
76
nvmet_addr_adrfam_store(struct config_item * item,const char * page,size_t count)77 static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
78 const char *page, size_t count)
79 {
80 struct nvmet_port *port = to_nvmet_port(item);
81 int i;
82
83 if (nvmet_is_port_enabled(port, __func__))
84 return -EACCES;
85
86 for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
87 if (sysfs_streq(page, nvmet_addr_family[i].name))
88 goto found;
89 }
90
91 pr_err("Invalid value '%s' for adrfam\n", page);
92 return -EINVAL;
93
94 found:
95 port->disc_addr.adrfam = nvmet_addr_family[i].type;
96 return count;
97 }
98
99 CONFIGFS_ATTR(nvmet_, addr_adrfam);
100
nvmet_addr_portid_show(struct config_item * item,char * page)101 static ssize_t nvmet_addr_portid_show(struct config_item *item,
102 char *page)
103 {
104 __le16 portid = to_nvmet_port(item)->disc_addr.portid;
105
106 return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
107 }
108
nvmet_addr_portid_store(struct config_item * item,const char * page,size_t count)109 static ssize_t nvmet_addr_portid_store(struct config_item *item,
110 const char *page, size_t count)
111 {
112 struct nvmet_port *port = to_nvmet_port(item);
113 u16 portid = 0;
114
115 if (kstrtou16(page, 0, &portid)) {
116 pr_err("Invalid value '%s' for portid\n", page);
117 return -EINVAL;
118 }
119
120 if (nvmet_is_port_enabled(port, __func__))
121 return -EACCES;
122
123 port->disc_addr.portid = cpu_to_le16(portid);
124 return count;
125 }
126
127 CONFIGFS_ATTR(nvmet_, addr_portid);
128
nvmet_addr_traddr_show(struct config_item * item,char * page)129 static ssize_t nvmet_addr_traddr_show(struct config_item *item,
130 char *page)
131 {
132 struct nvmet_port *port = to_nvmet_port(item);
133
134 return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
135 }
136
/*
 * Set the transport address while the port is disabled.  The value is
 * copied with sscanf("%s"), i.e. up to the first whitespace character,
 * which also strips a trailing newline from "echo".
 */
static ssize_t nvmet_addr_traddr_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	/* bound the input before parsing into the fixed-size traddr field */
	if (count > NVMF_TRADDR_SIZE) {
		pr_err("Invalid value '%s' for traddr\n", page);
		return -EINVAL;
	}

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
		return -EINVAL;
	return count;
}
154
155 CONFIGFS_ATTR(nvmet_, addr_traddr);
156
/* Secure-channel treatment-required (TREQ) values for addr_treq. */
static const struct nvmet_type_name_map nvmet_addr_treq[] = {
	{ NVMF_TREQ_NOT_SPECIFIED,	"not specified" },
	{ NVMF_TREQ_REQUIRED,		"required" },
	{ NVMF_TREQ_NOT_REQUIRED,	"not required" },
};
162
nvmet_addr_treq_show(struct config_item * item,char * page)163 static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
164 {
165 u8 treq = to_nvmet_port(item)->disc_addr.treq &
166 NVME_TREQ_SECURE_CHANNEL_MASK;
167 int i;
168
169 for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
170 if (treq == nvmet_addr_treq[i].type)
171 return snprintf(page, PAGE_SIZE, "%s\n",
172 nvmet_addr_treq[i].name);
173 }
174
175 return snprintf(page, PAGE_SIZE, "\n");
176 }
177
nvmet_addr_treq_store(struct config_item * item,const char * page,size_t count)178 static ssize_t nvmet_addr_treq_store(struct config_item *item,
179 const char *page, size_t count)
180 {
181 struct nvmet_port *port = to_nvmet_port(item);
182 u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;
183 int i;
184
185 if (nvmet_is_port_enabled(port, __func__))
186 return -EACCES;
187
188 for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
189 if (sysfs_streq(page, nvmet_addr_treq[i].name))
190 goto found;
191 }
192
193 pr_err("Invalid value '%s' for treq\n", page);
194 return -EINVAL;
195
196 found:
197 treq |= nvmet_addr_treq[i].type;
198 port->disc_addr.treq = treq;
199 return count;
200 }
201
202 CONFIGFS_ATTR(nvmet_, addr_treq);
203
nvmet_addr_trsvcid_show(struct config_item * item,char * page)204 static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
205 char *page)
206 {
207 struct nvmet_port *port = to_nvmet_port(item);
208
209 return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
210 }
211
/*
 * Set the transport service id while the port is disabled.  As with
 * traddr, sscanf("%s") copies up to the first whitespace, stripping a
 * trailing newline.
 */
static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	/* bound the input before parsing into the fixed-size trsvcid field */
	if (count > NVMF_TRSVCID_SIZE) {
		pr_err("Invalid value '%s' for trsvcid\n", page);
		return -EINVAL;
	}
	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
		return -EINVAL;
	return count;
}
228
229 CONFIGFS_ATTR(nvmet_, addr_trsvcid);
230
nvmet_param_inline_data_size_show(struct config_item * item,char * page)231 static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
232 char *page)
233 {
234 struct nvmet_port *port = to_nvmet_port(item);
235
236 return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
237 }
238
nvmet_param_inline_data_size_store(struct config_item * item,const char * page,size_t count)239 static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
240 const char *page, size_t count)
241 {
242 struct nvmet_port *port = to_nvmet_port(item);
243 int ret;
244
245 if (nvmet_is_port_enabled(port, __func__))
246 return -EACCES;
247 ret = kstrtoint(page, 0, &port->inline_data_size);
248 if (ret) {
249 pr_err("Invalid value '%s' for inline_data_size\n", page);
250 return -EINVAL;
251 }
252 return count;
253 }
254
255 CONFIGFS_ATTR(nvmet_, param_inline_data_size);
256
257 #ifdef CONFIG_BLK_DEV_INTEGRITY
nvmet_param_pi_enable_show(struct config_item * item,char * page)258 static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
259 char *page)
260 {
261 struct nvmet_port *port = to_nvmet_port(item);
262
263 return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
264 }
265
nvmet_param_pi_enable_store(struct config_item * item,const char * page,size_t count)266 static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
267 const char *page, size_t count)
268 {
269 struct nvmet_port *port = to_nvmet_port(item);
270 bool val;
271
272 if (kstrtobool(page, &val))
273 return -EINVAL;
274
275 if (nvmet_is_port_enabled(port, __func__))
276 return -EACCES;
277
278 port->pi_enable = val;
279 return count;
280 }
281
282 CONFIGFS_ATTR(nvmet_, param_pi_enable);
283 #endif
284
nvmet_addr_trtype_show(struct config_item * item,char * page)285 static ssize_t nvmet_addr_trtype_show(struct config_item *item,
286 char *page)
287 {
288 struct nvmet_port *port = to_nvmet_port(item);
289 int i;
290
291 for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
292 if (port->disc_addr.trtype == nvmet_transport[i].type)
293 return snprintf(page, PAGE_SIZE,
294 "%s\n", nvmet_transport[i].name);
295 }
296
297 return sprintf(page, "\n");
298 }
299
/* Initialize the RDMA transport-specific address subtype defaults. */
static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
{
	port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
	port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
	port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
}
306
nvmet_addr_trtype_store(struct config_item * item,const char * page,size_t count)307 static ssize_t nvmet_addr_trtype_store(struct config_item *item,
308 const char *page, size_t count)
309 {
310 struct nvmet_port *port = to_nvmet_port(item);
311 int i;
312
313 if (nvmet_is_port_enabled(port, __func__))
314 return -EACCES;
315
316 for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
317 if (sysfs_streq(page, nvmet_transport[i].name))
318 goto found;
319 }
320
321 pr_err("Invalid value '%s' for trtype\n", page);
322 return -EINVAL;
323
324 found:
325 memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
326 port->disc_addr.trtype = nvmet_transport[i].type;
327 if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
328 nvmet_port_init_tsas_rdma(port);
329 return count;
330 }
331
332 CONFIGFS_ATTR(nvmet_, addr_trtype);
333
334 /*
335 * Namespace structures & file operation functions below
336 */
nvmet_ns_device_path_show(struct config_item * item,char * page)337 static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
338 {
339 return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
340 }
341
/*
 * Set the backing device (or file) path for a namespace.  Only allowed
 * while the namespace is disabled.  The path is copied up to, and not
 * including, the first newline; an empty path is rejected.
 */
static ssize_t nvmet_ns_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);
	ret = -EBUSY;
	if (ns->enabled)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");	/* length up to trailing newline */
	if (!len)
		goto out_unlock;

	kfree(ns->device_path);		/* drop any previously set path */
	ret = -ENOMEM;
	ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
	if (!ns->device_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);
	return count;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}
373
374 CONFIGFS_ATTR(nvmet_ns_, device_path);
375
376 #ifdef CONFIG_PCI_P2PDMA
nvmet_ns_p2pmem_show(struct config_item * item,char * page)377 static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
378 {
379 struct nvmet_ns *ns = to_nvmet_ns(item);
380
381 return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
382 }
383
/*
 * Configure peer-to-peer memory use for a namespace: the value is
 * parsed by pci_p2pdma_enable_store() (a boolean or a specific PCI
 * device).  Only allowed while the namespace is disabled.
 */
static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct pci_dev *p2p_dev = NULL;
	bool use_p2pmem;
	int ret = count;
	int error;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
	if (error) {
		ret = error;
		goto out_unlock;
	}

	ns->use_p2pmem = use_p2pmem;
	/* swap the device reference: drop the old one, keep the new one */
	pci_dev_put(ns->p2p_dev);
	ns->p2p_dev = p2p_dev;

out_unlock:
	mutex_unlock(&ns->subsys->lock);

	return ret;
}
414
415 CONFIGFS_ATTR(nvmet_ns_, p2pmem);
416 #endif /* CONFIG_PCI_P2PDMA */
417
nvmet_ns_device_uuid_show(struct config_item * item,char * page)418 static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
419 {
420 return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
421 }
422
nvmet_ns_device_uuid_store(struct config_item * item,const char * page,size_t count)423 static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
424 const char *page, size_t count)
425 {
426 struct nvmet_ns *ns = to_nvmet_ns(item);
427 struct nvmet_subsys *subsys = ns->subsys;
428 int ret = 0;
429
430 mutex_lock(&subsys->lock);
431 if (ns->enabled) {
432 ret = -EBUSY;
433 goto out_unlock;
434 }
435
436 if (uuid_parse(page, &ns->uuid))
437 ret = -EINVAL;
438
439 out_unlock:
440 mutex_unlock(&subsys->lock);
441 return ret ? ret : count;
442 }
443
444 CONFIGFS_ATTR(nvmet_ns_, device_uuid);
445
nvmet_ns_device_nguid_show(struct config_item * item,char * page)446 static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
447 {
448 return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
449 }
450
/*
 * Set the 16-byte NGUID from 32 hex digits, with an optional '-' or
 * ':' separator allowed after any byte.  Only permitted while the
 * namespace is disabled; the stored value is updated only after the
 * whole string has parsed successfully.
 */
static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	u8 nguid[16];
	const char *p = page;
	int i;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	for (i = 0; i < 16; i++) {
		/* each byte needs two more characters of input */
		if (p + 2 > page + count) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (!isxdigit(p[0]) || !isxdigit(p[1])) {
			ret = -EINVAL;
			goto out_unlock;
		}

		nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
		p += 2;

		/* skip one optional separator between bytes */
		if (*p == '-' || *p == ':')
			p++;
	}

	/* commit atomically once parsing succeeded */
	memcpy(&ns->nguid, nguid, sizeof(nguid));
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}
489
490 CONFIGFS_ATTR(nvmet_ns_, device_nguid);
491
nvmet_ns_ana_grpid_show(struct config_item * item,char * page)492 static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
493 {
494 return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
495 }
496
/*
 * Move the namespace to another ANA group (valid ids are
 * 1..NVMET_MAX_ANAGRPS).  Membership counts for the old and new groups
 * and the change count are updated under nvmet_ana_sem, then an ANA
 * change event is sent for the subsystem.
 */
static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	u32 oldgrpid, newgrpid;
	int ret;

	ret = kstrtou32(page, 0, &newgrpid);
	if (ret)
		return ret;

	if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
		return -EINVAL;

	down_write(&nvmet_ana_sem);
	oldgrpid = ns->anagrpid;
	/* clamp the user-controlled index against speculative execution */
	newgrpid = array_index_nospec(newgrpid, NVMET_MAX_ANAGRPS);
	nvmet_ana_group_enabled[newgrpid]++;
	ns->anagrpid = newgrpid;
	nvmet_ana_group_enabled[oldgrpid]--;
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);

	nvmet_send_ana_event(ns->subsys, NULL);
	return count;
}
523
524 CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
525
nvmet_ns_enable_show(struct config_item * item,char * page)526 static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
527 {
528 return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
529 }
530
nvmet_ns_enable_store(struct config_item * item,const char * page,size_t count)531 static ssize_t nvmet_ns_enable_store(struct config_item *item,
532 const char *page, size_t count)
533 {
534 struct nvmet_ns *ns = to_nvmet_ns(item);
535 bool enable;
536 int ret = 0;
537
538 if (kstrtobool(page, &enable))
539 return -EINVAL;
540
541 if (enable)
542 ret = nvmet_ns_enable(ns);
543 else
544 nvmet_ns_disable(ns);
545
546 return ret ? ret : count;
547 }
548
549 CONFIGFS_ATTR(nvmet_ns_, enable);
550
nvmet_ns_buffered_io_show(struct config_item * item,char * page)551 static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
552 {
553 return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
554 }
555
nvmet_ns_buffered_io_store(struct config_item * item,const char * page,size_t count)556 static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
557 const char *page, size_t count)
558 {
559 struct nvmet_ns *ns = to_nvmet_ns(item);
560 bool val;
561
562 if (kstrtobool(page, &val))
563 return -EINVAL;
564
565 mutex_lock(&ns->subsys->lock);
566 if (ns->enabled) {
567 pr_err("disable ns before setting buffered_io value.\n");
568 mutex_unlock(&ns->subsys->lock);
569 return -EINVAL;
570 }
571
572 ns->buffered_io = val;
573 mutex_unlock(&ns->subsys->lock);
574 return count;
575 }
576
577 CONFIGFS_ATTR(nvmet_ns_, buffered_io);
578
nvmet_ns_revalidate_size_store(struct config_item * item,const char * page,size_t count)579 static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
580 const char *page, size_t count)
581 {
582 struct nvmet_ns *ns = to_nvmet_ns(item);
583 bool val;
584
585 if (kstrtobool(page, &val))
586 return -EINVAL;
587
588 if (!val)
589 return -EINVAL;
590
591 mutex_lock(&ns->subsys->lock);
592 if (!ns->enabled) {
593 pr_err("enable ns before revalidate.\n");
594 mutex_unlock(&ns->subsys->lock);
595 return -EINVAL;
596 }
597 if (nvmet_ns_revalidate(ns))
598 nvmet_ns_changed(ns->subsys, ns->nsid);
599 mutex_unlock(&ns->subsys->lock);
600 return count;
601 }
602
603 CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
604
/* Attributes exposed in each namespaces/<nsid>/ configfs directory. */
static struct configfs_attribute *nvmet_ns_attrs[] = {
	&nvmet_ns_attr_device_path,
	&nvmet_ns_attr_device_nguid,
	&nvmet_ns_attr_device_uuid,
	&nvmet_ns_attr_ana_grpid,
	&nvmet_ns_attr_enable,
	&nvmet_ns_attr_buffered_io,
	&nvmet_ns_attr_revalidate_size,
#ifdef CONFIG_PCI_P2PDMA
	&nvmet_ns_attr_p2pmem,
#endif
	NULL,
};
618
/* configfs release callback: free the namespace when its item goes away. */
static void nvmet_ns_release(struct config_item *item)
{
	nvmet_ns_free(to_nvmet_ns(item));
}
625
/* Item operations and type for a single namespace directory. */
static struct configfs_item_operations nvmet_ns_item_ops = {
	.release		= nvmet_ns_release,
};

static const struct config_item_type nvmet_ns_type = {
	.ct_item_ops		= &nvmet_ns_item_ops,
	.ct_attrs		= nvmet_ns_attrs,
	.ct_owner		= THIS_MODULE,
};
635
/*
 * configfs mkdir handler for subsystems/<nqn>/namespaces/<nsid>.
 * The directory name is parsed as the nsid; 0 and NVME_NSID_ALL are
 * reserved and rejected.  The new namespace starts out disabled.
 */
static struct config_group *nvmet_ns_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
	struct nvmet_ns *ns;
	int ret;
	u32 nsid;

	ret = kstrtou32(name, 0, &nsid);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (nsid == 0 || nsid == NVME_NSID_ALL) {
		pr_err("invalid nsid %#x", nsid);
		goto out;
	}

	ret = -ENOMEM;
	ns = nvmet_ns_alloc(subsys, nsid);
	if (!ns)
		goto out;
	config_group_init_type_name(&ns->group, name, &nvmet_ns_type);

	pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);

	return &ns->group;
out:
	return ERR_PTR(ret);
}
666
/* Group operations and type for the namespaces/ container directory. */
static struct configfs_group_operations nvmet_namespaces_group_ops = {
	.make_group		= nvmet_ns_make,
};

static const struct config_item_type nvmet_namespaces_type = {
	.ct_group_ops		= &nvmet_namespaces_group_ops,
	.ct_owner		= THIS_MODULE,
};
675
676 #ifdef CONFIG_NVME_TARGET_PASSTHRU
677
nvmet_passthru_device_path_show(struct config_item * item,char * page)678 static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
679 char *page)
680 {
681 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
682
683 return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
684 }
685
/*
 * Set the passthru controller device path.  Only allowed while passthru
 * is not enabled; the path is copied up to, and not including, the
 * first newline, and an empty path is rejected.
 */
static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);

	ret = -EBUSY;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");	/* length up to trailing newline */
	if (!len)
		goto out_unlock;

	kfree(subsys->passthru_ctrl_path);	/* drop any previous path */
	ret = -ENOMEM;
	subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);

	return count;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}
717 CONFIGFS_ATTR(nvmet_passthru_, device_path);
718
nvmet_passthru_enable_show(struct config_item * item,char * page)719 static ssize_t nvmet_passthru_enable_show(struct config_item *item,
720 char *page)
721 {
722 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
723
724 return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
725 }
726
nvmet_passthru_enable_store(struct config_item * item,const char * page,size_t count)727 static ssize_t nvmet_passthru_enable_store(struct config_item *item,
728 const char *page, size_t count)
729 {
730 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
731 bool enable;
732 int ret = 0;
733
734 if (kstrtobool(page, &enable))
735 return -EINVAL;
736
737 if (enable)
738 ret = nvmet_passthru_ctrl_enable(subsys);
739 else
740 nvmet_passthru_ctrl_disable(subsys);
741
742 return ret ? ret : count;
743 }
744 CONFIGFS_ATTR(nvmet_passthru_, enable);
745
nvmet_passthru_admin_timeout_show(struct config_item * item,char * page)746 static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
747 char *page)
748 {
749 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
750 }
751
nvmet_passthru_admin_timeout_store(struct config_item * item,const char * page,size_t count)752 static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
753 const char *page, size_t count)
754 {
755 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
756 unsigned int timeout;
757
758 if (kstrtouint(page, 0, &timeout))
759 return -EINVAL;
760 subsys->admin_timeout = timeout;
761 return count;
762 }
763 CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);
764
nvmet_passthru_io_timeout_show(struct config_item * item,char * page)765 static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
766 char *page)
767 {
768 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
769 }
770
nvmet_passthru_io_timeout_store(struct config_item * item,const char * page,size_t count)771 static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
772 const char *page, size_t count)
773 {
774 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
775 unsigned int timeout;
776
777 if (kstrtouint(page, 0, &timeout))
778 return -EINVAL;
779 subsys->io_timeout = timeout;
780 return count;
781 }
782 CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
783
nvmet_passthru_clear_ids_show(struct config_item * item,char * page)784 static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
785 char *page)
786 {
787 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
788 }
789
nvmet_passthru_clear_ids_store(struct config_item * item,const char * page,size_t count)790 static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
791 const char *page, size_t count)
792 {
793 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
794 unsigned int clear_ids;
795
796 if (kstrtouint(page, 0, &clear_ids))
797 return -EINVAL;
798 subsys->clear_ids = clear_ids;
799 return count;
800 }
801 CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
802
/* Attributes and type for the per-subsystem passthru/ directory. */
static struct configfs_attribute *nvmet_passthru_attrs[] = {
	&nvmet_passthru_attr_device_path,
	&nvmet_passthru_attr_enable,
	&nvmet_passthru_attr_admin_timeout,
	&nvmet_passthru_attr_io_timeout,
	&nvmet_passthru_attr_clear_ids,
	NULL,
};

static const struct config_item_type nvmet_passthru_type = {
	.ct_attrs		= nvmet_passthru_attrs,
	.ct_owner		= THIS_MODULE,
};
816
/* Register the "passthru" default group under a subsystem's directory. */
static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
	config_group_init_type_name(&subsys->passthru_group,
				    "passthru", &nvmet_passthru_type);
	configfs_add_default_group(&subsys->passthru_group,
				   &subsys->group);
}
824
825 #else /* CONFIG_NVME_TARGET_PASSTHRU */
826
/* Passthru support compiled out: nothing to add. */
static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
}
830
831 #endif /* CONFIG_NVME_TARGET_PASSTHRU */
832
/*
 * Symlink a subsystem into a port's subsystems/ directory, making the
 * subsystem reachable through that port.  Linking the first subsystem
 * also enables the port; duplicate links are rejected with -EEXIST.
 */
static int nvmet_port_subsys_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys;
	struct nvmet_subsys_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_subsys_type) {
		pr_err("can only link subsystems into the subsystems dir.!\n");
		return -EINVAL;
	}
	subsys = to_subsys(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->subsys = subsys;

	down_write(&nvmet_config_sem);
	ret = -EEXIST;
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto out_free_link;
	}

	if (list_empty(&port->subsystems)) {
		/* first subsystem on this port: bring the port up */
		ret = nvmet_enable_port(port);
		if (ret)
			goto out_free_link;
	}

	list_add_tail(&link->entry, &port->subsystems);
	nvmet_port_disc_changed(port, subsys);

	up_write(&nvmet_config_sem);
	return 0;

out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
875
/*
 * Remove a subsystem symlink from a port: tear down the controllers
 * using it, generate a discovery change, and disable the port once the
 * last subsystem is gone.
 */
static void nvmet_port_subsys_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys = to_subsys(target);
	struct nvmet_subsys_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_port_del_ctrls(port, subsys);
	nvmet_port_disc_changed(port, subsys);

	if (list_empty(&port->subsystems))
		nvmet_disable_port(port);
	up_write(&nvmet_config_sem);
	kfree(p);
}
901
/* Link operations and type for a port's subsystems/ directory. */
static struct configfs_item_operations nvmet_port_subsys_item_ops = {
	.allow_link		= nvmet_port_subsys_allow_link,
	.drop_link		= nvmet_port_subsys_drop_link,
};

static const struct config_item_type nvmet_port_subsys_type = {
	.ct_item_ops		= &nvmet_port_subsys_item_ops,
	.ct_owner		= THIS_MODULE,
};
911
/*
 * Symlink a host NQN into a subsystem's allowed_hosts/ directory.
 * Mutually exclusive with allow_any_host; duplicate host names are
 * rejected with -EEXIST.  Generates a discovery change for the host.
 */
static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host;
	struct nvmet_host_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_host_type) {
		pr_err("can only link hosts into the allowed_hosts directory!\n");
		return -EINVAL;
	}

	host = to_host(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->host = host;

	down_write(&nvmet_config_sem);
	ret = -EINVAL;
	if (subsys->allow_any_host) {
		pr_err("can't add hosts when allow_any_host is set!\n");
		goto out_free_link;
	}

	ret = -EEXIST;
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto out_free_link;
	}
	list_add_tail(&link->entry, &subsys->hosts);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	return 0;
out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
953
/*
 * Remove a host symlink from a subsystem's allowed_hosts/ directory
 * and generate a discovery change for that host.
 */
static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host = to_host(target);
	struct nvmet_host_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	kfree(p);
}
976
/* Link operations and type for a subsystem's allowed_hosts/ directory. */
static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
	.allow_link		= nvmet_allowed_hosts_allow_link,
	.drop_link		= nvmet_allowed_hosts_drop_link,
};

static const struct config_item_type nvmet_allowed_hosts_type = {
	.ct_item_ops		= &nvmet_allowed_hosts_item_ops,
	.ct_owner		= THIS_MODULE,
};
986
nvmet_subsys_attr_allow_any_host_show(struct config_item * item,char * page)987 static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
988 char *page)
989 {
990 return snprintf(page, PAGE_SIZE, "%d\n",
991 to_subsys(item)->allow_any_host);
992 }
993
/*
 * Toggle whether any host may connect.  Cannot be enabled while an
 * explicit allowed_hosts list exists; a discovery change is generated
 * only when the value actually changes.
 */
static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool allow_any_host;
	int ret = 0;

	if (kstrtobool(page, &allow_any_host))
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (allow_any_host && !list_empty(&subsys->hosts)) {
		pr_err("Can't set allow_any_host when explicit hosts are set!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	if (subsys->allow_any_host != allow_any_host) {
		subsys->allow_any_host = allow_any_host;
		nvmet_subsys_disc_changed(subsys, NULL);
	}

out_unlock:
	up_write(&nvmet_config_sem);
	return ret ? ret : count;
}
1020
1021 CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
1022
nvmet_subsys_attr_version_show(struct config_item * item,char * page)1023 static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
1024 char *page)
1025 {
1026 struct nvmet_subsys *subsys = to_subsys(item);
1027
1028 if (NVME_TERTIARY(subsys->ver))
1029 return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
1030 NVME_MAJOR(subsys->ver),
1031 NVME_MINOR(subsys->ver),
1032 NVME_TERTIARY(subsys->ver));
1033
1034 return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
1035 NVME_MAJOR(subsys->ver),
1036 NVME_MINOR(subsys->ver));
1037 }
1038
/*
 * Parse "major.minor[.tertiary]" and set the subsystem's NVMe spec
 * version.  Refused once the version has been taken over from a
 * discovered controller, and for passthru subsystems, which always use
 * the underlying controller's version.  Caller holds subsys->lock and
 * nvmet_config_sem.
 */
static ssize_t
nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int major, minor, tertiary = 0;
	int ret;

	if (subsys->subsys_discovered) {
		if (NVME_TERTIARY(subsys->ver))
			pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
			       NVME_MAJOR(subsys->ver),
			       NVME_MINOR(subsys->ver),
			       NVME_TERTIARY(subsys->ver));
		else
			pr_err("Can't set version number. %llu.%llu is already assigned\n",
			       NVME_MAJOR(subsys->ver),
			       NVME_MINOR(subsys->ver));
		return -EINVAL;
	}

	/* passthru subsystems use the underlying controller's version */
	if (nvmet_is_passthru_subsys(subsys))
		return -EINVAL;

	/* accept either two or three dot-separated components */
	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
	if (ret != 2 && ret != 3)
		return -EINVAL;

	subsys->ver = NVME_VS(major, minor, tertiary);

	return count;
}
1071
nvmet_subsys_attr_version_store(struct config_item * item,const char * page,size_t count)1072 static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
1073 const char *page, size_t count)
1074 {
1075 struct nvmet_subsys *subsys = to_subsys(item);
1076 ssize_t ret;
1077
1078 down_write(&nvmet_config_sem);
1079 mutex_lock(&subsys->lock);
1080 ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
1081 mutex_unlock(&subsys->lock);
1082 up_write(&nvmet_config_sem);
1083
1084 return ret;
1085 }
1086 CONFIGFS_ATTR(nvmet_subsys_, attr_version);
1087
/* See Section 1.5 of NVMe 1.4 */
static bool nvmet_is_ascii(const char c)
{
	/* printable ASCII only: 0x20 (space) through 0x7e ('~') */
	return c >= ' ' && c <= '~';
}
1093
nvmet_subsys_attr_serial_show(struct config_item * item,char * page)1094 static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
1095 char *page)
1096 {
1097 struct nvmet_subsys *subsys = to_subsys(item);
1098
1099 return snprintf(page, PAGE_SIZE, "%.*s\n",
1100 NVMET_SN_MAX_SIZE, subsys->serial);
1101 }
1102
1103 static ssize_t
nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys * subsys,const char * page,size_t count)1104 nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
1105 const char *page, size_t count)
1106 {
1107 int pos, len = strcspn(page, "\n");
1108
1109 if (subsys->subsys_discovered) {
1110 pr_err("Can't set serial number. %s is already assigned\n",
1111 subsys->serial);
1112 return -EINVAL;
1113 }
1114
1115 if (!len || len > NVMET_SN_MAX_SIZE) {
1116 pr_err("Serial Number can not be empty or exceed %d Bytes\n",
1117 NVMET_SN_MAX_SIZE);
1118 return -EINVAL;
1119 }
1120
1121 for (pos = 0; pos < len; pos++) {
1122 if (!nvmet_is_ascii(page[pos])) {
1123 pr_err("Serial Number must contain only ASCII strings\n");
1124 return -EINVAL;
1125 }
1126 }
1127
1128 memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');
1129
1130 return count;
1131 }
1132
nvmet_subsys_attr_serial_store(struct config_item * item,const char * page,size_t count)1133 static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
1134 const char *page, size_t count)
1135 {
1136 struct nvmet_subsys *subsys = to_subsys(item);
1137 ssize_t ret;
1138
1139 down_write(&nvmet_config_sem);
1140 mutex_lock(&subsys->lock);
1141 ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
1142 mutex_unlock(&subsys->lock);
1143 up_write(&nvmet_config_sem);
1144
1145 return ret;
1146 }
1147 CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
1148
nvmet_subsys_attr_cntlid_min_show(struct config_item * item,char * page)1149 static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
1150 char *page)
1151 {
1152 return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
1153 }
1154
nvmet_subsys_attr_cntlid_min_store(struct config_item * item,const char * page,size_t cnt)1155 static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
1156 const char *page, size_t cnt)
1157 {
1158 u16 cntlid_min;
1159
1160 if (sscanf(page, "%hu\n", &cntlid_min) != 1)
1161 return -EINVAL;
1162
1163 if (cntlid_min == 0)
1164 return -EINVAL;
1165
1166 down_write(&nvmet_config_sem);
1167 if (cntlid_min >= to_subsys(item)->cntlid_max)
1168 goto out_unlock;
1169 to_subsys(item)->cntlid_min = cntlid_min;
1170 up_write(&nvmet_config_sem);
1171 return cnt;
1172
1173 out_unlock:
1174 up_write(&nvmet_config_sem);
1175 return -EINVAL;
1176 }
1177 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
1178
nvmet_subsys_attr_cntlid_max_show(struct config_item * item,char * page)1179 static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
1180 char *page)
1181 {
1182 return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
1183 }
1184
nvmet_subsys_attr_cntlid_max_store(struct config_item * item,const char * page,size_t cnt)1185 static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
1186 const char *page, size_t cnt)
1187 {
1188 u16 cntlid_max;
1189
1190 if (sscanf(page, "%hu\n", &cntlid_max) != 1)
1191 return -EINVAL;
1192
1193 if (cntlid_max == 0)
1194 return -EINVAL;
1195
1196 down_write(&nvmet_config_sem);
1197 if (cntlid_max <= to_subsys(item)->cntlid_min)
1198 goto out_unlock;
1199 to_subsys(item)->cntlid_max = cntlid_max;
1200 up_write(&nvmet_config_sem);
1201 return cnt;
1202
1203 out_unlock:
1204 up_write(&nvmet_config_sem);
1205 return -EINVAL;
1206 }
1207 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
1208
nvmet_subsys_attr_model_show(struct config_item * item,char * page)1209 static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
1210 char *page)
1211 {
1212 struct nvmet_subsys *subsys = to_subsys(item);
1213
1214 return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
1215 }
1216
nvmet_subsys_attr_model_store_locked(struct nvmet_subsys * subsys,const char * page,size_t count)1217 static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
1218 const char *page, size_t count)
1219 {
1220 int pos = 0, len;
1221 char *val;
1222
1223 if (subsys->subsys_discovered) {
1224 pr_err("Can't set model number. %s is already assigned\n",
1225 subsys->model_number);
1226 return -EINVAL;
1227 }
1228
1229 len = strcspn(page, "\n");
1230 if (!len)
1231 return -EINVAL;
1232
1233 if (len > NVMET_MN_MAX_SIZE) {
1234 pr_err("Model number size can not exceed %d Bytes\n",
1235 NVMET_MN_MAX_SIZE);
1236 return -EINVAL;
1237 }
1238
1239 for (pos = 0; pos < len; pos++) {
1240 if (!nvmet_is_ascii(page[pos]))
1241 return -EINVAL;
1242 }
1243
1244 val = kmemdup_nul(page, len, GFP_KERNEL);
1245 if (!val)
1246 return -ENOMEM;
1247 kfree(subsys->model_number);
1248 subsys->model_number = val;
1249 return count;
1250 }
1251
nvmet_subsys_attr_model_store(struct config_item * item,const char * page,size_t count)1252 static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
1253 const char *page, size_t count)
1254 {
1255 struct nvmet_subsys *subsys = to_subsys(item);
1256 ssize_t ret;
1257
1258 down_write(&nvmet_config_sem);
1259 mutex_lock(&subsys->lock);
1260 ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
1261 mutex_unlock(&subsys->lock);
1262 up_write(&nvmet_config_sem);
1263
1264 return ret;
1265 }
1266 CONFIGFS_ATTR(nvmet_subsys_, attr_model);
1267
nvmet_subsys_attr_ieee_oui_show(struct config_item * item,char * page)1268 static ssize_t nvmet_subsys_attr_ieee_oui_show(struct config_item *item,
1269 char *page)
1270 {
1271 struct nvmet_subsys *subsys = to_subsys(item);
1272
1273 return sysfs_emit(page, "0x%06x\n", subsys->ieee_oui);
1274 }
1275
nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys * subsys,const char * page,size_t count)1276 static ssize_t nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys *subsys,
1277 const char *page, size_t count)
1278 {
1279 uint32_t val = 0;
1280 int ret;
1281
1282 if (subsys->subsys_discovered) {
1283 pr_err("Can't set IEEE OUI. 0x%06x is already assigned\n",
1284 subsys->ieee_oui);
1285 return -EINVAL;
1286 }
1287
1288 ret = kstrtou32(page, 0, &val);
1289 if (ret < 0)
1290 return ret;
1291
1292 if (val >= 0x1000000)
1293 return -EINVAL;
1294
1295 subsys->ieee_oui = val;
1296
1297 return count;
1298 }
1299
nvmet_subsys_attr_ieee_oui_store(struct config_item * item,const char * page,size_t count)1300 static ssize_t nvmet_subsys_attr_ieee_oui_store(struct config_item *item,
1301 const char *page, size_t count)
1302 {
1303 struct nvmet_subsys *subsys = to_subsys(item);
1304 ssize_t ret;
1305
1306 down_write(&nvmet_config_sem);
1307 mutex_lock(&subsys->lock);
1308 ret = nvmet_subsys_attr_ieee_oui_store_locked(subsys, page, count);
1309 mutex_unlock(&subsys->lock);
1310 up_write(&nvmet_config_sem);
1311
1312 return ret;
1313 }
1314 CONFIGFS_ATTR(nvmet_subsys_, attr_ieee_oui);
1315
nvmet_subsys_attr_firmware_show(struct config_item * item,char * page)1316 static ssize_t nvmet_subsys_attr_firmware_show(struct config_item *item,
1317 char *page)
1318 {
1319 struct nvmet_subsys *subsys = to_subsys(item);
1320
1321 return sysfs_emit(page, "%s\n", subsys->firmware_rev);
1322 }
1323
nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys * subsys,const char * page,size_t count)1324 static ssize_t nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys *subsys,
1325 const char *page, size_t count)
1326 {
1327 int pos = 0, len;
1328 char *val;
1329
1330 if (subsys->subsys_discovered) {
1331 pr_err("Can't set firmware revision. %s is already assigned\n",
1332 subsys->firmware_rev);
1333 return -EINVAL;
1334 }
1335
1336 len = strcspn(page, "\n");
1337 if (!len)
1338 return -EINVAL;
1339
1340 if (len > NVMET_FR_MAX_SIZE) {
1341 pr_err("Firmware revision size can not exceed %d Bytes\n",
1342 NVMET_FR_MAX_SIZE);
1343 return -EINVAL;
1344 }
1345
1346 for (pos = 0; pos < len; pos++) {
1347 if (!nvmet_is_ascii(page[pos]))
1348 return -EINVAL;
1349 }
1350
1351 val = kmemdup_nul(page, len, GFP_KERNEL);
1352 if (!val)
1353 return -ENOMEM;
1354
1355 kfree(subsys->firmware_rev);
1356
1357 subsys->firmware_rev = val;
1358
1359 return count;
1360 }
1361
nvmet_subsys_attr_firmware_store(struct config_item * item,const char * page,size_t count)1362 static ssize_t nvmet_subsys_attr_firmware_store(struct config_item *item,
1363 const char *page, size_t count)
1364 {
1365 struct nvmet_subsys *subsys = to_subsys(item);
1366 ssize_t ret;
1367
1368 down_write(&nvmet_config_sem);
1369 mutex_lock(&subsys->lock);
1370 ret = nvmet_subsys_attr_firmware_store_locked(subsys, page, count);
1371 mutex_unlock(&subsys->lock);
1372 up_write(&nvmet_config_sem);
1373
1374 return ret;
1375 }
1376 CONFIGFS_ATTR(nvmet_subsys_, attr_firmware);
1377
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
}

/* Toggle end-to-end protection information support for the subsystem. */
static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	bool val;

	if (kstrtobool(page, &val))
		return -EINVAL;

	to_subsys(item)->pi_support = val;
	return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
#endif
1399
static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid);
}

/*
 * Set the maximum number of I/O queues per controller.  All live
 * controllers on the subsystem are deleted so that hosts reconnect and
 * renegotiate with the new limit.
 */
static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
		const char *page, size_t cnt)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	struct nvmet_ctrl *ctrl;
	u16 qid_max;

	if (sscanf(page, "%hu\n", &qid_max) != 1)
		return -EINVAL;

	/* at least one I/O queue, at most the transport-wide maximum */
	if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	subsys->max_qid = qid_max;

	/* Force reconnect */
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	up_write(&nvmet_config_sem);

	return cnt;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);
1430
/* Attributes exposed in each subsystems/<nqn>/ configfs directory. */
static struct configfs_attribute *nvmet_subsys_attrs[] = {
	&nvmet_subsys_attr_attr_allow_any_host,
	&nvmet_subsys_attr_attr_version,
	&nvmet_subsys_attr_attr_serial,
	&nvmet_subsys_attr_attr_cntlid_min,
	&nvmet_subsys_attr_attr_cntlid_max,
	&nvmet_subsys_attr_attr_model,
	&nvmet_subsys_attr_attr_qid_max,
	&nvmet_subsys_attr_attr_ieee_oui,
	&nvmet_subsys_attr_attr_firmware,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_subsys_attr_attr_pi_enable,
#endif
	NULL,
};
1446
/*
 * Subsystem structures & folder operation functions below
 */
/* Called when the subsystem's configfs directory is removed. */
static void nvmet_subsys_release(struct config_item *item)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	/* tear down remaining controllers, then drop the configfs ref */
	nvmet_subsys_del_ctrls(subsys);
	nvmet_subsys_put(subsys);
}

static struct configfs_item_operations nvmet_subsys_item_ops = {
	.release = nvmet_subsys_release,
};

static const struct config_item_type nvmet_subsys_type = {
	.ct_item_ops = &nvmet_subsys_item_ops,
	.ct_attrs = nvmet_subsys_attrs,
	.ct_owner = THIS_MODULE,
};
1467
/*
 * mkdir under subsystems/: allocate a subsystem named after the
 * directory (the NQN) and populate its default child groups.
 */
static struct config_group *nvmet_subsys_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys;

	/* the discovery subsystem is created internally, never via mkdir */
	if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
		pr_err("can't create discovery subsystem through configfs\n");
		return ERR_PTR(-EINVAL);
	}

	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
	if (IS_ERR(subsys))
		return ERR_CAST(subsys);

	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);

	/* default sub-directories: namespaces/ and allowed_hosts/ */
	config_group_init_type_name(&subsys->namespaces_group,
			"namespaces", &nvmet_namespaces_type);
	configfs_add_default_group(&subsys->namespaces_group, &subsys->group);

	config_group_init_type_name(&subsys->allowed_hosts_group,
			"allowed_hosts", &nvmet_allowed_hosts_type);
	configfs_add_default_group(&subsys->allowed_hosts_group,
			&subsys->group);

	nvmet_add_passthru_group(subsys);

	return &subsys->group;
}

static struct configfs_group_operations nvmet_subsystems_group_ops = {
	.make_group		= nvmet_subsys_make,
};

static const struct config_item_type nvmet_subsystems_type = {
	.ct_group_ops		= &nvmet_subsystems_group_ops,
	.ct_owner		= THIS_MODULE,
};
1506
nvmet_referral_enable_show(struct config_item * item,char * page)1507 static ssize_t nvmet_referral_enable_show(struct config_item *item,
1508 char *page)
1509 {
1510 return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
1511 }
1512
nvmet_referral_enable_store(struct config_item * item,const char * page,size_t count)1513 static ssize_t nvmet_referral_enable_store(struct config_item *item,
1514 const char *page, size_t count)
1515 {
1516 struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
1517 struct nvmet_port *port = to_nvmet_port(item);
1518 bool enable;
1519
1520 if (kstrtobool(page, &enable))
1521 goto inval;
1522
1523 if (enable)
1524 nvmet_referral_enable(parent, port);
1525 else
1526 nvmet_referral_disable(parent, port);
1527
1528 return count;
1529 inval:
1530 pr_err("Invalid value '%s' for enable\n", page);
1531 return -EINVAL;
1532 }
1533
1534 CONFIGFS_ATTR(nvmet_referral_, enable);
1535
/*
 * Discovery Service subsystem definitions
 */
/* A referral reuses the generic address attributes plus an enable knob. */
static struct configfs_attribute *nvmet_referral_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_portid,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_referral_attr_enable,
	NULL,
};
1549
/*
 * rmdir notification: disable the referral before the item is torn
 * down so it stops being reported to hosts.
 */
static void nvmet_referral_notify(struct config_group *group,
		struct config_item *item)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);

	nvmet_referral_disable(parent, port);
}

/* Final put of the referral item: free its port structure. */
static void nvmet_referral_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	kfree(port);
}

static struct configfs_item_operations nvmet_referral_item_ops = {
	.release	= nvmet_referral_release,
};

static const struct config_item_type nvmet_referral_type = {
	.ct_owner	= THIS_MODULE,
	.ct_attrs	= nvmet_referral_attrs,
	.ct_item_ops	= &nvmet_referral_item_ops,
};
1575
/*
 * mkdir under referrals/: a referral is a bare nvmet_port used only to
 * carry address information for discovery log pages.
 */
static struct config_group *nvmet_referral_make(
		struct config_group *group, const char *name)
{
	struct nvmet_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&port->entry);
	config_group_init_type_name(&port->group, name, &nvmet_referral_type);

	return &port->group;
}

static struct configfs_group_operations nvmet_referral_group_ops = {
	.make_group		= nvmet_referral_make,
	.disconnect_notify	= nvmet_referral_notify,
};

static const struct config_item_type nvmet_referrals_type = {
	.ct_owner	= THIS_MODULE,
	.ct_group_ops	= &nvmet_referral_group_ops,
};
1600
/* Mapping between ANA state values and their configfs string names. */
static struct nvmet_type_name_map nvmet_ana_state[] = {
	{ NVME_ANA_OPTIMIZED, "optimized" },
	{ NVME_ANA_NONOPTIMIZED, "non-optimized" },
	{ NVME_ANA_INACCESSIBLE, "inaccessible" },
	{ NVME_ANA_PERSISTENT_LOSS, "persistent-loss" },
	{ NVME_ANA_CHANGE, "change" },
};
1608
nvmet_ana_group_ana_state_show(struct config_item * item,char * page)1609 static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
1610 char *page)
1611 {
1612 struct nvmet_ana_group *grp = to_ana_group(item);
1613 enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
1614 int i;
1615
1616 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1617 if (state == nvmet_ana_state[i].type)
1618 return sprintf(page, "%s\n", nvmet_ana_state[i].name);
1619 }
1620
1621 return sprintf(page, "\n");
1622 }
1623
nvmet_ana_group_ana_state_store(struct config_item * item,const char * page,size_t count)1624 static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
1625 const char *page, size_t count)
1626 {
1627 struct nvmet_ana_group *grp = to_ana_group(item);
1628 enum nvme_ana_state *ana_state = grp->port->ana_state;
1629 int i;
1630
1631 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1632 if (sysfs_streq(page, nvmet_ana_state[i].name))
1633 goto found;
1634 }
1635
1636 pr_err("Invalid value '%s' for ana_state\n", page);
1637 return -EINVAL;
1638
1639 found:
1640 down_write(&nvmet_ana_sem);
1641 ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
1642 nvmet_ana_chgcnt++;
1643 up_write(&nvmet_ana_sem);
1644 nvmet_port_send_ana_event(grp->port);
1645 return count;
1646 }
1647
1648 CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
1649
static struct configfs_attribute *nvmet_ana_group_attrs[] = {
	&nvmet_ana_group_attr_ana_state,
	NULL,
};

/*
 * Release an ANA group item.  The default group is embedded in the
 * port and is freed with it, so only dynamically-created groups are
 * marked inaccessible, de-accounted, and freed here.
 */
static void nvmet_ana_group_release(struct config_item *item)
{
	struct nvmet_ana_group *grp = to_ana_group(item);

	if (grp == &grp->port->ana_default_group)
		return;

	down_write(&nvmet_ana_sem);
	grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
	nvmet_ana_group_enabled[grp->grpid]--;
	up_write(&nvmet_ana_sem);

	/* tell hosts the group's state changed before it disappears */
	nvmet_port_send_ana_event(grp->port);
	kfree(grp);
}

static struct configfs_item_operations nvmet_ana_group_item_ops = {
	.release		= nvmet_ana_group_item_ops.release ? NULL : nvmet_ana_group_release,
};

static const struct config_item_type nvmet_ana_group_type = {
	.ct_item_ops		= &nvmet_ana_group_item_ops,
	.ct_attrs		= nvmet_ana_group_attrs,
	.ct_owner		= THIS_MODULE,
};
1680
/*
 * mkdir under ana_groups/: the directory name is the numeric group ID.
 * ID 0 is invalid and ID 1 is reserved for the port's built-in default
 * group (see nvmet_ports_make).
 */
static struct config_group *nvmet_ana_groups_make_group(
		struct config_group *group, const char *name)
{
	struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
	struct nvmet_ana_group *grp;
	u32 grpid;
	int ret;

	ret = kstrtou32(name, 0, &grpid);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
		goto out;

	ret = -ENOMEM;
	grp = kzalloc(sizeof(*grp), GFP_KERNEL);
	if (!grp)
		goto out;
	grp->port = port;
	grp->grpid = grpid;

	down_write(&nvmet_ana_sem);
	/* clamp the (already range-checked) index against Spectre-v1 */
	grpid = array_index_nospec(grpid, NVMET_MAX_ANAGRPS);
	nvmet_ana_group_enabled[grpid]++;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);

	config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
	return &grp->group;
out:
	return ERR_PTR(ret);
}

static struct configfs_group_operations nvmet_ana_groups_group_ops = {
	.make_group		= nvmet_ana_groups_make_group,
};

static const struct config_item_type nvmet_ana_groups_type = {
	.ct_group_ops		= &nvmet_ana_groups_group_ops,
	.ct_owner		= THIS_MODULE,
};
1725
/*
 * Ports definitions.
 */
/* Called when a port directory is removed: free the port and its state. */
static void nvmet_port_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	/* Let inflight controllers teardown complete */
	flush_workqueue(nvmet_wq);
	list_del(&port->global_entry);

	kfree(port->ana_state);
	kfree(port);
}

/* Attributes exposed in each ports/<id>/ configfs directory. */
static struct configfs_attribute *nvmet_port_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_attr_param_inline_data_size,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_attr_param_pi_enable,
#endif
	NULL,
};

static struct configfs_item_operations nvmet_port_item_ops = {
	.release		= nvmet_port_release,
};

static const struct config_item_type nvmet_port_type = {
	.ct_attrs		= nvmet_port_attrs,
	.ct_item_ops		= &nvmet_port_item_ops,
	.ct_owner		= THIS_MODULE,
};
1763
nvmet_ports_make(struct config_group * group,const char * name)1764 static struct config_group *nvmet_ports_make(struct config_group *group,
1765 const char *name)
1766 {
1767 struct nvmet_port *port;
1768 u16 portid;
1769 u32 i;
1770
1771 if (kstrtou16(name, 0, &portid))
1772 return ERR_PTR(-EINVAL);
1773
1774 port = kzalloc(sizeof(*port), GFP_KERNEL);
1775 if (!port)
1776 return ERR_PTR(-ENOMEM);
1777
1778 port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
1779 sizeof(*port->ana_state), GFP_KERNEL);
1780 if (!port->ana_state) {
1781 kfree(port);
1782 return ERR_PTR(-ENOMEM);
1783 }
1784
1785 for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
1786 if (i == NVMET_DEFAULT_ANA_GRPID)
1787 port->ana_state[1] = NVME_ANA_OPTIMIZED;
1788 else
1789 port->ana_state[i] = NVME_ANA_INACCESSIBLE;
1790 }
1791
1792 list_add(&port->global_entry, &nvmet_ports_list);
1793
1794 INIT_LIST_HEAD(&port->entry);
1795 INIT_LIST_HEAD(&port->subsystems);
1796 INIT_LIST_HEAD(&port->referrals);
1797 port->inline_data_size = -1; /* < 0 == let the transport choose */
1798
1799 port->disc_addr.portid = cpu_to_le16(portid);
1800 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
1801 port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
1802 config_group_init_type_name(&port->group, name, &nvmet_port_type);
1803
1804 config_group_init_type_name(&port->subsys_group,
1805 "subsystems", &nvmet_port_subsys_type);
1806 configfs_add_default_group(&port->subsys_group, &port->group);
1807
1808 config_group_init_type_name(&port->referrals_group,
1809 "referrals", &nvmet_referrals_type);
1810 configfs_add_default_group(&port->referrals_group, &port->group);
1811
1812 config_group_init_type_name(&port->ana_groups_group,
1813 "ana_groups", &nvmet_ana_groups_type);
1814 configfs_add_default_group(&port->ana_groups_group, &port->group);
1815
1816 port->ana_default_group.port = port;
1817 port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
1818 config_group_init_type_name(&port->ana_default_group.group,
1819 __stringify(NVMET_DEFAULT_ANA_GRPID),
1820 &nvmet_ana_group_type);
1821 configfs_add_default_group(&port->ana_default_group.group,
1822 &port->ana_groups_group);
1823
1824 return &port->group;
1825 }
1826
static struct configfs_group_operations nvmet_ports_group_ops = {
	.make_group		= nvmet_ports_make,
};

static const struct config_item_type nvmet_ports_type = {
	.ct_group_ops		= &nvmet_ports_group_ops,
	.ct_owner		= THIS_MODULE,
};

/* Top-level subsystems/ and ports/ groups, registered at init time. */
static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;
1838
1839 #ifdef CONFIG_NVME_TARGET_AUTH
nvmet_host_dhchap_key_show(struct config_item * item,char * page)1840 static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
1841 char *page)
1842 {
1843 u8 *dhchap_secret = to_host(item)->dhchap_secret;
1844
1845 if (!dhchap_secret)
1846 return sprintf(page, "\n");
1847 return sprintf(page, "%s\n", dhchap_secret);
1848 }
1849
nvmet_host_dhchap_key_store(struct config_item * item,const char * page,size_t count)1850 static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
1851 const char *page, size_t count)
1852 {
1853 struct nvmet_host *host = to_host(item);
1854 int ret;
1855
1856 ret = nvmet_auth_set_key(host, page, false);
1857 /*
1858 * Re-authentication is a soft state, so keep the
1859 * current authentication valid until the host
1860 * requests re-authentication.
1861 */
1862 return ret < 0 ? ret : count;
1863 }
1864
1865 CONFIGFS_ATTR(nvmet_host_, dhchap_key);
1866
nvmet_host_dhchap_ctrl_key_show(struct config_item * item,char * page)1867 static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
1868 char *page)
1869 {
1870 u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
1871
1872 if (!dhchap_secret)
1873 return sprintf(page, "\n");
1874 return sprintf(page, "%s\n", dhchap_secret);
1875 }
1876
nvmet_host_dhchap_ctrl_key_store(struct config_item * item,const char * page,size_t count)1877 static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
1878 const char *page, size_t count)
1879 {
1880 struct nvmet_host *host = to_host(item);
1881 int ret;
1882
1883 ret = nvmet_auth_set_key(host, page, true);
1884 /*
1885 * Re-authentication is a soft state, so keep the
1886 * current authentication valid until the host
1887 * requests re-authentication.
1888 */
1889 return ret < 0 ? ret : count;
1890 }
1891
1892 CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);
1893
nvmet_host_dhchap_hash_show(struct config_item * item,char * page)1894 static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
1895 char *page)
1896 {
1897 struct nvmet_host *host = to_host(item);
1898 const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
1899
1900 return sprintf(page, "%s\n", hash_name ? hash_name : "none");
1901 }
1902
nvmet_host_dhchap_hash_store(struct config_item * item,const char * page,size_t count)1903 static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
1904 const char *page, size_t count)
1905 {
1906 struct nvmet_host *host = to_host(item);
1907 u8 hmac_id;
1908
1909 hmac_id = nvme_auth_hmac_id(page);
1910 if (hmac_id == NVME_AUTH_HASH_INVALID)
1911 return -EINVAL;
1912 if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
1913 return -ENOTSUPP;
1914 host->dhchap_hash_id = hmac_id;
1915 return count;
1916 }
1917
1918 CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
1919
nvmet_host_dhchap_dhgroup_show(struct config_item * item,char * page)1920 static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
1921 char *page)
1922 {
1923 struct nvmet_host *host = to_host(item);
1924 const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);
1925
1926 return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
1927 }
1928
nvmet_host_dhchap_dhgroup_store(struct config_item * item,const char * page,size_t count)1929 static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
1930 const char *page, size_t count)
1931 {
1932 struct nvmet_host *host = to_host(item);
1933 int dhgroup_id;
1934
1935 dhgroup_id = nvme_auth_dhgroup_id(page);
1936 if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
1937 return -EINVAL;
1938 if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
1939 const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
1940
1941 if (!crypto_has_kpp(kpp, 0, 0))
1942 return -EINVAL;
1943 }
1944 host->dhchap_dhgroup_id = dhgroup_id;
1945 return count;
1946 }
1947
1948 CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);
1949
/* Host attributes exist only when target authentication is built in. */
static struct configfs_attribute *nvmet_host_attrs[] = {
	&nvmet_host_attr_dhchap_key,
	&nvmet_host_attr_dhchap_ctrl_key,
	&nvmet_host_attr_dhchap_hash,
	&nvmet_host_attr_dhchap_dhgroup,
	NULL,
};
#endif /* CONFIG_NVME_TARGET_AUTH */
1958
/* Called when a hosts/<nqn> directory is removed: free host state. */
static void nvmet_host_release(struct config_item *item)
{
	struct nvmet_host *host = to_host(item);

#ifdef CONFIG_NVME_TARGET_AUTH
	/* kfree(NULL) is a no-op, so unset secrets are fine */
	kfree(host->dhchap_secret);
	kfree(host->dhchap_ctrl_secret);
#endif
	kfree(host);
}

static struct configfs_item_operations nvmet_host_item_ops = {
	.release		= nvmet_host_release,
};

static const struct config_item_type nvmet_host_type = {
	.ct_item_ops		= &nvmet_host_item_ops,
#ifdef CONFIG_NVME_TARGET_AUTH
	.ct_attrs		= nvmet_host_attrs,
#endif
	.ct_owner		= THIS_MODULE,
};
1981
/* mkdir under hosts/: allocate a host entry named after the directory. */
static struct config_group *nvmet_hosts_make_group(struct config_group *group,
		const char *name)
{
	struct nvmet_host *host;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return ERR_PTR(-ENOMEM);

#ifdef CONFIG_NVME_TARGET_AUTH
	/* Default to SHA256 */
	host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
#endif

	config_group_init_type_name(&host->group, name, &nvmet_host_type);

	return &host->group;
}

static struct configfs_group_operations nvmet_hosts_group_ops = {
	.make_group		= nvmet_hosts_make_group,
};

static const struct config_item_type nvmet_hosts_type = {
	.ct_group_ops		= &nvmet_hosts_group_ops,
	.ct_owner		= THIS_MODULE,
};
2009
static struct config_group nvmet_hosts_group;

/* Root of the tree; holds no attributes of its own. */
static const struct config_item_type nvmet_root_type = {
	.ct_owner		= THIS_MODULE,
};

/* The /sys/kernel/config/nvmet subsystem itself. */
static struct configfs_subsystem nvmet_configfs_subsystem = {
	.su_group = {
		.cg_item = {
			.ci_namebuf	= "nvmet",
			.ci_type	= &nvmet_root_type,
		},
	},
};
2024
/*
 * Register the nvmet configfs tree:
 * /sys/kernel/config/nvmet/{subsystems,ports,hosts}.
 * Returns 0 on success or the configfs registration error.
 */
int __init nvmet_init_configfs(void)
{
	int ret;

	config_group_init(&nvmet_configfs_subsystem.su_group);
	mutex_init(&nvmet_configfs_subsystem.su_mutex);

	/* default child groups must be wired up before registration */
	config_group_init_type_name(&nvmet_subsystems_group,
			"subsystems", &nvmet_subsystems_type);
	configfs_add_default_group(&nvmet_subsystems_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_ports_group,
			"ports", &nvmet_ports_type);
	configfs_add_default_group(&nvmet_ports_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_hosts_group,
			"hosts", &nvmet_hosts_type);
	configfs_add_default_group(&nvmet_hosts_group,
			&nvmet_configfs_subsystem.su_group);

	ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
	if (ret) {
		pr_err("configfs_register_subsystem: %d\n", ret);
		return ret;
	}

	return 0;
}
2055
/* Unregister the nvmet configfs tree on module exit. */
void __exit nvmet_exit_configfs(void)
{
	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
}
2060