// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

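/*
 * The Get Log Page length (NUMD) is a zero-based dword count split across
 * the NUMDU/NUMDL fields of the command.  For example, NUMDU = 0 and
 * NUMDL = 0x3ff decode to (0x3ff + 1) * 4 = 4096 bytes.
 */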
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

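/*
 * The error log slots form a ring indexed by err_counter; walk it backwards
 * from the most recent entry so the newest error is returned first, wrapping
 * around at slot 0.
 */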
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

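/*
 * SMART "data units" are reported in thousands of 512-byte units per the
 * NVMe specification; block-layer sector counts are already 512-byte based,
 * so DIV_ROUND_UP(sectors, 1000) rounds any partial unit up rather than
 * reporting zero for small amounts of traffic.
 */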
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	/* we don't have the right data for file backed ns */
	if (!req->ns->bdev)
		return NVME_SC_SUCCESS;

	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

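/*
 * Commands Supported and Effects log: NVME_CMD_EFFECTS_CSUPP only marks a
 * command as supported; no additional side effects (LBCC, NCC, CCC, ...)
 * are advertised for any opcode.
 */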
static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
	log->acs[nvme_admin_get_log_page] =
	log->acs[nvme_admin_identify] =
	log->acs[nvme_admin_abort_cmd] =
	log->acs[nvme_admin_set_features] =
	log->acs[nvme_admin_get_features] =
	log->acs[nvme_admin_async_event] =
	log->acs[nvme_admin_keep_alive] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);

	log->iocs[nvme_cmd_read] =
	log->iocs[nvme_cmd_write] =
	log->iocs[nvme_cmd_flush] =
	log->iocs[nvme_cmd_dsm] =
	log->iocs[nvme_cmd_write_zeroes] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}

static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
{
	log->iocs[nvme_cmd_zone_append] =
	log->iocs[nvme_cmd_zone_mgmt_send] =
	log->iocs[nvme_cmd_zone_mgmt_recv] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	struct nvme_effects_log *log;
	u16 status = NVME_SC_SUCCESS;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	switch (req->cmd->get_log_page.csi) {
	case NVME_CSI_NVM:
		nvmet_get_cmd_effects_nvm(log);
		break;
	case NVME_CSI_ZNS:
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			status = NVME_SC_INVALID_IO_CMD_SET;
			goto free;
		}
		nvmet_get_cmd_effects_nvm(log);
		nvmet_get_cmd_effects_zns(log);
		break;
	default:
		status = NVME_SC_INVALID_LOG_PAGE;
		goto free;
	}

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
free:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

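/*
 * Changed Namespace List log.  nr_changed_ns == U32_MAX is the overflow
 * marker set when more than NVME_MAX_CHANGED_NAMESPACES namespaces changed;
 * in that case the list contains a single entry of NVME_NSID_ALL.  Reading
 * the log clears both the list and the matching AEN so a new notification
 * can be generated.
 */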
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

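/*
 * Build one ANA group descriptor.  If the host set the RGO bit (Return
 * Groups Only) in LSP, the per-group NSID list is omitted and only the
 * header fields are filled in; the returned length is therefore variable,
 * computed via struct_size() from the NSID count.
 */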
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return struct_size(desc, nsids, count);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
		       GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_debug("unhandled lid %d on qid %d\n",
	       req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys *subsys = ctrl->subsys;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	if (!subsys->subsys_discovered) {
		mutex_lock(&subsys->lock);
		subsys->subsys_discovered = true;
		mutex_unlock(&subsys->lock);
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
		       strlen(subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	if (nvmet_is_disc_subsys(ctrl->subsys))
		id->cntrltype = NVME_CTRL_DISC;
	else
		id->cntrltype = NVME_CTRL_IO;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
		NVME_CTRL_CMIC_ANA;

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

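	/*
	 * SQES/CQES pack maximum and minimum entry sizes as log2 values in
	 * the upper and lower nibble: 0x66 is a fixed 64-byte SQE and 0x44
	 * a fixed 16-byte CQE.
	 */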
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforced soft limit for maxcmd - pick an arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
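	/*
	 * IOCCSZ is in units of 16 bytes; e.g. a 64-byte SQE plus 8KB of
	 * inline data (an illustrative transport setting) would yield
	 * (64 + 8192) / 16 = 516.
	 */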
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvme_id_ns *id;
	u16 status;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	status = nvmet_req_find_ns(req);
	if (status) {
		status = 0;
		goto done;
	}

	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = NVME_NS_NMIC_SHARED;
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= (1 << 0);
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

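/*
 * Active Namespace ID list (CNS 02h): return up to 1024 NSIDs (one 4KB
 * page worth of __le32 entries) that are strictly greater than the NSID
 * in the command; xa_for_each() iterates in index order, so the list is
 * naturally ascending as the spec requires.
 */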
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

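/*
 * Namespace Identification Descriptor list (CNS 03h): each descriptor is a
 * 4-byte header (NIDT type, NIDL length) followed by the identifier
 * payload.  Descriptors for all-zero identifiers are skipped, and the
 * remainder of the 4KB buffer is zero-filled to terminate the list.
 */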
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	off_t off = 0;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &req->ns->uuid, &off);
		if (status)
			goto out;
	}
	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &req->ns->nguid, &off);
		if (status)
			goto out;
	}

	status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
					  NVME_NIDT_CSI_LEN,
					  &req->ns->csi, &off);
	if (status)
		goto out;

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;

out:
	nvmet_req_complete(req, status);
}

static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
{
	switch (req->cmd->identify.csi) {
	case NVME_CSI_NVM:
		nvmet_execute_identify_desclist(req);
		return true;
	case NVME_CSI_ZNS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			nvmet_execute_identify_desclist(req);
			return true;
		}
		return false;
	default:
		return false;
	}
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_ns(req);
		default:
			break;
		}
		break;
	case NVME_ID_CNS_CS_NS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				return nvmet_execute_identify_cns_cs_ns(req);
			default:
				break;
			}
		}
		break;
	case NVME_ID_CNS_CTRL:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_ctrl(req);
		}
		break;
	case NVME_ID_CNS_CS_CTRL:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				return nvmet_execute_identify_cns_cs_ctrl(req);
			default:
				break;
			}
		}
		break;
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_nslist(req);
		default:
			break;
		}
		break;
	case NVME_ID_CNS_NS_DESC_LIST:
		if (nvmet_handle_identify_desclist(req))
			return;
		break;
	}

	nvmet_req_cns_error_complete(req);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed; return immediately, reporting that the command to abort wasn't
 * found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
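	/* bit 0 of the result set to 1 reports "command not aborted" */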
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

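/*
 * KATO is carried in CDW11 in milliseconds; we keep it internally in
 * seconds, rounded up so a short non-zero timeout can never round down to
 * zero (e.g. 1500ms becomes 2s).  The timer is stopped before the value is
 * changed and restarted with the new timeout.
 */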
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 result;

	result = nvmet_req_find_ns(req);
	if (result)
		return result;

	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}

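/*
 * Keep Alive: a KATO of zero means keep-alive was not negotiated for this
 * controller, so the command is rejected.  Otherwise the existing delayed
 * work is pushed out by another KATO interval.
 */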
void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (!ctrl->kato) {
		status = NVME_SC_KA_TIMEOUT_INVALID;
		goto out;
	}

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
out:
	nvmet_req_complete(req, status);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_admin_cmd(req);
	if (unlikely(!nvmet_check_auth_status(req)))
		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}