// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

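/*
 * The Get Log Page NUMD field is split across two command dwords: NUMDU
 * holds the upper 16 bits and NUMDL the lower 16 bits.  The combined value
 * counts dwords and is 0's based, hence the +1 and *4 below.
 */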
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

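	/*
	 * err_counter points at the slot of the most recently recorded
	 * error, so walk the ring buffer backwards from there to return
	 * entries newest first.
	 */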
	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	/* we don't have the right data for file backed ns */
	if (!req->ns->bdev)
		return NVME_SC_SUCCESS;

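	/*
	 * SMART data units are thousands of 512-byte units, rounded up;
	 * part_stat sector counts are already in 512-byte units.
	 */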
	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
	log->acs[nvme_admin_get_log_page] =
	log->acs[nvme_admin_identify] =
	log->acs[nvme_admin_abort_cmd] =
	log->acs[nvme_admin_set_features] =
	log->acs[nvme_admin_get_features] =
	log->acs[nvme_admin_async_event] =
	log->acs[nvme_admin_keep_alive] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);

	log->iocs[nvme_cmd_read] =
	log->iocs[nvme_cmd_flush] =
	log->iocs[nvme_cmd_dsm] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
	log->iocs[nvme_cmd_write] =
	log->iocs[nvme_cmd_write_zeroes] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
}

static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
{
	log->iocs[nvme_cmd_zone_append] =
	log->iocs[nvme_cmd_zone_mgmt_send] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
	log->iocs[nvme_cmd_zone_mgmt_recv] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	struct nvme_effects_log *log;
	u16 status = NVME_SC_SUCCESS;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	switch (req->cmd->get_log_page.csi) {
	case NVME_CSI_NVM:
		nvmet_get_cmd_effects_nvm(log);
		break;
	case NVME_CSI_ZNS:
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			status = NVME_SC_INVALID_IO_CMD_SET;
			goto free;
		}
		nvmet_get_cmd_effects_nvm(log);
		nvmet_get_cmd_effects_zns(log);
		break;
	default:
		status = NVME_SC_INVALID_LOG_PAGE;
		goto free;
	}

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
free:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
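	/*
	 * nr_changed_ns is set to U32_MAX once the list overflows, in which
	 * case the list is expected to hold the single 0xffffffff "too many
	 * changes" marker and only that one entry is returned.
	 */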
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

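	/*
	 * If the host set RGO (Return Groups Only) it only wants the group
	 * descriptors, so skip building the NSID list.
	 */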
	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return struct_size(desc, nsids, count);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
		       GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
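	/*
	 * The header must account for every enabled group, including those
	 * whose descriptors were not copied above.
	 */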
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_debug("unhandled lid %d on qid %d\n",
	       req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys *subsys = ctrl->subsys;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	if (!subsys->subsys_discovered) {
		mutex_lock(&subsys->lock);
		subsys->subsys_discovered = true;
		mutex_unlock(&subsys->lock);
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
		       strlen(subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       subsys->firmware_rev, strlen(subsys->firmware_rev), ' ');

	put_unaligned_le24(subsys->ieee_oui, id->ieee);

	id->rab = 6;

	if (nvmet_is_disc_subsys(ctrl->subsys))
		id->cntrltype = NVME_CTRL_DISC;
	else
		id->cntrltype = NVME_CTRL_IO;

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
		NVME_CTRL_CMIC_ANA;

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
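	/* SMART/Health log per ns, commands effects log, extended data (LPO) */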
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

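	/* 64 byte (2^6) SQEs and 16 byte (2^4) CQEs, both minimum and maximum */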
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

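	/*
	 * We may report every ANA state: optimized, non-optimized,
	 * inaccessible, persistent loss and change state.
	 */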
	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvme_id_ns *id;
	u16 status;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	status = nvmet_req_find_ns(req);
	if (status) {
		status = 0;
		goto done;
	}

	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = NVME_NS_NMIC_SHARED;
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= NVME_NS_ATTR_RO;
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

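	/*
	 * Only NSIDs greater than the one in the command are returned; the
	 * xarray is indexed by NSID, so iteration is already in ascending
	 * order.
	 */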
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	off_t off = 0;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &req->ns->uuid, &off);
		if (status)
			goto out;
	}
	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &req->ns->nguid, &off);
		if (status)
			goto out;
	}

	status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
					  NVME_NIDT_CSI_LEN,
					  &req->ns->csi, &off);
	if (status)
		goto out;

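	/* zero the rest of the buffer; an all-zeroes descriptor ends the list */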
	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;

out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
{
	/* Not supported: return zeroes */
	nvmet_req_complete(req,
		   nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		nvmet_execute_identify_ns(req);
		return;
	case NVME_ID_CNS_CTRL:
		nvmet_execute_identify_ctrl(req);
		return;
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		nvmet_execute_identify_nslist(req);
		return;
	case NVME_ID_CNS_NS_DESC_LIST:
		nvmet_execute_identify_desclist(req);
		return;
	case NVME_ID_CNS_CS_NS:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			/* Not supported */
			break;
		case NVME_CSI_ZNS:
			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
				nvmet_execute_identify_ns_zns(req);
				return;
			}
			break;
		}
		break;
	case NVME_ID_CNS_CS_CTRL:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			nvmet_execute_identify_ctrl_nvm(req);
			return;
		case NVME_CSI_ZNS:
			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
				nvmet_execute_identify_ctrl_zns(req);
				return;
			}
			break;
		}
		break;
	}

	pr_debug("unhandled identify cns %d on qid %d\n",
	       req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed; return immediately, indicating that the command to abort
 * wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
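	/* dword 0 bit 0 set indicates the command to abort was not found */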
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

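	/* KATO is specified in milliseconds, but we keep the timer in seconds */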
	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_data_len_lte(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
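		/* NSQA in bits 15:0 and NCQA in bits 31:16, both 0's based */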
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
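	/*
	 * The host ID of a fabrics controller is established by the
	 * Connect command and cannot be changed afterwards.
	 */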
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 result;

	result = nvmet_req_find_ns(req);
	if (result)
		return result;

	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

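	/*
	 * Park the command until an event occurs.  We advertise
	 * aerl = NVMET_ASYNC_EVENTS - 1, so anything beyond that limit
	 * is a host error.
	 */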
	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (!ctrl->kato) {
		status = NVME_SC_KA_TIMEOUT_INVALID;
		goto out;
	}

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
out:
	nvmet_req_complete(req, status);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_admin_cmd(req);
	if (unlikely(!nvmet_check_auth_status(req)))
		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}