// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

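/*
 * Only the Host Identifier feature transfers a data buffer on Get Features;
 * every other feature we support is returned in the completion dwords.
 */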
static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

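/* Serve a log page we keep no data for by zero-filling the whole transfer. */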
static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

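/*
 * Return the error log, starting at the most recently used slot and walking
 * the circular slot array backwards.
 */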
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

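/*
 * SMART data units are counted in units of 1000 sectors (512 bytes each),
 * hence the DIV_ROUND_UP by 1000 on the block device sector counters below.
 */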
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	/* we don't have the right data for file backed ns */
	if (!req->ns->bdev)
		return NVME_SC_SUCCESS;

	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
	log->acs[nvme_admin_get_log_page]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify]		= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd]		= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive]		= cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm]			= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes]	= cpu_to_le32(1 << 0);
}

static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
{
	log->iocs[nvme_cmd_zone_append]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_zone_mgmt_send]	= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_zone_mgmt_recv]	= cpu_to_le32(1 << 0);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	struct nvme_effects_log *log;
	u16 status = NVME_SC_SUCCESS;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	switch (req->cmd->get_log_page.csi) {
	case NVME_CSI_NVM:
		nvmet_get_cmd_effects_nvm(log);
		break;
	case NVME_CSI_ZNS:
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			status = NVME_SC_INVALID_IO_CMD_SET;
			goto free;
		}
		nvmet_get_cmd_effects_nvm(log);
		nvmet_get_cmd_effects_zns(log);
		break;
	default:
		status = NVME_SC_INVALID_LOG_PAGE;
		goto free;
	}

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
free:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

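/*
 * Return the Changed Namespace List, then clear the pending list and the
 * matching AEN bit.  If the list has overflowed, only a single entry (set
 * elsewhere to the overflow marker) is reported.
 */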
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

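/*
 * Fill in one ANA group descriptor.  If the host set the RGO (Return Groups
 * Only) flag, the per-group namespace ID list is omitted.
 */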
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return struct_size(desc, nsids, count);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
		       GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_debug("unhandled lid %d on qid %d\n",
	       req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys *subsys = ctrl->subsys;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	if (!subsys->subsys_discovered) {
		mutex_lock(&subsys->lock);
		subsys->subsys_discovered = true;
		mutex_unlock(&subsys->lock);
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
		       strlen(subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	if (nvmet_is_disc_subsys(ctrl->subsys))
		id->cntrltype = NVME_CTRL_DISC;
	else
		id->cntrltype = NVME_CTRL_IO;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
		NVME_CTRL_CMIC_ANA;

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than a LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvme_id_ns *id;
	u16 status;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	status = nvmet_req_find_ns(req);
	if (status) {
		status = 0;
		goto done;
	}

	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = NVME_NS_NMIC_SHARED;
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= (1 << 0);
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

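/*
 * Active Namespace ID list: return NSIDs greater than the NSID given in the
 * command, in ascending order (the xarray is iterated in index order).
 */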
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

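/*
 * Emit one Namespace Identification Descriptor (header followed by the
 * identifier payload) and advance the output offset past it.
 */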
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	off_t off = 0;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &req->ns->uuid, &off);
		if (status)
			goto out;
	}
	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &req->ns->nguid, &off);
		if (status)
			goto out;
	}

	status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
					  NVME_NIDT_CSI_LEN,
					  &req->ns->csi, &off);
	if (status)
		goto out;

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;

out:
	nvmet_req_complete(req, status);
}

static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
{
	switch (req->cmd->identify.csi) {
	case NVME_CSI_NVM:
		nvmet_execute_identify_desclist(req);
		return true;
	case NVME_CSI_ZNS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			nvmet_execute_identify_desclist(req);
			return true;
		}
		return false;
	default:
		return false;
	}
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_ns(req);
		default:
			break;
		}
		break;
	case NVME_ID_CNS_CS_NS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				return nvmet_execute_identify_cns_cs_ns(req);
			default:
				break;
			}
		}
		break;
	case NVME_ID_CNS_CTRL:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_ctrl(req);
		}
		break;
	case NVME_ID_CNS_CS_CTRL:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				return nvmet_execute_identify_cns_cs_ctrl(req);
			default:
				break;
			}
		}
		break;
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_nslist(req);
		default:
			break;
		}
		break;
	case NVME_ID_CNS_NS_DESC_LIST:
		if (nvmet_handle_identify_desclist(req) == true)
			return;
		break;
	}

	nvmet_req_cns_error_complete(req);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even with waiting for the command
 * to be executed and return immediately, reporting that the command to
 * abort wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

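/*
 * Set Features, Write Protect: flush pending writes before reporting the
 * namespace as write protected, and notify the host of the attribute change.
 */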
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

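/* KATO is passed in milliseconds; store it rounded up to whole seconds. */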
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 result;

	result = nvmet_req_find_ns(req);
	if (result)
		return result;

	mutex_lock(&subsys->lock);
	if (req->ns->readonly == true)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

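/*
 * Async Event Request: queue the command for later completion when an event
 * fires; at most NVMET_ASYNC_EVENTS commands may be outstanding at once.
 */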
void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}

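/*
 * Keep Alive: restart the keep-alive timer, or fail if the controller has no
 * keep-alive timeout configured.
 */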
void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (!ctrl->kato) {
		status = NVME_SC_KA_TIMEOUT_INVALID;
		goto out;
	}

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
out:
	nvmet_req_complete(req, status);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_cmd(req);
	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}