// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe ZNS-ZBD command implementation.
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/nvme.h>
#include <linux/blkdev.h>
#include "nvmet.h"

/*
 * We set the Memory Page Size Minimum (MPSMIN) of the target controller to 0,
 * to which nvme_enable_ctrl() adds 12, yielding a page_shift of 12
 * (2^12 = 4K pages). Use a shift of 12 when calculating the ZASL.
 */
#define NVMET_MPSMIN_SHIFT	12

static inline u8 nvmet_zasl(unsigned int zone_append_sects)
{
	/*
	 * Zone Append Size Limit (ZASL) is expressed as a power of two,
	 * in units of the minimum memory page size (i.e. 2^12 bytes).
	 */
	return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9));
}
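
/*
 * Worked example (hypothetical numbers, not from the driver): a device
 * reporting bdev_max_zone_append_sectors() == 1024 (512-byte sectors,
 * i.e. 512 KiB) gives 1024 >> (12 - 9) = 128 minimum-size pages, so
 * nvmet_zasl() returns ilog2(128) = 7, and 2^7 * 4 KiB = 512 KiB.
 */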

static int validate_conv_zones_cb(struct blk_zone *z,
				  unsigned int i, void *data)
{
	if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return -EOPNOTSUPP;
	return 0;
}

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
{
	u8 zasl = nvmet_zasl(bdev_max_zone_append_sectors(ns->bdev));
	struct gendisk *bd_disk = ns->bdev->bd_disk;
	int ret;

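	/*
	 * The ZASL is advertised subsystem-wide (a single value for all
	 * controllers and namespaces): if a ZASL was already recorded and
	 * this namespace's backing device supports a smaller zone append
	 * limit, refuse to enable the namespace rather than over-report
	 * the limit.
	 */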
	if (ns->subsys->zasl) {
		if (ns->subsys->zasl > zasl)
			return false;
	}
	ns->subsys->zasl = zasl;

	/*
	 * Generic zoned block devices may have a smaller last zone which is
	 * not supported by ZNS. Exclude zoned drives that have such smaller
	 * last zone.
	 */
	if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
		return false;
	/*
	 * ZNS does not define a conventional zone type. If the underlying
	 * device has a bitmap set indicating the existence of conventional
	 * zones, reject the device. Otherwise, use report zones to detect if
	 * the device has conventional zones.
	 */
	if (ns->bdev->bd_disk->queue->conv_zones_bitmap)
		return false;

	ret = blkdev_report_zones(ns->bdev, 0, blkdev_nr_zones(bd_disk),
				  validate_conv_zones_cb, NULL);
	if (ret < 0)
		return false;

	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	return true;
}

void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)
{
	u8 zasl = req->sq->ctrl->subsys->zasl;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl_zns *id;
	u16 status;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

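	/*
	 * A zone append can never move more data than the transport can
	 * carry in a single command, so cap the reported ZASL at the
	 * transport's MDTS when one is available.
	 */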
	if (ctrl->ops->get_mdts)
		id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
	else
		id->zasl = zasl;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
{
	struct nvme_id_ns_zns *id_zns;
	u64 zsze;
	u16 status;
	u32 mar, mor;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id_zns = kzalloc(sizeof(*id_zns), GFP_KERNEL);
	if (!id_zns) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	status = nvmet_req_find_ns(req);
	if (status)
		goto done;

	if (!bdev_is_zoned(req->ns->bdev)) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		goto done;
	}

	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}
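	/*
	 * Zone Size (ZSZE) is reported in logical blocks: convert the zone
	 * size from 512-byte sectors to bytes (<< 9), then to LBAs by
	 * dividing by the namespace block size (>> blksize_shift).
	 */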
	zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
					req->ns->blksize_shift;
	id_zns->lbafe[0].zsze = cpu_to_le64(zsze);

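	/*
	 * MOR and MAR are 0's based values, and a block device limit of 0
	 * means "no limit", which ZNS expresses as 0xffffffff.
	 */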
	mor = bdev_max_open_zones(req->ns->bdev);
	if (!mor)
		mor = U32_MAX;
	else
		mor--;
	id_zns->mor = cpu_to_le32(mor);

	mar = bdev_max_active_zones(req->ns->bdev);
	if (!mar)
		mar = U32_MAX;
	else
		mar--;
	id_zns->mar = cpu_to_le32(mar);

done:
	status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
	kfree(id_zns);
out:
	nvmet_req_complete(req, status);
}

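/*
 * Sanity-check a Zone Management Receive command: NUMD is a 0's based dword
 * count, so the host buffer holds (numd + 1) * 4 bytes and must at least fit
 * the report header. Only the Zone Report action, the two defined Partial
 * Report values and the defined reporting filters are accepted.
 */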
static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
		return NVME_SC_LBA_RANGE | NVME_SC_DNR;
	}

	if (out_bufsize < sizeof(struct nvme_zone_report)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	switch (req->cmd->zmr.pr) {
	case 0:
	case 1:
		break;
	default:
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	switch (req->cmd->zmr.zrasf) {
	case NVME_ZRASF_ZONE_REPORT_ALL:
	case NVME_ZRASF_ZONE_STATE_EMPTY:
	case NVME_ZRASF_ZONE_STATE_IMP_OPEN:
	case NVME_ZRASF_ZONE_STATE_EXP_OPEN:
	case NVME_ZRASF_ZONE_STATE_CLOSED:
	case NVME_ZRASF_ZONE_STATE_FULL:
	case NVME_ZRASF_ZONE_STATE_READONLY:
	case NVME_ZRASF_ZONE_STATE_OFFLINE:
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

struct nvmet_report_zone_data {
	struct nvmet_req *req;
	u64 out_buf_offset;
	u64 out_nr_zones;
	u64 nr_zones;
	u8 zrasf;
};

static int nvmet_bdev_report_zone_cb(struct blk_zone *z, unsigned i, void *d)
{
	static const unsigned int nvme_zrasf_to_blk_zcond[] = {
		[NVME_ZRASF_ZONE_STATE_EMPTY]	 = BLK_ZONE_COND_EMPTY,
		[NVME_ZRASF_ZONE_STATE_IMP_OPEN] = BLK_ZONE_COND_IMP_OPEN,
		[NVME_ZRASF_ZONE_STATE_EXP_OPEN] = BLK_ZONE_COND_EXP_OPEN,
		[NVME_ZRASF_ZONE_STATE_CLOSED]	 = BLK_ZONE_COND_CLOSED,
		[NVME_ZRASF_ZONE_STATE_READONLY] = BLK_ZONE_COND_READONLY,
		[NVME_ZRASF_ZONE_STATE_FULL]	 = BLK_ZONE_COND_FULL,
		[NVME_ZRASF_ZONE_STATE_OFFLINE]	 = BLK_ZONE_COND_OFFLINE,
	};
	struct nvmet_report_zone_data *rz = d;

	if (rz->zrasf != NVME_ZRASF_ZONE_REPORT_ALL &&
	    z->cond != nvme_zrasf_to_blk_zcond[rz->zrasf])
		return 0;

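	/*
	 * Copy a descriptor only while the host buffer has room, but keep
	 * counting matching zones regardless, so that a non-partial report
	 * returns the total number of matching zones.
	 */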
	if (rz->nr_zones < rz->out_nr_zones) {
		struct nvme_zone_descriptor zdesc = { };
		u16 status;

		zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity);
		zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start);
		zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp);
		zdesc.za = z->reset ? 1 << 2 : 0;
		zdesc.zs = z->cond << 4;
		zdesc.zt = z->type;

		status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc,
					   sizeof(zdesc));
		if (status)
			return -EINVAL;

		rz->out_buf_offset += sizeof(zdesc);
	}

	rz->nr_zones++;

	return 0;
}

static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);

	return blkdev_nr_zones(req->ns->bdev->bd_disk) -
		(sect >> ilog2(bdev_zone_sectors(req->ns->bdev)));
}

static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize)
{
	if (bufsize <= sizeof(struct nvme_zone_report))
		return 0;

	return (bufsize - sizeof(struct nvme_zone_report)) /
		sizeof(struct nvme_zone_descriptor);
}

static void nvmet_bdev_zone_zmgmt_recv_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
	__le64 nr_zones;
	u16 status;
	int ret;
	struct nvmet_report_zone_data rz_data = {
		.out_nr_zones = get_nr_zones_from_buf(req, out_bufsize),
		/* leave room for the report zone header */
		.out_buf_offset = sizeof(struct nvme_zone_report),
		.zrasf = req->cmd->zmr.zrasf,
		.nr_zones = 0,
		.req = req,
	};

	status = nvmet_bdev_validate_zone_mgmt_recv(req);
	if (status)
		goto out;

	if (!req_slba_nr_zones) {
		status = NVME_SC_SUCCESS;
		goto out;
	}

	ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones,
				  nvmet_bdev_report_zone_cb, &rz_data);
	if (ret < 0) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/*
	 * When the partial bit is set, nr_zones must indicate the number of
	 * zone descriptors actually transferred.
	 */
	if (req->cmd->zmr.pr)
		rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones);

	nr_zones = cpu_to_le64(rz_data.nr_zones);
	status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones));

out:
	nvmet_req_complete(req, status);
}

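/*
 * Zone Management Send and Receive are executed from the zoned block device
 * workqueue (zbd_wq) rather than inline, since report zones and zone
 * management operations can sleep and must not run in the transport's
 * submission context.
 */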
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

static inline enum req_opf zsa_req_op(u8 zsa)
{
	switch (zsa) {
	case NVME_ZONE_OPEN:
		return REQ_OP_ZONE_OPEN;
	case NVME_ZONE_CLOSE:
		return REQ_OP_ZONE_CLOSE;
	case NVME_ZONE_FINISH:
		return REQ_OP_ZONE_FINISH;
	case NVME_ZONE_RESET:
		return REQ_OP_ZONE_RESET;
	default:
		return REQ_OP_LAST;
	}
}

static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret)
{
	switch (ret) {
	case 0:
		return NVME_SC_SUCCESS;
	case -EINVAL:
	case -EIO:
		return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
	default:
		return NVME_SC_INTERNAL;
	}
}

struct nvmet_zone_mgmt_send_all_data {
	unsigned long *zbitmap;
	struct nvmet_req *req;
};

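/*
 * Report-zones callback used when emulating a "select all" Open, Close or
 * Finish: mark a zone in the bitmap only if its current condition permits
 * the requested transition (e.g. only closed zones are opened), mirroring
 * the ZNS select-all semantics.
 */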
static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
{
	struct nvmet_zone_mgmt_send_all_data *data = d;

	switch (zsa_req_op(data->req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_OPEN:
		switch (z->cond) {
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_CLOSE:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_FINISH:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	default:
		return -EINVAL;
	}

	set_bit(i, data->zbitmap);

	return 0;
}

static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
{
	struct block_device *bdev = req->ns->bdev;
	unsigned int nr_zones = blkdev_nr_zones(bdev->bd_disk);
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;
	struct nvmet_zone_mgmt_send_all_data d = {
		.req = req,
	};

	d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
				 GFP_NOIO, q->node);
	if (!d.zbitmap) {
		ret = -ENOMEM;
		goto out;
	}

	/* Scan and build bitmap of the eligible zones */
	ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d);
	if (ret != nr_zones) {
		if (ret > 0)
			ret = -EIO;
		goto out;
	} else {
		/* We scanned all the zones */
		ret = 0;
	}

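	/*
	 * Walk the device zone by zone and queue one zone management bio per
	 * eligible zone. blk_next_bio() submits the previous bio and chains
	 * it to a freshly allocated one, so the final submit_bio_wait()
	 * below waits for the whole chain to complete.
	 */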
	while (sector < get_capacity(bdev->bd_disk)) {
		if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
			bio = blk_next_bio(bio, bdev, 0,
					   zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
					   GFP_KERNEL);
			bio->bi_iter.bi_sector = sector;
			/* This may take a while, so be nice to others */
			cond_resched();
		}
		sector += blk_queue_zone_sectors(q);
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out:
	kfree(d.zbitmap);

	return blkdev_zone_mgmt_errno_to_nvme_status(ret);
}

static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
{
	int ret;

	switch (zsa_req_op(req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_RESET:
		ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
				       get_capacity(req->ns->bdev->bd_disk),
				       GFP_KERNEL);
		if (ret < 0)
			return blkdev_zone_mgmt_errno_to_nvme_status(ret);
		break;
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return nvmet_bdev_zone_mgmt_emulate_all(req);
	default:
		/*
		 * Should be unreachable: invalid ZSA values are rejected in
		 * nvmet_bdev_zmgmt_send_work() before this is called. The
		 * default case also quiets a compiler warning.
		 */
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
	enum req_opf op = zsa_req_op(req->cmd->zms.zsa);
	struct block_device *bdev = req->ns->bdev;
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	u16 status = NVME_SC_SUCCESS;
	int ret;

	if (op == REQ_OP_LAST) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
		goto out;
	}

	/* When the select-all bit is set, the SLBA field is ignored */
	if (req->cmd->zms.select_all) {
		status = nvmet_bdev_execute_zmgmt_send_all(req);
		goto out;
	}

	if (sect >= get_capacity(bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

	if (sect & (zone_sectors - 1)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
	if (ret < 0)
		status = blkdev_zone_mgmt_errno_to_nvme_status(ret);

out:
	nvmet_req_complete(req, status);
}

void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

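	/*
	 * On success, Zone Append returns the LBA at which the data was
	 * written in dwords 0 and 1 of the completion queue entry.
	 */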
	if (bio->bi_status == BLK_STS_OK) {
		req->cqe->result.u64 =
			nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
	}

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	nvmet_req_bio_put(req, bio);
}

void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
	const unsigned int op = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
	u16 status = NVME_SC_SUCCESS;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	struct bio *bio;
	int sg_cnt;

	/* Request is completed on len mismatch in nvmet_check_transfer_len() */
	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
		return;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

	if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

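	/*
	 * Small transfers can use the bvec array embedded in the request,
	 * avoiding a bio allocation on the hot path.
	 */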
	if (nvmet_use_inline_bvec(req)) {
		bio = &req->z.inline_bio;
		bio_init(bio, req->ns->bdev, req->inline_bvec,
			 ARRAY_SIZE(req->inline_bvec), op);
	} else {
		bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL);
	}

	bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
	bio->bi_iter.bi_sector = sect;
	bio->bi_private = req;
	if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
		bio->bi_opf |= REQ_FUA;

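	/*
	 * bio_add_zone_append_page() enforces the device's zone append
	 * limits; a short return means the payload cannot fit in a single
	 * zone append bio, which we treat as an internal error.
	 */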
	for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
		struct page *p = sg_page(sg);
		unsigned int l = sg->length;
		unsigned int o = sg->offset;
		unsigned int ret;

		ret = bio_add_zone_append_page(bio, p, l, o);
		if (ret != sg->length) {
			status = NVME_SC_INTERNAL;
			goto out_put_bio;
		}
		total_len += sg->length;
	}

	if (total_len != nvmet_rw_data_len(req)) {
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		goto out_put_bio;
	}

	submit_bio(bio);
	return;

out_put_bio:
	nvmet_req_bio_put(req, bio);
out:
	nvmet_req_complete(req, status);
}

u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

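	/*
	 * Only the three zoned opcodes are handled here; everything else
	 * (read, write, flush, ...) falls through to the generic block
	 * device parser.
	 */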
	switch (cmd->common.opcode) {
	case nvme_cmd_zone_append:
		req->execute = nvmet_bdev_execute_zone_append;
		return 0;
	case nvme_cmd_zone_mgmt_recv:
		req->execute = nvmet_bdev_execute_zone_mgmt_recv;
		return 0;
	case nvme_cmd_zone_mgmt_send:
		req->execute = nvmet_bdev_execute_zone_mgmt_send;
		return 0;
	default:
		return nvmet_bdev_parse_io_cmd(req);
	}
}