/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>

#define NVMET_DEFAULT_VS		NVME_VS(1, 3, 0)

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"
#define NVMET_MN_MAX_SIZE		40
#define NVMET_SN_MAX_SIZE		20

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/*
 * Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR bit to 1, which means the offending
 * offset starts in the data section of the Connect command.
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
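
/*
 * Example (a sketch mirroring how the fabrics Connect handlers report a
 * bad field): on an invalid cntlid in the connect data, a handler can do
 *
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 *	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 */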
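
/*
 * Per-namespace state.  A namespace is backed either by a block device
 * (@bdev) or by a regular file (@file); nvmet_ns_enable() sets exactly one
 * of the two.
 */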
struct nvmet_ns {
	struct percpu_ref	ref;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;
	struct kmem_cache	*bvec_cache;

	int			use_p2pmem;
	struct pci_dev		*p2p_dev;
	int			pi_type;
	int			metadata_size;
	u8			csi;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	u16			qid;
	u16			size;
};

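/*
 * Per-submission-queue state.  @ref counts commands in flight; teardown via
 * nvmet_sq_destroy() kills the ref and waits for it to drain.
 */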
struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
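 * @subsys_group:	ConfigFS group for the subsystems linked to this port.
 * @subsystems:		List of subsystems exported on this port.
 * @referrals_group:	ConfigFS group for this port's referrals.
 * @referrals:		List of discovery referrals exported on this port.
 * @global_entry:	Entry into the global list of all ports.
 * @ana_groups_group:	ConfigFS group for this port's ANA groups.
 * @ana_default_group:	Default ANA group (grpid 1), which always exists.
 * @ana_state:		Per-group ANA state for this port, indexed by grpid.
 * @enabled:		True once the port has been enabled through configfs.
 * @inline_data_size:	Maximum in-capsule (inline) data size for this port.
 * @tr_ops:		Transport ops for this port.
 * @pi_enable:		True if end-to-end protection information is enabled.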
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	const struct nvmet_fabrics_ops	*tr_ops;
	bool				pi_enable;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

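/*
 * Per-controller state.  A controller is created for each association a
 * host establishes with a subsystem and is torn down when the association
 * is dropped or the keep-alive timer expires.
 */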
struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;

	bool			reset_tbkas;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;

	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
	bool			pi_support;
};

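/*
 * Per-subsystem state: the namespaces a subsystem exposes (an xarray
 * indexed by nsid) and the controllers currently attached to it.
 */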
struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;
	u32			max_nsid;
	u16			cntlid_min;
	u16			cntlid_max;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;

	u16			max_qid;

	u64			ver;
	char			serial[NVMET_SN_MAX_SIZE];
	bool			subsys_discovered;
	char			*subsysnqn;
	bool			pi_support;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

	char			*model_number;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
	unsigned int		clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

#ifdef CONFIG_BLK_DEV_ZONED
	u8			zasl;
#endif /* CONFIG_BLK_DEV_ZONED */
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
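
/*
 * Ops implemented by each fabrics transport (loop, RDMA, TCP, FC) and
 * registered with the core via nvmet_register_transport().
 */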
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};

#define NVMET_MAX_INLINE_BIOVEC		8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)
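
/*
 * With 4K pages NVMET_MAX_INLINE_DATA_LEN is 32K; requests no larger than
 * this with at most NVMET_MAX_INLINE_BIOVEC segments can use the inline
 * bio_vec array below instead of allocating an sgl (see
 * nvmet_use_inline_bvec()).
 */

/*
 * Per-command state.  The anonymous union holds backend-specific context:
 * @b for the block device backend, @f for the file backend, @p for
 * passthru and @z for zoned block devices.
 */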
struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio		inline_bio;
		} b;
		struct {
			bool			mpool_alloc;
			struct kiocb		iocb;
			struct bio_vec		*bvec;
			struct work_struct	work;
		} f;
		struct {
			struct bio		inline_bio;
			struct request		*rq;
			struct work_struct	work;
			bool			use_workqueue;
		} p;
#ifdef CONFIG_BLK_DEV_ZONED
		struct {
			struct bio		inline_bio;
			struct work_struct	zmgmt_work;
		} z;
#endif /* CONFIG_BLK_DEV_ZONED */
	};
	int			sg_cnt;
	int			metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;
	size_t			metadata_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
};

extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

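/*
 * AEN masking: once an AEN is sent for an event bit the bit stays masked
 * (no further AENs for it) until the host reads the corresponding log page
 * with the Retain Asynchronous Event (RAE) bit clear in cdw10.
 */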
static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);
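
/*
 * Typical request flow in a transport: nvmet_req_init() parses the command
 * and sets req->execute; the transport moves any data and then calls
 * req->execute(), which ends in nvmet_req_complete() and a response via
 * ops->queue_response().
 */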

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to
 * it by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

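/*
 * KAS is reported in 100ms units, so 10 advertises a 1 second keep-alive
 * granularity; discovery controllers default to a 120 second KATO.
 */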
#define NVMET_KAS		10
#define NVMET_DISC_KATO_MS	120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req);
void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
void nvmet_bdev_execute_zone_append(struct nvmet_req *req);

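/*
 * rw.length (NLB) and dsm.nr below are 0's based counts, hence the "+ 1":
 * a length field of 0 means one logical block (or one range).
 */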
static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}

static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}

static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
	return subsys->type != NVME_NQN_NVME;
}

#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
	return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);

/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
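
/*
 * For example, to0based(1) == 0 and to0based(0x10000) == 0xffff; 0 and
 * values above 65536 are clamped into the representable range.
 */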

static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}

static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
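
/*
 * Example: for a 4096-byte-block namespace (blksize_shift == 12) the shift
 * is 12 - SECTOR_SHIFT == 3, so 512-byte sector 8 maps to LBA 1 and back.
 */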

static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}

static inline void nvmet_req_cns_error_complete(struct nvmet_req *req)
{
	pr_debug("unhandled identify cns %d on qid %d\n",
		 req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
{
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

#endif /* _NVMET_H */