/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>

#define NVMET_DEFAULT_VS		NVME_VS(1, 3, 0)

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"
#define NVMET_MN_MAX_SIZE		40
#define NVMET_SN_MAX_SIZE		20

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)
/*
 * Helper macros for reporting NVME_SC_CONNECT_INVALID_PARAM errors.
 * The 16-bit shift sets the IATTR field to 1, which means the offending
 * offset lies in the data portion of the Connect command.
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
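
/*
 * Usage sketch (cf. the connect handlers in fabrics-cmd.c): when a field
 * in the Connect data is invalid, report its offset back to the host:
 *
 *	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 */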

struct nvmet_ns {
	struct percpu_ref	ref;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;

	int			use_p2pmem;
	struct pci_dev		*p2p_dev;
	int			pi_type;
	int			metadata_size;
	u8			csi;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	u16			qid;
	u16			size;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
#ifdef CONFIG_NVME_TARGET_AUTH
	struct delayed_work	auth_expired_work;
	bool			authenticated;
	u16			dhchap_tid;
	u16			dhchap_status;
	int			dhchap_step;
	u8			*dhchap_c1;
	u8			*dhchap_c2;
	u32			dhchap_s1;
	u32			dhchap_s2;
	u8			*dhchap_skey;
	int			dhchap_skey_len;
#endif
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	const struct nvmet_fabrics_ops	*tr_ops;
	bool				pi_enable;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;

	bool			reset_tbkas;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;

	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
	bool			pi_support;
#ifdef CONFIG_NVME_TARGET_AUTH
	struct nvme_dhchap_key	*host_key;
	struct nvme_dhchap_key	*ctrl_key;
	u8			shash_id;
	struct crypto_kpp	*dh_tfm;
	u8			dh_gid;
	u8			*dh_key;
	size_t			dh_keysize;
#endif
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;
	u32			max_nsid;
	u16			cntlid_min;
	u16			cntlid_max;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;

	u16			max_qid;

	u64			ver;
	char			serial[NVMET_SN_MAX_SIZE];
	bool			subsys_discovered;
	char			*subsysnqn;
	bool			pi_support;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

	char			*model_number;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
	unsigned int		clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

#ifdef CONFIG_BLK_DEV_ZONED
	u8			zasl;
#endif /* CONFIG_BLK_DEV_ZONED */
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
	u8			*dhchap_secret;
	u8			*dhchap_ctrl_secret;
	u8			dhchap_key_hash;
	u8			dhchap_ctrl_key_hash;
	u8			dhchap_hash_id;
	u8			dhchap_dhgroup_id;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};

#define NVMET_MAX_INLINE_BIOVEC	8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)
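/* With the common 4 KiB PAGE_SIZE this is 8 * 4096 = 32 KiB of inline data. */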

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio      inline_bio;
		} b;
		struct {
			bool			mpool_alloc;
			struct kiocb            iocb;
			struct bio_vec          *bvec;
			struct work_struct      work;
		} f;
		struct {
			struct bio		inline_bio;
			struct request		*rq;
			struct work_struct      work;
			bool			use_workqueue;
		} p;
#ifdef CONFIG_BLK_DEV_ZONED
		struct {
			struct bio		inline_bio;
			struct work_struct	zmgmt_work;
		} z;
#endif /* CONFIG_BLK_DEV_ZONED */
	};
	int			sg_cnt;
	int			metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;
	size_t			metadata_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
};

#define NVMET_MAX_MPOOL_BVEC		16
extern struct kmem_cache *nvmet_bvec_cache;
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
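
/*
 * Sketch of how a transport might use this as the DMA mapping direction
 * (dev here stands for the transport's DMA device, an assumption):
 *
 *	dma_map_sg(dev, req->sg, req->sg_cnt, nvmet_data_dir(req));
 */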

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

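/*
 * RAE (Retain Asynchronous Event) is bit 15 of Get Log Page CDW10; only
 * clear the AEN mask bit when the host did not ask to retain the event.
 */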
static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}
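
/*
 * Typical caller pattern (cf. nvmet_ns_changed() in core.c): skip an AEN
 * that is disabled or already outstanding:
 *
 *	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
 *		continue;
 */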

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024
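/* 1024 nsids * sizeof(__le32) == 4096 bytes: exactly one 4 KiB page. */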

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to it
 * by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_KAS		10
#define NVMET_DISC_KATO_MS		120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req);
void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
void nvmet_bdev_execute_zone_append(struct nvmet_req *req);

static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}
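
/*
 * rw.length (NLB) is 0's based: e.g. length 7 with 512-byte blocks
 * (blksize_shift == 9) means (7 + 1) << 9 = 4096 bytes of data.
 */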

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}

static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}
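
/*
 * dsm.nr is likewise 0's based: nr == 255 means 256 ranges, i.e.
 * 256 * sizeof(struct nvme_dsm_range) = 256 * 16 = 4096 bytes.
 */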

static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
	return subsys->type != NVME_NQN_NVME;
}

#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
	return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);

/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
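
/*
 * Examples: to0based(0) == to0based(1) == 0; values above 65536 clamp to
 * the maximum 0's based value, e.g. to0based(100000) == 65535.
 */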

static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}

static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
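
/*
 * Example: with 4096-byte LBAs (blksize_shift == 12) each LBA spans
 * 1 << (12 - 9) = 8 of the 512-byte sectors, so sector 16 maps to LBA 2.
 */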

static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}

static inline void nvmet_req_cns_error_complete(struct nvmet_req *req)
{
	pr_debug("unhandled identify cns %d on qid %d\n",
	       req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
{
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

#ifdef CONFIG_NVME_TARGET_AUTH
void nvmet_execute_auth_send(struct nvmet_req *req);
void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
		       bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
bool nvmet_check_auth_status(struct nvmet_req *req);
int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return ctrl->host_key != NULL;
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
				u8 *buf, int buf_size);
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
			    u8 *buf, int buf_size);
#else
static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
	return 0;
}
static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
}
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {}
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {}
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
{
	return true;
}
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return false;
}
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
#endif

#endif /* _NVMET_H */