/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>

#include <trace/events/block.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define  NVME_INLINE_SG_CNT  0
#define  NVME_INLINE_METADATA_SG_CNT  0
#else
#define  NVME_INLINE_SG_CNT  2
#define  NVME_INLINE_METADATA_SG_CNT  1
#endif

/*
 * Default to a 4K page size, with the intention to update this
 * path in the future to accommodate architectures with differing
 * kernel and IO page sizes.
 */
#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;
/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify CNS values other than
	 * 0 or 1 correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns zeroes on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN		= (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES		= (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND		= (1 << 10),

	/*
	 * Use only one interrupt vector for all queues
	 */
	NVME_QUIRK_SINGLE_VECTOR		= (1 << 11),

	/*
	 * Use non-standard 128-byte SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES		= (1 << 12),

	/*
	 * Prevent tag overlap between queues
	 */
	NVME_QUIRK_SHARED_TAGS			= (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE	= (1 << 14),

	/*
	 * The controller doesn't handle the Identify Namespace
	 * Identification Descriptor list subcommand despite claiming
	 * NVMe 1.3 compliance.
	 */
	NVME_QUIRK_NO_NS_DESC_LIST		= (1 << 15),

	/*
	 * The controller does not properly handle DMA addresses over
	 * 48 bits.
	 */
	NVME_QUIRK_DMA_ADDRESS_BITS_48		= (1 << 16),

	/*
	 * The controller requires the command_id value to be limited, so skip
	 * encoding the generation sequence number.
	 */
	NVME_QUIRK_SKIP_CID_GEN			= (1 << 17),

	/*
	 * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
	 */
	NVME_QUIRK_BOGUS_NID			= (1 << 18),
};

/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			genctr;
	u8			retries;
	u8			flags;
	u16			status;
	struct nvme_ctrl	*ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
	NVME_REQ_USERCMD		= (1 << 1),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;

	return req->mq_hctx->queue_num + 1;
}

/*
 * The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:		New controller just allocated, initial state
 * @NVME_CTRL_LIVE:		Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:	Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:	Controller is disconnected, now connecting the
 *				transport
 * @NVME_CTRL_DELETING:		Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:	Controller is deleting and I/O is not
 *				disabled/failed immediately. This state is
 *				entered after all async event processing has
 *				taken place and before namespace removal and
 *				the controller deletion proceed
 * @NVME_CTRL_DEAD:		Controller is non-present/unresponsive during
 *				shutdown or removal. In this case we forcibly
 *				kill all inflight I/O as it has no chance to
 *				complete
 */
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};

struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};

enum nvme_ctrl_flags {
	NVME_CTRL_FAILFAST_EXPIRED	= 0,
	NVME_CTRL_ADMIN_Q_STOPPED	= 1,
	NVME_CTRL_STARTED_ONCE		= 2,
};

struct nvme_ctrl {
	bool comp_seen;
	enum nvme_ctrl_state state;
	bool identified;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
#ifdef CONFIG_NVME_HWMON
	struct device *hwmon_device;
#endif
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u32 ctrl_config;
	u16 mtfa;
	u32 queue_count;

	u64 cap;
	u32 max_hw_sectors;
	u32 max_segments;
	u32 max_integrity_segments;
	u32 max_discard_sectors;
	u32 max_discard_segments;
	u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
	u32 max_zone_append;
#endif
	u16 crdt[3];
	u16 oncs;
	u32 dmrsl;
	u16 oacs;
	u16 sqsize;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u16 wctemp;
	u16 cctemp;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct xarray cels;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct delayed_work failfast_work;
	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

#ifdef CONFIG_NVME_AUTH
	struct work_struct dhchap_auth_work;
	struct list_head dhchap_auth_list;
	struct mutex dhchap_auth_mutex;
	struct nvme_dhchap_key *host_key;
	struct nvme_dhchap_key *ctrl_key;
	u16 transaction;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;
	u16 hmmaxd;

	/* Fabrics only */
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	unsigned long flags;
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;

	enum nvme_ctrl_type cntrltype;
	enum nvme_dctype dctype;
};
enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
};

struct nvme_subsystem {
	int			instance;
	struct device		dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct mutex		lock;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			serial[20];
	char			model[40];
	char			firmware_rev[8];
	u8			cmic;
	enum nvme_subsys_type	subtype;
	u16			vendor_id;
	u16			awupf;	/* 0-based AWUPF value. */
	struct ida		ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy	iopolicy;
#endif
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
	u8	csi;
};

/*
 * Anchor structure for namespaces.  There is one for each namespace in an
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it.  For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	unsigned		ns_id;
	struct nvme_ns_ids	ids;
	struct list_head	entry;
	struct kref		ref;
	bool			shared;
	int			instance;
	struct nvme_effects_log *effects;

	struct cdev		cdev;
	struct device		cdev_device;

	struct gendisk		*disk;
#ifdef CONFIG_NVME_MULTIPATH
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
	struct mutex		lock;
	unsigned long		flags;
#define NVME_NSHEAD_DISK_LIVE	0
	struct nvme_ns __rcu	*current_path[];
#endif
};

static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
{
	return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
}

enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 pi_size;
	u16 sgs;
	u32 sws;
	u8 pi_type;
	u8 guard_type;
#ifdef CONFIG_BLK_DEV_ZONED
	u64 zsze;
#endif
	unsigned long features;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_DEAD		1
#define NVME_NS_ANA_PENDING	2
#define NVME_NS_FORCE_RO	3
#define NVME_NS_READY		4
#define NVME_NS_STOPPED		5

	struct cdev		cdev;
	struct device		cdev_device;

	struct nvme_fault_inject fault_inject;
};

/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == ns->pi_size;
}
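
/*
 * Illustrative note (editorial, not from the spec text above): the check
 * succeeds only when the per-block metadata consists of nothing but the
 * protection information.  For example, with 8 bytes of T10 PI
 * (pi_size == 8) and ms == 8 the controller can generate/strip PI itself;
 * with ms == 16 the extra metadata bytes would have to be passed through,
 * so this returns false.
 */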

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_BLOCKING			(1 << 2)

	const struct attribute_group **dev_attr_groups;
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
	void (*print_device_info)(struct nvme_ctrl *ctrl);
	bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
};

/*
 * nvme command_id is constructed as such:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 */
#define nvme_genctr_mask(gen)			(gen & 0xf)
#define nvme_cid_install_genctr(gen)		(nvme_genctr_mask(gen) << 12)
#define nvme_genctr_from_cid(cid)		((cid & 0xf000) >> 12)
#define nvme_tag_from_cid(cid)			(cid & 0xfff)
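
/*
 * Worked example (illustrative only): with genctr 0x5 and blk-mq tag 0x02a,
 * nvme_cid() below yields (0x5 << 12) | 0x02a == 0x502a.  Decoding reverses
 * it: nvme_genctr_from_cid(0x502a) == 0x5 and nvme_tag_from_cid(0x502a)
 * == 0x02a, which is how nvme_find_rq() detects stale completions whose
 * generation no longer matches the request.
 */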

static inline u16 nvme_cid(struct request *rq)
{
	return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
}

static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	u8 genctr = nvme_genctr_from_cid(command_id);
	u16 tag = nvme_tag_from_cid(command_id);
	struct request *rq;

	rq = blk_mq_tag_to_rq(tags, tag);
	if (unlikely(!rq)) {
		pr_err("could not locate request for tag %#x\n",
			tag);
		return NULL;
	}
	if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
		return NULL;
	}
	return rq;
}

static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
}

/*
 * Return the length of the string without the space padding
 */
static inline int nvme_strlen(char *s, int len)
{
	while (s[len - 1] == ' ')
		len--;
	return len;
}
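
/*
 * Illustrative example (hypothetical values): Identify Controller strings
 * such as model and firmware_rev are fixed-width and space padded, so for a
 * firmware_rev of "2.0     " stored in 8 bytes nvme_strlen() returns 3.
 * Callers pair the result with a "%.*s" format, as nvme_print_device_info()
 * below does.
 */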

static inline void nvme_print_device_info(struct nvme_ctrl *ctrl)
{
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (ctrl->ops->print_device_info) {
		ctrl->ops->print_device_info(ctrl);
		return;
	}

	dev_err(ctrl->device,
		"VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id,
		nvme_strlen(subsys->model, sizeof(subsys->model)),
		subsys->model, nvme_strlen(subsys->firmware_rev,
					   sizeof(subsys->firmware_rev)),
		subsys->firmware_rev);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	int ret;

	if (!ctrl->subsystem)
		return -ENOTTY;
	if (!nvme_wait_reset(ctrl))
		return -EBUSY;

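	/* 0x4E564D65 is ASCII "NVMe", the NVM Subsystem Reset (NSSR) magic. */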
	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
	if (ret)
		return ret;

	return nvme_try_sched_reset(ctrl);
}

/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
	return sector >> (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
	return lba << (ns->lba_shift - SECTOR_SHIFT);
}
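
/*
 * Worked example (illustrative only): for a 4096-byte LBA format,
 * lba_shift == 12, so the shift amount is 12 - SECTOR_SHIFT == 3.
 * Sector 80 maps to LBA 80 >> 3 == 10, and LBA 10 maps back to sector
 * 10 << 3 == 80.
 */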

/*
 * Convert a byte length to NVMe's 0-based number of dwords.
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}
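
/*
 * Illustrative example: a 4096-byte buffer is 1024 dwords, which the
 * 0-based convention encodes as (4096 >> 2) - 1 == 1023.
 */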

static inline bool nvme_is_ana_error(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}

static inline bool nvme_is_path_error(u16 status)
{
	/* check for a status code type of 'path related status' */
	return (status & 0x700) == 0x300;
}
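
/*
 * Illustrative note: bits 10:8 of the (shifted) status value hold the
 * Status Code Type, and SCT 0x3 is "Path Related Status", hence the 0x700
 * mask and the 0x300 comparison.  The ANA status codes matched above all
 * fall in this type, so every ANA error is also a path error.
 */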

/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so.  If not let the caller complete the request without an indirect function
 * call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);
	struct nvme_ctrl *ctrl = rq->ctrl;

	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
		rq->genctr++;

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid &&
		nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}
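
/*
 * Illustrative note: AEN commands are issued on the admin queue (qid 0)
 * with command IDs above the blk-mq tag space, so a completion on qid 0
 * whose decoded tag is >= NVME_AQ_BLK_MQ_DEPTH cannot belong to a block
 * layer request and is treated as an async event notification.
 */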

void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);

static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
						void (*fn)(struct request *rq))
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		fn(req);
		nvme_complete_batch_req(req);
	}
	blk_mq_end_request_batch(iob);
}

blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int cmd_size);
void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int nr_maps,
		unsigned int cmd_size);
void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_stop_admin_queue(struct nvme_ctrl *ctrl);
void nvme_start_admin_queue(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

static inline enum req_op nvme_req_op(struct nvme_command *cmd)
{
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}

#define NVME_QID_ANY -1
void nvme_init_request(struct request *req, struct nvme_command *cmd);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live);

static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	if (likely(ctrl->state == NVME_CTRL_LIVE))
		return true;
	if (ctrl->ops->flags & NVME_F_FABRICS &&
	    ctrl->state == NVME_CTRL_DELETING)
		return queue_live;
	return __nvme_check_ready(ctrl, rq, queue_live);
}

/*
 * The NSID shall be unique for all shared namespaces, or if at least one of
 * the following conditions is met:
 *   1. Namespace Management is supported by the controller
 *   2. ANA is supported by the controller
 *   3. NVM Sets are supported by the controller
 *
 * Otherwise, private namespaces are not required to report a unique NSID.
 */
static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return head->shared ||
		(ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
		(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
		(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
}

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		int qid, int at_head,
		blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner);
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
		struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
		struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
extern const struct attribute_group nvme_dev_attrs_group;

struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);

static inline void nvme_trace_bio_complete(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;

	if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern bool multipath;
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;

#else
#define multipath false
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_revalidate_zones(struct nvme_ns *ns);
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
#ifdef CONFIG_BLK_DEV_ZONED
int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
				       struct nvme_command *cmnd,
				       enum nvme_zone_mgmt_action action);
#else
static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action)
{
	return BLK_STS_NOTSUPP;
}

static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
	dev_warn(ns->ctrl->device,
		 "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
	return -EPROTONOSUPPORT;
}
#endif

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}

static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
#endif

static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
	return ctrl->sgls & ((1 << 0) | (1 << 1));
}
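
/*
 * Illustrative note: ctrl->sgls mirrors the SGLS field of the Identify
 * Controller data, whose low bits advertise SGL support for the NVM
 * command set (with or without alignment restrictions), so a nonzero
 * result here means the controller accepts SGL-described data transfers.
 */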

#ifdef CONFIG_NVME_AUTH
void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_auth_stop(struct nvme_ctrl *ctrl);
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
void nvme_auth_reset(struct nvme_ctrl *ctrl);
void nvme_auth_free(struct nvme_ctrl *ctrl);
#else
static inline void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) {}
static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {}
static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
	return -EPROTONOSUPPORT;
}
static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
	return NVME_SC_AUTH_REQUIRED;
}
static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {}
#endif

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			 u8 opcode);
int nvme_execute_passthru_rq(struct request *rq, u32 *effects);
void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
		       struct nvme_command *cmd, int status);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);

static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}

#ifdef CONFIG_NVME_VERBOSE_ERRORS
const unsigned char *nvme_get_error_status_str(u16 status);
const unsigned char *nvme_get_opcode_str(u8 opcode);
const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
#else /* CONFIG_NVME_VERBOSE_ERRORS */
static inline const unsigned char *nvme_get_error_status_str(u16 status)
{
	return "I/O Error";
}
static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
{
	return "I/O Cmd";
}
static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
{
	return "Admin Cmd";
}
#endif /* CONFIG_NVME_VERBOSE_ERRORS */

#endif /* _NVME_H */