// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2018 Christoph Hellwig.
 */

#include <linux/backing-dev.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <trace/events/block.h>
#include "nvme.h"

bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");

static const char *nvme_iopolicy_names[] = {
	[NVME_IOPOLICY_NUMA]	= "numa",
	[NVME_IOPOLICY_RR]	= "round-robin",
};

static int iopolicy = NVME_IOPOLICY_NUMA;

static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
{
	if (!val)
		return -EINVAL;
	if (!strncmp(val, "numa", 4))
		iopolicy = NVME_IOPOLICY_NUMA;
	else if (!strncmp(val, "round-robin", 11))
		iopolicy = NVME_IOPOLICY_RR;
	else
		return -EINVAL;

	return 0;
}

static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
{
	return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
}

module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
	&iopolicy, 0644);
MODULE_PARM_DESC(iopolicy,
	"Default multipath I/O policy; 'numa' (default) or 'round-robin'");

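/*
 * Example usage (assuming this file is built into the nvme_core module, as
 * in mainline): the default policy can be changed at runtime with
 *
 *	echo round-robin > /sys/module/nvme_core/parameters/iopolicy
 *
 * This only affects subsystems set up afterwards via the helper below;
 * existing subsystems are adjusted through their per-subsystem "iopolicy"
 * sysfs attribute implemented further down in this file.
 */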
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
	subsys->iopolicy = iopolicy;
}

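/*
 * The three helpers below propagate queue freezing to all multipath nodes in
 * a subsystem.  Callers hold subsys->lock and, as with the underlying blk-mq
 * primitives, are expected to pair nvme_mpath_start_freeze() with
 * nvme_mpath_wait_freeze() and a final nvme_mpath_unfreeze().
 */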
void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_unfreeze_queue(h->disk->queue);
}

void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_freeze_queue_wait(h->disk->queue);
}

void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_freeze_queue_start(h->disk->queue);
}

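/*
 * Fail over a multipath request: clear the cached path so it is not picked
 * again, steal the bios back onto the head's requeue list, complete the
 * original request, and let the requeue work resubmit the bios through
 * fresh path selection.  Polled bios are downgraded to interrupt-driven
 * completion since the original submitter will no longer poll for them
 * after the failover.
 */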
void nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	u16 status = nvme_req(req)->status & 0x7ff;
	unsigned long flags;
	struct bio *bio;

	nvme_mpath_clear_current_path(ns);

	/*
	 * If we got back an ANA error, we know the controller is alive but not
	 * ready to serve this namespace.  Kick off a re-read of the ANA
	 * information page, and just try any other available path for now.
	 */
	if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
		set_bit(NVME_NS_ANA_PENDING, &ns->flags);
		queue_work(nvme_wq, &ns->ctrl->ana_work);
	}

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	for (bio = req->bio; bio; bio = bio->bi_next) {
		bio_set_dev(bio, ns->head->disk->part0);
		if (bio->bi_opf & REQ_POLLED) {
			bio->bi_opf &= ~REQ_POLLED;
			bio->bi_cookie = BLK_QC_T_NONE;
		}
	}
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);

	blk_mq_end_request(req, 0);
	kblockd_schedule_work(&ns->head->requeue_work);
}

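/*
 * Schedule the requeue work for every multipath node of this controller so
 * parked bios get another chance at path selection, and notify userspace of
 * live disks via a change uevent.
 */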
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (!ns->head->disk)
			continue;
		kblockd_schedule_work(&ns->head->requeue_work);
		if (ctrl->state == NVME_CTRL_LIVE)
			disk_uevent(ns->head->disk, KOBJ_CHANGE);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static const char *nvme_ana_state_names[] = {
	[0]				= "invalid state",
	[NVME_ANA_OPTIMIZED]		= "optimized",
	[NVME_ANA_NONOPTIMIZED]		= "non-optimized",
	[NVME_ANA_INACCESSIBLE]		= "inaccessible",
	[NVME_ANA_PERSISTENT_LOSS]	= "persistent-loss",
	[NVME_ANA_CHANGE]		= "change",
};

bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	bool changed = false;
	int node;

	if (!head)
		goto out;

	for_each_node(node) {
		if (ns == rcu_access_pointer(head->current_path[node])) {
			rcu_assign_pointer(head->current_path[node], NULL);
			changed = true;
		}
	}
out:
	return changed;
}

void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		nvme_mpath_clear_current_path(ns);
		kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	sector_t capacity = get_capacity(head->disk);
	int node;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&head->srcu);
	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (capacity != get_capacity(ns->disk))
			clear_bit(NVME_NS_READY, &ns->flags);
	}
	srcu_read_unlock(&head->srcu, srcu_idx);

	for_each_node(node)
		rcu_assign_pointer(head->current_path[node], NULL);
	kblockd_schedule_work(&head->requeue_work);
}

static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
	/*
	 * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
	 * still be able to complete assuming that the controller is connected.
	 * Otherwise it will fail immediately and return to the requeue list.
	 */
	if (ns->ctrl->state != NVME_CTRL_LIVE &&
	    ns->ctrl->state != NVME_CTRL_DELETING)
		return true;
	if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
	    !test_bit(NVME_NS_READY, &ns->flags))
		return true;
	return false;
}

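/*
 * (Re)compute the preferred path for @node: choose the ANA-optimized path
 * with the smallest NUMA distance, fall back to the closest non-optimized
 * path, and cache the winner in head->current_path[node].  With the
 * round-robin policy all distances are equal, so the first usable path wins.
 */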
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
	struct nvme_ns *found = NULL, *fallback = NULL, *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
			distance = node_distance(node, ns->ctrl->numa_node);
		else
			distance = LOCAL_DISTANCE;

		switch (ns->ana_state) {
		case NVME_ANA_OPTIMIZED:
			if (distance < found_distance) {
				found_distance = distance;
				found = ns;
			}
			break;
		case NVME_ANA_NONOPTIMIZED:
			if (distance < fallback_distance) {
				fallback_distance = distance;
				fallback = ns;
			}
			break;
		default:
			break;
		}
	}

	if (!found)
		found = fallback;
	if (found)
		rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
		struct nvme_ns *ns)
{
	ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
			siblings);
	if (ns)
		return ns;
	return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
}

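/*
 * Round-robin selection: starting after the most recently used path, pick
 * the next enabled optimized path, remembering the last non-optimized one
 * seen as a fallback.  The previous path is only reused when nothing better
 * is available.
 */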
static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
		int node, struct nvme_ns *old)
{
	struct nvme_ns *ns, *found = NULL;

	if (list_is_singular(&head->list)) {
		if (nvme_path_is_disabled(old))
			return NULL;
		return old;
	}

	for (ns = nvme_next_ns(head, old);
	     ns && ns != old;
	     ns = nvme_next_ns(head, ns)) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (ns->ana_state == NVME_ANA_OPTIMIZED) {
			found = ns;
			goto out;
		}
		if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
			found = ns;
	}

	/*
	 * The loop above skips the current path for round-robin semantics.
	 * Fall back to the current path if either:
	 *  - no other optimized path found and current is optimized,
	 *  - no other usable path found and current is usable.
	 */
	if (!nvme_path_is_disabled(old) &&
	    (old->ana_state == NVME_ANA_OPTIMIZED ||
	     (!found && old->ana_state == NVME_ANA_NONOPTIMIZED)))
		return old;

	if (!found)
		return NULL;
out:
	rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
	return ns->ctrl->state == NVME_CTRL_LIVE &&
		ns->ana_state == NVME_ANA_OPTIMIZED;
}

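/*
 * Look up the path to use for a bio.  The per-node cache makes the common
 * case a single srcu_dereference(); the full search only runs when there is
 * no cached path, the cached path is no longer optimized, or round-robin
 * rotation is required.  Callers must be inside an SRCU read-side critical
 * section on head->srcu, and the result is only valid within it.
 */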
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	int node = numa_node_id();
	struct nvme_ns *ns;

	ns = srcu_dereference(head->current_path[node], &head->srcu);
	if (unlikely(!ns))
		return __nvme_find_path(head, node);

	if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
		return nvme_round_robin_path(head, node, ns);
	if (unlikely(!nvme_path_is_optimized(ns)))
		return __nvme_find_path(head, node);
	return ns;
}

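/*
 * Unlike nvme_find_path(), this asks whether any path could become usable
 * again: a controller that is live, resetting or (re)connecting still counts
 * as available unless its failfast timeout has expired.  The submit path
 * uses this to decide between requeuing and failing a bio.
 */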
static bool nvme_available_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
			continue;
		switch (ns->ctrl->state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			return true;
		default:
			break;
		}
	}
	return false;
}

static void nvme_ns_head_submit_bio(struct bio *bio)
{
	struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	int srcu_idx;

	/*
	 * The namespace might be going away and the bio might be moved to a
	 * different queue via blk_steal_bios(), so we need to use the bio_split
	 * pool from the original queue to allocate the bvecs from.
	 */
	bio = bio_split_to_limits(bio);
	if (!bio)
		return;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio_set_dev(bio, ns->disk->part0);
		bio->bi_opf |= REQ_NVME_MPATH;
		trace_block_bio_remap(bio, disk_devt(ns->head->disk),
				      bio->bi_iter.bi_sector);
		submit_bio_noacct(bio);
	} else if (nvme_available_path(head)) {
		dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no available path - failing I/O\n");

		bio_io_error(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
}

static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
{
	if (!nvme_tryget_ns_head(bdev->bd_disk->private_data))
		return -ENXIO;
	return 0;
}

static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
{
	nvme_put_ns_head(disk->private_data);
}

#ifdef CONFIG_BLK_DEV_ZONED
static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nvme_ns_head *head = disk->private_data;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (ns)
		ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
#else
#define nvme_ns_head_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */

const struct block_device_operations nvme_ns_head_ops = {
	.owner		= THIS_MODULE,
	.submit_bio	= nvme_ns_head_submit_bio,
	.open		= nvme_ns_head_open,
	.release	= nvme_ns_head_release,
	.ioctl		= nvme_ns_head_ioctl,
	.compat_ioctl	= blkdev_compat_ptr_ioctl,
	.getgeo		= nvme_getgeo,
	.report_zones	= nvme_ns_head_report_zones,
	.pr_ops		= &nvme_pr_ops,
};

static inline struct nvme_ns_head *cdev_to_ns_head(struct cdev *cdev)
{
	return container_of(cdev, struct nvme_ns_head, cdev);
}

static int nvme_ns_head_chr_open(struct inode *inode, struct file *file)
{
	if (!nvme_tryget_ns_head(cdev_to_ns_head(inode->i_cdev)))
		return -ENXIO;
	return 0;
}

static int nvme_ns_head_chr_release(struct inode *inode, struct file *file)
{
	nvme_put_ns_head(cdev_to_ns_head(inode->i_cdev));
	return 0;
}

static const struct file_operations nvme_ns_head_chr_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_ns_head_chr_open,
	.release	= nvme_ns_head_chr_release,
	.unlocked_ioctl	= nvme_ns_head_chr_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.uring_cmd	= nvme_ns_head_chr_uring_cmd,
	.uring_cmd_iopoll = nvme_ns_head_chr_uring_cmd_iopoll,
};

static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
{
	int ret;

	head->cdev_device.parent = &head->subsys->dev;
	ret = dev_set_name(&head->cdev_device, "ng%dn%d",
			   head->subsys->instance, head->instance);
	if (ret)
		return ret;
	ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
			    &nvme_ns_head_chr_fops, THIS_MODULE);
	return ret;
}

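/*
 * Drain the head's requeue list and resubmit each bio; every bio goes back
 * through nvme_ns_head_submit_bio() and thus through path selection again.
 */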
static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		submit_bio_noacct(bio);
	}
}

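/*
 * Set up the multipath state for a namespace head.  Returning 0 without
 * allocating head->disk is a valid outcome (e.g. multipath disabled or a
 * subsystem that cannot have multiple controllers); callers must check
 * head->disk before using it.
 */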
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	bool vwc = false;

	mutex_init(&head->lock);
	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing flag
	 * could change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
	    !nvme_is_unique_nsid(ctrl, head) || !multipath)
		return 0;

	head->disk = blk_alloc_disk(ctrl->numa_node);
	if (!head->disk)
		return -ENOMEM;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
	blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
	/*
	 * This assumes all controllers that refer to a namespace either
	 * support poll queues or not.  That is not a strict guarantee,
	 * but if the assumption is wrong the effect is only suboptimal
	 * performance but not a correctness problem.
	 */
	if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL &&
	    ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
		blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);

	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(head->disk->queue, 512);
	blk_set_stacking_limits(&head->disk->queue->limits);
	blk_queue_dma_alignment(head->disk->queue, 3);

	/* we need to propagate up the VWC settings */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(head->disk->queue, vwc, vwc);
	return 0;
}

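/*
 * Called when a path becomes usable: register the multipath disk and its
 * char device on first use, pre-populate the per-node path cache if this
 * path is optimized, and kick the requeue work so parked bios can make
 * progress.
 */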
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	int rc;

	if (!head->disk)
		return;

	/*
	 * test_and_set_bit() is used because it is protecting against two nvme
	 * paths simultaneously calling device_add_disk() on the same namespace
	 * head.
	 */
	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		rc = device_add_disk(&head->subsys->dev, head->disk,
				     nvme_ns_id_attr_groups);
		if (rc) {
			clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
			return;
		}
		nvme_add_ns_head_cdev(head);
	}

	mutex_lock(&head->lock);
	if (nvme_path_is_optimized(ns)) {
		int node, srcu_idx;

		srcu_idx = srcu_read_lock(&head->srcu);
		for_each_node(node)
			__nvme_find_path(head, node);
		srcu_read_unlock(&head->srcu, srcu_idx);
	}
	mutex_unlock(&head->lock);

	synchronize_srcu(&head->srcu);
	kblockd_schedule_work(&head->requeue_work);
}

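/*
 * Walk the ANA log page and invoke @cb on each group descriptor.  The log
 * is a header followed by ngrps variable-size entries, each consisting of a
 * fixed struct nvme_ana_group_desc followed by nnsids __le32 namespace IDs,
 * which is why the offset has to be recomputed per descriptor.
 */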
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
		int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
			void *))
{
	void *base = ctrl->ana_log_buf;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
	int error, i;

	lockdep_assert_held(&ctrl->ana_lock);

	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
		struct nvme_ana_group_desc *desc = base + offset;
		u32 nr_nsids;
		size_t nsid_buf_size;

		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
			return -EINVAL;

		nr_nsids = le32_to_cpu(desc->nnsids);
		nsid_buf_size = flex_array_size(desc, nsids, nr_nsids);

		if (WARN_ON_ONCE(desc->grpid == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
			return -EINVAL;

		offset += sizeof(*desc);
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
			return -EINVAL;

		error = cb(ctrl, desc, data);
		if (error)
			return error;

		offset += nsid_buf_size;
	}

	return 0;
}

static inline bool nvme_state_is_live(enum nvme_ana_state state)
{
	return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
}

static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
		struct nvme_ns *ns)
{
	ns->ana_grpid = le32_to_cpu(desc->grpid);
	ns->ana_state = desc->state;
	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
	/*
	 * nvme_mpath_set_live() will trigger I/O to the multipath path device
	 * and in turn to this path device.  However we cannot accept this I/O
	 * if the controller is not live.  This may deadlock if called from
	 * nvme_mpath_init_identify() and the ctrl will never complete
	 * initialization, preventing I/O from completing.  For this case we
	 * will reprocess the ANA log page in nvme_mpath_update() once the
	 * controller is ready.
	 */
	if (nvme_state_is_live(ns->ana_state) &&
	    ns->ctrl->state == NVME_CTRL_LIVE)
		nvme_mpath_set_live(ns);
}

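/*
 * Apply one group descriptor to this controller's namespaces.  Both
 * ctrl->namespaces and desc->nsids are sorted by namespace ID, so a single
 * merge-style walk is enough to match them up.
 */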
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
	unsigned *nr_change_groups = data;
	struct nvme_ns *ns;

	dev_dbg(ctrl->device, "ANA group %d: %s.\n",
			le32_to_cpu(desc->grpid),
			nvme_ana_state_names[desc->state]);

	if (desc->state == NVME_ANA_CHANGE)
		(*nr_change_groups)++;

	if (!nr_nsids)
		return 0;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		unsigned nsid;
again:
		nsid = le32_to_cpu(desc->nsids[n]);
		if (ns->head->ns_id < nsid)
			continue;
		if (ns->head->ns_id == nsid)
			nvme_update_ns_ana_state(desc, ns);
		if (++n == nr_nsids)
			break;
		if (ns->head->ns_id > nsid)
			goto again;
	}
	up_read(&ctrl->namespaces_rwsem);
	return 0;
}

static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
{
	u32 nr_change_groups = 0;
	int error;

	mutex_lock(&ctrl->ana_lock);
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM,
			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
	if (error) {
		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
		goto out_unlock;
	}

	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
			nvme_update_ana_state);
	if (error)
		goto out_unlock;

	/*
	 * In theory we should have an ANATT timer per group as they might enter
	 * the change state at different times.  But that is a lot of overhead
	 * just to protect against a target that keeps entering new change
	 * states while never finishing previous ones.  We'll still eventually
	 * time out once all groups are in change state, so this isn't a big
	 * deal.
	 *
	 * We also double the ANATT value to provide some slack for transports
	 * or AEN processing overhead.
	 */
	if (nr_change_groups)
		mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
	else
		del_timer_sync(&ctrl->anatt_timer);
out_unlock:
	mutex_unlock(&ctrl->ana_lock);
	return error;
}

static void nvme_ana_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	nvme_read_ana_log(ctrl);
}

void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
	u32 nr_change_groups = 0;

	if (!ctrl->ana_log_buf)
		return;

	mutex_lock(&ctrl->ana_lock);
	nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
	mutex_unlock(&ctrl->ana_lock);
}

static void nvme_anatt_timeout(struct timer_list *t)
{
	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
	nvme_reset_ctrl(ctrl);
}

void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_use_ana(ctrl))
		return;
	del_timer_sync(&ctrl->anatt_timer);
	cancel_work_sync(&ctrl->ana_work);
}

#define SUBSYS_ATTR_RW(_name, _mode, _show, _store)  \
	struct device_attribute subsys_attr_##_name =	\
		__ATTR(_name, _mode, _show, _store)

static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sysfs_emit(buf, "%s\n",
			  nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}

static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
			WRITE_ONCE(subsys->iopolicy, i);
			return count;
		}
	}

	return -EINVAL;
}
SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
		      nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);

static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);

static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	struct nvme_ana_group_desc *dst = data;

	if (desc->grpid != dst->grpid)
		return 0;

	*dst = *desc;
	return -ENXIO; /* just break out of the loop */
}

void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
	if (nvme_ctrl_use_ana(ns->ctrl)) {
		struct nvme_ana_group_desc desc = {
			.grpid = anagrpid,
			.state = 0,
		};

		mutex_lock(&ns->ctrl->ana_lock);
		ns->ana_grpid = le32_to_cpu(anagrpid);
		nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
		mutex_unlock(&ns->ctrl->ana_lock);
		if (desc.state) {
			/* found the group desc: update */
			nvme_update_ns_ana_state(&desc, ns);
		} else {
			/* group desc not found: trigger a re-read */
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
	} else {
		ns->ana_state = NVME_ANA_OPTIMIZED;
		nvme_mpath_set_live(ns);
	}

	if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
				   ns->head->disk->queue);
#ifdef CONFIG_BLK_DEV_ZONED
	if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
		ns->head->disk->nr_zones = ns->disk->nr_zones;
#endif
}

void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	kblockd_schedule_work(&head->requeue_work);
	if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		nvme_cdev_del(&head->cdev, &head->cdev_device);
		del_gendisk(head->disk);
	}
}

void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	blk_mark_disk_dead(head->disk);
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	put_disk(head->disk);
}

void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
	mutex_init(&ctrl->ana_lock);
	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
}

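/*
 * Note that the ANA log buffer below is sized for the controller-reported
 * worst case: NANAGRPID group descriptors plus one NSID entry for each of
 * up to MNAN namespaces, so the same buffer can be reused for every
 * subsequent log page read.
 */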
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
	size_t ana_log_size;
	int error = 0;

	/* check if multipath is enabled and we have the capability */
	if (!multipath || !ctrl->subsys ||
	    !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
		return 0;

	if (!ctrl->max_namespaces ||
	    ctrl->max_namespaces > le32_to_cpu(id->nn)) {
		dev_err(ctrl->device,
			"Invalid MNAN value %u\n", ctrl->max_namespaces);
		return -EINVAL;
	}

	ctrl->anacap = id->anacap;
	ctrl->anatt = id->anatt;
	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

	ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
		ctrl->max_namespaces * sizeof(__le32);
	if (ana_log_size > max_transfer_size) {
		dev_err(ctrl->device,
			"ANA log page size (%zd) larger than MDTS (%zd).\n",
			ana_log_size, max_transfer_size);
		dev_err(ctrl->device, "disabling ANA support.\n");
		goto out_uninit;
	}
	if (ana_log_size > ctrl->ana_log_size) {
		nvme_mpath_stop(ctrl);
		nvme_mpath_uninit(ctrl);
		ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL);
		if (!ctrl->ana_log_buf)
			return -ENOMEM;
	}
	ctrl->ana_log_size = ana_log_size;
	error = nvme_read_ana_log(ctrl);
	if (error)
		goto out_uninit;
	return 0;

out_uninit:
	nvme_mpath_uninit(ctrl);
	return error;
}

void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
	kvfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
	ctrl->ana_log_size = 0;
}