// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"

#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
	ZONE_COND_NAME(NOT_WP),
	ZONE_COND_NAME(EMPTY),
	ZONE_COND_NAME(IMP_OPEN),
	ZONE_COND_NAME(EXP_OPEN),
	ZONE_COND_NAME(CLOSED),
	ZONE_COND_NAME(READONLY),
	ZONE_COND_NAME(FULL),
	ZONE_COND_NAME(OFFLINE),
};
#undef ZONE_COND_NAME

/**
 * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
 * @zone_cond: BLK_ZONE_COND_XXX.
 *
 * Description: Centralized block layer function to convert a BLK_ZONE_COND_XXX
 * value into its string form. Useful for debugging and tracing zone
 * conditions. For an invalid BLK_ZONE_COND_XXX value, the string "UNKNOWN" is
 * returned.
 */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
{
	static const char *zone_cond_str = "UNKNOWN";

	if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
		zone_cond_str = zone_cond_name[zone_cond];

	return zone_cond_str;
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);

/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (!rq->q->disk->seq_zones_wlock)
		return false;

	if (bdev_op_is_zoned_write(rq->q->disk->part0, req_op(rq)))
		return blk_rq_zone_is_seq(rq);

	return false;
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);

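/*
 * Try to take the write lock of the zone targeted by @rq. Returns false
 * without blocking if that zone is already locked for another write request.
 */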
bool blk_req_zone_write_trylock(struct request *rq)
{
	unsigned int zno = blk_rq_zone_no(rq);

	if (test_and_set_bit(zno, rq->q->disk->seq_zones_wlock))
		return false;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;

	return true;
}
EXPORT_SYMBOL_GPL(blk_req_zone_write_trylock);

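/*
 * Lock the zone targeted by @rq for writing and mark the request as holding
 * that zone write lock. The zone is expected to not be already locked.
 */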
void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->disk->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);

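/*
 * Clear the zone write lock flag of @rq and release the write lock of the
 * zone that the request was targeting.
 */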
void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	if (rq->q->disk->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->disk->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);

/**
 * bdev_nr_zones - Get number of zones
 * @bdev:	Target device
 *
 * Return the total number of zones of a zoned block device.  For a block
 * device without zone capabilities, the number of zones is always 0.
 */
unsigned int bdev_nr_zones(struct block_device *bdev)
{
	sector_t zone_sectors = bdev_zone_sectors(bdev);

	if (!bdev_is_zoned(bdev))
		return 0;
	return (bdev_nr_sectors(bdev) + zone_sectors - 1) >>
		ilog2(zone_sectors);
}
EXPORT_SYMBOL_GPL(bdev_nr_zones);

/**
 * blkdev_report_zones - Get zones information
 * @bdev:	Target block device
 * @sector:	Sector from which to report zones
 * @nr_zones:	Maximum number of zones to report
 * @cb:		Callback function called for each reported zone
 * @data:	Private data for the callback
 *
 * Description:
 *    Get zone information starting from the zone containing @sector for at
 *    most @nr_zones zones, and call @cb for each zone reported by the device.
 *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
 *    constant can be passed to @nr_zones.
 *    Returns the number of zones reported by the device, or a negative errno
 *    value in case of failure.
 *
 *    Note: The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = get_capacity(disk);

	if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!nr_zones || sector >= capacity)
		return 0;

	return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
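
/*
 * Example (illustrative sketch only, not part of this file): counting the
 * sequential write required zones of a device with blkdev_report_zones().
 * The callback and variable names below are hypothetical.
 *
 *	static int count_seq_zones_cb(struct blk_zone *zone, unsigned int idx,
 *				      void *data)
 *	{
 *		unsigned int *nr_seq_zones = data;
 *
 *		if (zone->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
 *			(*nr_seq_zones)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr_seq_zones = 0;
 *	int ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *				      count_seq_zones_cb, &nr_seq_zones);
 */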

static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}

static int blk_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	/*
	 * For an all-zones reset, ignore conventional, empty, read-only
	 * and offline zones.
	 */
	switch (zone->cond) {
	case BLK_ZONE_COND_NOT_WP:
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_READONLY:
	case BLK_ZONE_COND_OFFLINE:
		return 0;
	default:
		set_bit(idx, (unsigned long *)data);
		return 0;
	}
}

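/*
 * Emulate REQ_OP_ZONE_RESET_ALL for devices that do not natively support it:
 * report all zones to identify those that need a reset, then issue one
 * REQ_OP_ZONE_RESET bio per such zone.
 */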
static int blkdev_zone_reset_all_emulated(struct block_device *bdev,
					  gfp_t gfp_mask)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = bdev_nr_sectors(bdev);
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	unsigned long *need_reset;
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;

	need_reset = blk_alloc_zone_bitmap(disk->queue->node, disk->nr_zones);
	if (!need_reset)
		return -ENOMEM;

	ret = disk->fops->report_zones(disk, 0, disk->nr_zones,
				       blk_zone_need_reset_cb, need_reset);
	if (ret < 0)
		goto out_free_need_reset;

	ret = 0;
	while (sector < capacity) {
		if (!test_bit(disk_zone_no(disk, sector), need_reset)) {
			sector += zone_sectors;
			continue;
		}

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_ZONE_RESET | REQ_SYNC,
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out_free_need_reset:
	kfree(need_reset);
	return ret;
}

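/*
 * Reset all zones of a device with a single REQ_OP_ZONE_RESET_ALL bio, for
 * devices that natively support this operation.
 */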
static int blkdev_zone_reset_all(struct block_device *bdev, gfp_t gfp_mask)
{
	struct bio bio;

	bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC);
	return submit_bio_wait(&bio);
}

/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev:	Target block device
 * @op:		Operation to be performed on the zones
 * @sector:	Start sector of the first zone to operate on
 * @nr_sectors:	Number of sectors, should be at least the length of one zone and
 *		must be zone size aligned.
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
		     sector_t sector, sector_t nr_sectors, gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	sector_t capacity = bdev_nr_sectors(bdev);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!bdev_is_zoned(bdev))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (end_sector <= sector || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle a possibly smaller last zone) */
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
		return -EINVAL;

	/*
	 * In the case of a zone reset operation over all zones,
	 * REQ_OP_ZONE_RESET_ALL can be used with devices supporting this
	 * command. For other devices, we emulate this command behavior by
	 * identifying the zones needing a reset.
	 */
	if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) {
		if (!blk_queue_zone_resetall(q))
			return blkdev_zone_reset_all_emulated(bdev, gfp_mask);
		return blkdev_zone_reset_all(bdev, gfp_mask);
	}

	while (sector < end_sector) {
		bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
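
/*
 * Example (illustrative sketch only): explicitly finishing a single zone
 * starting at the zone size aligned sector zone_start (a hypothetical
 * variable) using blkdev_zone_mgmt():
 *
 *	ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_FINISH, zone_start,
 *			       bdev_zone_sectors(bdev), GFP_KERNEL);
 */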

struct zone_report_args {
	struct blk_zone __user *zones;
};

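/*
 * Report zones callback for BLKREPORTZONE: copy a zone descriptor to the
 * user space buffer provided with the ioctl.
 */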
static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
				    void *data)
{
	struct zone_report_args *args = data;

	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
		return -EFAULT;
	return 0;
}

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct zone_report_args args;
	struct request_queue *q;
	struct blk_zone_report rep;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!bdev_is_zoned(bdev))
		return -ENOTTY;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	args.zones = argp + sizeof(struct blk_zone_report);
	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
				  blkdev_copy_zone_to_user, &args);
	if (ret < 0)
		return ret;

	rep.nr_zones = ret;
	rep.flags = BLK_ZONE_REP_CAPACITY;
	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
		return -EFAULT;
	return 0;
}

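/*
 * Truncate the page cache of the block device, including dirty pages, over
 * the sector range of the zones that a BLKRESETZONE ioctl will reset, after
 * checking that the range is valid.
 */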
static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
				      const struct blk_zone_range *zrange)
{
	loff_t start, end;

	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
		/* Out of range */
		return -EINVAL;

	start = zrange->sector << SECTOR_SHIFT;
	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;

	return truncate_bdev_range(bdev, mode, start, end);
}

/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;
	enum req_op op;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!bdev_is_zoned(bdev))
		return -ENOTTY;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;

		/* Invalidate the page cache, including dirty pages. */
		filemap_invalidate_lock(bdev->bd_inode->i_mapping);
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
		if (ret)
			goto fail;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
			       GFP_KERNEL);

fail:
	if (cmd == BLKRESETZONE)
		filemap_invalidate_unlock(bdev->bd_inode->i_mapping);

	return ret;
}

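/*
 * Free the conventional zone bitmap and the sequential zone write lock
 * bitmap of a disk.
 */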
void disk_free_zone_bitmaps(struct gendisk *disk)
{
	kfree(disk->conv_zones_bitmap);
	disk->conv_zones_bitmap = NULL;
	kfree(disk->seq_zones_wlock);
	disk->seq_zones_wlock = NULL;
}

struct blk_revalidate_zone_args {
	struct gendisk	*disk;
	unsigned long	*conv_zones_bitmap;
	unsigned long	*seq_zones_wlock;
	unsigned int	nr_zones;
	sector_t	zone_sectors;
	sector_t	sector;
};

/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	struct blk_revalidate_zone_args *args = data;
	struct gendisk *disk = args->disk;
	struct request_queue *q = disk->queue;
	sector_t capacity = get_capacity(disk);

	/*
	 * All zones must have the same size, with the exception of a possibly
	 * smaller last zone.
	 */
	if (zone->start == 0) {
		if (zone->len == 0 || !is_power_of_2(zone->len)) {
			pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
				disk->disk_name, zone->len);
			return -ENODEV;
		}

		args->zone_sectors = zone->len;
		args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
	} else if (zone->start + args->zone_sectors < capacity) {
		if (zone->len != args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with non constant zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	} else {
		if (zone->len > args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with larger last zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	}

	/* Check for holes in the zone report */
	if (zone->start != args->sector) {
		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
			disk->disk_name, args->sector, zone->start);
		return -ENODEV;
	}

	/* Check zone type */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!args->conv_zones_bitmap) {
			args->conv_zones_bitmap =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->conv_zones_bitmap)
				return -ENOMEM;
		}
		set_bit(idx, args->conv_zones_bitmap);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		if (!args->seq_zones_wlock) {
			args->seq_zones_wlock =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->seq_zones_wlock)
				return -ENOMEM;
		}
		break;
	default:
		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
			disk->disk_name, (int)zone->type, zone->start);
		return -ENODEV;
	}

	args->sector += zone->len;
	return 0;
}

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk:	Target disk
 * @update_driver_data:	Callback to update driver data on the frozen disk
 *
 * Helper function for low-level device drivers to (re)allocate and initialize
 * the zone bitmaps of a disk request queue. This function should normally be
 * called within the disk ->revalidate method for blk-mq based drivers.  For
 * BIO based drivers, only disk->nr_zones needs to be updated so that the sysfs
 * exposed value is correct.
 * If the @update_driver_data callback function is not NULL, the callback is
 * executed with the device request queue frozen after all zones have been
 * checked.
 */
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk))
{
	struct request_queue *q = disk->queue;
	struct blk_revalidate_zone_args args = {
		.disk		= disk,
	};
	unsigned int noio_flag;
	int ret;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return -EIO;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		return -EIO;

	if (!get_capacity(disk))
		return -EIO;

	/*
	 * Ensure that all memory allocations in this context are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
				       blk_revalidate_zone_cb, &args);
	if (!ret) {
		pr_warn("%s: No zones reported\n", disk->disk_name);
		ret = -ENODEV;
	}
	memalloc_noio_restore(noio_flag);

	/*
	 * If zones were reported, make sure that the entire disk capacity
	 * has been checked.
	 */
	if (ret > 0 && args.sector != get_capacity(disk)) {
		pr_warn("%s: Missing zones from sector %llu\n",
			disk->disk_name, args.sector);
		ret = -ENODEV;
	}

	/*
	 * Install the new bitmaps and update nr_zones only once the queue is
	 * stopped and all I/Os are completed (i.e. a scheduler is not
	 * referencing the bitmaps).
	 */
	blk_mq_freeze_queue(q);
	if (ret > 0) {
		blk_queue_chunk_sectors(q, args.zone_sectors);
		disk->nr_zones = args.nr_zones;
		swap(disk->seq_zones_wlock, args.seq_zones_wlock);
		swap(disk->conv_zones_bitmap, args.conv_zones_bitmap);
		if (update_driver_data)
			update_driver_data(disk);
		ret = 0;
	} else {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		disk_free_zone_bitmaps(disk);
	}
	blk_mq_unfreeze_queue(q);

	kfree(args.seq_zones_wlock);
	kfree(args.conv_zones_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
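
/*
 * Example (illustrative sketch only): a blk-mq based zoned driver would
 * typically revalidate its zones after a capacity or zone configuration
 * change with
 *
 *	ret = blk_revalidate_disk_zones(disk, NULL);
 *
 * optionally passing a callback instead of NULL to update driver private
 * data while the request queue is frozen.
 */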

void disk_clear_zone_settings(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	blk_mq_freeze_queue(q);

	disk_free_zone_bitmaps(disk);
	blk_queue_flag_clear(QUEUE_FLAG_ZONE_RESETALL, q);
	q->required_elevator_features &= ~ELEVATOR_F_ZBD_SEQ_WRITE;
	disk->nr_zones = 0;
	disk->max_open_zones = 0;
	disk->max_active_zones = 0;
	q->limits.chunk_sectors = 0;
	q->limits.zone_write_granularity = 0;
	q->limits.max_zone_append_sectors = 0;

	blk_mq_unfreeze_queue(q);
}