/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))

#ifndef MODULE
static void autostart_arrays(int part);
#endif

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

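/* Report an internal inconsistency (with file and line) and dump the
 * state of all known md devices.  Note that any arguments passed as
 * x... are currently ignored by the macro body.
 */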
#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or via /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
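
/* Per-array sync speed limits (the sysfs sync_speed_{min,max} values)
 * override the global sysctl defaults above; a value of 0 means
 * "use the global default".
 */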
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{  }
};

static const struct block_device_operations md_fops;

static int start_readonly;

/* bio_clone_mddev
 * like bio_clone, but with a local bio set
 */

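/* bio_alloc_mddev() stores the owning mddev_t pointer in the word
 * immediately preceding each bio (the mddev bio_set is assumed to have
 * been created with enough front padding for this); the destructor
 * recovers it via mddevp[-1] to free the bio back to the right set.
 */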
static void mddev_bio_destructor(struct bio *bio)
{
	mddev_t *mddev, **mddevp;

	mddevp = (void*)bio;
	mddev = mddevp[-1];

	bio_free(bio, mddev->bio_set);
}

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    mddev_t *mddev)
{
	struct bio *b;
	mddev_t **mddevp;

	if (!mddev || !mddev->bio_set)
		return bio_alloc(gfp_mask, nr_iovecs);

	b = bio_alloc_bioset(gfp_mask, nr_iovecs,
			     mddev->bio_set);
	if (!b)
		return NULL;
	mddevp = (void*)b;
	mddevp[-1] = mddev;
	b->bi_destructor = mddev_bio_destructor;
	return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
			    mddev_t *mddev)
{
	struct bio *b;
	mddev_t **mddevp;

	if (!mddev || !mddev->bio_set)
		return bio_clone(bio, gfp_mask);

	b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs,
			     mddev->bio_set);
	if (!b)
		return NULL;
	mddevp = (void*)b;
	mddevp[-1] = mddev;
	b->bi_destructor = mddev_bio_destructor;
	__bio_clone(b, bio);
	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask, mddev->bio_set);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put() it.
 */
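/* Typical usage (a sketch -- callers in this file follow this pattern):
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		do_something_with(mddev);
 *
 * do_something_with() is a placeholder; if you break out early you are
 * still holding the reference the loop took for you.
 */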
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)


/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static int md_make_request(struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	mddev_t *mddev = q->queuedata;
	int rv;
	int cpu;
	unsigned int sectors;

	if (mddev == NULL || mddev->pers == NULL
	    || !mddev->ready) {
		bio_io_error(bio);
		return 0;
	}
	smp_rmb(); /* Ensure implications of 'active' are visible */
	rcu_read_lock();
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	/*
	 * save the sectors now since our bio can
	 * go away inside make_request
	 */
	sectors = bio_sectors(bio);
	rv = mddev->pers->make_request(mddev, bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
	part_stat_unlock();

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);

	return rv;
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once ->stop is called and completes, the module will be completely
 * unused.
 */
void mddev_suspend(mddev_t *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(mddev_t *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);
}
EXPORT_SYMBOL_GPL(mddev_resume);

int mddev_congested(mddev_t *mddev, int bits)
{
	return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);

/*
 * Generic flush handling for md
 */
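/* In outline: md_flush_request() stashes the incoming REQ_FLUSH bio in
 * mddev->flush_bio and queues submit_flushes(), which sends an empty
 * WRITE_FLUSH bio to every active component device.  When the last of
 * those completes, md_submit_flush_data() runs and resubmits the
 * original bio with REQ_FLUSH cleared (or completes it immediately if
 * it carried no data).
 */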

static void md_end_flush(struct bio *bio, int err)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, flush_work);
	mdk_rdev_t *rdev;

	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we re-take rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_FLUSH, bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, flush_work);
	struct bio *bio = mddev->flush_bio;

	if (bio->bi_size == 0)
		/* an empty barrier - all done */
		bio_endio(bio, 0);
	else {
		bio->bi_rw &= ~REQ_FLUSH;
		if (mddev->pers->make_request(mddev, bio))
			generic_make_request(bio);
	}

	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);
}

void md_flush_request(mddev_t *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->write_lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio,
			    mddev->write_lock, /*nothing*/);
	mddev->flush_bio = bio;
	spin_unlock_irq(&mddev->write_lock);

	INIT_WORK(&mddev->flush_work, submit_flushes);
	queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);

/* Support for plugging.
 * This mirrors the plugging support in request_queue, but does not
 * require having a whole queue or request structures.
 * We allocate an md_plug_cb for each md device and each thread it gets
 * plugged on.  This links to the private plug_handle structure in the
 * personality data where we keep a count of the number of outstanding
 * plugs so other code can see if a plug is active.
 */
struct md_plug_cb {
	struct blk_plug_cb cb;
	mddev_t *mddev;
};

static void plugger_unplug(struct blk_plug_cb *cb)
{
	struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
	if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
		md_wakeup_thread(mdcb->mddev->thread);
	kfree(mdcb);
}

/* Check that an unplug wakeup will come shortly.
 * If not, wakeup the md thread immediately
 */
int mddev_check_plugged(mddev_t *mddev)
{
	struct blk_plug *plug = current->plug;
	struct md_plug_cb *mdcb;

	if (!plug)
		return 0;

	list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
		if (mdcb->cb.callback == plugger_unplug &&
		    mdcb->mddev == mddev) {
			/* Already on the list, move to top */
			if (mdcb != list_first_entry(&plug->cb_list,
						    struct md_plug_cb,
						    cb.list))
				list_move(&mdcb->cb.list, &plug->cb_list);
			return 1;
		}
	}
	/* Not currently on the callback list */
	mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
	if (!mdcb)
		return 0;

	mdcb->mddev = mddev;
	mdcb->cb.callback = plugger_unplug;
	atomic_inc(&mddev->plug_cnt);
	list_add(&mdcb->cb.list, &plug->cb_list);
	return 1;
}
EXPORT_SYMBOL_GPL(mddev_check_plugged);

static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(mddev_t *mddev)
{
	struct bio_set *bs = NULL;

	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del(&mddev->all_mddevs);
		bs = mddev->bio_set;
		mddev->bio_set = NULL;
		if (mddev->gendisk) {
			/* We did a probe so need to clean up.  Call
			 * queue_work inside the spinlock so that
			 * flush_workqueue() after mddev_find will
			 * succeed in waiting for the work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			queue_work(md_misc_wq, &mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
	if (bs)
		bioset_free(bs);
}

void mddev_init(mddev_t *mddev)
{
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	init_timer(&mddev->safemode_timer);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	atomic_set(&mddev->plug_cnt, 0);
	spin_lock_init(&mddev->write_lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

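/* Find the mddev for a given unit number, or allocate a new one.
 * Allocation cannot happen under all_mddevs_lock, so the pattern is:
 * search under the lock; if nothing is found, drop the lock, kzalloc()
 * a candidate and retry; if another thread won the race in the
 * meantime, the candidate is simply kfree()d.  Passing unit == 0
 * allocates an unused unit number instead.
 */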
static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}

static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(mddev_t *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static struct attribute_group md_redundancy_group;

static void mddev_unlock(mddev_t * mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So we set sysfs_active while the remove is happening,
		 * and anything else which might set ->to_remove or
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}

static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(mdk_rdev_t *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
}


static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);

	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	atomic_inc(&mddev->pending_writes);
	submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
}

void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}

static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}

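/* Synchronously read or write 'size' bytes of 'page' at 'sector' on
 * rdev.  When metadata_op is true the sector is relative to the
 * superblock (rdev->sb_start, possibly on a separate meta_bdev);
 * otherwise it is relative to rdev->data_offset.  Returns 1 on
 * success, 0 on failure.
 */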
int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
		 struct page *page, int rw, bool metadata_op)
{
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
	struct completion event;
	int ret;

	rw |= REQ_SYNC;

	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
		rdev->meta_bdev : rdev->bdev;
	if (metadata_op)
		bio->bi_sector = sector + rdev->sb_start;
	else
		bio->bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;


	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}


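/* Fold a 32-bit checksum down to 16 bits, adding the carry back in
 * twice so the result is stable.
 */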
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}

static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);


#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}


/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
					  int minor_version);
	int		    (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		    (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
						sector_t num_sectors);
};

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(mddev_t *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;

	if (rdev->sectors < sb->size * 2 && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = sb->size * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
				sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL)
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;


	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}


/*
 * version 1 superblock
 */

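/* Checksum the fixed 256-byte superblock head plus the 2-byte role
 * entry for each of max_dev devices: sum little-endian 32-bit words
 * (plus a trailing 16-bit word when max_dev is odd) and fold the
 * 64-bit total back to 32 bits.
 */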
static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}

static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
			le64_to_cpu(sb->data_offset);
	else
		rdev->sectors = rdev->sb_start;
	if (rdev->sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	if (le64_to_cpu(sb->size) > rdev->sectors)
		return -EINVAL;
	return ret;
}

static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL )
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except for
		 * spares (which don't need an event count) */
		++ev1;
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}

static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
	}

	max_dev = 0;
	list_for_each_entry(rdev2, &mddev->disks, same_set)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	} else
		max_dev = le32_to_cpu(sb->max_dev);

	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}

static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}

1734 static struct super_type super_types[] = {
1735 	[0] = {
1736 		.name	= "0.90.0",
1737 		.owner	= THIS_MODULE,
1738 		.load_super	    = super_90_load,
1739 		.validate_super	    = super_90_validate,
1740 		.sync_super	    = super_90_sync,
1741 		.rdev_size_change   = super_90_rdev_size_change,
1742 	},
1743 	[1] = {
1744 		.name	= "md-1",
1745 		.owner	= THIS_MODULE,
1746 		.load_super	    = super_1_load,
1747 		.validate_super	    = super_1_validate,
1748 		.sync_super	    = super_1_sync,
1749 		.rdev_size_change   = super_1_rdev_size_change,
1750 	},
1751 };
1752 
match_mddev_units(mddev_t * mddev1,mddev_t * mddev2)1753 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1754 {
1755 	mdk_rdev_t *rdev, *rdev2;
1756 
1757 	rcu_read_lock();
1758 	rdev_for_each_rcu(rdev, mddev1)
1759 		rdev_for_each_rcu(rdev2, mddev2)
1760 			if (rdev->bdev->bd_contains ==
1761 			    rdev2->bdev->bd_contains) {
1762 				rcu_read_unlock();
1763 				return 1;
1764 			}
1765 	rcu_read_unlock();
1766 	return 0;
1767 }
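/*
 * Example of what "match" means here (device names are illustrative):
 * if md0 uses /dev/sda1 and md1 uses /dev/sda2, both rdevs share the
 * same bd_contains (the whole disk sda), so match_mddev_units() reports
 * the two arrays as overlapping even though the partitions are disjoint.
 */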
1768 
1769 static LIST_HEAD(pending_raid_disks);
1770 
1771 /*
1772  * Try to register data integrity profile for an mddev
1773  *
1774  * This is called when an array is started and after a disk has been kicked
1775  * from the array. It only succeeds if all working and active component devices
1776  * are integrity capable with matching profiles.
1777  */
1778 int md_integrity_register(mddev_t *mddev)
1779 {
1780 	mdk_rdev_t *rdev, *reference = NULL;
1781 
1782 	if (list_empty(&mddev->disks))
1783 		return 0; /* nothing to do */
1784 	if (blk_get_integrity(mddev->gendisk))
1785 		return 0; /* already registered */
1786 	list_for_each_entry(rdev, &mddev->disks, same_set) {
1787 		/* skip spares and non-functional disks */
1788 		if (test_bit(Faulty, &rdev->flags))
1789 			continue;
1790 		if (rdev->raid_disk < 0)
1791 			continue;
1792 		if (!reference) {
1793 			/* Use the first rdev as the reference */
1794 			reference = rdev;
1795 			continue;
1796 		}
1797 		/* does this rdev's profile match the reference profile? */
1798 		if (blk_integrity_compare(reference->bdev->bd_disk,
1799 				rdev->bdev->bd_disk) < 0)
1800 			return -EINVAL;
1801 	}
1802 	if (!reference || !bdev_get_integrity(reference->bdev))
1803 		return 0;
1804 	/*
1805 	 * All component devices are integrity capable and have matching
1806 	 * profiles, register the common profile for the md device.
1807 	 */
1808 	if (blk_integrity_register(mddev->gendisk,
1809 			bdev_get_integrity(reference->bdev)) != 0) {
1810 		printk(KERN_ERR "md: failed to register integrity for %s\n",
1811 			mdname(mddev));
1812 		return -EINVAL;
1813 	}
1814 	printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
1815 	if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
1816 		printk(KERN_ERR "md: failed to create integrity pool for %s\n",
1817 		       mdname(mddev));
1818 		return -EINVAL;
1819 	}
1820 	return 0;
1821 }
1822 EXPORT_SYMBOL(md_integrity_register);
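/*
 * Minimal sketch of the expected call site (illustrative only; the
 * personality name is made up): a personality calls this once all
 * component rdevs are bound, typically at the end of its ->run():
 *
 *	static int example_run(mddev_t *mddev)
 *	{
 *		... attach rdevs, set up queue limits ...
 *		return md_integrity_register(mddev);
 *	}
 */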
1823 
1824 /* Disable data integrity if non-capable/non-matching disk is being added */
1825 void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
1826 {
1827 	struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
1828 	struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);
1829 
1830 	if (!bi_mddev) /* nothing to do */
1831 		return;
1832 	if (rdev->raid_disk < 0) /* skip spares */
1833 		return;
1834 	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
1835 					     rdev->bdev->bd_disk) >= 0)
1836 		return;
1837 	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
1838 	blk_integrity_unregister(mddev->gendisk);
1839 }
1840 EXPORT_SYMBOL(md_integrity_add_rdev);
1841 
1842 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1843 {
1844 	char b[BDEVNAME_SIZE];
1845 	struct kobject *ko;
1846 	char *s;
1847 	int err;
1848 
1849 	if (rdev->mddev) {
1850 		MD_BUG();
1851 		return -EINVAL;
1852 	}
1853 
1854 	/* prevent duplicates */
1855 	if (find_rdev(mddev, rdev->bdev->bd_dev))
1856 		return -EEXIST;
1857 
1858 	/* make sure rdev->sectors exceeds mddev->dev_sectors */
1859 	if (rdev->sectors && (mddev->dev_sectors == 0 ||
1860 			rdev->sectors < mddev->dev_sectors)) {
1861 		if (mddev->pers) {
1862 			/* Cannot change size, so fail
1863 			 * If mddev->level <= 0, then we don't care
1864 			 * about aligning sizes (e.g. linear)
1865 			 */
1866 			if (mddev->level > 0)
1867 				return -ENOSPC;
1868 		} else
1869 			mddev->dev_sectors = rdev->sectors;
1870 	}
1871 
1872 	/* Verify rdev->desc_nr is unique.
1873 	 * If it is -1, assign a free number, else
1874 	 * check number is not in use
1875 	 */
1876 	if (rdev->desc_nr < 0) {
1877 		int choice = 0;
1878 		if (mddev->pers) choice = mddev->raid_disks;
1879 		while (find_rdev_nr(mddev, choice))
1880 			choice++;
1881 		rdev->desc_nr = choice;
1882 	} else {
1883 		if (find_rdev_nr(mddev, rdev->desc_nr))
1884 			return -EBUSY;
1885 	}
1886 	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
1887 		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
1888 		       mdname(mddev), mddev->max_disks);
1889 		return -EBUSY;
1890 	}
1891 	bdevname(rdev->bdev,b);
1892 	while ( (s=strchr(b, '/')) != NULL)
1893 		*s = '!';
1894 
1895 	rdev->mddev = mddev;
1896 	printk(KERN_INFO "md: bind<%s>\n", b);
1897 
1898 	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
1899 		goto fail;
1900 
1901 	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
1902 	if (sysfs_create_link(&rdev->kobj, ko, "block"))
1903 		/* failure here is OK */;
1904 	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
1905 
1906 	list_add_rcu(&rdev->same_set, &mddev->disks);
1907 	bd_link_disk_holder(rdev->bdev, mddev->gendisk);
1908 
1909 	/* May as well allow recovery to be retried once */
1910 	mddev->recovery_disabled = 0;
1911 
1912 	return 0;
1913 
1914  fail:
1915 	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
1916 	       b, mdname(mddev));
1917 	return err;
1918 }
1919 
1920 static void md_delayed_delete(struct work_struct *ws)
1921 {
1922 	mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
1923 	kobject_del(&rdev->kobj);
1924 	kobject_put(&rdev->kobj);
1925 }
1926 
1927 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1928 {
1929 	char b[BDEVNAME_SIZE];
1930 	if (!rdev->mddev) {
1931 		MD_BUG();
1932 		return;
1933 	}
1934 	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
1935 	list_del_rcu(&rdev->same_set);
1936 	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1937 	rdev->mddev = NULL;
1938 	sysfs_remove_link(&rdev->kobj, "block");
1939 	sysfs_put(rdev->sysfs_state);
1940 	rdev->sysfs_state = NULL;
1941 	/* We need to delay this, otherwise we can deadlock when
1942 	 * writing 'remove' to "dev/state".  We also need
1943 	 * to delay it due to rcu usage.
1944 	 */
1945 	synchronize_rcu();
1946 	INIT_WORK(&rdev->del_work, md_delayed_delete);
1947 	kobject_get(&rdev->kobj);
1948 	queue_work(md_misc_wq, &rdev->del_work);
1949 }
1950 
1951 /*
1952  * prevent the device from being mounted, repartitioned or
1953  * otherwise reused by a RAID array (or any other kernel
1954  * subsystem), by bd_claiming the device.
1955  */
1956 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
1957 {
1958 	int err = 0;
1959 	struct block_device *bdev;
1960 	char b[BDEVNAME_SIZE];
1961 
1962 	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
1963 				 shared ? (mdk_rdev_t *)lock_rdev : rdev);
1964 	if (IS_ERR(bdev)) {
1965 		printk(KERN_ERR "md: could not open %s.\n",
1966 			__bdevname(dev, b));
1967 		return PTR_ERR(bdev);
1968 	}
1969 	rdev->bdev = bdev;
1970 	return err;
1971 }
1972 
1973 static void unlock_rdev(mdk_rdev_t *rdev)
1974 {
1975 	struct block_device *bdev = rdev->bdev;
1976 	rdev->bdev = NULL;
1977 	if (!bdev)
1978 		MD_BUG();
1979 	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1980 }
1981 
1982 void md_autodetect_dev(dev_t dev);
1983 
1984 static void export_rdev(mdk_rdev_t * rdev)
1985 {
1986 	char b[BDEVNAME_SIZE];
1987 	printk(KERN_INFO "md: export_rdev(%s)\n",
1988 		bdevname(rdev->bdev,b));
1989 	if (rdev->mddev)
1990 		MD_BUG();
1991 	free_disk_sb(rdev);
1992 #ifndef MODULE
1993 	if (test_bit(AutoDetected, &rdev->flags))
1994 		md_autodetect_dev(rdev->bdev->bd_dev);
1995 #endif
1996 	unlock_rdev(rdev);
1997 	kobject_put(&rdev->kobj);
1998 }
1999 
2000 static void kick_rdev_from_array(mdk_rdev_t * rdev)
2001 {
2002 	unbind_rdev_from_array(rdev);
2003 	export_rdev(rdev);
2004 }
2005 
2006 static void export_array(mddev_t *mddev)
2007 {
2008 	mdk_rdev_t *rdev, *tmp;
2009 
2010 	rdev_for_each(rdev, tmp, mddev) {
2011 		if (!rdev->mddev) {
2012 			MD_BUG();
2013 			continue;
2014 		}
2015 		kick_rdev_from_array(rdev);
2016 	}
2017 	if (!list_empty(&mddev->disks))
2018 		MD_BUG();
2019 	mddev->raid_disks = 0;
2020 	mddev->major_version = 0;
2021 }
2022 
2023 static void print_desc(mdp_disk_t *desc)
2024 {
2025 	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
2026 		desc->major,desc->minor,desc->raid_disk,desc->state);
2027 }
2028 
2029 static void print_sb_90(mdp_super_t *sb)
2030 {
2031 	int i;
2032 
2033 	printk(KERN_INFO
2034 		"md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
2035 		sb->major_version, sb->minor_version, sb->patch_version,
2036 		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
2037 		sb->ctime);
2038 	printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
2039 		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
2040 		sb->md_minor, sb->layout, sb->chunk_size);
2041 	printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
2042 		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
2043 		sb->utime, sb->state, sb->active_disks, sb->working_disks,
2044 		sb->failed_disks, sb->spare_disks,
2045 		sb->sb_csum, (unsigned long)sb->events_lo);
2046 
2047 	printk(KERN_INFO);
2048 	for (i = 0; i < MD_SB_DISKS; i++) {
2049 		mdp_disk_t *desc;
2050 
2051 		desc = sb->disks + i;
2052 		if (desc->number || desc->major || desc->minor ||
2053 		    desc->raid_disk || (desc->state && (desc->state != 4))) {
2054 			printk("     D %2d: ", i);
2055 			print_desc(desc);
2056 		}
2057 	}
2058 	printk(KERN_INFO "md:     THIS: ");
2059 	print_desc(&sb->this_disk);
2060 }
2061 
2062 static void print_sb_1(struct mdp_superblock_1 *sb)
2063 {
2064 	__u8 *uuid;
2065 
2066 	uuid = sb->set_uuid;
2067 	printk(KERN_INFO
2068 	       "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
2069 	       "md:    Name: \"%s\" CT:%llu\n",
2070 		le32_to_cpu(sb->major_version),
2071 		le32_to_cpu(sb->feature_map),
2072 		uuid,
2073 		sb->set_name,
2074 		(unsigned long long)le64_to_cpu(sb->ctime)
2075 		       & MD_SUPERBLOCK_1_TIME_SEC_MASK);
2076 
2077 	uuid = sb->device_uuid;
2078 	printk(KERN_INFO
2079 	       "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
2080 			" RO:%llu\n"
2081 	       "md:     Dev:%08x UUID: %pU\n"
2082 	       "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
2083 	       "md:         (MaxDev:%u) \n",
2084 		le32_to_cpu(sb->level),
2085 		(unsigned long long)le64_to_cpu(sb->size),
2086 		le32_to_cpu(sb->raid_disks),
2087 		le32_to_cpu(sb->layout),
2088 		le32_to_cpu(sb->chunksize),
2089 		(unsigned long long)le64_to_cpu(sb->data_offset),
2090 		(unsigned long long)le64_to_cpu(sb->data_size),
2091 		(unsigned long long)le64_to_cpu(sb->super_offset),
2092 		(unsigned long long)le64_to_cpu(sb->recovery_offset),
2093 		le32_to_cpu(sb->dev_number),
2094 		uuid,
2095 		sb->devflags,
2096 		(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
2097 		(unsigned long long)le64_to_cpu(sb->events),
2098 		(unsigned long long)le64_to_cpu(sb->resync_offset),
2099 		le32_to_cpu(sb->sb_csum),
2100 		le32_to_cpu(sb->max_dev)
2101 		);
2102 }
2103 
2104 static void print_rdev(mdk_rdev_t *rdev, int major_version)
2105 {
2106 	char b[BDEVNAME_SIZE];
2107 	printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
2108 		bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
2109 	        test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
2110 	        rdev->desc_nr);
2111 	if (rdev->sb_loaded) {
2112 		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
2113 		switch (major_version) {
2114 		case 0:
2115 			print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
2116 			break;
2117 		case 1:
2118 			print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
2119 			break;
2120 		}
2121 	} else
2122 		printk(KERN_INFO "md: no rdev superblock!\n");
2123 }
2124 
2125 static void md_print_devices(void)
2126 {
2127 	struct list_head *tmp;
2128 	mdk_rdev_t *rdev;
2129 	mddev_t *mddev;
2130 	char b[BDEVNAME_SIZE];
2131 
2132 	printk("\n");
2133 	printk("md:	**********************************\n");
2134 	printk("md:	* <COMPLETE RAID STATE PRINTOUT> *\n");
2135 	printk("md:	**********************************\n");
2136 	for_each_mddev(mddev, tmp) {
2137 
2138 		if (mddev->bitmap)
2139 			bitmap_print_sb(mddev->bitmap);
2140 		else
2141 			printk("%s: ", mdname(mddev));
2142 		list_for_each_entry(rdev, &mddev->disks, same_set)
2143 			printk("<%s>", bdevname(rdev->bdev,b));
2144 		printk("\n");
2145 
2146 		list_for_each_entry(rdev, &mddev->disks, same_set)
2147 			print_rdev(rdev, mddev->major_version);
2148 	}
2149 	printk("md:	**********************************\n");
2150 	printk("\n");
2151 }
2152 
2153 
2154 static void sync_sbs(mddev_t * mddev, int nospares)
2155 {
2156 	/* Update each superblock (in-memory image), but
2157 	 * if we are allowed to, skip spares which already
2158 	 * have the right event counter, or have one earlier
2159 	 * (which would mean they aren't being marked as dirty
2160 	 * with the rest of the array)
2161 	 */
2162 	mdk_rdev_t *rdev;
2163 	list_for_each_entry(rdev, &mddev->disks, same_set) {
2164 		if (rdev->sb_events == mddev->events ||
2165 		    (nospares &&
2166 		     rdev->raid_disk < 0 &&
2167 		     rdev->sb_events+1 == mddev->events)) {
2168 			/* Don't update this superblock */
2169 			rdev->sb_loaded = 2;
2170 		} else {
2171 			super_types[mddev->major_version].
2172 				sync_super(mddev, rdev);
2173 			rdev->sb_loaded = 1;
2174 		}
2175 	}
2176 }
2177 
2178 static void md_update_sb(mddev_t * mddev, int force_change)
2179 {
2180 	mdk_rdev_t *rdev;
2181 	int sync_req;
2182 	int nospares = 0;
2183 
2184 repeat:
2185 	/* First make sure individual recovery_offsets are correct */
2186 	list_for_each_entry(rdev, &mddev->disks, same_set) {
2187 		if (rdev->raid_disk >= 0 &&
2188 		    mddev->delta_disks >= 0 &&
2189 		    !test_bit(In_sync, &rdev->flags) &&
2190 		    mddev->curr_resync_completed > rdev->recovery_offset)
2191 				rdev->recovery_offset = mddev->curr_resync_completed;
2192 
2193 	}
2194 	if (!mddev->persistent) {
2195 		clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2196 		clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2197 		if (!mddev->external)
2198 			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2199 		wake_up(&mddev->sb_wait);
2200 		return;
2201 	}
2202 
2203 	spin_lock_irq(&mddev->write_lock);
2204 
2205 	mddev->utime = get_seconds();
2206 
2207 	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2208 		force_change = 1;
2209 	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
2210 		/* just a clean <-> dirty transition, possibly leave spares alone,
2211 		 * though if events isn't the right even/odd, we will have to do
2212 		 * spares after all
2213 		 */
2214 		nospares = 1;
2215 	if (force_change)
2216 		nospares = 0;
2217 	if (mddev->degraded)
2218 		/* If the array is degraded, then skipping spares is both
2219 		 * dangerous and fairly pointless.
2220 		 * Dangerous because a device that was removed from the array
2221 		 * might have an event_count that still looks up-to-date,
2222 		 * so it can be re-added without a resync.
2223 		 * Pointless because if there are any spares to skip,
2224 		 * then a recovery will happen and soon that array won't
2225 		 * be degraded any more and the spare can go back to sleep then.
2226 		 */
2227 		nospares = 0;
2228 
2229 	sync_req = mddev->in_sync;
2230 
2231 	/* If this is just a dirty<->clean transition, and the array is clean
2232 	 * and 'events' is odd, we can roll back to the previous clean state */
2233 	if (nospares
2234 	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2235 	    && mddev->can_decrease_events
2236 	    && mddev->events != 1) {
2237 		mddev->events--;
2238 		mddev->can_decrease_events = 0;
2239 	} else {
2240 		/* otherwise we have to go forward and ... */
2241 		mddev->events ++;
2242 		mddev->can_decrease_events = nospares;
2243 	}
2244 
2245 	if (!mddev->events) {
2246 		/*
2247 		 * oops, this 64-bit counter should never wrap.
2248 		 * Either we are somewhere around the year 1 trillion A.D.,
2249 		 * assuming 1 reboot per second, or we have a bug:
2250 		 */
2251 		MD_BUG();
2252 		mddev->events --;
2253 	}
2254 	sync_sbs(mddev, nospares);
2255 	spin_unlock_irq(&mddev->write_lock);
2256 
2257 	dprintk(KERN_INFO
2258 		"md: updating %s RAID superblock on device (in sync %d)\n",
2259 		mdname(mddev),mddev->in_sync);
2260 
2261 	bitmap_update_sb(mddev->bitmap);
2262 	list_for_each_entry(rdev, &mddev->disks, same_set) {
2263 		char b[BDEVNAME_SIZE];
2264 		dprintk(KERN_INFO "md: ");
2265 		if (rdev->sb_loaded != 1)
2266 			continue; /* no noise on spare devices */
2267 		if (test_bit(Faulty, &rdev->flags))
2268 			dprintk("(skipping faulty ");
2269 
2270 		dprintk("%s ", bdevname(rdev->bdev,b));
2271 		if (!test_bit(Faulty, &rdev->flags)) {
2272 			md_super_write(mddev,rdev,
2273 				       rdev->sb_start, rdev->sb_size,
2274 				       rdev->sb_page);
2275 			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
2276 				bdevname(rdev->bdev,b),
2277 				(unsigned long long)rdev->sb_start);
2278 			rdev->sb_events = mddev->events;
2279 
2280 		} else
2281 			dprintk(")\n");
2282 		if (mddev->level == LEVEL_MULTIPATH)
2283 			/* only need to write one superblock... */
2284 			break;
2285 	}
2286 	md_super_wait(mddev);
2287 	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
2288 
2289 	spin_lock_irq(&mddev->write_lock);
2290 	if (mddev->in_sync != sync_req ||
2291 	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
2292 		/* have to write it out again */
2293 		spin_unlock_irq(&mddev->write_lock);
2294 		goto repeat;
2295 	}
2296 	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2297 	spin_unlock_irq(&mddev->write_lock);
2298 	wake_up(&mddev->sb_wait);
2299 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2300 		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2301 
2302 }
2303 
2304 /* words written to sysfs files may, or may not, be \n terminated.
2305  * We want to accept either case. For this we use cmd_match.
2306  */
2307 static int cmd_match(const char *cmd, const char *str)
2308 {
2309 	/* See if cmd, written into a sysfs file, matches
2310 	 * str.  They must either be the same, or cmd can
2311 	 * have a trailing newline
2312 	 */
2313 	while (*cmd && *str && *cmd == *str) {
2314 		cmd++;
2315 		str++;
2316 	}
2317 	if (*cmd == '\n')
2318 		cmd++;
2319 	if (*str || *cmd)
2320 		return 0;
2321 	return 1;
2322 }
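/*
 * Examples (added for clarity): cmd_match("faulty\n", "faulty") and
 * cmd_match("faulty", "faulty") both return 1, while
 * cmd_match("fault", "faulty") and cmd_match("faulty0", "faulty")
 * both return 0 - only a single trailing newline is forgiven.
 */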
2323 
2324 struct rdev_sysfs_entry {
2325 	struct attribute attr;
2326 	ssize_t (*show)(mdk_rdev_t *, char *);
2327 	ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
2328 };
2329 
2330 static ssize_t
2331 state_show(mdk_rdev_t *rdev, char *page)
2332 {
2333 	char *sep = "";
2334 	size_t len = 0;
2335 
2336 	if (test_bit(Faulty, &rdev->flags)) {
2337 		len+= sprintf(page+len, "%sfaulty",sep);
2338 		sep = ",";
2339 	}
2340 	if (test_bit(In_sync, &rdev->flags)) {
2341 		len += sprintf(page+len, "%sin_sync",sep);
2342 		sep = ",";
2343 	}
2344 	if (test_bit(WriteMostly, &rdev->flags)) {
2345 		len += sprintf(page+len, "%swrite_mostly",sep);
2346 		sep = ",";
2347 	}
2348 	if (test_bit(Blocked, &rdev->flags)) {
2349 		len += sprintf(page+len, "%sblocked", sep);
2350 		sep = ",";
2351 	}
2352 	if (!test_bit(Faulty, &rdev->flags) &&
2353 	    !test_bit(In_sync, &rdev->flags)) {
2354 		len += sprintf(page+len, "%sspare", sep);
2355 		sep = ",";
2356 	}
2357 	return len+sprintf(page+len, "\n");
2358 }
2359 
2360 static ssize_t
2361 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2362 {
2363 	/* can write
2364 	 *  faulty  - simulates an error
2365 	 *  remove  - disconnects the device
2366 	 *  writemostly - sets write_mostly
2367 	 *  -writemostly - clears write_mostly
2368 	 *  blocked - sets the Blocked flag
2369 	 *  -blocked - clears the Blocked flag
2370 	 *  insync - sets In_sync provided the device isn't active
2371 	 */
2372 	int err = -EINVAL;
2373 	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2374 		md_error(rdev->mddev, rdev);
2375 		err = 0;
2376 	} else if (cmd_match(buf, "remove")) {
2377 		if (rdev->raid_disk >= 0)
2378 			err = -EBUSY;
2379 		else {
2380 			mddev_t *mddev = rdev->mddev;
2381 			kick_rdev_from_array(rdev);
2382 			if (mddev->pers)
2383 				md_update_sb(mddev, 1);
2384 			md_new_event(mddev);
2385 			err = 0;
2386 		}
2387 	} else if (cmd_match(buf, "writemostly")) {
2388 		set_bit(WriteMostly, &rdev->flags);
2389 		err = 0;
2390 	} else if (cmd_match(buf, "-writemostly")) {
2391 		clear_bit(WriteMostly, &rdev->flags);
2392 		err = 0;
2393 	} else if (cmd_match(buf, "blocked")) {
2394 		set_bit(Blocked, &rdev->flags);
2395 		err = 0;
2396 	} else if (cmd_match(buf, "-blocked")) {
2397 		clear_bit(Blocked, &rdev->flags);
2398 		wake_up(&rdev->blocked_wait);
2399 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2400 		md_wakeup_thread(rdev->mddev->thread);
2401 
2402 		err = 0;
2403 	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2404 		set_bit(In_sync, &rdev->flags);
2405 		err = 0;
2406 	}
2407 	if (!err)
2408 		sysfs_notify_dirent_safe(rdev->sysfs_state);
2409 	return err ? err : len;
2410 }
2411 static struct rdev_sysfs_entry rdev_state =
2412 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
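/*
 * The attribute above appears as /sys/block/mdX/md/dev-DEVNAME/state.
 * An illustrative session (device names are examples):
 *
 *	# cat /sys/block/md0/md/dev-sdb1/state
 *	in_sync
 *	# echo faulty > /sys/block/md0/md/dev-sdb1/state
 *	# echo remove > /sys/block/md0/md/dev-sdb1/state
 */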
2413 
2414 static ssize_t
2415 errors_show(mdk_rdev_t *rdev, char *page)
2416 {
2417 	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2418 }
2419 
2420 static ssize_t
2421 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2422 {
2423 	char *e;
2424 	unsigned long n = simple_strtoul(buf, &e, 10);
2425 	if (*buf && (*e == 0 || *e == '\n')) {
2426 		atomic_set(&rdev->corrected_errors, n);
2427 		return len;
2428 	}
2429 	return -EINVAL;
2430 }
2431 static struct rdev_sysfs_entry rdev_errors =
2432 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2433 
2434 static ssize_t
2435 slot_show(mdk_rdev_t *rdev, char *page)
2436 {
2437 	if (rdev->raid_disk < 0)
2438 		return sprintf(page, "none\n");
2439 	else
2440 		return sprintf(page, "%d\n", rdev->raid_disk);
2441 }
2442 
2443 static ssize_t
2444 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2445 {
2446 	char *e;
2447 	int err;
2448 	char nm[20];
2449 	int slot = simple_strtoul(buf, &e, 10);
2450 	if (strncmp(buf, "none", 4)==0)
2451 		slot = -1;
2452 	else if (e==buf || (*e && *e!= '\n'))
2453 		return -EINVAL;
2454 	if (rdev->mddev->pers && slot == -1) {
2455 		/* Setting 'slot' on an active array requires also
2456 		 * updating the 'rd%d' link, and communicating
2457 		 * with the personality with ->hot_*_disk.
2458 		 * For now we only support removing
2459 		 * failed/spare devices.  This normally happens automatically,
2460 		 * but not when the metadata is externally managed.
2461 		 */
2462 		if (rdev->raid_disk == -1)
2463 			return -EEXIST;
2464 		/* personality does all needed checks */
2465 		if (rdev->mddev->pers->hot_remove_disk == NULL)
2466 			return -EINVAL;
2467 		err = rdev->mddev->pers->
2468 			hot_remove_disk(rdev->mddev, rdev->raid_disk);
2469 		if (err)
2470 			return err;
2471 		sprintf(nm, "rd%d", rdev->raid_disk);
2472 		sysfs_remove_link(&rdev->mddev->kobj, nm);
2473 		rdev->raid_disk = -1;
2474 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2475 		md_wakeup_thread(rdev->mddev->thread);
2476 	} else if (rdev->mddev->pers) {
2477 		mdk_rdev_t *rdev2;
2478 		/* Activating a spare .. or possibly reactivating
2479 		 * if we ever get bitmaps working here.
2480 		 */
2481 
2482 		if (rdev->raid_disk != -1)
2483 			return -EBUSY;
2484 
2485 		if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
2486 			return -EBUSY;
2487 
2488 		if (rdev->mddev->pers->hot_add_disk == NULL)
2489 			return -EINVAL;
2490 
2491 		list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
2492 			if (rdev2->raid_disk == slot)
2493 				return -EEXIST;
2494 
2495 		if (slot >= rdev->mddev->raid_disks &&
2496 		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2497 			return -ENOSPC;
2498 
2499 		rdev->raid_disk = slot;
2500 		if (test_bit(In_sync, &rdev->flags))
2501 			rdev->saved_raid_disk = slot;
2502 		else
2503 			rdev->saved_raid_disk = -1;
2504 		err = rdev->mddev->pers->
2505 			hot_add_disk(rdev->mddev, rdev);
2506 		if (err) {
2507 			rdev->raid_disk = -1;
2508 			return err;
2509 		} else
2510 			sysfs_notify_dirent_safe(rdev->sysfs_state);
2511 		sprintf(nm, "rd%d", rdev->raid_disk);
2512 		if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
2513 			/* failure here is OK */;
2514 		/* don't wakeup anyone, leave that to userspace. */
2515 	} else {
2516 		if (slot >= rdev->mddev->raid_disks &&
2517 		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2518 			return -ENOSPC;
2519 		rdev->raid_disk = slot;
2520 		/* assume it is working */
2521 		clear_bit(Faulty, &rdev->flags);
2522 		clear_bit(WriteMostly, &rdev->flags);
2523 		set_bit(In_sync, &rdev->flags);
2524 		sysfs_notify_dirent_safe(rdev->sysfs_state);
2525 	}
2526 	return len;
2527 }
2528 
2529 
2530 static struct rdev_sysfs_entry rdev_slot =
2531 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
2532 
2533 static ssize_t
2534 offset_show(mdk_rdev_t *rdev, char *page)
2535 {
2536 	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2537 }
2538 
2539 static ssize_t
2540 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2541 {
2542 	char *e;
2543 	unsigned long long offset = simple_strtoull(buf, &e, 10);
2544 	if (e==buf || (*e && *e != '\n'))
2545 		return -EINVAL;
2546 	if (rdev->mddev->pers && rdev->raid_disk >= 0)
2547 		return -EBUSY;
2548 	if (rdev->sectors && rdev->mddev->external)
2549 		/* Must set offset before size, so overlap checks
2550 		 * can be sane */
2551 		return -EBUSY;
2552 	rdev->data_offset = offset;
2553 	return len;
2554 }
2555 
2556 static struct rdev_sysfs_entry rdev_offset =
2557 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2558 
2559 static ssize_t
2560 rdev_size_show(mdk_rdev_t *rdev, char *page)
2561 {
2562 	return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2563 }
2564 
2565 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2566 {
2567 	/* check if two start/length pairs overlap */
2568 	if (s1+l1 <= s2)
2569 		return 0;
2570 	if (s2+l2 <= s1)
2571 		return 0;
2572 	return 1;
2573 }
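/*
 * Worked example: overlaps(0, 100, 50, 100) returns 1 because
 * [0,100) intersects [50,150); overlaps(0, 100, 100, 50) returns 0
 * because the second range starts exactly where the first ends.
 */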
2574 
2575 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2576 {
2577 	unsigned long long blocks;
2578 	sector_t new;
2579 
2580 	if (strict_strtoull(buf, 10, &blocks) < 0)
2581 		return -EINVAL;
2582 
2583 	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2584 		return -EINVAL; /* sector conversion overflow */
2585 
2586 	new = blocks * 2;
2587 	if (new != blocks * 2)
2588 		return -EINVAL; /* unsigned long long to sector_t overflow */
2589 
2590 	*sectors = new;
2591 	return 0;
2592 }
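/*
 * Example: "1024\n" parses as 1024 1K blocks and stores 2048 sectors.
 * A value with the top bit set is rejected up front, since doubling it
 * would overflow unsigned long long before the sector_t check runs.
 */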
2593 
2594 static ssize_t
2595 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2596 {
2597 	mddev_t *my_mddev = rdev->mddev;
2598 	sector_t oldsectors = rdev->sectors;
2599 	sector_t sectors;
2600 
2601 	if (strict_blocks_to_sectors(buf, &sectors) < 0)
2602 		return -EINVAL;
2603 	if (my_mddev->pers && rdev->raid_disk >= 0) {
2604 		if (my_mddev->persistent) {
2605 			sectors = super_types[my_mddev->major_version].
2606 				rdev_size_change(rdev, sectors);
2607 			if (!sectors)
2608 				return -EBUSY;
2609 		} else if (!sectors)
2610 			sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
2611 				rdev->data_offset;
2612 	}
2613 	if (sectors < my_mddev->dev_sectors)
2614 		return -EINVAL; /* component must fit device */
2615 
2616 	rdev->sectors = sectors;
2617 	if (sectors > oldsectors && my_mddev->external) {
2618 		/* need to check that all other rdevs with the same ->bdev
2619 		 * do not overlap.  We need to unlock the mddev to avoid
2620 		 * a deadlock.  We have already changed rdev->sectors, and if
2621 		 * we have to change it back, we will have the lock again.
2622 		 */
2623 		mddev_t *mddev;
2624 		int overlap = 0;
2625 		struct list_head *tmp;
2626 
2627 		mddev_unlock(my_mddev);
2628 		for_each_mddev(mddev, tmp) {
2629 			mdk_rdev_t *rdev2;
2630 
2631 			mddev_lock(mddev);
2632 			list_for_each_entry(rdev2, &mddev->disks, same_set)
2633 				if (rdev->bdev == rdev2->bdev &&
2634 				    rdev != rdev2 &&
2635 				    overlaps(rdev->data_offset, rdev->sectors,
2636 					     rdev2->data_offset,
2637 					     rdev2->sectors)) {
2638 					overlap = 1;
2639 					break;
2640 				}
2641 			mddev_unlock(mddev);
2642 			if (overlap) {
2643 				mddev_put(mddev);
2644 				break;
2645 			}
2646 		}
2647 		mddev_lock(my_mddev);
2648 		if (overlap) {
2649 			/* Someone else could have slipped in a size
2650 			 * change here, but doing so is just silly.
2651 			 * We put oldsectors back because we *know* it is
2652 			 * safe, and trust userspace not to race with
2653 			 * itself
2654 			 */
2655 			rdev->sectors = oldsectors;
2656 			return -EBUSY;
2657 		}
2658 	}
2659 	return len;
2660 }
2661 
2662 static struct rdev_sysfs_entry rdev_size =
2663 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
2664 
2665 
2666 static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page)
2667 {
2668 	unsigned long long recovery_start = rdev->recovery_offset;
2669 
2670 	if (test_bit(In_sync, &rdev->flags) ||
2671 	    recovery_start == MaxSector)
2672 		return sprintf(page, "none\n");
2673 
2674 	return sprintf(page, "%llu\n", recovery_start);
2675 }
2676 
2677 static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2678 {
2679 	unsigned long long recovery_start;
2680 
2681 	if (cmd_match(buf, "none"))
2682 		recovery_start = MaxSector;
2683 	else if (strict_strtoull(buf, 10, &recovery_start))
2684 		return -EINVAL;
2685 
2686 	if (rdev->mddev->pers &&
2687 	    rdev->raid_disk >= 0)
2688 		return -EBUSY;
2689 
2690 	rdev->recovery_offset = recovery_start;
2691 	if (recovery_start == MaxSector)
2692 		set_bit(In_sync, &rdev->flags);
2693 	else
2694 		clear_bit(In_sync, &rdev->flags);
2695 	return len;
2696 }
2697 
2698 static struct rdev_sysfs_entry rdev_recovery_start =
2699 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
2700 
2701 static struct attribute *rdev_default_attrs[] = {
2702 	&rdev_state.attr,
2703 	&rdev_errors.attr,
2704 	&rdev_slot.attr,
2705 	&rdev_offset.attr,
2706 	&rdev_size.attr,
2707 	&rdev_recovery_start.attr,
2708 	NULL,
2709 };
2710 static ssize_t
2711 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2712 {
2713 	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2714 	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2715 	mddev_t *mddev = rdev->mddev;
2716 	ssize_t rv;
2717 
2718 	if (!entry->show)
2719 		return -EIO;
2720 
2721 	rv = mddev ? mddev_lock(mddev) : -EBUSY;
2722 	if (!rv) {
2723 		if (rdev->mddev == NULL)
2724 			rv = -EBUSY;
2725 		else
2726 			rv = entry->show(rdev, page);
2727 		mddev_unlock(mddev);
2728 	}
2729 	return rv;
2730 }
2731 
2732 static ssize_t
2733 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
2734 	      const char *page, size_t length)
2735 {
2736 	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2737 	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2738 	ssize_t rv;
2739 	mddev_t *mddev = rdev->mddev;
2740 
2741 	if (!entry->store)
2742 		return -EIO;
2743 	if (!capable(CAP_SYS_ADMIN))
2744 		return -EACCES;
2745 	rv = mddev ? mddev_lock(mddev): -EBUSY;
2746 	if (!rv) {
2747 		if (rdev->mddev == NULL)
2748 			rv = -EBUSY;
2749 		else
2750 			rv = entry->store(rdev, page, length);
2751 		mddev_unlock(mddev);
2752 	}
2753 	return rv;
2754 }
2755 
2756 static void rdev_free(struct kobject *ko)
2757 {
2758 	mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
2759 	kfree(rdev);
2760 }
2761 static const struct sysfs_ops rdev_sysfs_ops = {
2762 	.show		= rdev_attr_show,
2763 	.store		= rdev_attr_store,
2764 };
2765 static struct kobj_type rdev_ktype = {
2766 	.release	= rdev_free,
2767 	.sysfs_ops	= &rdev_sysfs_ops,
2768 	.default_attrs	= rdev_default_attrs,
2769 };
2770 
2771 void md_rdev_init(mdk_rdev_t *rdev)
2772 {
2773 	rdev->desc_nr = -1;
2774 	rdev->saved_raid_disk = -1;
2775 	rdev->raid_disk = -1;
2776 	rdev->flags = 0;
2777 	rdev->data_offset = 0;
2778 	rdev->sb_events = 0;
2779 	rdev->last_read_error.tv_sec  = 0;
2780 	rdev->last_read_error.tv_nsec = 0;
2781 	atomic_set(&rdev->nr_pending, 0);
2782 	atomic_set(&rdev->read_errors, 0);
2783 	atomic_set(&rdev->corrected_errors, 0);
2784 
2785 	INIT_LIST_HEAD(&rdev->same_set);
2786 	init_waitqueue_head(&rdev->blocked_wait);
2787 }
2788 EXPORT_SYMBOL_GPL(md_rdev_init);
2789 /*
2790  * Import a device. If 'super_format' >= 0, then sanity check the superblock
2791  *
2792  * mark the device faulty if:
2793  *
2794  *   - the device is nonexistent (zero size)
2795  *   - the device has no valid superblock
2796  *
2797  * a faulty rdev _never_ has rdev->sb set.
2798  */
2799 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
2800 {
2801 	char b[BDEVNAME_SIZE];
2802 	int err;
2803 	mdk_rdev_t *rdev;
2804 	sector_t size;
2805 
2806 	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
2807 	if (!rdev) {
2808 		printk(KERN_ERR "md: could not alloc mem for new device!\n");
2809 		return ERR_PTR(-ENOMEM);
2810 	}
2811 
2812 	md_rdev_init(rdev);
2813 	if ((err = alloc_disk_sb(rdev)))
2814 		goto abort_free;
2815 
2816 	err = lock_rdev(rdev, newdev, super_format == -2);
2817 	if (err)
2818 		goto abort_free;
2819 
2820 	kobject_init(&rdev->kobj, &rdev_ktype);
2821 
2822 	size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
2823 	if (!size) {
2824 		printk(KERN_WARNING
2825 			"md: %s has zero or unknown size, marking faulty!\n",
2826 			bdevname(rdev->bdev,b));
2827 		err = -EINVAL;
2828 		goto abort_free;
2829 	}
2830 
2831 	if (super_format >= 0) {
2832 		err = super_types[super_format].
2833 			load_super(rdev, NULL, super_minor);
2834 		if (err == -EINVAL) {
2835 			printk(KERN_WARNING
2836 				"md: %s does not have a valid v%d.%d "
2837 			       "superblock, not importing!\n",
2838 				bdevname(rdev->bdev,b),
2839 			       super_format, super_minor);
2840 			goto abort_free;
2841 		}
2842 		if (err < 0) {
2843 			printk(KERN_WARNING
2844 				"md: could not read %s's sb, not importing!\n",
2845 				bdevname(rdev->bdev,b));
2846 			goto abort_free;
2847 		}
2848 	}
2849 
2850 	return rdev;
2851 
2852 abort_free:
2853 	if (rdev->sb_page) {
2854 		if (rdev->bdev)
2855 			unlock_rdev(rdev);
2856 		free_disk_sb(rdev);
2857 	}
2858 	kfree(rdev);
2859 	return ERR_PTR(err);
2860 }
2861 
2862 /*
2863  * Check a full RAID array for plausibility
2864  */
2865 
2866 
2867 static void analyze_sbs(mddev_t * mddev)
2868 {
2869 	int i;
2870 	mdk_rdev_t *rdev, *freshest, *tmp;
2871 	char b[BDEVNAME_SIZE];
2872 
2873 	freshest = NULL;
2874 	rdev_for_each(rdev, tmp, mddev)
2875 		switch (super_types[mddev->major_version].
2876 			load_super(rdev, freshest, mddev->minor_version)) {
2877 		case 1:
2878 			freshest = rdev;
2879 			break;
2880 		case 0:
2881 			break;
2882 		default:
2883 			printk( KERN_ERR \
2884 				"md: fatal superblock inconsistency in %s"
2885 				" -- removing from array\n",
2886 				bdevname(rdev->bdev,b));
2887 			kick_rdev_from_array(rdev);
2888 		}
2889 
2890 
2891 	super_types[mddev->major_version].
2892 		validate_super(mddev, freshest);
2893 
2894 	i = 0;
2895 	rdev_for_each(rdev, tmp, mddev) {
2896 		if (mddev->max_disks &&
2897 		    (rdev->desc_nr >= mddev->max_disks ||
2898 		     i > mddev->max_disks)) {
2899 			printk(KERN_WARNING
2900 			       "md: %s: %s: only %d devices permitted\n",
2901 			       mdname(mddev), bdevname(rdev->bdev, b),
2902 			       mddev->max_disks);
2903 			kick_rdev_from_array(rdev);
2904 			continue;
2905 		}
2906 		if (rdev != freshest)
2907 			if (super_types[mddev->major_version].
2908 			    validate_super(mddev, rdev)) {
2909 				printk(KERN_WARNING "md: kicking non-fresh %s"
2910 					" from array!\n",
2911 					bdevname(rdev->bdev,b));
2912 				kick_rdev_from_array(rdev);
2913 				continue;
2914 			}
2915 		if (mddev->level == LEVEL_MULTIPATH) {
2916 			rdev->desc_nr = i++;
2917 			rdev->raid_disk = rdev->desc_nr;
2918 			set_bit(In_sync, &rdev->flags);
2919 		} else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
2920 			rdev->raid_disk = -1;
2921 			clear_bit(In_sync, &rdev->flags);
2922 		}
2923 	}
2924 }
2925 
2926 /* Read a fixed-point number.
2927  * Numbers in sysfs attributes should be in "standard" units where
2928  * possible, so time should be in seconds.
2929  * However we internally use a much smaller unit such as
2930  * milliseconds or jiffies.
2931  * This function takes a decimal number with a possible fractional
2932  * component, and produces an integer which is the result of
2933  * multiplying that number by 10^'scale'.
2934  * all without any floating-point arithmetic.
2935  */
2936 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
2937 {
2938 	unsigned long result = 0;
2939 	long decimals = -1;
2940 	while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
2941 		if (*cp == '.')
2942 			decimals = 0;
2943 		else if (decimals < scale) {
2944 			unsigned int value;
2945 			value = *cp - '0';
2946 			result = result * 10 + value;
2947 			if (decimals >= 0)
2948 				decimals++;
2949 		}
2950 		cp++;
2951 	}
2952 	if (*cp == '\n')
2953 		cp++;
2954 	if (*cp)
2955 		return -EINVAL;
2956 	if (decimals < 0)
2957 		decimals = 0;
2958 	while (decimals < scale) {
2959 		result *= 10;
2960 		decimals ++;
2961 	}
2962 	*res = result;
2963 	return 0;
2964 }
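/*
 * Worked examples with scale == 3 (seconds -> milliseconds):
 *	"0.2"    -> 200
 *	"1.25"   -> 1250
 *	"3"      -> 3000
 *	"1.2345" -> 1234 (digits beyond the scale are truncated,
 *			  not rounded)
 */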
2965 
2966 
2967 static void md_safemode_timeout(unsigned long data);
2968 
2969 static ssize_t
2970 safe_delay_show(mddev_t *mddev, char *page)
2971 {
2972 	int msec = (mddev->safemode_delay*1000)/HZ;
2973 	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2974 }
2975 static ssize_t
2976 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2977 {
2978 	unsigned long msec;
2979 
2980 	if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
2981 		return -EINVAL;
2982 	if (msec == 0)
2983 		mddev->safemode_delay = 0;
2984 	else {
2985 		unsigned long old_delay = mddev->safemode_delay;
2986 		mddev->safemode_delay = (msec*HZ)/1000;
2987 		if (mddev->safemode_delay == 0)
2988 			mddev->safemode_delay = 1;
2989 		if (mddev->safemode_delay < old_delay)
2990 			md_safemode_timeout((unsigned long)mddev);
2991 	}
2992 	return len;
2993 }
2994 static struct md_sysfs_entry md_safe_delay =
2995 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
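/*
 * Illustrative use: "echo 0.200 > /sys/block/md0/md/safe_mode_delay"
 * requests a 200 ms safemode delay; with HZ == 250 (an assumption for
 * the example) that is stored as (200*250)/1000 == 50 jiffies.
 */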
2996 
2997 static ssize_t
2998 level_show(mddev_t *mddev, char *page)
2999 {
3000 	struct mdk_personality *p = mddev->pers;
3001 	if (p)
3002 		return sprintf(page, "%s\n", p->name);
3003 	else if (mddev->clevel[0])
3004 		return sprintf(page, "%s\n", mddev->clevel);
3005 	else if (mddev->level != LEVEL_NONE)
3006 		return sprintf(page, "%d\n", mddev->level);
3007 	else
3008 		return 0;
3009 }
3010 
3011 static ssize_t
3012 level_store(mddev_t *mddev, const char *buf, size_t len)
3013 {
3014 	char clevel[16];
3015 	ssize_t rv = len;
3016 	struct mdk_personality *pers;
3017 	long level;
3018 	void *priv;
3019 	mdk_rdev_t *rdev;
3020 
3021 	if (mddev->pers == NULL) {
3022 		if (len == 0)
3023 			return 0;
3024 		if (len >= sizeof(mddev->clevel))
3025 			return -ENOSPC;
3026 		strncpy(mddev->clevel, buf, len);
3027 		if (mddev->clevel[len-1] == '\n')
3028 			len--;
3029 		mddev->clevel[len] = 0;
3030 		mddev->level = LEVEL_NONE;
3031 		return rv;
3032 	}
3033 
3034 	/* request to change the personality.  Need to ensure:
3035 	 *  - array is not engaged in resync/recovery/reshape
3036 	 *  - old personality can be suspended
3037 	 *  - new personality can take over the array.
3038 	 */
3039 
3040 	if (mddev->sync_thread ||
3041 	    mddev->reshape_position != MaxSector ||
3042 	    mddev->sysfs_active)
3043 		return -EBUSY;
3044 
3045 	if (!mddev->pers->quiesce) {
3046 		printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
3047 		       mdname(mddev), mddev->pers->name);
3048 		return -EINVAL;
3049 	}
3050 
3051 	/* Now find the new personality */
3052 	if (len == 0 || len >= sizeof(clevel))
3053 		return -EINVAL;
3054 	strncpy(clevel, buf, len);
3055 	if (clevel[len-1] == '\n')
3056 		len--;
3057 	clevel[len] = 0;
3058 	if (strict_strtol(clevel, 10, &level))
3059 		level = LEVEL_NONE;
3060 
3061 	if (request_module("md-%s", clevel) != 0)
3062 		request_module("md-level-%s", clevel);
3063 	spin_lock(&pers_lock);
3064 	pers = find_pers(level, clevel);
3065 	if (!pers || !try_module_get(pers->owner)) {
3066 		spin_unlock(&pers_lock);
3067 		printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
3068 		return -EINVAL;
3069 	}
3070 	spin_unlock(&pers_lock);
3071 
3072 	if (pers == mddev->pers) {
3073 		/* Nothing to do! */
3074 		module_put(pers->owner);
3075 		return rv;
3076 	}
3077 	if (!pers->takeover) {
3078 		module_put(pers->owner);
3079 		printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
3080 		       mdname(mddev), clevel);
3081 		return -EINVAL;
3082 	}
3083 
3084 	list_for_each_entry(rdev, &mddev->disks, same_set)
3085 		rdev->new_raid_disk = rdev->raid_disk;
3086 
3087 	/* ->takeover must set new_* and/or delta_disks
3088 	 * if it succeeds, and may set them when it fails.
3089 	 */
3090 	priv = pers->takeover(mddev);
3091 	if (IS_ERR(priv)) {
3092 		mddev->new_level = mddev->level;
3093 		mddev->new_layout = mddev->layout;
3094 		mddev->new_chunk_sectors = mddev->chunk_sectors;
3095 		mddev->raid_disks -= mddev->delta_disks;
3096 		mddev->delta_disks = 0;
3097 		module_put(pers->owner);
3098 		printk(KERN_WARNING "md: %s: %s would not accept array\n",
3099 		       mdname(mddev), clevel);
3100 		return PTR_ERR(priv);
3101 	}
3102 
3103 	/* Looks like we have a winner */
3104 	mddev_suspend(mddev);
3105 	mddev->pers->stop(mddev);
3106 
3107 	if (mddev->pers->sync_request == NULL &&
3108 	    pers->sync_request != NULL) {
3109 		/* need to add the md_redundancy_group */
3110 		if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3111 			printk(KERN_WARNING
3112 			       "md: cannot register extra attributes for %s\n",
3113 			       mdname(mddev));
3114 		mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
3115 	}
3116 	if (mddev->pers->sync_request != NULL &&
3117 	    pers->sync_request == NULL) {
3118 		/* need to remove the md_redundancy_group */
3119 		if (mddev->to_remove == NULL)
3120 			mddev->to_remove = &md_redundancy_group;
3121 	}
3122 
3123 	if (mddev->pers->sync_request == NULL &&
3124 	    mddev->external) {
3125 		/* We are converting from a no-redundancy array
3126 		 * to a redundancy array and metadata is managed
3127 		 * externally so we need to be sure that writes
3128 		 * won't block due to a need to transition
3129 		 *      clean->dirty
3130 		 * until external management is started.
3131 		 */
3132 		mddev->in_sync = 0;
3133 		mddev->safemode_delay = 0;
3134 		mddev->safemode = 0;
3135 	}
3136 
3137 	list_for_each_entry(rdev, &mddev->disks, same_set) {
3138 		char nm[20];
3139 		if (rdev->raid_disk < 0)
3140 			continue;
3141 		if (rdev->new_raid_disk >= mddev->raid_disks)
3142 			rdev->new_raid_disk = -1;
3143 		if (rdev->new_raid_disk == rdev->raid_disk)
3144 			continue;
3145 		sprintf(nm, "rd%d", rdev->raid_disk);
3146 		sysfs_remove_link(&mddev->kobj, nm);
3147 	}
3148 	list_for_each_entry(rdev, &mddev->disks, same_set) {
3149 		if (rdev->raid_disk < 0)
3150 			continue;
3151 		if (rdev->new_raid_disk == rdev->raid_disk)
3152 			continue;
3153 		rdev->raid_disk = rdev->new_raid_disk;
3154 		if (rdev->raid_disk < 0)
3155 			clear_bit(In_sync, &rdev->flags);
3156 		else {
3157 			char nm[20];
3158 			sprintf(nm, "rd%d", rdev->raid_disk);
3159 			if(sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
3160 				printk("md: cannot register %s for %s after level change\n",
3161 				       nm, mdname(mddev));
3162 		}
3163 	}
3164 
3165 	module_put(mddev->pers->owner);
3166 	mddev->pers = pers;
3167 	mddev->private = priv;
3168 	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3169 	mddev->level = mddev->new_level;
3170 	mddev->layout = mddev->new_layout;
3171 	mddev->chunk_sectors = mddev->new_chunk_sectors;
3172 	mddev->delta_disks = 0;
3173 	mddev->degraded = 0;
3174 	if (mddev->pers->sync_request == NULL) {
3175 		/* this is now an array without redundancy, so
3176 		 * it must always be in_sync
3177 		 */
3178 		mddev->in_sync = 1;
3179 		del_timer_sync(&mddev->safemode_timer);
3180 	}
3181 	pers->run(mddev);
3182 	mddev_resume(mddev);
3183 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
3184 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3185 	md_wakeup_thread(mddev->thread);
3186 	sysfs_notify(&mddev->kobj, NULL, "level");
3187 	md_new_event(mddev);
3188 	return rv;
3189 }
3190 
3191 static struct md_sysfs_entry md_level =
3192 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
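/*
 * Illustrative use of the takeover path above (whether a conversion is
 * possible depends on the personalities involved):
 *
 *	echo raid5 > /sys/block/md0/md/level
 *
 * loads md-raid5 if necessary and asks its ->takeover() to adopt the
 * running array; if the personality refuses, the array is unchanged.
 */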
3193 
3194 
3195 static ssize_t
3196 layout_show(mddev_t *mddev, char *page)
3197 {
3198 	/* just a number, not meaningful for all levels */
3199 	if (mddev->reshape_position != MaxSector &&
3200 	    mddev->layout != mddev->new_layout)
3201 		return sprintf(page, "%d (%d)\n",
3202 			       mddev->new_layout, mddev->layout);
3203 	return sprintf(page, "%d\n", mddev->layout);
3204 }
3205 
3206 static ssize_t
3207 layout_store(mddev_t *mddev, const char *buf, size_t len)
3208 {
3209 	char *e;
3210 	unsigned long n = simple_strtoul(buf, &e, 10);
3211 
3212 	if (!*buf || (*e && *e != '\n'))
3213 		return -EINVAL;
3214 
3215 	if (mddev->pers) {
3216 		int err;
3217 		if (mddev->pers->check_reshape == NULL)
3218 			return -EBUSY;
3219 		mddev->new_layout = n;
3220 		err = mddev->pers->check_reshape(mddev);
3221 		if (err) {
3222 			mddev->new_layout = mddev->layout;
3223 			return err;
3224 		}
3225 	} else {
3226 		mddev->new_layout = n;
3227 		if (mddev->reshape_position == MaxSector)
3228 			mddev->layout = n;
3229 	}
3230 	return len;
3231 }
3232 static struct md_sysfs_entry md_layout =
3233 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
3234 
3235 
3236 static ssize_t
3237 raid_disks_show(mddev_t *mddev, char *page)
3238 {
3239 	if (mddev->raid_disks == 0)
3240 		return 0;
3241 	if (mddev->reshape_position != MaxSector &&
3242 	    mddev->delta_disks != 0)
3243 		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
3244 			       mddev->raid_disks - mddev->delta_disks);
3245 	return sprintf(page, "%d\n", mddev->raid_disks);
3246 }
3247 
3248 static int update_raid_disks(mddev_t *mddev, int raid_disks);
3249 
3250 static ssize_t
3251 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
3252 {
3253 	char *e;
3254 	int rv = 0;
3255 	unsigned long n = simple_strtoul(buf, &e, 10);
3256 
3257 	if (!*buf || (*e && *e != '\n'))
3258 		return -EINVAL;
3259 
3260 	if (mddev->pers)
3261 		rv = update_raid_disks(mddev, n);
3262 	else if (mddev->reshape_position != MaxSector) {
3263 		int olddisks = mddev->raid_disks - mddev->delta_disks;
3264 		mddev->delta_disks = n - olddisks;
3265 		mddev->raid_disks = n;
3266 	} else
3267 		mddev->raid_disks = n;
3268 	return rv ? rv : len;
3269 }
3270 static struct md_sysfs_entry md_raid_disks =
3271 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
3272 
3273 static ssize_t
3274 chunk_size_show(mddev_t *mddev, char *page)
3275 {
3276 	if (mddev->reshape_position != MaxSector &&
3277 	    mddev->chunk_sectors != mddev->new_chunk_sectors)
3278 		return sprintf(page, "%d (%d)\n",
3279 			       mddev->new_chunk_sectors << 9,
3280 			       mddev->chunk_sectors << 9);
3281 	return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
3282 }
3283 
3284 static ssize_t
3285 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
3286 {
3287 	char *e;
3288 	unsigned long n = simple_strtoul(buf, &e, 10);
3289 
3290 	if (!*buf || (*e && *e != '\n'))
3291 		return -EINVAL;
3292 
3293 	if (mddev->pers) {
3294 		int err;
3295 		if (mddev->pers->check_reshape == NULL)
3296 			return -EBUSY;
3297 		mddev->new_chunk_sectors = n >> 9;
3298 		err = mddev->pers->check_reshape(mddev);
3299 		if (err) {
3300 			mddev->new_chunk_sectors = mddev->chunk_sectors;
3301 			return err;
3302 		}
3303 	} else {
3304 		mddev->new_chunk_sectors = n >> 9;
3305 		if (mddev->reshape_position == MaxSector)
3306 			mddev->chunk_sectors = n >> 9;
3307 	}
3308 	return len;
3309 }
3310 static struct md_sysfs_entry md_chunk_size =
3311 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
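/*
 * Note the unit conversion above: the sysfs value is in bytes while
 * mddev->chunk_sectors counts 512-byte sectors, so for example
 * "echo 524288 > /sys/block/md0/md/chunk_size" requests a 512 KiB
 * chunk stored internally as 1024 sectors.
 */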
3312 
3313 static ssize_t
3314 resync_start_show(mddev_t *mddev, char *page)
3315 {
3316 	if (mddev->recovery_cp == MaxSector)
3317 		return sprintf(page, "none\n");
3318 	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
3319 }
3320 
3321 static ssize_t
3322 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
3323 {
3324 	char *e;
3325 	unsigned long long n = simple_strtoull(buf, &e, 10);
3326 
3327 	if (mddev->pers)
3328 		return -EBUSY;
3329 	if (cmd_match(buf, "none"))
3330 		n = MaxSector;
3331 	else if (!*buf || (*e && *e != '\n'))
3332 		return -EINVAL;
3333 
3334 	mddev->recovery_cp = n;
3335 	return len;
3336 }
3337 static struct md_sysfs_entry md_resync_start =
3338 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
3339 
3340 /*
3341  * The array state can be:
3342  *
3343  * clear
3344  *     No devices, no size, no level
3345  *     Equivalent to STOP_ARRAY ioctl
3346  * inactive
3347  *     May have some settings, but array is not active
3348  *        all IO results in error
3349  *     When written, doesn't tear down array, but just stops it
3350  * suspended (not supported yet)
3351  *     All IO requests will block. The array can be reconfigured.
3352  *     Writing this, if accepted, will block until array is quiescent
3353  * readonly
3354  *     no resync can happen.  no superblocks get written.
3355  *     write requests fail
3356  * read-auto
3357  *     like readonly, but behaves like 'clean' on a write request.
3358  *
3359  * clean - no pending writes, but otherwise active.
3360  *     When written to inactive array, starts without resync
3361  *     If a write request arrives then
3362  *       if metadata is known, mark 'dirty' and switch to 'active'.
3363  *       if not known, block and switch to write-pending
3364  *     If written to an active array that has pending writes, then fails.
3365  * active
3366  *     fully active: IO and resync can be happening.
3367  *     When written to inactive array, starts with resync
3368  *
3369  * write-pending
3370  *     clean, but writes are blocked waiting for 'active' to be written.
3371  *
3372  * active-idle
3373  *     like active, but no writes have been seen for a while (100msec).
3374  *
3375  */
3376 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3377 		   write_pending, active_idle, bad_word};
3378 static char *array_states[] = {
3379 	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3380 	"write-pending", "active-idle", NULL };
3381 
3382 static int match_word(const char *word, char **list)
3383 {
3384 	int n;
3385 	for (n=0; list[n]; n++)
3386 		if (cmd_match(word, list[n]))
3387 			break;
3388 	return n;
3389 }
3390 
3391 static ssize_t
3392 array_state_show(mddev_t *mddev, char *page)
3393 {
3394 	enum array_state st = inactive;
3395 
3396 	if (mddev->pers)
3397 		switch(mddev->ro) {
3398 		case 1:
3399 			st = readonly;
3400 			break;
3401 		case 2:
3402 			st = read_auto;
3403 			break;
3404 		case 0:
3405 			if (mddev->in_sync)
3406 				st = clean;
3407 			else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
3408 				st = write_pending;
3409 			else if (mddev->safemode)
3410 				st = active_idle;
3411 			else
3412 				st = active;
3413 		}
3414 	else {
3415 		if (list_empty(&mddev->disks) &&
3416 		    mddev->raid_disks == 0 &&
3417 		    mddev->dev_sectors == 0)
3418 			st = clear;
3419 		else
3420 			st = inactive;
3421 	}
3422 	return sprintf(page, "%s\n", array_states[st]);
3423 }
3424 
3425 static int do_md_stop(mddev_t * mddev, int ro, int is_open);
3426 static int md_set_readonly(mddev_t * mddev, int is_open);
3427 static int do_md_run(mddev_t * mddev);
3428 static int restart_array(mddev_t *mddev);
3429 
3430 static ssize_t
3431 array_state_store(mddev_t *mddev, const char *buf, size_t len)
3432 {
3433 	int err = -EINVAL;
3434 	enum array_state st = match_word(buf, array_states);
3435 	switch(st) {
3436 	case bad_word:
3437 		break;
3438 	case clear:
3439 		/* stopping an active array */
3440 		if (atomic_read(&mddev->openers) > 0)
3441 			return -EBUSY;
3442 		err = do_md_stop(mddev, 0, 0);
3443 		break;
3444 	case inactive:
3445 		/* stopping an active array */
3446 		if (mddev->pers) {
3447 			if (atomic_read(&mddev->openers) > 0)
3448 				return -EBUSY;
3449 			err = do_md_stop(mddev, 2, 0);
3450 		} else
3451 			err = 0; /* already inactive */
3452 		break;
3453 	case suspended:
3454 		break; /* not supported yet */
3455 	case readonly:
3456 		if (mddev->pers)
3457 			err = md_set_readonly(mddev, 0);
3458 		else {
3459 			mddev->ro = 1;
3460 			set_disk_ro(mddev->gendisk, 1);
3461 			err = do_md_run(mddev);
3462 		}
3463 		break;
3464 	case read_auto:
3465 		if (mddev->pers) {
3466 			if (mddev->ro == 0)
3467 				err = md_set_readonly(mddev, 0);
3468 			else if (mddev->ro == 1)
3469 				err = restart_array(mddev);
3470 			if (err == 0) {
3471 				mddev->ro = 2;
3472 				set_disk_ro(mddev->gendisk, 0);
3473 			}
3474 		} else {
3475 			mddev->ro = 2;
3476 			err = do_md_run(mddev);
3477 		}
3478 		break;
3479 	case clean:
3480 		if (mddev->pers) {
3481 			restart_array(mddev);
3482 			spin_lock_irq(&mddev->write_lock);
3483 			if (atomic_read(&mddev->writes_pending) == 0) {
3484 				if (mddev->in_sync == 0) {
3485 					mddev->in_sync = 1;
3486 					if (mddev->safemode == 1)
3487 						mddev->safemode = 0;
3488 					set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3489 				}
3490 				err = 0;
3491 			} else
3492 				err = -EBUSY;
3493 			spin_unlock_irq(&mddev->write_lock);
3494 		} else
3495 			err = -EINVAL;
3496 		break;
3497 	case active:
3498 		if (mddev->pers) {
3499 			restart_array(mddev);
3500 			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
3501 			wake_up(&mddev->sb_wait);
3502 			err = 0;
3503 		} else {
3504 			mddev->ro = 0;
3505 			set_disk_ro(mddev->gendisk, 0);
3506 			err = do_md_run(mddev);
3507 		}
3508 		break;
3509 	case write_pending:
3510 	case active_idle:
3511 		/* these cannot be set */
3512 		break;
3513 	}
3514 	if (err)
3515 		return err;
3516 	else {
3517 		sysfs_notify_dirent_safe(mddev->sysfs_state);
3518 		return len;
3519 	}
3520 }
3521 static struct md_sysfs_entry md_array_state =
3522 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
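
/*
 * Illustrative sketch, not part of the driver: user space drives the
 * state machine above through this sysfs attribute.  Assuming an
 * array at md0, a minimal fragment requesting the 'clean' transition
 * could look like:
 *
 *	int fd = open("/sys/block/md0/md/array_state", O_WRONLY);
 *	if (fd >= 0) {
 *		if (write(fd, "clean", 5) < 0)
 *			perror("array_state");
 *		close(fd);
 *	}
 *
 * array_state_store() matches the word against array_states[] via
 * match_word() and returns -EINVAL or -EBUSY when the requested
 * transition is not permitted.
 */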
3523 
3524 static ssize_t
3525 max_corrected_read_errors_show(mddev_t *mddev, char *page) {
3526 	return sprintf(page, "%d\n",
3527 		       atomic_read(&mddev->max_corr_read_errors));
3528 }
3529 
3530 static ssize_t
3531 max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len)
3532 {
3533 	char *e;
3534 	unsigned long n = simple_strtoul(buf, &e, 10);
3535 
3536 	if (*buf && (*e == 0 || *e == '\n')) {
3537 		atomic_set(&mddev->max_corr_read_errors, n);
3538 		return len;
3539 	}
3540 	return -EINVAL;
3541 }
3542 
3543 static struct md_sysfs_entry max_corr_read_errors =
3544 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
3545 	max_corrected_read_errors_store);
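
/*
 * Illustrative sketch, not part of the driver: the per-array threshold
 * can be tuned by writing a decimal count to this attribute, e.g.
 * doubling the default of MD_DEFAULT_MAX_CORRECTED_READ_ERRORS (20):
 *
 *	int fd = open("/sys/block/md0/md/max_read_errors", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "40", 2);
 *		close(fd);
 *	}
 */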
3546 
3547 static ssize_t
3548 null_show(mddev_t *mddev, char *page)
3549 {
3550 	return -EINVAL;
3551 }
3552 
3553 static ssize_t
3554 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
3555 {
3556 	/* buf must be %d:%d\n? giving major and minor numbers */
3557 	/* The new device is added to the array.
3558 	 * If the array has a persistent superblock, we read the
3559 	 * superblock to initialise info and check validity.
3560 	 * Otherwise, the only checking done is that in bind_rdev_to_array,
3561 	 * which mainly checks size.
3562 	 */
3563 	char *e;
3564 	int major = simple_strtoul(buf, &e, 10);
3565 	int minor;
3566 	dev_t dev;
3567 	mdk_rdev_t *rdev;
3568 	int err;
3569 
3570 	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
3571 		return -EINVAL;
3572 	minor = simple_strtoul(e+1, &e, 10);
3573 	if (*e && *e != '\n')
3574 		return -EINVAL;
3575 	dev = MKDEV(major, minor);
3576 	if (major != MAJOR(dev) ||
3577 	    minor != MINOR(dev))
3578 		return -EOVERFLOW;
3579 
3580 
3581 	if (mddev->persistent) {
3582 		rdev = md_import_device(dev, mddev->major_version,
3583 					mddev->minor_version);
3584 		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
3585 			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3586 						       mdk_rdev_t, same_set);
3587 			err = super_types[mddev->major_version]
3588 				.load_super(rdev, rdev0, mddev->minor_version);
3589 			if (err < 0)
3590 				goto out;
3591 		}
3592 	} else if (mddev->external)
3593 		rdev = md_import_device(dev, -2, -1);
3594 	else
3595 		rdev = md_import_device(dev, -1, -1);
3596 
3597 	if (IS_ERR(rdev))
3598 		return PTR_ERR(rdev);
3599 	err = bind_rdev_to_array(rdev, mddev);
3600  out:
3601 	if (err)
3602 		export_rdev(rdev);
3603 	return err ? err : len;
3604 }
3605 
3606 static struct md_sysfs_entry md_new_device =
3607 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
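
/*
 * Illustrative sketch, not part of the driver: new_dev_store() expects
 * "major:minor", optionally newline-terminated.  Handing a device with
 * the hypothetical numbers 8:17 to an inactive array would be:
 *
 *	int fd = open("/sys/block/md0/md/new_dev", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "8:17", 4);
 *		close(fd);
 *	}
 */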
3608 
3609 static ssize_t
3610 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
3611 {
3612 	char *end;
3613 	unsigned long chunk, end_chunk;
3614 
3615 	if (!mddev->bitmap)
3616 		goto out;
3617 	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
3618 	while (*buf) {
3619 		chunk = end_chunk = simple_strtoul(buf, &end, 0);
3620 		if (buf == end) break;
3621 		if (*end == '-') { /* range */
3622 			buf = end + 1;
3623 			end_chunk = simple_strtoul(buf, &end, 0);
3624 			if (buf == end) break;
3625 		}
3626 		if (*end && !isspace(*end)) break;
3627 		bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
3628 		buf = skip_spaces(end);
3629 	}
3630 	bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
3631 out:
3632 	return len;
3633 }
3634 
3635 static struct md_sysfs_entry md_bitmap =
3636 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
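
/*
 * Illustrative sketch, not part of the driver: bitmap_store() parses
 * whitespace-separated chunk numbers or first-last ranges, so marking
 * chunks 0-15 and chunk 100 dirty (hypothetical values) would be:
 *
 *	int fd = open("/sys/block/md0/md/bitmap_set_bits", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "0-15 100", 8);
 *		close(fd);
 *	}
 */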
3637 
3638 static ssize_t
3639 size_show(mddev_t *mddev, char *page)
3640 {
3641 	return sprintf(page, "%llu\n",
3642 		(unsigned long long)mddev->dev_sectors / 2);
3643 }
3644 
3645 static int update_size(mddev_t *mddev, sector_t num_sectors);
3646 
3647 static ssize_t
3648 size_store(mddev_t *mddev, const char *buf, size_t len)
3649 {
3650 	/* If array is inactive, we can reduce the component size, but
3651 	 * not increase it (except from 0).
3652 	 * If array is active, we can try an on-line resize
3653 	 */
3654 	sector_t sectors;
3655 	int err = strict_blocks_to_sectors(buf, &sectors);
3656 
3657 	if (err < 0)
3658 		return err;
3659 	if (mddev->pers) {
3660 		err = update_size(mddev, sectors);
3661 		md_update_sb(mddev, 1);
3662 	} else {
3663 		if (mddev->dev_sectors == 0 ||
3664 		    mddev->dev_sectors > sectors)
3665 			mddev->dev_sectors = sectors;
3666 		else
3667 			err = -ENOSPC;
3668 	}
3669 	return err ? err : len;
3670 }
3671 
3672 static struct md_sysfs_entry md_size =
3673 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
3674 
3675 
3676 /* Metadata version.
3677  * This is one of
3678  *   'none' for arrays with no metadata (good luck...)
3679  *   'external' for arrays with externally managed metadata,
3680  * or N.M for internally known formats
3681  */
3682 static ssize_t
3683 metadata_show(mddev_t *mddev, char *page)
3684 {
3685 	if (mddev->persistent)
3686 		return sprintf(page, "%d.%d\n",
3687 			       mddev->major_version, mddev->minor_version);
3688 	else if (mddev->external)
3689 		return sprintf(page, "external:%s\n", mddev->metadata_type);
3690 	else
3691 		return sprintf(page, "none\n");
3692 }
3693 
3694 static ssize_t
3695 metadata_store(mddev_t *mddev, const char *buf, size_t len)
3696 {
3697 	int major, minor;
3698 	char *e;
3699 	/* Changing the details of 'external' metadata is
3700 	 * always permitted.  Otherwise there must be
3701 	 * no devices attached to the array.
3702 	 */
3703 	if (mddev->external && strncmp(buf, "external:", 9) == 0)
3704 		;
3705 	else if (!list_empty(&mddev->disks))
3706 		return -EBUSY;
3707 
3708 	if (cmd_match(buf, "none")) {
3709 		mddev->persistent = 0;
3710 		mddev->external = 0;
3711 		mddev->major_version = 0;
3712 		mddev->minor_version = 90;
3713 		return len;
3714 	}
3715 	if (strncmp(buf, "external:", 9) == 0) {
3716 		size_t namelen = len-9;
3717 		if (namelen >= sizeof(mddev->metadata_type))
3718 			namelen = sizeof(mddev->metadata_type)-1;
3719 		strncpy(mddev->metadata_type, buf+9, namelen);
3720 		mddev->metadata_type[namelen] = 0;
3721 		if (namelen && mddev->metadata_type[namelen-1] == '\n')
3722 			mddev->metadata_type[--namelen] = 0;
3723 		mddev->persistent = 0;
3724 		mddev->external = 1;
3725 		mddev->major_version = 0;
3726 		mddev->minor_version = 90;
3727 		return len;
3728 	}
3729 	major = simple_strtoul(buf, &e, 10);
3730 	if (e==buf || *e != '.')
3731 		return -EINVAL;
3732 	buf = e+1;
3733 	minor = simple_strtoul(buf, &e, 10);
3734 	if (e==buf || (*e && *e != '\n') )
3735 		return -EINVAL;
3736 	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
3737 		return -ENOENT;
3738 	mddev->major_version = major;
3739 	mddev->minor_version = minor;
3740 	mddev->persistent = 1;
3741 	mddev->external = 0;
3742 	return len;
3743 }
3744 
3745 static struct md_sysfs_entry md_metadata =
3746 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
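
/*
 * Illustrative sketch, not part of the driver: metadata_store()
 * accepts "none", "external:<type>" or "N.M", so selecting 1.2
 * superblocks for an array that has no member devices yet would be:
 *
 *	int fd = open("/sys/block/md0/md/metadata_version", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1.2", 3);
 *		close(fd);
 *	}
 */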
3747 
3748 static ssize_t
3749 action_show(mddev_t *mddev, char *page)
3750 {
3751 	char *type = "idle";
3752 	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3753 		type = "frozen";
3754 	else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3755 	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
3756 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3757 			type = "reshape";
3758 		else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3759 			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
3760 				type = "resync";
3761 			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
3762 				type = "check";
3763 			else
3764 				type = "repair";
3765 		} else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
3766 			type = "recover";
3767 	}
3768 	return sprintf(page, "%s\n", type);
3769 }
3770 
3771 static void reap_sync_thread(mddev_t *mddev);
3772 
3773 static ssize_t
3774 action_store(mddev_t *mddev, const char *page, size_t len)
3775 {
3776 	if (!mddev->pers || !mddev->pers->sync_request)
3777 		return -EINVAL;
3778 
3779 	if (cmd_match(page, "frozen"))
3780 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3781 	else
3782 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3783 
3784 	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
3785 		if (mddev->sync_thread) {
3786 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3787 			reap_sync_thread(mddev);
3788 		}
3789 	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3790 		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
3791 		return -EBUSY;
3792 	else if (cmd_match(page, "resync"))
3793 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3794 	else if (cmd_match(page, "recover")) {
3795 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3796 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3797 	} else if (cmd_match(page, "reshape")) {
3798 		int err;
3799 		if (mddev->pers->start_reshape == NULL)
3800 			return -EINVAL;
3801 		err = mddev->pers->start_reshape(mddev);
3802 		if (err)
3803 			return err;
3804 		sysfs_notify(&mddev->kobj, NULL, "degraded");
3805 	} else {
3806 		if (cmd_match(page, "check"))
3807 			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3808 		else if (!cmd_match(page, "repair"))
3809 			return -EINVAL;
3810 		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3811 		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3812 	}
3813 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3814 	md_wakeup_thread(mddev->thread);
3815 	sysfs_notify_dirent_safe(mddev->sysfs_action);
3816 	return len;
3817 }
3818 
3819 static ssize_t
3820 mismatch_cnt_show(mddev_t *mddev, char *page)
3821 {
3822 	return sprintf(page, "%llu\n",
3823 		       (unsigned long long) mddev->resync_mismatches);
3824 }
3825 
3826 static struct md_sysfs_entry md_scan_mode =
3827 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
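
/*
 * Illustrative sketch, not part of the driver: a scrub is requested by
 * writing one of the words recognised by action_store(), e.g. a
 * read-only consistency check:
 *
 *	int fd = open("/sys/block/md0/md/sync_action", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "check", 5);
 *		close(fd);
 *	}
 *
 * Once the check completes, mismatch_cnt (below) reports how many
 * sectors disagreed.
 */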
3828 
3829 
3830 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
3831 
3832 static ssize_t
3833 sync_min_show(mddev_t *mddev, char *page)
3834 {
3835 	return sprintf(page, "%d (%s)\n", speed_min(mddev),
3836 		       mddev->sync_speed_min ? "local": "system");
3837 }
3838 
3839 static ssize_t
3840 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
3841 {
3842 	int min;
3843 	char *e;
3844 	if (strncmp(buf, "system", 6)==0) {
3845 		mddev->sync_speed_min = 0;
3846 		return len;
3847 	}
3848 	min = simple_strtoul(buf, &e, 10);
3849 	if (buf == e || (*e && *e != '\n') || min <= 0)
3850 		return -EINVAL;
3851 	mddev->sync_speed_min = min;
3852 	return len;
3853 }
3854 
3855 static struct md_sysfs_entry md_sync_min =
3856 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
3857 
3858 static ssize_t
3859 sync_max_show(mddev_t *mddev, char *page)
3860 {
3861 	return sprintf(page, "%d (%s)\n", speed_max(mddev),
3862 		       mddev->sync_speed_max ? "local": "system");
3863 }
3864 
3865 static ssize_t
3866 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
3867 {
3868 	int max;
3869 	char *e;
3870 	if (strncmp(buf, "system", 6)==0) {
3871 		mddev->sync_speed_max = 0;
3872 		return len;
3873 	}
3874 	max = simple_strtoul(buf, &e, 10);
3875 	if (buf == e || (*e && *e != '\n') || max <= 0)
3876 		return -EINVAL;
3877 	mddev->sync_speed_max = max;
3878 	return len;
3879 }
3880 
3881 static struct md_sysfs_entry md_sync_max =
3882 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
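
/*
 * Illustrative sketch, not part of the driver: the two attributes
 * above override the system-wide /proc/sys/dev/raid limits for one
 * array; writing "system" reverts to the global value.  Capping
 * resync at a hypothetical 50000 KB/sec:
 *
 *	int fd = open("/sys/block/md0/md/sync_speed_max", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "50000", 5);
 *		close(fd);
 *	}
 */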
3883 
3884 static ssize_t
3885 degraded_show(mddev_t *mddev, char *page)
3886 {
3887 	return sprintf(page, "%d\n", mddev->degraded);
3888 }
3889 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
3890 
3891 static ssize_t
3892 sync_force_parallel_show(mddev_t *mddev, char *page)
3893 {
3894 	return sprintf(page, "%d\n", mddev->parallel_resync);
3895 }
3896 
3897 static ssize_t
3898 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
3899 {
3900 	long n;
3901 
3902 	if (strict_strtol(buf, 10, &n))
3903 		return -EINVAL;
3904 
3905 	if (n != 0 && n != 1)
3906 		return -EINVAL;
3907 
3908 	mddev->parallel_resync = n;
3909 
3910 	if (mddev->sync_thread)
3911 		wake_up(&resync_wait);
3912 
3913 	return len;
3914 }
3915 
3916 /* force parallel resync, even with shared block devices */
3917 static struct md_sysfs_entry md_sync_force_parallel =
3918 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
3919        sync_force_parallel_show, sync_force_parallel_store);
3920 
3921 static ssize_t
3922 sync_speed_show(mddev_t *mddev, char *page)
3923 {
3924 	unsigned long resync, dt, db;
3925 	if (mddev->curr_resync == 0)
3926 		return sprintf(page, "none\n");
3927 	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
3928 	dt = (jiffies - mddev->resync_mark) / HZ;
3929 	if (!dt) dt++;
3930 	db = resync - mddev->resync_mark_cnt;
3931 	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
3932 }
3933 
3934 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
3935 
3936 static ssize_t
3937 sync_completed_show(mddev_t *mddev, char *page)
3938 {
3939 	unsigned long long max_sectors, resync;
3940 
3941 	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3942 		return sprintf(page, "none\n");
3943 
3944 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3945 		max_sectors = mddev->resync_max_sectors;
3946 	else
3947 		max_sectors = mddev->dev_sectors;
3948 
3949 	resync = mddev->curr_resync_completed;
3950 	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
3951 }
3952 
3953 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
3954 
3955 static ssize_t
3956 min_sync_show(mddev_t *mddev, char *page)
3957 {
3958 	return sprintf(page, "%llu\n",
3959 		       (unsigned long long)mddev->resync_min);
3960 }
3961 static ssize_t
3962 min_sync_store(mddev_t *mddev, const char *buf, size_t len)
3963 {
3964 	unsigned long long min;
3965 	if (strict_strtoull(buf, 10, &min))
3966 		return -EINVAL;
3967 	if (min > mddev->resync_max)
3968 		return -EINVAL;
3969 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3970 		return -EBUSY;
3971 
3972 	/* Must be a multiple of chunk_size */
3973 	if (mddev->chunk_sectors) {
3974 		sector_t temp = min;
3975 		if (sector_div(temp, mddev->chunk_sectors))
3976 			return -EINVAL;
3977 	}
3978 	mddev->resync_min = min;
3979 
3980 	return len;
3981 }
3982 
3983 static struct md_sysfs_entry md_min_sync =
3984 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
3985 
3986 static ssize_t
3987 max_sync_show(mddev_t *mddev, char *page)
3988 {
3989 	if (mddev->resync_max == MaxSector)
3990 		return sprintf(page, "max\n");
3991 	else
3992 		return sprintf(page, "%llu\n",
3993 			       (unsigned long long)mddev->resync_max);
3994 }
3995 static ssize_t
3996 max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3997 {
3998 	if (strncmp(buf, "max", 3) == 0)
3999 		mddev->resync_max = MaxSector;
4000 	else {
4001 		unsigned long long max;
4002 		if (strict_strtoull(buf, 10, &max))
4003 			return -EINVAL;
4004 		if (max < mddev->resync_min)
4005 			return -EINVAL;
4006 		if (max < mddev->resync_max &&
4007 		    mddev->ro == 0 &&
4008 		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4009 			return -EBUSY;
4010 
4011 		/* Must be a multiple of chunk_size */
4012 		if (mddev->chunk_sectors) {
4013 			sector_t temp = max;
4014 			if (sector_div(temp, mddev->chunk_sectors))
4015 				return -EINVAL;
4016 		}
4017 		mddev->resync_max = max;
4018 	}
4019 	wake_up(&mddev->recovery_wait);
4020 	return len;
4021 }
4022 
4023 static struct md_sysfs_entry md_max_sync =
4024 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
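
/*
 * Illustrative sketch, not part of the driver: sync_min and sync_max
 * bound the sector range the next resync will cover, and both must be
 * chunk-aligned for chunked levels.  Stopping a check after the first
 * 1048576 sectors (hypothetical figure) would be:
 *
 *	int fd = open("/sys/block/md0/md/sync_max", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1048576", 7);
 *		close(fd);
 *	}
 */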
4025 
4026 static ssize_t
4027 suspend_lo_show(mddev_t *mddev, char *page)
4028 {
4029 	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
4030 }
4031 
4032 static ssize_t
4033 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
4034 {
4035 	char *e;
4036 	unsigned long long new = simple_strtoull(buf, &e, 10);
4037 	unsigned long long old = mddev->suspend_lo;
4038 
4039 	if (mddev->pers == NULL ||
4040 	    mddev->pers->quiesce == NULL)
4041 		return -EINVAL;
4042 	if (buf == e || (*e && *e != '\n'))
4043 		return -EINVAL;
4044 
4045 	mddev->suspend_lo = new;
4046 	if (new >= old)
4047 		/* Shrinking suspended region */
4048 		mddev->pers->quiesce(mddev, 2);
4049 	else {
4050 		/* Expanding suspended region - need to wait */
4051 		mddev->pers->quiesce(mddev, 1);
4052 		mddev->pers->quiesce(mddev, 0);
4053 	}
4054 	return len;
4055 }
4056 static struct md_sysfs_entry md_suspend_lo =
4057 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
4058 
4059 
4060 static ssize_t
4061 suspend_hi_show(mddev_t *mddev, char *page)
4062 {
4063 	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
4064 }
4065 
4066 static ssize_t
4067 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
4068 {
4069 	char *e;
4070 	unsigned long long new = simple_strtoull(buf, &e, 10);
4071 	unsigned long long old = mddev->suspend_hi;
4072 
4073 	if (mddev->pers == NULL ||
4074 	    mddev->pers->quiesce == NULL)
4075 		return -EINVAL;
4076 	if (buf == e || (*e && *e != '\n'))
4077 		return -EINVAL;
4078 
4079 	mddev->suspend_hi = new;
4080 	if (new <= old)
4081 		/* Shrinking suspended region */
4082 		mddev->pers->quiesce(mddev, 2);
4083 	else {
4084 		/* Expanding suspended region - need to wait */
4085 		mddev->pers->quiesce(mddev, 1);
4086 		mddev->pers->quiesce(mddev, 0);
4087 	}
4088 	return len;
4089 }
4090 static struct md_sysfs_entry md_suspend_hi =
4091 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
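
/*
 * Illustrative sketch, not part of the driver: suspend_lo/suspend_hi
 * fence off a sector range in which IO is held back, which user-space
 * reshape helpers use to protect the region being relocated.  With a
 * quiesce-capable personality, suspending the first 131072 sectors
 * (hypothetical figure) would be:
 *
 *	int fd = open("/sys/block/md0/md/suspend_hi", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "131072", 6);
 *		close(fd);
 *	}
 */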
4092 
4093 static ssize_t
4094 reshape_position_show(mddev_t *mddev, char *page)
4095 {
4096 	if (mddev->reshape_position != MaxSector)
4097 		return sprintf(page, "%llu\n",
4098 			       (unsigned long long)mddev->reshape_position);
4099 	strcpy(page, "none\n");
4100 	return 5;
4101 }
4102 
4103 static ssize_t
4104 reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
4105 {
4106 	char *e;
4107 	unsigned long long new = simple_strtoull(buf, &e, 10);
4108 	if (mddev->pers)
4109 		return -EBUSY;
4110 	if (buf == e || (*e && *e != '\n'))
4111 		return -EINVAL;
4112 	mddev->reshape_position = new;
4113 	mddev->delta_disks = 0;
4114 	mddev->new_level = mddev->level;
4115 	mddev->new_layout = mddev->layout;
4116 	mddev->new_chunk_sectors = mddev->chunk_sectors;
4117 	return len;
4118 }
4119 
4120 static struct md_sysfs_entry md_reshape_position =
4121 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
4122        reshape_position_store);
4123 
4124 static ssize_t
4125 array_size_show(mddev_t *mddev, char *page)
4126 {
4127 	if (mddev->external_size)
4128 		return sprintf(page, "%llu\n",
4129 			       (unsigned long long)mddev->array_sectors/2);
4130 	else
4131 		return sprintf(page, "default\n");
4132 }
4133 
4134 static ssize_t
4135 array_size_store(mddev_t *mddev, const char *buf, size_t len)
4136 {
4137 	sector_t sectors;
4138 
4139 	if (strncmp(buf, "default", 7) == 0) {
4140 		if (mddev->pers)
4141 			sectors = mddev->pers->size(mddev, 0, 0);
4142 		else
4143 			sectors = mddev->array_sectors;
4144 
4145 		mddev->external_size = 0;
4146 	} else {
4147 		if (strict_blocks_to_sectors(buf, &sectors) < 0)
4148 			return -EINVAL;
4149 		if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
4150 			return -E2BIG;
4151 
4152 		mddev->external_size = 1;
4153 	}
4154 
4155 	mddev->array_sectors = sectors;
4156 	if (mddev->pers) {
4157 		set_capacity(mddev->gendisk, mddev->array_sectors);
4158 		revalidate_disk(mddev->gendisk);
4159 	}
4160 	return len;
4161 }
4162 
4163 static struct md_sysfs_entry md_array_size =
4164 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
4165        array_size_store);
4166 
4167 static struct attribute *md_default_attrs[] = {
4168 	&md_level.attr,
4169 	&md_layout.attr,
4170 	&md_raid_disks.attr,
4171 	&md_chunk_size.attr,
4172 	&md_size.attr,
4173 	&md_resync_start.attr,
4174 	&md_metadata.attr,
4175 	&md_new_device.attr,
4176 	&md_safe_delay.attr,
4177 	&md_array_state.attr,
4178 	&md_reshape_position.attr,
4179 	&md_array_size.attr,
4180 	&max_corr_read_errors.attr,
4181 	NULL,
4182 };
4183 
4184 static struct attribute *md_redundancy_attrs[] = {
4185 	&md_scan_mode.attr,
4186 	&md_mismatches.attr,
4187 	&md_sync_min.attr,
4188 	&md_sync_max.attr,
4189 	&md_sync_speed.attr,
4190 	&md_sync_force_parallel.attr,
4191 	&md_sync_completed.attr,
4192 	&md_min_sync.attr,
4193 	&md_max_sync.attr,
4194 	&md_suspend_lo.attr,
4195 	&md_suspend_hi.attr,
4196 	&md_bitmap.attr,
4197 	&md_degraded.attr,
4198 	NULL,
4199 };
4200 static struct attribute_group md_redundancy_group = {
4201 	.name = NULL,
4202 	.attrs = md_redundancy_attrs,
4203 };
4204 
4205 
4206 static ssize_t
4207 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4208 {
4209 	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4210 	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
4211 	ssize_t rv;
4212 
4213 	if (!entry->show)
4214 		return -EIO;
4215 	rv = mddev_lock(mddev);
4216 	if (!rv) {
4217 		rv = entry->show(mddev, page);
4218 		mddev_unlock(mddev);
4219 	}
4220 	return rv;
4221 }
4222 
4223 static ssize_t
4224 md_attr_store(struct kobject *kobj, struct attribute *attr,
4225 	      const char *page, size_t length)
4226 {
4227 	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4228 	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
4229 	ssize_t rv;
4230 
4231 	if (!entry->store)
4232 		return -EIO;
4233 	if (!capable(CAP_SYS_ADMIN))
4234 		return -EACCES;
4235 	rv = mddev_lock(mddev);
4236 	if (mddev->hold_active == UNTIL_IOCTL)
4237 		mddev->hold_active = 0;
4238 	if (!rv) {
4239 		rv = entry->store(mddev, page, length);
4240 		mddev_unlock(mddev);
4241 	}
4242 	return rv;
4243 }
4244 
4245 static void md_free(struct kobject *ko)
4246 {
4247 	mddev_t *mddev = container_of(ko, mddev_t, kobj);
4248 
4249 	if (mddev->sysfs_state)
4250 		sysfs_put(mddev->sysfs_state);
4251 
4252 	if (mddev->gendisk) {
4253 		del_gendisk(mddev->gendisk);
4254 		put_disk(mddev->gendisk);
4255 	}
4256 	if (mddev->queue)
4257 		blk_cleanup_queue(mddev->queue);
4258 
4259 	kfree(mddev);
4260 }
4261 
4262 static const struct sysfs_ops md_sysfs_ops = {
4263 	.show	= md_attr_show,
4264 	.store	= md_attr_store,
4265 };
4266 static struct kobj_type md_ktype = {
4267 	.release	= md_free,
4268 	.sysfs_ops	= &md_sysfs_ops,
4269 	.default_attrs	= md_default_attrs,
4270 };
4271 
4272 int mdp_major = 0;
4273 
4274 static void mddev_delayed_delete(struct work_struct *ws)
4275 {
4276 	mddev_t *mddev = container_of(ws, mddev_t, del_work);
4277 
4278 	sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
4279 	kobject_del(&mddev->kobj);
4280 	kobject_put(&mddev->kobj);
4281 }
4282 
4283 static int md_alloc(dev_t dev, char *name)
4284 {
4285 	static DEFINE_MUTEX(disks_mutex);
4286 	mddev_t *mddev = mddev_find(dev);
4287 	struct gendisk *disk;
4288 	int partitioned;
4289 	int shift;
4290 	int unit;
4291 	int error;
4292 
4293 	if (!mddev)
4294 		return -ENODEV;
4295 
4296 	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
4297 	shift = partitioned ? MdpMinorShift : 0;
4298 	unit = MINOR(mddev->unit) >> shift;
4299 
4300 	/* wait for any previous instance of this device to be
4301 	 * completely removed (mddev_delayed_delete).
4302 	 */
4303 	flush_workqueue(md_misc_wq);
4304 
4305 	mutex_lock(&disks_mutex);
4306 	error = -EEXIST;
4307 	if (mddev->gendisk)
4308 		goto abort;
4309 
4310 	if (name) {
4311 		/* Need to ensure that 'name' is not a duplicate.
4312 		 */
4313 		mddev_t *mddev2;
4314 		spin_lock(&all_mddevs_lock);
4315 
4316 		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
4317 			if (mddev2->gendisk &&
4318 			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
4319 				spin_unlock(&all_mddevs_lock);
4320 				goto abort;
4321 			}
4322 		spin_unlock(&all_mddevs_lock);
4323 	}
4324 
4325 	error = -ENOMEM;
4326 	mddev->queue = blk_alloc_queue(GFP_KERNEL);
4327 	if (!mddev->queue)
4328 		goto abort;
4329 	mddev->queue->queuedata = mddev;
4330 
4331 	blk_queue_make_request(mddev->queue, md_make_request);
4332 
4333 	disk = alloc_disk(1 << shift);
4334 	if (!disk) {
4335 		blk_cleanup_queue(mddev->queue);
4336 		mddev->queue = NULL;
4337 		goto abort;
4338 	}
4339 	disk->major = MAJOR(mddev->unit);
4340 	disk->first_minor = unit << shift;
4341 	if (name)
4342 		strcpy(disk->disk_name, name);
4343 	else if (partitioned)
4344 		sprintf(disk->disk_name, "md_d%d", unit);
4345 	else
4346 		sprintf(disk->disk_name, "md%d", unit);
4347 	disk->fops = &md_fops;
4348 	disk->private_data = mddev;
4349 	disk->queue = mddev->queue;
4350 	/* Allow extended partitions.  This makes the
4351 	 * 'mdp' device redundant, but we can't really
4352 	 * remove it now.
4353 	 */
4354 	disk->flags |= GENHD_FL_EXT_DEVT;
4355 	add_disk(disk);
4356 	mddev->gendisk = disk;
4357 	error = kobject_init_and_add(&mddev->kobj, &md_ktype,
4358 				     &disk_to_dev(disk)->kobj, "%s", "md");
4359 	if (error) {
4360 		/* This isn't possible, but as kobject_init_and_add is marked
4361 		 * __must_check, we must do something with the result
4362 		 */
4363 		printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
4364 		       disk->disk_name);
4365 		error = 0;
4366 	}
4367 	if (mddev->kobj.sd &&
4368 	    sysfs_create_group(&mddev->kobj, &md_bitmap_group))
4369 		printk(KERN_DEBUG "pointless warning\n");
4370 
4371 	blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
4372  abort:
4373 	mutex_unlock(&disks_mutex);
4374 	if (!error && mddev->kobj.sd) {
4375 		kobject_uevent(&mddev->kobj, KOBJ_ADD);
4376 		mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
4377 	}
4378 	mddev_put(mddev);
4379 	return error;
4380 }
4381 
4382 static struct kobject *md_probe(dev_t dev, int *part, void *data)
4383 {
4384 	md_alloc(dev, NULL);
4385 	return NULL;
4386 }
4387 
4388 static int add_named_array(const char *val, struct kernel_param *kp)
4389 {
4390 	/* val must be "md_*" where * is not all digits.
4391 	 * We allocate an array with a large free minor number, and
4392 	 * set the name to val.  val must not already be an active name.
4393 	 */
4394 	int len = strlen(val);
4395 	char buf[DISK_NAME_LEN];
4396 
4397 	while (len && val[len-1] == '\n')
4398 		len--;
4399 	if (len >= DISK_NAME_LEN)
4400 		return -E2BIG;
4401 	strlcpy(buf, val, len+1);
4402 	if (strncmp(buf, "md_", 3) != 0)
4403 		return -EINVAL;
4404 	return md_alloc(0, buf);
4405 }
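
/*
 * Illustrative sketch, not part of the driver: add_named_array() is
 * wired up as a writable module parameter, so a named array can be
 * created from user space (assuming the parameter file is exposed at
 * the usual location):
 *
 *	int fd = open("/sys/module/md_mod/parameters/new_array",
 *		      O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "md_home", 7);
 *		close(fd);
 *	}
 */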
4406 
4407 static void md_safemode_timeout(unsigned long data)
4408 {
4409 	mddev_t *mddev = (mddev_t *) data;
4410 
4411 	if (!atomic_read(&mddev->writes_pending)) {
4412 		mddev->safemode = 1;
4413 		if (mddev->external)
4414 			sysfs_notify_dirent_safe(mddev->sysfs_state);
4415 	}
4416 	md_wakeup_thread(mddev->thread);
4417 }
4418 
4419 static int start_dirty_degraded;
4420 
4421 int md_run(mddev_t *mddev)
4422 {
4423 	int err;
4424 	mdk_rdev_t *rdev;
4425 	struct mdk_personality *pers;
4426 
4427 	if (list_empty(&mddev->disks))
4428 		/* cannot run an array with no devices.. */
4429 		return -EINVAL;
4430 
4431 	if (mddev->pers)
4432 		return -EBUSY;
4433 	/* Cannot run until previous stop completes properly */
4434 	if (mddev->sysfs_active)
4435 		return -EBUSY;
4436 
4437 	/*
4438 	 * Analyze all RAID superblock(s)
4439 	 */
4440 	if (!mddev->raid_disks) {
4441 		if (!mddev->persistent)
4442 			return -EINVAL;
4443 		analyze_sbs(mddev);
4444 	}
4445 
4446 	if (mddev->level != LEVEL_NONE)
4447 		request_module("md-level-%d", mddev->level);
4448 	else if (mddev->clevel[0])
4449 		request_module("md-%s", mddev->clevel);
4450 
4451 	/*
4452 	 * Drop all container device buffers, from now on
4453 	 * the only valid external interface is through the md
4454 	 * device.
4455 	 */
4456 	list_for_each_entry(rdev, &mddev->disks, same_set) {
4457 		if (test_bit(Faulty, &rdev->flags))
4458 			continue;
4459 		sync_blockdev(rdev->bdev);
4460 		invalidate_bdev(rdev->bdev);
4461 
4462 		/* perform some consistency tests on the device.
4463 		 * We don't want the data to overlap the metadata.
4464 		 * Internal bitmap issues have been handled elsewhere.
4465 		 */
4466 		if (rdev->meta_bdev) {
4467 			/* Nothing to check */;
4468 		} else if (rdev->data_offset < rdev->sb_start) {
4469 			if (mddev->dev_sectors &&
4470 			    rdev->data_offset + mddev->dev_sectors
4471 			    > rdev->sb_start) {
4472 				printk("md: %s: data overlaps metadata\n",
4473 				       mdname(mddev));
4474 				return -EINVAL;
4475 			}
4476 		} else {
4477 			if (rdev->sb_start + rdev->sb_size/512
4478 			    > rdev->data_offset) {
4479 				printk("md: %s: metadata overlaps data\n",
4480 				       mdname(mddev));
4481 				return -EINVAL;
4482 			}
4483 		}
4484 		sysfs_notify_dirent_safe(rdev->sysfs_state);
4485 	}
4486 
4487 	if (mddev->bio_set == NULL)
4488 		mddev->bio_set = bioset_create(BIO_POOL_SIZE, sizeof(mddev));
4489 
4490 	spin_lock(&pers_lock);
4491 	pers = find_pers(mddev->level, mddev->clevel);
4492 	if (!pers || !try_module_get(pers->owner)) {
4493 		spin_unlock(&pers_lock);
4494 		if (mddev->level != LEVEL_NONE)
4495 			printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
4496 			       mddev->level);
4497 		else
4498 			printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
4499 			       mddev->clevel);
4500 		return -EINVAL;
4501 	}
4502 	mddev->pers = pers;
4503 	spin_unlock(&pers_lock);
4504 	if (mddev->level != pers->level) {
4505 		mddev->level = pers->level;
4506 		mddev->new_level = pers->level;
4507 	}
4508 	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4509 
4510 	if (mddev->reshape_position != MaxSector &&
4511 	    pers->start_reshape == NULL) {
4512 		/* This personality cannot handle reshaping... */
4513 		mddev->pers = NULL;
4514 		module_put(pers->owner);
4515 		return -EINVAL;
4516 	}
4517 
4518 	if (pers->sync_request) {
4519 		/* Warn if this is a potentially silly
4520 		 * configuration.
4521 		 */
4522 		char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4523 		mdk_rdev_t *rdev2;
4524 		int warned = 0;
4525 
4526 		list_for_each_entry(rdev, &mddev->disks, same_set)
4527 			list_for_each_entry(rdev2, &mddev->disks, same_set) {
4528 				if (rdev < rdev2 &&
4529 				    rdev->bdev->bd_contains ==
4530 				    rdev2->bdev->bd_contains) {
4531 					printk(KERN_WARNING
4532 					       "%s: WARNING: %s appears to be"
4533 					       " on the same physical disk as"
4534 					       " %s.\n",
4535 					       mdname(mddev),
4536 					       bdevname(rdev->bdev,b),
4537 					       bdevname(rdev2->bdev,b2));
4538 					warned = 1;
4539 				}
4540 			}
4541 
4542 		if (warned)
4543 			printk(KERN_WARNING
4544 			       "True protection against single-disk"
4545 			       " failure might be compromised.\n");
4546 	}
4547 
4548 	mddev->recovery = 0;
4549 	/* may be over-ridden by personality */
4550 	mddev->resync_max_sectors = mddev->dev_sectors;
4551 
4552 	mddev->ok_start_degraded = start_dirty_degraded;
4553 
4554 	if (start_readonly && mddev->ro == 0)
4555 		mddev->ro = 2; /* read-only, but switch on first write */
4556 
4557 	err = mddev->pers->run(mddev);
4558 	if (err)
4559 		printk(KERN_ERR "md: pers->run() failed ...\n");
4560 	else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
4561 		WARN_ONCE(!mddev->external_size, "%s: default size too small,"
4562 			  " but 'external_size' not in effect?\n", __func__);
4563 		printk(KERN_ERR
4564 		       "md: invalid array_size %llu > default size %llu\n",
4565 		       (unsigned long long)mddev->array_sectors / 2,
4566 		       (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
4567 		err = -EINVAL;
4568 		mddev->pers->stop(mddev);
4569 	}
4570 	if (err == 0 && mddev->pers->sync_request) {
4571 		err = bitmap_create(mddev);
4572 		if (err) {
4573 			printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
4574 			       mdname(mddev), err);
4575 			mddev->pers->stop(mddev);
4576 		}
4577 	}
4578 	if (err) {
4579 		module_put(mddev->pers->owner);
4580 		mddev->pers = NULL;
4581 		bitmap_destroy(mddev);
4582 		return err;
4583 	}
4584 	if (mddev->pers->sync_request) {
4585 		if (mddev->kobj.sd &&
4586 		    sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4587 			printk(KERN_WARNING
4588 			       "md: cannot register extra attributes for %s\n",
4589 			       mdname(mddev));
4590 		mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
4591 	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
4592 		mddev->ro = 0;
4593 
4594  	atomic_set(&mddev->writes_pending,0);
4595 	atomic_set(&mddev->max_corr_read_errors,
4596 		   MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
4597 	mddev->safemode = 0;
4598 	mddev->safemode_timer.function = md_safemode_timeout;
4599 	mddev->safemode_timer.data = (unsigned long) mddev;
4600 	mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
4601 	mddev->in_sync = 1;
4602 	smp_wmb();
4603 	mddev->ready = 1;
4604 	list_for_each_entry(rdev, &mddev->disks, same_set)
4605 		if (rdev->raid_disk >= 0) {
4606 			char nm[20];
4607 			sprintf(nm, "rd%d", rdev->raid_disk);
4608 			if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
4609 				/* failure here is OK */;
4610 		}
4611 
4612 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4613 
4614 	if (mddev->flags)
4615 		md_update_sb(mddev, 0);
4616 
4617 	md_wakeup_thread(mddev->thread);
4618 	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4619 
4620 	md_new_event(mddev);
4621 	sysfs_notify_dirent_safe(mddev->sysfs_state);
4622 	sysfs_notify_dirent_safe(mddev->sysfs_action);
4623 	sysfs_notify(&mddev->kobj, NULL, "degraded");
4624 	return 0;
4625 }
4626 EXPORT_SYMBOL_GPL(md_run);
4627 
4628 static int do_md_run(mddev_t *mddev)
4629 {
4630 	int err;
4631 
4632 	err = md_run(mddev);
4633 	if (err)
4634 		goto out;
4635 	err = bitmap_load(mddev);
4636 	if (err) {
4637 		bitmap_destroy(mddev);
4638 		goto out;
4639 	}
4640 	set_capacity(mddev->gendisk, mddev->array_sectors);
4641 	revalidate_disk(mddev->gendisk);
4642 	mddev->changed = 1;
4643 	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4644 out:
4645 	return err;
4646 }
4647 
4648 static int restart_array(mddev_t *mddev)
4649 {
4650 	struct gendisk *disk = mddev->gendisk;
4651 
4652 	/* Complain if it has no devices */
4653 	if (list_empty(&mddev->disks))
4654 		return -ENXIO;
4655 	if (!mddev->pers)
4656 		return -EINVAL;
4657 	if (!mddev->ro)
4658 		return -EBUSY;
4659 	mddev->safemode = 0;
4660 	mddev->ro = 0;
4661 	set_disk_ro(disk, 0);
4662 	printk(KERN_INFO "md: %s switched to read-write mode.\n",
4663 		mdname(mddev));
4664 	/* Kick recovery or resync if necessary */
4665 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4666 	md_wakeup_thread(mddev->thread);
4667 	md_wakeup_thread(mddev->sync_thread);
4668 	sysfs_notify_dirent_safe(mddev->sysfs_state);
4669 	return 0;
4670 }
4671 
4672 /* similar to deny_write_access, but accounts for our holding a reference
4673  * to the file ourselves */
4674 static int deny_bitmap_write_access(struct file * file)
4675 {
4676 	struct inode *inode = file->f_mapping->host;
4677 
4678 	spin_lock(&inode->i_lock);
4679 	if (atomic_read(&inode->i_writecount) > 1) {
4680 		spin_unlock(&inode->i_lock);
4681 		return -ETXTBSY;
4682 	}
4683 	atomic_set(&inode->i_writecount, -1);
4684 	spin_unlock(&inode->i_lock);
4685 
4686 	return 0;
4687 }
4688 
4689 void restore_bitmap_write_access(struct file *file)
4690 {
4691 	struct inode *inode = file->f_mapping->host;
4692 
4693 	spin_lock(&inode->i_lock);
4694 	atomic_set(&inode->i_writecount, 1);
4695 	spin_unlock(&inode->i_lock);
4696 }
4697 
4698 static void md_clean(mddev_t *mddev)
4699 {
4700 	mddev->array_sectors = 0;
4701 	mddev->external_size = 0;
4702 	mddev->dev_sectors = 0;
4703 	mddev->raid_disks = 0;
4704 	mddev->recovery_cp = 0;
4705 	mddev->resync_min = 0;
4706 	mddev->resync_max = MaxSector;
4707 	mddev->reshape_position = MaxSector;
4708 	mddev->external = 0;
4709 	mddev->persistent = 0;
4710 	mddev->level = LEVEL_NONE;
4711 	mddev->clevel[0] = 0;
4712 	mddev->flags = 0;
4713 	mddev->ro = 0;
4714 	mddev->metadata_type[0] = 0;
4715 	mddev->chunk_sectors = 0;
4716 	mddev->ctime = mddev->utime = 0;
4717 	mddev->layout = 0;
4718 	mddev->max_disks = 0;
4719 	mddev->events = 0;
4720 	mddev->can_decrease_events = 0;
4721 	mddev->delta_disks = 0;
4722 	mddev->new_level = LEVEL_NONE;
4723 	mddev->new_layout = 0;
4724 	mddev->new_chunk_sectors = 0;
4725 	mddev->curr_resync = 0;
4726 	mddev->resync_mismatches = 0;
4727 	mddev->suspend_lo = mddev->suspend_hi = 0;
4728 	mddev->sync_speed_min = mddev->sync_speed_max = 0;
4729 	mddev->recovery = 0;
4730 	mddev->in_sync = 0;
4731 	mddev->changed = 0;
4732 	mddev->degraded = 0;
4733 	mddev->safemode = 0;
4734 	mddev->bitmap_info.offset = 0;
4735 	mddev->bitmap_info.default_offset = 0;
4736 	mddev->bitmap_info.chunksize = 0;
4737 	mddev->bitmap_info.daemon_sleep = 0;
4738 	mddev->bitmap_info.max_write_behind = 0;
4739 }
4740 
4741 static void __md_stop_writes(mddev_t *mddev)
4742 {
4743 	if (mddev->sync_thread) {
4744 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4745 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4746 		reap_sync_thread(mddev);
4747 	}
4748 
4749 	del_timer_sync(&mddev->safemode_timer);
4750 
4751 	bitmap_flush(mddev);
4752 	md_super_wait(mddev);
4753 
4754 	if (!mddev->in_sync || mddev->flags) {
4755 		/* mark array as shutdown cleanly */
4756 		mddev->in_sync = 1;
4757 		md_update_sb(mddev, 1);
4758 	}
4759 }
4760 
4761 void md_stop_writes(mddev_t *mddev)
4762 {
4763 	mddev_lock(mddev);
4764 	__md_stop_writes(mddev);
4765 	mddev_unlock(mddev);
4766 }
4767 EXPORT_SYMBOL_GPL(md_stop_writes);
4768 
4769 void md_stop(mddev_t *mddev)
4770 {
4771 	mddev->ready = 0;
4772 	mddev->pers->stop(mddev);
4773 	if (mddev->pers->sync_request && mddev->to_remove == NULL)
4774 		mddev->to_remove = &md_redundancy_group;
4775 	module_put(mddev->pers->owner);
4776 	mddev->pers = NULL;
4777 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4778 }
4779 EXPORT_SYMBOL_GPL(md_stop);
4780 
4781 static int md_set_readonly(mddev_t *mddev, int is_open)
4782 {
4783 	int err = 0;
4784 	mutex_lock(&mddev->open_mutex);
4785 	if (atomic_read(&mddev->openers) > is_open) {
4786 		printk("md: %s still in use.\n",mdname(mddev));
4787 		err = -EBUSY;
4788 		goto out;
4789 	}
4790 	if (mddev->pers) {
4791 		__md_stop_writes(mddev);
4792 
4793 		err  = -ENXIO;
4794 		if (mddev->ro==1)
4795 			goto out;
4796 		mddev->ro = 1;
4797 		set_disk_ro(mddev->gendisk, 1);
4798 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4799 		sysfs_notify_dirent_safe(mddev->sysfs_state);
4800 		err = 0;
4801 	}
4802 out:
4803 	mutex_unlock(&mddev->open_mutex);
4804 	return err;
4805 }
4806 
4807 /* mode:
4808  *   0 - completely stop and dis-assemble array
4809  *   2 - stop but do not disassemble array
4810  */
4811 static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4812 {
4813 	struct gendisk *disk = mddev->gendisk;
4814 	mdk_rdev_t *rdev;
4815 
4816 	mutex_lock(&mddev->open_mutex);
4817 	if (atomic_read(&mddev->openers) > is_open ||
4818 	    mddev->sysfs_active) {
4819 		printk("md: %s still in use.\n",mdname(mddev));
4820 		mutex_unlock(&mddev->open_mutex);
4821 		return -EBUSY;
4822 	}
4823 
4824 	if (mddev->pers) {
4825 		if (mddev->ro)
4826 			set_disk_ro(disk, 0);
4827 
4828 		__md_stop_writes(mddev);
4829 		md_stop(mddev);
4830 		mddev->queue->merge_bvec_fn = NULL;
4831 		mddev->queue->backing_dev_info.congested_fn = NULL;
4832 
4833 		/* tell userspace to handle 'inactive' */
4834 		sysfs_notify_dirent_safe(mddev->sysfs_state);
4835 
4836 		list_for_each_entry(rdev, &mddev->disks, same_set)
4837 			if (rdev->raid_disk >= 0) {
4838 				char nm[20];
4839 				sprintf(nm, "rd%d", rdev->raid_disk);
4840 				sysfs_remove_link(&mddev->kobj, nm);
4841 			}
4842 
4843 		set_capacity(disk, 0);
4844 		mutex_unlock(&mddev->open_mutex);
4845 		mddev->changed = 1;
4846 		revalidate_disk(disk);
4847 
4848 		if (mddev->ro)
4849 			mddev->ro = 0;
4850 	} else
4851 		mutex_unlock(&mddev->open_mutex);
4852 	/*
4853 	 * Free resources if final stop
4854 	 */
4855 	if (mode == 0) {
4856 		printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
4857 
4858 		bitmap_destroy(mddev);
4859 		if (mddev->bitmap_info.file) {
4860 			restore_bitmap_write_access(mddev->bitmap_info.file);
4861 			fput(mddev->bitmap_info.file);
4862 			mddev->bitmap_info.file = NULL;
4863 		}
4864 		mddev->bitmap_info.offset = 0;
4865 
4866 		export_array(mddev);
4867 
4868 		md_clean(mddev);
4869 		kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4870 		if (mddev->hold_active == UNTIL_STOP)
4871 			mddev->hold_active = 0;
4872 	}
4873 	blk_integrity_unregister(disk);
4874 	md_new_event(mddev);
4875 	sysfs_notify_dirent_safe(mddev->sysfs_state);
4876 	return 0;
4877 }
4878 
4879 #ifndef MODULE
4880 static void autorun_array(mddev_t *mddev)
4881 {
4882 	mdk_rdev_t *rdev;
4883 	int err;
4884 
4885 	if (list_empty(&mddev->disks))
4886 		return;
4887 
4888 	printk(KERN_INFO "md: running: ");
4889 
4890 	list_for_each_entry(rdev, &mddev->disks, same_set) {
4891 		char b[BDEVNAME_SIZE];
4892 		printk("<%s>", bdevname(rdev->bdev,b));
4893 	}
4894 	printk("\n");
4895 
4896 	err = do_md_run(mddev);
4897 	if (err) {
4898 		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
4899 		do_md_stop(mddev, 0, 0);
4900 	}
4901 }
4902 
4903 /*
4904  * let's try to run arrays based on all disks that have arrived
4905  * until now. (those are in pending_raid_disks)
4906  *
4907  * the method: pick the first pending disk, collect all disks with
4908  * the same UUID, remove all from the pending list and put them into
4909  * the 'same_array' list. Then order this list based on superblock
4910  * update time (freshest comes first), kick out 'old' disks and
4911  * compare superblocks. If everything's fine then run it.
4912  *
4913  * If "unit" is allocated, then bump its reference count
4914  */
4915 static void autorun_devices(int part)
4916 {
4917 	mdk_rdev_t *rdev0, *rdev, *tmp;
4918 	mddev_t *mddev;
4919 	char b[BDEVNAME_SIZE];
4920 
4921 	printk(KERN_INFO "md: autorun ...\n");
4922 	while (!list_empty(&pending_raid_disks)) {
4923 		int unit;
4924 		dev_t dev;
4925 		LIST_HEAD(candidates);
4926 		rdev0 = list_entry(pending_raid_disks.next,
4927 					 mdk_rdev_t, same_set);
4928 
4929 		printk(KERN_INFO "md: considering %s ...\n",
4930 			bdevname(rdev0->bdev,b));
4931 		INIT_LIST_HEAD(&candidates);
4932 		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
4933 			if (super_90_load(rdev, rdev0, 0) >= 0) {
4934 				printk(KERN_INFO "md:  adding %s ...\n",
4935 					bdevname(rdev->bdev,b));
4936 				list_move(&rdev->same_set, &candidates);
4937 			}
4938 		/*
4939 		 * now we have a set of devices, with all of them having
4940 		 * mostly sane superblocks. It's time to allocate the
4941 		 * mddev.
4942 		 */
4943 		if (part) {
4944 			dev = MKDEV(mdp_major,
4945 				    rdev0->preferred_minor << MdpMinorShift);
4946 			unit = MINOR(dev) >> MdpMinorShift;
4947 		} else {
4948 			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
4949 			unit = MINOR(dev);
4950 		}
4951 		if (rdev0->preferred_minor != unit) {
4952 			printk(KERN_INFO "md: unit number in %s is bad: %d\n",
4953 			       bdevname(rdev0->bdev, b), rdev0->preferred_minor);
4954 			break;
4955 		}
4956 
4957 		md_probe(dev, NULL, NULL);
4958 		mddev = mddev_find(dev);
4959 		if (!mddev || !mddev->gendisk) {
4960 			if (mddev)
4961 				mddev_put(mddev);
4962 			printk(KERN_ERR
4963 				"md: cannot allocate memory for md drive.\n");
4964 			break;
4965 		}
4966 		if (mddev_lock(mddev))
4967 			printk(KERN_WARNING "md: %s locked, cannot run\n",
4968 			       mdname(mddev));
4969 		else if (mddev->raid_disks || mddev->major_version
4970 			 || !list_empty(&mddev->disks)) {
4971 			printk(KERN_WARNING
4972 				"md: %s already running, cannot run %s\n",
4973 				mdname(mddev), bdevname(rdev0->bdev,b));
4974 			mddev_unlock(mddev);
4975 		} else {
4976 			printk(KERN_INFO "md: created %s\n", mdname(mddev));
4977 			mddev->persistent = 1;
4978 			rdev_for_each_list(rdev, tmp, &candidates) {
4979 				list_del_init(&rdev->same_set);
4980 				if (bind_rdev_to_array(rdev, mddev))
4981 					export_rdev(rdev);
4982 			}
4983 			autorun_array(mddev);
4984 			mddev_unlock(mddev);
4985 		}
4986 		/* on success, candidates will be empty, on error
4987 		 * it won't...
4988 		 */
4989 		rdev_for_each_list(rdev, tmp, &candidates) {
4990 			list_del_init(&rdev->same_set);
4991 			export_rdev(rdev);
4992 		}
4993 		mddev_put(mddev);
4994 	}
4995 	printk(KERN_INFO "md: ... autorun DONE.\n");
4996 }
4997 #endif /* !MODULE */
4998 
4999 static int get_version(void __user * arg)
5000 {
5001 	mdu_version_t ver;
5002 
5003 	ver.major = MD_MAJOR_VERSION;
5004 	ver.minor = MD_MINOR_VERSION;
5005 	ver.patchlevel = MD_PATCHLEVEL_VERSION;
5006 
5007 	if (copy_to_user(arg, &ver, sizeof(ver)))
5008 		return -EFAULT;
5009 
5010 	return 0;
5011 }
5012 
5013 static int get_array_info(mddev_t * mddev, void __user * arg)
5014 {
5015 	mdu_array_info_t info;
5016 	int nr,working,insync,failed,spare;
5017 	mdk_rdev_t *rdev;
5018 
5019 	nr=working=insync=failed=spare=0;
5020 	list_for_each_entry(rdev, &mddev->disks, same_set) {
5021 		nr++;
5022 		if (test_bit(Faulty, &rdev->flags))
5023 			failed++;
5024 		else {
5025 			working++;
5026 			if (test_bit(In_sync, &rdev->flags))
5027 				insync++;
5028 			else
5029 				spare++;
5030 		}
5031 	}
5032 
5033 	info.major_version = mddev->major_version;
5034 	info.minor_version = mddev->minor_version;
5035 	info.patch_version = MD_PATCHLEVEL_VERSION;
5036 	info.ctime         = mddev->ctime;
5037 	info.level         = mddev->level;
5038 	info.size          = mddev->dev_sectors / 2;
5039 	if (info.size != mddev->dev_sectors / 2) /* overflow */
5040 		info.size = -1;
5041 	info.nr_disks      = nr;
5042 	info.raid_disks    = mddev->raid_disks;
5043 	info.md_minor      = mddev->md_minor;
5044 	info.not_persistent= !mddev->persistent;
5045 
5046 	info.utime         = mddev->utime;
5047 	info.state         = 0;
5048 	if (mddev->in_sync)
5049 		info.state = (1<<MD_SB_CLEAN);
5050 	if (mddev->bitmap && mddev->bitmap_info.offset)
5051 		info.state |= (1<<MD_SB_BITMAP_PRESENT);
5052 	info.active_disks  = insync;
5053 	info.working_disks = working;
5054 	info.failed_disks  = failed;
5055 	info.spare_disks   = spare;
5056 
5057 	info.layout        = mddev->layout;
5058 	info.chunk_size    = mddev->chunk_sectors << 9;
5059 
5060 	if (copy_to_user(arg, &info, sizeof(info)))
5061 		return -EFAULT;
5062 
5063 	return 0;
5064 }
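
/*
 * Illustrative sketch, not part of the driver: user space (mdadm, for
 * instance) retrieves these counters through the GET_ARRAY_INFO ioctl
 * on the array's device node:
 *
 *	mdu_array_info_t info;
 *	int fd = open("/dev/md0", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, GET_ARRAY_INFO, &info) == 0)
 *		printf("raid%d: %d disks, %d failed\n",
 *		       info.level, info.raid_disks, info.failed_disks);
 */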
5065 
5066 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
5067 {
5068 	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
5069 	char *ptr, *buf = NULL;
5070 	int err = -ENOMEM;
5071 
5072 	if (md_allow_write(mddev))
5073 		file = kmalloc(sizeof(*file), GFP_NOIO);
5074 	else
5075 		file = kmalloc(sizeof(*file), GFP_KERNEL);
5076 
5077 	if (!file)
5078 		goto out;
5079 
5080 	/* bitmap disabled, zero the first byte and copy out */
5081 	if (!mddev->bitmap || !mddev->bitmap->file) {
5082 		file->pathname[0] = '\0';
5083 		goto copy_out;
5084 	}
5085 
5086 	buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
5087 	if (!buf)
5088 		goto out;
5089 
5090 	ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
5091 	if (IS_ERR(ptr))
5092 		goto out;
5093 
5094 	strcpy(file->pathname, ptr);
5095 
5096 copy_out:
5097 	err = 0;
5098 	if (copy_to_user(arg, file, sizeof(*file)))
5099 		err = -EFAULT;
5100 out:
5101 	kfree(buf);
5102 	kfree(file);
5103 	return err;
5104 }
5105 
5106 static int get_disk_info(mddev_t * mddev, void __user * arg)
5107 {
5108 	mdu_disk_info_t info;
5109 	mdk_rdev_t *rdev;
5110 
5111 	if (copy_from_user(&info, arg, sizeof(info)))
5112 		return -EFAULT;
5113 
5114 	rdev = find_rdev_nr(mddev, info.number);
5115 	if (rdev) {
5116 		info.major = MAJOR(rdev->bdev->bd_dev);
5117 		info.minor = MINOR(rdev->bdev->bd_dev);
5118 		info.raid_disk = rdev->raid_disk;
5119 		info.state = 0;
5120 		if (test_bit(Faulty, &rdev->flags))
5121 			info.state |= (1<<MD_DISK_FAULTY);
5122 		else if (test_bit(In_sync, &rdev->flags)) {
5123 			info.state |= (1<<MD_DISK_ACTIVE);
5124 			info.state |= (1<<MD_DISK_SYNC);
5125 		}
5126 		if (test_bit(WriteMostly, &rdev->flags))
5127 			info.state |= (1<<MD_DISK_WRITEMOSTLY);
5128 	} else {
5129 		info.major = info.minor = 0;
5130 		info.raid_disk = -1;
5131 		info.state = (1<<MD_DISK_REMOVED);
5132 	}
5133 
5134 	if (copy_to_user(arg, &info, sizeof(info)))
5135 		return -EFAULT;
5136 
5137 	return 0;
5138 }
5139 
5140 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
5141 {
5142 	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5143 	mdk_rdev_t *rdev;
5144 	dev_t dev = MKDEV(info->major,info->minor);
5145 
5146 	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
5147 		return -EOVERFLOW;
5148 
5149 	if (!mddev->raid_disks) {
5150 		int err;
5151 		/* expecting a device which has a superblock */
5152 		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
5153 		if (IS_ERR(rdev)) {
5154 			printk(KERN_WARNING
5155 				"md: md_import_device returned %ld\n",
5156 				PTR_ERR(rdev));
5157 			return PTR_ERR(rdev);
5158 		}
5159 		if (!list_empty(&mddev->disks)) {
5160 			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
5161 							mdk_rdev_t, same_set);
5162 			err = super_types[mddev->major_version]
5163 				.load_super(rdev, rdev0, mddev->minor_version);
5164 			if (err < 0) {
5165 				printk(KERN_WARNING
5166 					"md: %s has different UUID to %s\n",
5167 					bdevname(rdev->bdev,b),
5168 					bdevname(rdev0->bdev,b2));
5169 				export_rdev(rdev);
5170 				return -EINVAL;
5171 			}
5172 		}
5173 		err = bind_rdev_to_array(rdev, mddev);
5174 		if (err)
5175 			export_rdev(rdev);
5176 		return err;
5177 	}
5178 
5179 	/*
5180 	 * add_new_disk can be used once the array is assembled
5181 	 * to add "hot spares".  They must already have a superblock
5182 	 * written
5183 	 */
5184 	if (mddev->pers) {
5185 		int err;
5186 		if (!mddev->pers->hot_add_disk) {
5187 			printk(KERN_WARNING
5188 				"%s: personality does not support diskops!\n",
5189 			       mdname(mddev));
5190 			return -EINVAL;
5191 		}
5192 		if (mddev->persistent)
5193 			rdev = md_import_device(dev, mddev->major_version,
5194 						mddev->minor_version);
5195 		else
5196 			rdev = md_import_device(dev, -1, -1);
5197 		if (IS_ERR(rdev)) {
5198 			printk(KERN_WARNING
5199 				"md: md_import_device returned %ld\n",
5200 				PTR_ERR(rdev));
5201 			return PTR_ERR(rdev);
5202 		}
5203 		/* set saved_raid_disk if appropriate */
5204 		if (!mddev->persistent) {
5205 			if (info->state & (1<<MD_DISK_SYNC)  &&
5206 			    info->raid_disk < mddev->raid_disks) {
5207 				rdev->raid_disk = info->raid_disk;
5208 				set_bit(In_sync, &rdev->flags);
5209 			} else
5210 				rdev->raid_disk = -1;
5211 		} else
5212 			super_types[mddev->major_version].
5213 				validate_super(mddev, rdev);
5214 		if (test_bit(In_sync, &rdev->flags))
5215 			rdev->saved_raid_disk = rdev->raid_disk;
5216 		else
5217 			rdev->saved_raid_disk = -1;
5218 
5219 		clear_bit(In_sync, &rdev->flags); /* just to be sure */
5220 		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
5221 			set_bit(WriteMostly, &rdev->flags);
5222 		else
5223 			clear_bit(WriteMostly, &rdev->flags);
5224 
5225 		rdev->raid_disk = -1;
5226 		err = bind_rdev_to_array(rdev, mddev);
5227 		if (!err && !mddev->pers->hot_remove_disk) {
5228 			/* If there is hot_add_disk but no hot_remove_disk
5229 			 * then added disks are for geometry changes,
5230 			 * and should be activated immediately.
5231 			 */
5232 			super_types[mddev->major_version].
5233 				validate_super(mddev, rdev);
5234 			err = mddev->pers->hot_add_disk(mddev, rdev);
5235 			if (err)
5236 				unbind_rdev_from_array(rdev);
5237 		}
5238 		if (err)
5239 			export_rdev(rdev);
5240 		else
5241 			sysfs_notify_dirent_safe(rdev->sysfs_state);
5242 
5243 		md_update_sb(mddev, 1);
5244 		if (mddev->degraded)
5245 			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5246 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5247 		md_wakeup_thread(mddev->thread);
5248 		return err;
5249 	}
5250 
5251 	/* otherwise, add_new_disk is only allowed
5252 	 * for major_version==0 superblocks
5253 	 */
5254 	if (mddev->major_version != 0) {
5255 		printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
5256 		       mdname(mddev));
5257 		return -EINVAL;
5258 	}
5259 
5260 	if (!(info->state & (1<<MD_DISK_FAULTY))) {
5261 		int err;
5262 		rdev = md_import_device(dev, -1, 0);
5263 		if (IS_ERR(rdev)) {
5264 			printk(KERN_WARNING
5265 				"md: error, md_import_device() returned %ld\n",
5266 				PTR_ERR(rdev));
5267 			return PTR_ERR(rdev);
5268 		}
5269 		rdev->desc_nr = info->number;
5270 		if (info->raid_disk < mddev->raid_disks)
5271 			rdev->raid_disk = info->raid_disk;
5272 		else
5273 			rdev->raid_disk = -1;
5274 
5275 		if (rdev->raid_disk < mddev->raid_disks)
5276 			if (info->state & (1<<MD_DISK_SYNC))
5277 				set_bit(In_sync, &rdev->flags);
5278 
5279 		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
5280 			set_bit(WriteMostly, &rdev->flags);
5281 
5282 		if (!mddev->persistent) {
5283 			printk(KERN_INFO "md: nonpersistent superblock ...\n");
5284 			rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
5285 		} else
5286 			rdev->sb_start = calc_dev_sboffset(rdev);
5287 		rdev->sectors = rdev->sb_start;
5288 
5289 		err = bind_rdev_to_array(rdev, mddev);
5290 		if (err) {
5291 			export_rdev(rdev);
5292 			return err;
5293 		}
5294 	}
5295 
5296 	return 0;
5297 }
5298 
5299 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
5300 {
5301 	char b[BDEVNAME_SIZE];
5302 	mdk_rdev_t *rdev;
5303 
5304 	rdev = find_rdev(mddev, dev);
5305 	if (!rdev)
5306 		return -ENXIO;
5307 
5308 	if (rdev->raid_disk >= 0)
5309 		goto busy;
5310 
5311 	kick_rdev_from_array(rdev);
5312 	md_update_sb(mddev, 1);
5313 	md_new_event(mddev);
5314 
5315 	return 0;
5316 busy:
5317 	printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
5318 		bdevname(rdev->bdev,b), mdname(mddev));
5319 	return -EBUSY;
5320 }
5321 
5322 static int hot_add_disk(mddev_t * mddev, dev_t dev)
5323 {
5324 	char b[BDEVNAME_SIZE];
5325 	int err;
5326 	mdk_rdev_t *rdev;
5327 
5328 	if (!mddev->pers)
5329 		return -ENODEV;
5330 
5331 	if (mddev->major_version != 0) {
5332 		printk(KERN_WARNING "%s: HOT_ADD may only be used with"
5333 			" version-0 superblocks.\n",
5334 			mdname(mddev));
5335 		return -EINVAL;
5336 	}
5337 	if (!mddev->pers->hot_add_disk) {
5338 		printk(KERN_WARNING
5339 			"%s: personality does not support diskops!\n",
5340 			mdname(mddev));
5341 		return -EINVAL;
5342 	}
5343 
5344 	rdev = md_import_device(dev, -1, 0);
5345 	if (IS_ERR(rdev)) {
5346 		printk(KERN_WARNING
5347 			"md: error, md_import_device() returned %ld\n",
5348 			PTR_ERR(rdev));
5349 		return -EINVAL;
5350 	}
5351 
5352 	if (mddev->persistent)
5353 		rdev->sb_start = calc_dev_sboffset(rdev);
5354 	else
5355 		rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
5356 
5357 	rdev->sectors = rdev->sb_start;
5358 
5359 	if (test_bit(Faulty, &rdev->flags)) {
5360 		printk(KERN_WARNING
5361 			"md: can not hot-add faulty %s disk to %s!\n",
5362 			bdevname(rdev->bdev,b), mdname(mddev));
5363 		err = -EINVAL;
5364 		goto abort_export;
5365 	}
5366 	clear_bit(In_sync, &rdev->flags);
5367 	rdev->desc_nr = -1;
5368 	rdev->saved_raid_disk = -1;
5369 	err = bind_rdev_to_array(rdev, mddev);
5370 	if (err)
5371 		goto abort_export;
5372 
5373 	/*
5374 	 * The rest had better be atomic; disk failures can be
5375 	 * noticed in interrupt contexts ...
5376 	 */
5377 
5378 	rdev->raid_disk = -1;
5379 
5380 	md_update_sb(mddev, 1);
5381 
5382 	/*
5383 	 * Kick recovery, maybe this spare has to be added to the
5384 	 * array immediately.
5385 	 */
5386 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5387 	md_wakeup_thread(mddev->thread);
5388 	md_new_event(mddev);
5389 	return 0;
5390 
5391 abort_export:
5392 	export_rdev(rdev);
5393 	return err;
5394 }
5395 
5396 static int set_bitmap_file(mddev_t *mddev, int fd)
5397 {
5398 	int err;
5399 
5400 	if (mddev->pers) {
5401 		if (!mddev->pers->quiesce)
5402 			return -EBUSY;
5403 		if (mddev->recovery || mddev->sync_thread)
5404 			return -EBUSY;
5405 		/* we should be able to change the bitmap.. */
5406 	}
5407 
5408 
5409 	if (fd >= 0) {
5410 		if (mddev->bitmap)
5411 			return -EEXIST; /* cannot add when bitmap is present */
5412 		mddev->bitmap_info.file = fget(fd);
5413 
5414 		if (mddev->bitmap_info.file == NULL) {
5415 			printk(KERN_ERR "%s: error: failed to get bitmap file\n",
5416 			       mdname(mddev));
5417 			return -EBADF;
5418 		}
5419 
5420 		err = deny_bitmap_write_access(mddev->bitmap_info.file);
5421 		if (err) {
5422 			printk(KERN_ERR "%s: error: bitmap file is already in use\n",
5423 			       mdname(mddev));
5424 			fput(mddev->bitmap_info.file);
5425 			mddev->bitmap_info.file = NULL;
5426 			return err;
5427 		}
5428 		mddev->bitmap_info.offset = 0; /* file overrides offset */
5429 	} else if (mddev->bitmap == NULL)
5430 		return -ENOENT; /* cannot remove what isn't there */
5431 	err = 0;
5432 	if (mddev->pers) {
5433 		mddev->pers->quiesce(mddev, 1);
5434 		if (fd >= 0) {
5435 			err = bitmap_create(mddev);
5436 			if (!err)
5437 				err = bitmap_load(mddev);
5438 		}
5439 		if (fd < 0 || err) {
5440 			bitmap_destroy(mddev);
5441 			fd = -1; /* make sure to put the file */
5442 		}
5443 		mddev->pers->quiesce(mddev, 0);
5444 	}
5445 	if (fd < 0) {
5446 		if (mddev->bitmap_info.file) {
5447 			restore_bitmap_write_access(mddev->bitmap_info.file);
5448 			fput(mddev->bitmap_info.file);
5449 		}
5450 		mddev->bitmap_info.file = NULL;
5451 	}
5452 
5453 	return err;
5454 }
5455 
5456 /*
5457  * set_array_info is used in two different ways.
5458  * The original usage is when creating a new array.
5459  * In this usage, raid_disks is > 0 and it, together with
5460  *  level, size, not_persistent, layout and chunksize, determines the
5461  *  shape of the array.
5462  *  This will always create an array with a type-0.90.0 superblock.
5463  * The newer usage is when assembling an array.
5464  *  In this case raid_disks will be 0, and the major_version field is
5465  *  used to determine which style of super-blocks is to be found on the devices.
5466  *  The minor and patch _version numbers are also kept in case the
5467  *  super_block handler wishes to interpret them.
5468  */
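/*
 * A hedged illustration of the two modes (not driver code): to assemble
 * an existing array, userspace passes only version information,
 *
 *	mdu_array_info_t ainfo = { 0 };
 *	ainfo.major_version = 0;
 *	ainfo.minor_version = 90;
 *	ioctl(md_fd, SET_ARRAY_INFO, &ainfo);
 *
 * whereas creating a new array additionally fills in raid_disks, level,
 * size, layout and chunk_size before issuing the same ioctl.
 */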
5469 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
5470 {
5471 
5472 	if (info->raid_disks == 0) {
5473 		/* just setting version number for superblock loading */
5474 		if (info->major_version < 0 ||
5475 		    info->major_version >= ARRAY_SIZE(super_types) ||
5476 		    super_types[info->major_version].name == NULL) {
5477 			/* maybe try to auto-load a module? */
5478 			printk(KERN_INFO
5479 				"md: superblock version %d not known\n",
5480 				info->major_version);
5481 			return -EINVAL;
5482 		}
5483 		mddev->major_version = info->major_version;
5484 		mddev->minor_version = info->minor_version;
5485 		mddev->patch_version = info->patch_version;
5486 		mddev->persistent = !info->not_persistent;
5487 		/* ensure mddev_put doesn't delete this now that there
5488 		 * is some minimal configuration.
5489 		 */
5490 		mddev->ctime         = get_seconds();
5491 		return 0;
5492 	}
5493 	mddev->major_version = MD_MAJOR_VERSION;
5494 	mddev->minor_version = MD_MINOR_VERSION;
5495 	mddev->patch_version = MD_PATCHLEVEL_VERSION;
5496 	mddev->ctime         = get_seconds();
5497 
5498 	mddev->level         = info->level;
5499 	mddev->clevel[0]     = 0;
5500 	mddev->dev_sectors   = 2 * (sector_t)info->size;
5501 	mddev->raid_disks    = info->raid_disks;
5502 	/* don't set md_minor, it is determined by which /dev/md* was
5503 	 * opened
5504 	 */
5505 	if (info->state & (1<<MD_SB_CLEAN))
5506 		mddev->recovery_cp = MaxSector;
5507 	else
5508 		mddev->recovery_cp = 0;
5509 	mddev->persistent    = ! info->not_persistent;
5510 	mddev->external	     = 0;
5511 
5512 	mddev->layout        = info->layout;
5513 	mddev->chunk_sectors = info->chunk_size >> 9;
5514 
5515 	mddev->max_disks     = MD_SB_DISKS;
5516 
5517 	if (mddev->persistent)
5518 		mddev->flags         = 0;
5519 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
5520 
5521 	mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
5522 	mddev->bitmap_info.offset = 0;
5523 
5524 	mddev->reshape_position = MaxSector;
5525 
5526 	/*
5527 	 * Generate a 128 bit UUID
5528 	 */
5529 	get_random_bytes(mddev->uuid, 16);
5530 
5531 	mddev->new_level = mddev->level;
5532 	mddev->new_chunk_sectors = mddev->chunk_sectors;
5533 	mddev->new_layout = mddev->layout;
5534 	mddev->delta_disks = 0;
5535 
5536 	return 0;
5537 }
5538 
5539 void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors)
5540 {
5541 	WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
5542 
5543 	if (mddev->external_size)
5544 		return;
5545 
5546 	mddev->array_sectors = array_sectors;
5547 }
5548 EXPORT_SYMBOL(md_set_array_sectors);
5549 
5550 static int update_size(mddev_t *mddev, sector_t num_sectors)
5551 {
5552 	mdk_rdev_t *rdev;
5553 	int rv;
5554 	int fit = (num_sectors == 0);
5555 
5556 	if (mddev->pers->resize == NULL)
5557 		return -EINVAL;
5558 	/* The "num_sectors" is the number of sectors of each device that
5559 	 * is used.  This can only make sense for arrays with redundancy.
5560 	 * linear and raid0 always use whatever space is available. We can only
5561 	 * consider changing this number if no resync or reconstruction is
5562 	 * happening, and if the new size is acceptable. It must fit before the
5563 	 * sb_start or, if that is <data_offset, it must fit before the size
5564 	 * of each device.  If num_sectors is zero, we find the largest size
5565 	 * that fits.
5566 	 */
5567 	if (mddev->sync_thread)
5568 		return -EBUSY;
5569 	if (mddev->bitmap)
5570 		/* Sorry, cannot grow a bitmap yet, just remove it,
5571 		 * grow, and re-add.
5572 		 */
5573 		return -EBUSY;
5574 	list_for_each_entry(rdev, &mddev->disks, same_set) {
5575 		sector_t avail = rdev->sectors;
5576 
5577 		if (fit && (num_sectors == 0 || num_sectors > avail))
5578 			num_sectors = avail;
5579 		if (avail < num_sectors)
5580 			return -ENOSPC;
5581 	}
5582 	rv = mddev->pers->resize(mddev, num_sectors);
5583 	if (!rv)
5584 		revalidate_disk(mddev->gendisk);
5585 	return rv;
5586 }
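/*
 * Userspace normally reaches update_size() via SET_ARRAY_INFO and
 * update_array_info() below; with mdadm this corresponds to something
 * like "mdadm --grow /dev/md0 --size=max" (an illustrative invocation),
 * where a size of 0 means "use the largest size that fits on every
 * member device".
 */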
5587 
5588 static int update_raid_disks(mddev_t *mddev, int raid_disks)
5589 {
5590 	int rv;
5591 	/* change the number of raid disks */
5592 	if (mddev->pers->check_reshape == NULL)
5593 		return -EINVAL;
5594 	if (raid_disks <= 0 ||
5595 	    (mddev->max_disks && raid_disks >= mddev->max_disks))
5596 		return -EINVAL;
5597 	if (mddev->sync_thread || mddev->reshape_position != MaxSector)
5598 		return -EBUSY;
5599 	mddev->delta_disks = raid_disks - mddev->raid_disks;
5600 
5601 	rv = mddev->pers->check_reshape(mddev);
5602 	if (rv < 0)
5603 		mddev->delta_disks = 0;
5604 	return rv;
5605 }
5606 
5607 
5608 /*
5609  * update_array_info is used to change the configuration of an
5610  * on-line array.
5611  * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
5612  * fields in the info are checked against the array.
5613  * Any differences that cannot be handled will cause an error.
5614  * Normally, only one change can be managed at a time.
5615  */
5616 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
5617 {
5618 	int rv = 0;
5619 	int cnt = 0;
5620 	int state = 0;
5621 
5622 	/* calculate expected state, ignoring low bits */
5623 	if (mddev->bitmap && mddev->bitmap_info.offset)
5624 		state |= (1 << MD_SB_BITMAP_PRESENT);
5625 
5626 	if (mddev->major_version != info->major_version ||
5627 	    mddev->minor_version != info->minor_version ||
5628 /*	    mddev->patch_version != info->patch_version || */
5629 	    mddev->ctime         != info->ctime         ||
5630 	    mddev->level         != info->level         ||
5631 /*	    mddev->layout        != info->layout        || */
5632 	    !mddev->persistent	 != info->not_persistent||
5633 	    mddev->chunk_sectors != info->chunk_size >> 9 ||
5634 	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
5635 	    ((state^info->state) & 0xfffffe00)
5636 		)
5637 		return -EINVAL;
5638 	/* Check there is only one change */
5639 	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5640 		cnt++;
5641 	if (mddev->raid_disks != info->raid_disks)
5642 		cnt++;
5643 	if (mddev->layout != info->layout)
5644 		cnt++;
5645 	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
5646 		cnt++;
5647 	if (cnt == 0)
5648 		return 0;
5649 	if (cnt > 1)
5650 		return -EINVAL;
5651 
5652 	if (mddev->layout != info->layout) {
5653 		/* Change layout
5654 		 * we don't need to do anything at the md level, the
5655 		 * personality will take care of it all.
5656 		 */
5657 		if (mddev->pers->check_reshape == NULL)
5658 			return -EINVAL;
5659 		else {
5660 			mddev->new_layout = info->layout;
5661 			rv = mddev->pers->check_reshape(mddev);
5662 			if (rv)
5663 				mddev->new_layout = mddev->layout;
5664 			return rv;
5665 		}
5666 	}
5667 	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5668 		rv = update_size(mddev, (sector_t)info->size * 2);
5669 
5670 	if (mddev->raid_disks    != info->raid_disks)
5671 		rv = update_raid_disks(mddev, info->raid_disks);
5672 
5673 	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
5674 		if (mddev->pers->quiesce == NULL)
5675 			return -EINVAL;
5676 		if (mddev->recovery || mddev->sync_thread)
5677 			return -EBUSY;
5678 		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
5679 			/* add the bitmap */
5680 			if (mddev->bitmap)
5681 				return -EEXIST;
5682 			if (mddev->bitmap_info.default_offset == 0)
5683 				return -EINVAL;
5684 			mddev->bitmap_info.offset =
5685 				mddev->bitmap_info.default_offset;
5686 			mddev->pers->quiesce(mddev, 1);
5687 			rv = bitmap_create(mddev);
5688 			if (!rv)
5689 				rv = bitmap_load(mddev);
5690 			if (rv)
5691 				bitmap_destroy(mddev);
5692 			mddev->pers->quiesce(mddev, 0);
5693 		} else {
5694 			/* remove the bitmap */
5695 			if (!mddev->bitmap)
5696 				return -ENOENT;
5697 			if (mddev->bitmap->file)
5698 				return -EINVAL;
5699 			mddev->pers->quiesce(mddev, 1);
5700 			bitmap_destroy(mddev);
5701 			mddev->pers->quiesce(mddev, 0);
5702 			mddev->bitmap_info.offset = 0;
5703 		}
5704 	}
5705 	md_update_sb(mddev, 1);
5706 	return rv;
5707 }
5708 
5709 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
5710 {
5711 	mdk_rdev_t *rdev;
5712 
5713 	if (mddev->pers == NULL)
5714 		return -ENODEV;
5715 
5716 	rdev = find_rdev(mddev, dev);
5717 	if (!rdev)
5718 		return -ENODEV;
5719 
5720 	md_error(mddev, rdev);
5721 	return 0;
5722 }
5723 
5724 /*
5725  * We have a problem here : there is no easy way to give a CHS
5726  * virtual geometry. We currently pretend that we have a 2 heads
5727  * 4 sectors (with a BIG number of cylinders...). This drives
5728  * dosfs just mad... ;-)
5729  */
5730 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
5731 {
5732 	mddev_t *mddev = bdev->bd_disk->private_data;
5733 
5734 	geo->heads = 2;
5735 	geo->sectors = 4;
5736 	geo->cylinders = mddev->array_sectors / 8;
5737 	return 0;
5738 }
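/*
 * Worked example: with the fixed 2-head, 4-sector geometry, a 1 TiB
 * array (2147483648 sectors) reports 2147483648 / 8 = 268435456
 * cylinders, and 2 * 4 * 268435456 * 512 bytes recovers the full 1 TiB.
 */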
5739 
5740 static int md_ioctl(struct block_device *bdev, fmode_t mode,
5741 			unsigned int cmd, unsigned long arg)
5742 {
5743 	int err = 0;
5744 	void __user *argp = (void __user *)arg;
5745 	mddev_t *mddev = NULL;
5746 	int ro;
5747 
5748 	if (!capable(CAP_SYS_ADMIN))
5749 		return -EACCES;
5750 
5751 	/*
5752 	 * Commands dealing with the RAID driver but not any
5753 	 * particular array:
5754 	 */
5755 	switch (cmd)
5756 	{
5757 		case RAID_VERSION:
5758 			err = get_version(argp);
5759 			goto done;
5760 
5761 		case PRINT_RAID_DEBUG:
5762 			err = 0;
5763 			md_print_devices();
5764 			goto done;
5765 
5766 #ifndef MODULE
5767 		case RAID_AUTORUN:
5768 			err = 0;
5769 			autostart_arrays(arg);
5770 			goto done;
5771 #endif
5772 		default:;
5773 	}
5774 
5775 	/*
5776 	 * Commands creating/starting a new array:
5777 	 */
5778 
5779 	mddev = bdev->bd_disk->private_data;
5780 
5781 	if (!mddev) {
5782 		BUG();
5783 		goto abort;
5784 	}
5785 
5786 	err = mddev_lock(mddev);
5787 	if (err) {
5788 		printk(KERN_INFO
5789 			"md: ioctl lock interrupted, reason %d, cmd %d\n",
5790 			err, cmd);
5791 		goto abort;
5792 	}
5793 
5794 	switch (cmd)
5795 	{
5796 		case SET_ARRAY_INFO:
5797 			{
5798 				mdu_array_info_t info;
5799 				if (!arg)
5800 					memset(&info, 0, sizeof(info));
5801 				else if (copy_from_user(&info, argp, sizeof(info))) {
5802 					err = -EFAULT;
5803 					goto abort_unlock;
5804 				}
5805 				if (mddev->pers) {
5806 					err = update_array_info(mddev, &info);
5807 					if (err) {
5808 						printk(KERN_WARNING "md: couldn't update"
5809 						       " array info. %d\n", err);
5810 						goto abort_unlock;
5811 					}
5812 					goto done_unlock;
5813 				}
5814 				if (!list_empty(&mddev->disks)) {
5815 					printk(KERN_WARNING
5816 					       "md: array %s already has disks!\n",
5817 					       mdname(mddev));
5818 					err = -EBUSY;
5819 					goto abort_unlock;
5820 				}
5821 				if (mddev->raid_disks) {
5822 					printk(KERN_WARNING
5823 					       "md: array %s already initialised!\n",
5824 					       mdname(mddev));
5825 					err = -EBUSY;
5826 					goto abort_unlock;
5827 				}
5828 				err = set_array_info(mddev, &info);
5829 				if (err) {
5830 					printk(KERN_WARNING "md: couldn't set"
5831 					       " array info. %d\n", err);
5832 					goto abort_unlock;
5833 				}
5834 			}
5835 			goto done_unlock;
5836 
5837 		default:;
5838 	}
5839 
5840 	/*
5841 	 * Commands querying/configuring an existing array:
5842 	 */
5843 	/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
5844 	 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
5845 	if ((!mddev->raid_disks && !mddev->external)
5846 	    && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
5847 	    && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
5848 	    && cmd != GET_BITMAP_FILE) {
5849 		err = -ENODEV;
5850 		goto abort_unlock;
5851 	}
5852 
5853 	/*
5854 	 * Commands even a read-only array can execute:
5855 	 */
5856 	switch (cmd)
5857 	{
5858 		case GET_ARRAY_INFO:
5859 			err = get_array_info(mddev, argp);
5860 			goto done_unlock;
5861 
5862 		case GET_BITMAP_FILE:
5863 			err = get_bitmap_file(mddev, argp);
5864 			goto done_unlock;
5865 
5866 		case GET_DISK_INFO:
5867 			err = get_disk_info(mddev, argp);
5868 			goto done_unlock;
5869 
5870 		case RESTART_ARRAY_RW:
5871 			err = restart_array(mddev);
5872 			goto done_unlock;
5873 
5874 		case STOP_ARRAY:
5875 			err = do_md_stop(mddev, 0, 1);
5876 			goto done_unlock;
5877 
5878 		case STOP_ARRAY_RO:
5879 			err = md_set_readonly(mddev, 1);
5880 			goto done_unlock;
5881 
5882 		case BLKROSET:
5883 			if (get_user(ro, (int __user *)(arg))) {
5884 				err = -EFAULT;
5885 				goto done_unlock;
5886 			}
5887 			err = -EINVAL;
5888 
5889 			/* if the bdev is going readonly the value of mddev->ro
5890 			 * does not matter, no writes are coming
5891 			 */
5892 			if (ro)
5893 				goto done_unlock;
5894 
5895 			/* are we already prepared for writes? */
5896 			if (mddev->ro != 1)
5897 				goto done_unlock;
5898 
5899 			/* transitioning to read-auto needs to happen only for
5900 			 * arrays that call md_write_start
5901 			 */
5902 			if (mddev->pers) {
5903 				err = restart_array(mddev);
5904 				if (err == 0) {
5905 					mddev->ro = 2;
5906 					set_disk_ro(mddev->gendisk, 0);
5907 				}
5908 			}
5909 			goto done_unlock;
5910 	}
5911 
5912 	/*
5913 	 * The remaining ioctls are changing the state of the
5914 	 * superblock, so we do not allow them on read-only arrays.
5915 	 * However non-MD ioctls (e.g. get-size) will still come through
5916 	 * here and hit the 'default' below, so only disallow
5917 	 * 'md' ioctls, and switch to rw mode if started auto-readonly.
5918 	 */
5919 	if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
5920 		if (mddev->ro == 2) {
5921 			mddev->ro = 0;
5922 			sysfs_notify_dirent_safe(mddev->sysfs_state);
5923 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5924 			md_wakeup_thread(mddev->thread);
5925 		} else {
5926 			err = -EROFS;
5927 			goto abort_unlock;
5928 		}
5929 	}
5930 
5931 	switch (cmd)
5932 	{
5933 		case ADD_NEW_DISK:
5934 		{
5935 			mdu_disk_info_t info;
5936 			if (copy_from_user(&info, argp, sizeof(info)))
5937 				err = -EFAULT;
5938 			else
5939 				err = add_new_disk(mddev, &info);
5940 			goto done_unlock;
5941 		}
5942 
5943 		case HOT_REMOVE_DISK:
5944 			err = hot_remove_disk(mddev, new_decode_dev(arg));
5945 			goto done_unlock;
5946 
5947 		case HOT_ADD_DISK:
5948 			err = hot_add_disk(mddev, new_decode_dev(arg));
5949 			goto done_unlock;
5950 
5951 		case SET_DISK_FAULTY:
5952 			err = set_disk_faulty(mddev, new_decode_dev(arg));
5953 			goto done_unlock;
5954 
5955 		case RUN_ARRAY:
5956 			err = do_md_run(mddev);
5957 			goto done_unlock;
5958 
5959 		case SET_BITMAP_FILE:
5960 			err = set_bitmap_file(mddev, (int)arg);
5961 			goto done_unlock;
5962 
5963 		default:
5964 			err = -EINVAL;
5965 			goto abort_unlock;
5966 	}
5967 
5968 done_unlock:
5969 abort_unlock:
5970 	if (mddev->hold_active == UNTIL_IOCTL &&
5971 	    err != -EINVAL)
5972 		mddev->hold_active = 0;
5973 	mddev_unlock(mddev);
5974 
5975 	return err;
5976 done:
5977 	if (err)
5978 		MD_BUG();
5979 abort:
5980 	return err;
5981 }
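/*
 * For illustration only (a hedged sketch, not driver code): a minimal
 * userspace query of this dispatcher looks like
 *
 *	int fd = open("/dev/md0", O_RDONLY);
 *	mdu_array_info_t info;
 *
 *	if (fd >= 0 && ioctl(fd, GET_ARRAY_INFO, &info) == 0)
 *		printf("level %d, %d raid disks\n",
 *		       info.level, info.raid_disks);
 *
 * which lands in the read-only branch above via get_array_info().
 */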
5982 #ifdef CONFIG_COMPAT
5983 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
5984 		    unsigned int cmd, unsigned long arg)
5985 {
5986 	switch (cmd) {
5987 	case HOT_REMOVE_DISK:
5988 	case HOT_ADD_DISK:
5989 	case SET_DISK_FAULTY:
5990 	case SET_BITMAP_FILE:
5991 		/* These take in integer arg, do not convert */
5992 		break;
5993 	default:
5994 		arg = (unsigned long)compat_ptr(arg);
5995 		break;
5996 	}
5997 
5998 	return md_ioctl(bdev, mode, cmd, arg);
5999 }
6000 #endif /* CONFIG_COMPAT */
6001 
6002 static int md_open(struct block_device *bdev, fmode_t mode)
6003 {
6004 	/*
6005 	 * Succeed if we can lock the mddev, which confirms that
6006 	 * it isn't being stopped right now.
6007 	 */
6008 	mddev_t *mddev = mddev_find(bdev->bd_dev);
6009 	int err;
6010 
6011 	if (mddev->gendisk != bdev->bd_disk) {
6012 		/* we are racing with mddev_put which is discarding this
6013 		 * bd_disk.
6014 		 */
6015 		mddev_put(mddev);
6016 		/* Wait until bdev->bd_disk is definitely gone */
6017 		flush_workqueue(md_misc_wq);
6018 		/* Then retry the open from the top */
6019 		return -ERESTARTSYS;
6020 	}
6021 	BUG_ON(mddev != bdev->bd_disk->private_data);
6022 
6023 	if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
6024 		goto out;
6025 
6026 	err = 0;
6027 	atomic_inc(&mddev->openers);
6028 	mutex_unlock(&mddev->open_mutex);
6029 
6030 	check_disk_change(bdev);
6031  out:
6032 	return err;
6033 }
6034 
6035 static int md_release(struct gendisk *disk, fmode_t mode)
6036 {
6037  	mddev_t *mddev = disk->private_data;
6038 
6039 	BUG_ON(!mddev);
6040 	atomic_dec(&mddev->openers);
6041 	mddev_put(mddev);
6042 
6043 	return 0;
6044 }
6045 
6046 static int md_media_changed(struct gendisk *disk)
6047 {
6048 	mddev_t *mddev = disk->private_data;
6049 
6050 	return mddev->changed;
6051 }
6052 
6053 static int md_revalidate(struct gendisk *disk)
6054 {
6055 	mddev_t *mddev = disk->private_data;
6056 
6057 	mddev->changed = 0;
6058 	return 0;
6059 }
6060 static const struct block_device_operations md_fops =
6061 {
6062 	.owner		= THIS_MODULE,
6063 	.open		= md_open,
6064 	.release	= md_release,
6065 	.ioctl		= md_ioctl,
6066 #ifdef CONFIG_COMPAT
6067 	.compat_ioctl	= md_compat_ioctl,
6068 #endif
6069 	.getgeo		= md_getgeo,
6070 	.media_changed  = md_media_changed,
6071 	.revalidate_disk= md_revalidate,
6072 };
6073 
6074 static int md_thread(void * arg)
6075 {
6076 	mdk_thread_t *thread = arg;
6077 
6078 	/*
6079 	 * md_thread is a 'system-thread'; its priority should be very
6080 	 * high. We avoid resource deadlocks individually in each
6081 	 * raid personality. (RAID5 does preallocation) We also use RR and
6082 	 * the very same RT priority as kswapd, thus we will never get
6083 	 * into a priority inversion deadlock.
6084 	 *
6085 	 * we definitely have to have equal or higher priority than
6086 	 * bdflush, otherwise bdflush will deadlock if there are too
6087 	 * many dirty RAID5 blocks.
6088 	 */
6089 
6090 	allow_signal(SIGKILL);
6091 	while (!kthread_should_stop()) {
6092 
6093 		/* We need to wait INTERRUPTIBLE so that
6094 		 * we don't add to the load-average.
6095 		 * That means we need to be sure no signals are
6096 		 * pending
6097 		 */
6098 		if (signal_pending(current))
6099 			flush_signals(current);
6100 
6101 		wait_event_interruptible_timeout
6102 			(thread->wqueue,
6103 			 test_bit(THREAD_WAKEUP, &thread->flags)
6104 			 || kthread_should_stop(),
6105 			 thread->timeout);
6106 
6107 		clear_bit(THREAD_WAKEUP, &thread->flags);
6108 		if (!kthread_should_stop())
6109 			thread->run(thread->mddev);
6110 	}
6111 
6112 	return 0;
6113 }
6114 
6115 void md_wakeup_thread(mdk_thread_t *thread)
6116 {
6117 	if (thread) {
6118 		dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
6119 		set_bit(THREAD_WAKEUP, &thread->flags);
6120 		wake_up(&thread->wqueue);
6121 	}
6122 }
6123 
6124 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
6125 				 const char *name)
6126 {
6127 	mdk_thread_t *thread;
6128 
6129 	thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
6130 	if (!thread)
6131 		return NULL;
6132 
6133 	init_waitqueue_head(&thread->wqueue);
6134 
6135 	thread->run = run;
6136 	thread->mddev = mddev;
6137 	thread->timeout = MAX_SCHEDULE_TIMEOUT;
6138 	thread->tsk = kthread_run(md_thread, thread,
6139 				  "%s_%s",
6140 				  mdname(thread->mddev),
6141 				  name ?: mddev->pers->name);
6142 	if (IS_ERR(thread->tsk)) {
6143 		kfree(thread);
6144 		return NULL;
6145 	}
6146 	return thread;
6147 }
6148 
6149 void md_unregister_thread(mdk_thread_t *thread)
6150 {
6151 	if (!thread)
6152 		return;
6153 	dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
6154 
6155 	kthread_stop(thread->tsk);
6156 	kfree(thread);
6157 }
6158 
6159 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
6160 {
6161 	if (!mddev) {
6162 		MD_BUG();
6163 		return;
6164 	}
6165 
6166 	if (!rdev || test_bit(Faulty, &rdev->flags))
6167 		return;
6168 
6169 	if (mddev->external)
6170 		set_bit(Blocked, &rdev->flags);
6171 /*
6172 	dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
6173 		mdname(mddev),
6174 		MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
6175 		__builtin_return_address(0),__builtin_return_address(1),
6176 		__builtin_return_address(2),__builtin_return_address(3));
6177 */
6178 	if (!mddev->pers)
6179 		return;
6180 	if (!mddev->pers->error_handler)
6181 		return;
6182 	mddev->pers->error_handler(mddev,rdev);
6183 	if (mddev->degraded)
6184 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6185 	sysfs_notify_dirent_safe(rdev->sysfs_state);
6186 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6187 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6188 	md_wakeup_thread(mddev->thread);
6189 	if (mddev->event_work.func)
6190 		queue_work(md_misc_wq, &mddev->event_work);
6191 	md_new_event_inintr(mddev);
6192 }
6193 
6194 /* seq_file implementation /proc/mdstat */
6195 
6196 static void status_unused(struct seq_file *seq)
6197 {
6198 	int i = 0;
6199 	mdk_rdev_t *rdev;
6200 
6201 	seq_printf(seq, "unused devices: ");
6202 
6203 	list_for_each_entry(rdev, &pending_raid_disks, same_set) {
6204 		char b[BDEVNAME_SIZE];
6205 		i++;
6206 		seq_printf(seq, "%s ",
6207 			      bdevname(rdev->bdev,b));
6208 	}
6209 	if (!i)
6210 		seq_printf(seq, "<none>");
6211 
6212 	seq_printf(seq, "\n");
6213 }
6214 
6215 
6216 static void status_resync(struct seq_file *seq, mddev_t * mddev)
6217 {
6218 	sector_t max_sectors, resync, res;
6219 	unsigned long dt, db;
6220 	sector_t rt;
6221 	int scale;
6222 	unsigned int per_milli;
6223 
6224 	resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);
6225 
6226 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
6227 		max_sectors = mddev->resync_max_sectors;
6228 	else
6229 		max_sectors = mddev->dev_sectors;
6230 
6231 	/*
6232 	 * Should not happen.
6233 	 */
6234 	if (!max_sectors) {
6235 		MD_BUG();
6236 		return;
6237 	}
6238 	/* Pick 'scale' such that (resync>>scale)*1000 will fit
6239 	 * in a sector_t, and (max_sectors>>scale) will fit in a
6240 	 * u32, as those are the requirements for sector_div.
6241 	 * Thus 'scale' must be at least 10
6242 	 */
6243 	scale = 10;
6244 	if (sizeof(sector_t) > sizeof(unsigned long)) {
6245 		while ( max_sectors/2 > (1ULL<<(scale+32)))
6246 			scale++;
6247 	}
6248 	res = (resync>>scale)*1000;
6249 	sector_div(res, (u32)((max_sectors>>scale)+1));
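	/*
	 * Worked example (illustrative numbers): with max_sectors == 2000000
	 * and resync == 500000, scale stays at 10, so
	 * res = (500000 >> 10) * 1000 = 488000, and dividing by
	 * (2000000 >> 10) + 1 = 1954 gives per_milli ~= 249, close to the
	 * exact 25.0%.
	 */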
6250 
6251 	per_milli = res;
6252 	{
6253 		int i, x = per_milli/50, y = 20-x;
6254 		seq_printf(seq, "[");
6255 		for (i = 0; i < x; i++)
6256 			seq_printf(seq, "=");
6257 		seq_printf(seq, ">");
6258 		for (i = 0; i < y; i++)
6259 			seq_printf(seq, ".");
6260 		seq_printf(seq, "] ");
6261 	}
6262 	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
6263 		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
6264 		    "reshape" :
6265 		    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
6266 		     "check" :
6267 		     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
6268 		      "resync" : "recovery"))),
6269 		   per_milli/10, per_milli % 10,
6270 		   (unsigned long long) resync/2,
6271 		   (unsigned long long) max_sectors/2);
6272 
6273 	/*
6274 	 * dt: time from mark until now
6275 	 * db: blocks written from mark until now
6276 	 * rt: remaining time
6277 	 *
6278 	 * rt is a sector_t, so could be 32bit or 64bit.
6279 	 * So we divide before multiply in case it is 32bit and close
6280 	 * to the limit.
6281 	 * We scale the divisor (db) by 32 to avoid losing precision
6282 	 * near the end of resync when the number of remaining sectors
6283 	 * is close to 'db'.
6284 	 * We then divide rt by 32 after multiplying by db to compensate.
6285 	 * The '+1' avoids division by zero if db is very small.
6286 	 */
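	/*
	 * Worked example (illustrative numbers): dt = 10 s and db = 20480
	 * sectors written in that window, with 2048000 sectors remaining.
	 * Then rt = 2048000 / (20480/32 + 1) = 2048000 / 641 ~= 3195,
	 * times dt gives 31950, and >> 5 yields ~998 seconds -- close to
	 * the exact 2048000 / 2048 = 1000 s remaining.
	 */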
6287 	dt = ((jiffies - mddev->resync_mark) / HZ);
6288 	if (!dt) dt++;
6289 	db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
6290 		- mddev->resync_mark_cnt;
6291 
6292 	rt = max_sectors - resync;    /* number of remaining sectors */
6293 	sector_div(rt, db/32+1);
6294 	rt *= dt;
6295 	rt >>= 5;
6296 
6297 	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
6298 		   ((unsigned long)rt % 60)/6);
6299 
6300 	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
6301 }
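/*
 * Taken together, status_resync() emits the familiar /proc/mdstat
 * progress line, e.g. (illustrative values):
 *
 *	[=========>...........]  resync =48.1% (2432000/5056000)
 *	finish=3.5min speed=12345K/sec
 */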
6302 
6303 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
6304 {
6305 	struct list_head *tmp;
6306 	loff_t l = *pos;
6307 	mddev_t *mddev;
6308 
6309 	if (l >= 0x10000)
6310 		return NULL;
6311 	if (!l--)
6312 		/* header */
6313 		return (void*)1;
6314 
6315 	spin_lock(&all_mddevs_lock);
6316 	list_for_each(tmp,&all_mddevs)
6317 		if (!l--) {
6318 			mddev = list_entry(tmp, mddev_t, all_mddevs);
6319 			mddev_get(mddev);
6320 			spin_unlock(&all_mddevs_lock);
6321 			return mddev;
6322 		}
6323 	spin_unlock(&all_mddevs_lock);
6324 	if (!l--)
6325 		return (void*)2;/* tail */
6326 	return NULL;
6327 }
6328 
6329 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
6330 {
6331 	struct list_head *tmp;
6332 	mddev_t *next_mddev, *mddev = v;
6333 
6334 	++*pos;
6335 	if (v == (void*)2)
6336 		return NULL;
6337 
6338 	spin_lock(&all_mddevs_lock);
6339 	if (v == (void*)1)
6340 		tmp = all_mddevs.next;
6341 	else
6342 		tmp = mddev->all_mddevs.next;
6343 	if (tmp != &all_mddevs)
6344 		next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
6345 	else {
6346 		next_mddev = (void*)2;
6347 		*pos = 0x10000;
6348 	}
6349 	spin_unlock(&all_mddevs_lock);
6350 
6351 	if (v != (void*)1)
6352 		mddev_put(mddev);
6353 	return next_mddev;
6354 
6355 }
6356 
6357 static void md_seq_stop(struct seq_file *seq, void *v)
6358 {
6359 	mddev_t *mddev = v;
6360 
6361 	if (mddev && v != (void*)1 && v != (void*)2)
6362 		mddev_put(mddev);
6363 }
6364 
6365 struct mdstat_info {
6366 	int event;
6367 };
6368 
6369 static int md_seq_show(struct seq_file *seq, void *v)
6370 {
6371 	mddev_t *mddev = v;
6372 	sector_t sectors;
6373 	mdk_rdev_t *rdev;
6374 	struct mdstat_info *mi = seq->private;
6375 	struct bitmap *bitmap;
6376 
6377 	if (v == (void*)1) {
6378 		struct mdk_personality *pers;
6379 		seq_printf(seq, "Personalities : ");
6380 		spin_lock(&pers_lock);
6381 		list_for_each_entry(pers, &pers_list, list)
6382 			seq_printf(seq, "[%s] ", pers->name);
6383 
6384 		spin_unlock(&pers_lock);
6385 		seq_printf(seq, "\n");
6386 		mi->event = atomic_read(&md_event_count);
6387 		return 0;
6388 	}
6389 	if (v == (void*)2) {
6390 		status_unused(seq);
6391 		return 0;
6392 	}
6393 
6394 	if (mddev_lock(mddev) < 0)
6395 		return -EINTR;
6396 
6397 	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
6398 		seq_printf(seq, "%s : %sactive", mdname(mddev),
6399 						mddev->pers ? "" : "in");
6400 		if (mddev->pers) {
6401 			if (mddev->ro==1)
6402 				seq_printf(seq, " (read-only)");
6403 			if (mddev->ro==2)
6404 				seq_printf(seq, " (auto-read-only)");
6405 			seq_printf(seq, " %s", mddev->pers->name);
6406 		}
6407 
6408 		sectors = 0;
6409 		list_for_each_entry(rdev, &mddev->disks, same_set) {
6410 			char b[BDEVNAME_SIZE];
6411 			seq_printf(seq, " %s[%d]",
6412 				bdevname(rdev->bdev,b), rdev->desc_nr);
6413 			if (test_bit(WriteMostly, &rdev->flags))
6414 				seq_printf(seq, "(W)");
6415 			if (test_bit(Faulty, &rdev->flags)) {
6416 				seq_printf(seq, "(F)");
6417 				continue;
6418 			} else if (rdev->raid_disk < 0)
6419 				seq_printf(seq, "(S)"); /* spare */
6420 			sectors += rdev->sectors;
6421 		}
6422 
6423 		if (!list_empty(&mddev->disks)) {
6424 			if (mddev->pers)
6425 				seq_printf(seq, "\n      %llu blocks",
6426 					   (unsigned long long)
6427 					   mddev->array_sectors / 2);
6428 			else
6429 				seq_printf(seq, "\n      %llu blocks",
6430 					   (unsigned long long)sectors / 2);
6431 		}
6432 		if (mddev->persistent) {
6433 			if (mddev->major_version != 0 ||
6434 			    mddev->minor_version != 90) {
6435 				seq_printf(seq," super %d.%d",
6436 					   mddev->major_version,
6437 					   mddev->minor_version);
6438 			}
6439 		} else if (mddev->external)
6440 			seq_printf(seq, " super external:%s",
6441 				   mddev->metadata_type);
6442 		else
6443 			seq_printf(seq, " super non-persistent");
6444 
6445 		if (mddev->pers) {
6446 			mddev->pers->status(seq, mddev);
6447 	 		seq_printf(seq, "\n      ");
6448 			if (mddev->pers->sync_request) {
6449 				if (mddev->curr_resync > 2) {
6450 					status_resync(seq, mddev);
6451 					seq_printf(seq, "\n      ");
6452 				} else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
6453 					seq_printf(seq, "\tresync=DELAYED\n      ");
6454 				else if (mddev->recovery_cp < MaxSector)
6455 					seq_printf(seq, "\tresync=PENDING\n      ");
6456 			}
6457 		} else
6458 			seq_printf(seq, "\n       ");
6459 
6460 		if ((bitmap = mddev->bitmap)) {
6461 			unsigned long chunk_kb;
6462 			unsigned long flags;
6463 			spin_lock_irqsave(&bitmap->lock, flags);
6464 			chunk_kb = mddev->bitmap_info.chunksize >> 10;
6465 			seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
6466 				"%lu%s chunk",
6467 				bitmap->pages - bitmap->missing_pages,
6468 				bitmap->pages,
6469 				(bitmap->pages - bitmap->missing_pages)
6470 					<< (PAGE_SHIFT - 10),
6471 				chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
6472 				chunk_kb ? "KB" : "B");
6473 			if (bitmap->file) {
6474 				seq_printf(seq, ", file: ");
6475 				seq_path(seq, &bitmap->file->f_path, " \t\n");
6476 			}
6477 
6478 			seq_printf(seq, "\n");
6479 			spin_unlock_irqrestore(&bitmap->lock, flags);
6480 		}
6481 
6482 		seq_printf(seq, "\n");
6483 	}
6484 	mddev_unlock(mddev);
6485 
6486 	return 0;
6487 }
6488 
6489 static const struct seq_operations md_seq_ops = {
6490 	.start  = md_seq_start,
6491 	.next   = md_seq_next,
6492 	.stop   = md_seq_stop,
6493 	.show   = md_seq_show,
6494 };
6495 
6496 static int md_seq_open(struct inode *inode, struct file *file)
6497 {
6498 	int error;
6499 	struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
6500 	if (mi == NULL)
6501 		return -ENOMEM;
6502 
6503 	error = seq_open(file, &md_seq_ops);
6504 	if (error)
6505 		kfree(mi);
6506 	else {
6507 		struct seq_file *p = file->private_data;
6508 		p->private = mi;
6509 		mi->event = atomic_read(&md_event_count);
6510 	}
6511 	return error;
6512 }
6513 
6514 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
6515 {
6516 	struct seq_file *m = filp->private_data;
6517 	struct mdstat_info *mi = m->private;
6518 	int mask;
6519 
6520 	poll_wait(filp, &md_event_waiters, wait);
6521 
6522 	/* always allow read */
6523 	mask = POLLIN | POLLRDNORM;
6524 
6525 	if (mi->event != atomic_read(&md_event_count))
6526 		mask |= POLLERR | POLLPRI;
6527 	return mask;
6528 }
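/*
 * For illustration only (a hedged sketch, not driver code): userspace can
 * wait for array events by polling /proc/mdstat for the POLLPRI that
 * mdstat_poll() raises whenever md_event_count advances:
 *
 *	struct pollfd pfd = { .fd = open("/proc/mdstat", O_RDONLY),
 *			      .events = POLLPRI };
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		... re-read /proc/mdstat, which refreshes mi->event ...
 *	}
 */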
6529 
6530 static const struct file_operations md_seq_fops = {
6531 	.owner		= THIS_MODULE,
6532 	.open           = md_seq_open,
6533 	.read           = seq_read,
6534 	.llseek         = seq_lseek,
6535 	.release	= seq_release_private,
6536 	.poll		= mdstat_poll,
6537 };
6538 
6539 int register_md_personality(struct mdk_personality *p)
6540 {
6541 	spin_lock(&pers_lock);
6542 	list_add_tail(&p->list, &pers_list);
6543 	printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
6544 	spin_unlock(&pers_lock);
6545 	return 0;
6546 }
6547 
6548 int unregister_md_personality(struct mdk_personality *p)
6549 {
6550 	printk(KERN_INFO "md: %s personality unregistered\n", p->name);
6551 	spin_lock(&pers_lock);
6552 	list_del_init(&p->list);
6553 	spin_unlock(&pers_lock);
6554 	return 0;
6555 }
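/*
 * A hedged sketch of how a personality module uses the pair above
 * (mirroring the pattern of the real raid modules; the names below are
 * hypothetical):
 *
 *	static struct mdk_personality example_personality = {
 *		.name	= "example",
 *		.level	= -2,
 *		.owner	= THIS_MODULE,
 *		...
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_md_personality(&example_personality);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_md_personality(&example_personality);
 *	}
 */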
6556 
6557 static int is_mddev_idle(mddev_t *mddev, int init)
6558 {
6559 	mdk_rdev_t * rdev;
6560 	int idle;
6561 	int curr_events;
6562 
6563 	idle = 1;
6564 	rcu_read_lock();
6565 	rdev_for_each_rcu(rdev, mddev) {
6566 		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
6567 		curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
6568 			      (int)part_stat_read(&disk->part0, sectors[1]) -
6569 			      atomic_read(&disk->sync_io);
6570 		/* sync IO will cause sync_io to increase before the disk_stats
6571 		 * as sync_io is counted when a request starts, and
6572 		 * disk_stats is counted when it completes.
6573 		 * So resync activity will cause curr_events to be smaller than
6574 		 * when there was no such activity.
6575 		 * non-sync IO will cause disk_stat to increase without
6576 		 * increasing sync_io so curr_events will (eventually)
6577 		 * be larger than it was before.  Once it becomes
6578 		 * substantially larger, the test below will cause
6579 		 * the array to appear non-idle, and resync will slow
6580 		 * down.
6581 		 * If there is a lot of outstanding resync activity when
6582 		 * we set last_event to curr_events, then all that activity
6583 		 * completing might cause the array to appear non-idle
6584 		 * and resync will be slowed down even though there might
6585 		 * not have been non-resync activity.  This will only
6586 		 * happen once though.  'last_events' will soon reflect
6587 		 * the state where there is little or no outstanding
6588 		 * resync requests, and further resync activity will
6589 		 * always make curr_events less than last_events.
6590 		 *
6591 		 */
6592 		if (init || curr_events - rdev->last_events > 64) {
6593 			rdev->last_events = curr_events;
6594 			idle = 0;
6595 		}
6596 	}
6597 	rcu_read_unlock();
6598 	return idle;
6599 }
6600 
6601 void md_done_sync(mddev_t *mddev, int blocks, int ok)
6602 {
6603 	/* another "blocks" (512byte) blocks have been synced */
6604 	atomic_sub(blocks, &mddev->recovery_active);
6605 	wake_up(&mddev->recovery_wait);
6606 	if (!ok) {
6607 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6608 		md_wakeup_thread(mddev->thread);
6609 		// stop recovery, signal do_sync ....
6610 	}
6611 }
6612 
6613 
6614 /* md_write_start(mddev, bi)
6615  * If we need to update some array metadata (e.g. 'active' flag
6616  * in superblock) before writing, schedule a superblock update
6617  * and wait for it to complete.
6618  */
6619 void md_write_start(mddev_t *mddev, struct bio *bi)
6620 {
6621 	int did_change = 0;
6622 	if (bio_data_dir(bi) != WRITE)
6623 		return;
6624 
6625 	BUG_ON(mddev->ro == 1);
6626 	if (mddev->ro == 2) {
6627 		/* need to switch to read/write */
6628 		mddev->ro = 0;
6629 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6630 		md_wakeup_thread(mddev->thread);
6631 		md_wakeup_thread(mddev->sync_thread);
6632 		did_change = 1;
6633 	}
6634 	atomic_inc(&mddev->writes_pending);
6635 	if (mddev->safemode == 1)
6636 		mddev->safemode = 0;
6637 	if (mddev->in_sync) {
6638 		spin_lock_irq(&mddev->write_lock);
6639 		if (mddev->in_sync) {
6640 			mddev->in_sync = 0;
6641 			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6642 			set_bit(MD_CHANGE_PENDING, &mddev->flags);
6643 			md_wakeup_thread(mddev->thread);
6644 			did_change = 1;
6645 		}
6646 		spin_unlock_irq(&mddev->write_lock);
6647 	}
6648 	if (did_change)
6649 		sysfs_notify_dirent_safe(mddev->sysfs_state);
6650 	wait_event(mddev->sb_wait,
6651 		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6652 }
6653 
6654 void md_write_end(mddev_t *mddev)
6655 {
6656 	if (atomic_dec_and_test(&mddev->writes_pending)) {
6657 		if (mddev->safemode == 2)
6658 			md_wakeup_thread(mddev->thread);
6659 		else if (mddev->safemode_delay)
6660 			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
6661 	}
6662 }
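/*
 * A hedged sketch of the intended bracket, as the raid personalities use
 * it: every write is announced before it is issued and retired when it
 * completes, so the 'active' superblock state tracks in-flight writes:
 *
 *	md_write_start(mddev, bio);	(may block for a superblock update)
 *	... submit the write ...
 *	md_write_end(mddev);		(from the completion path)
 */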
6663 
6664 /* md_allow_write(mddev)
6665  * Calling this ensures that the array is marked 'active' so that writes
6666  * may proceed without blocking.  It is important to call this before
6667  * attempting a GFP_KERNEL allocation while holding the mddev lock.
6668  * Must be called with mddev_lock held.
6669  *
6670  * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
6671  * is dropped, so return -EAGAIN after notifying userspace.
6672  */
6673 int md_allow_write(mddev_t *mddev)
6674 {
6675 	if (!mddev->pers)
6676 		return 0;
6677 	if (mddev->ro)
6678 		return 0;
6679 	if (!mddev->pers->sync_request)
6680 		return 0;
6681 
6682 	spin_lock_irq(&mddev->write_lock);
6683 	if (mddev->in_sync) {
6684 		mddev->in_sync = 0;
6685 		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6686 		set_bit(MD_CHANGE_PENDING, &mddev->flags);
6687 		if (mddev->safemode_delay &&
6688 		    mddev->safemode == 0)
6689 			mddev->safemode = 1;
6690 		spin_unlock_irq(&mddev->write_lock);
6691 		md_update_sb(mddev, 0);
6692 		sysfs_notify_dirent_safe(mddev->sysfs_state);
6693 	} else
6694 		spin_unlock_irq(&mddev->write_lock);
6695 
6696 	if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
6697 		return -EAGAIN;
6698 	else
6699 		return 0;
6700 }
6701 EXPORT_SYMBOL_GPL(md_allow_write);
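/*
 * A hedged usage sketch: a personality about to allocate with GFP_KERNEL
 * while holding the mddev lock first marks the array active, retrying
 * later if the ->external case reports -EAGAIN:
 *
 *	err = md_allow_write(mddev);
 *	if (err)
 *		return err;
 *	ptr = kmalloc(size, GFP_KERNEL);
 */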
6702 
6703 #define SYNC_MARKS	10
6704 #define	SYNC_MARK_STEP	(3*HZ)
6705 void md_do_sync(mddev_t *mddev)
6706 {
6707 	mddev_t *mddev2;
6708 	unsigned int currspeed = 0,
6709 		 window;
6710 	sector_t max_sectors,j, io_sectors;
6711 	unsigned long mark[SYNC_MARKS];
6712 	sector_t mark_cnt[SYNC_MARKS];
6713 	int last_mark,m;
6714 	struct list_head *tmp;
6715 	sector_t last_check;
6716 	int skipped = 0;
6717 	mdk_rdev_t *rdev;
6718 	char *desc;
6719 
6720 	/* just in case the thread restarts... */
6721 	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
6722 		return;
6723 	if (mddev->ro) /* never try to sync a read-only array */
6724 		return;
6725 
6726 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6727 		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
6728 			desc = "data-check";
6729 		else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6730 			desc = "requested-resync";
6731 		else
6732 			desc = "resync";
6733 	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6734 		desc = "reshape";
6735 	else
6736 		desc = "recovery";
6737 
6738 	/* we overload curr_resync somewhat here.
6739 	 * 0 == not engaged in resync at all
6740 	 * 2 == checking that there is no conflict with another sync
6741 	 * 1 == like 2, but have yielded to allow conflicting resync to
6742 	 *		commence
6743 	 * other == active in resync - this many blocks
6744 	 *
6745 	 * Before starting a resync we must have set curr_resync to
6746 	 * 2, and then checked that every "conflicting" array has curr_resync
6747 	 * less than ours.  When we find one that is the same or higher
6748 	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
6749 	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
6750 	 * This will mean we have to start checking from the beginning again.
6751 	 *
6752 	 */
6753 
6754 	do {
6755 		mddev->curr_resync = 2;
6756 
6757 	try_again:
6758 		if (kthread_should_stop())
6759 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6760 
6761 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6762 			goto skip;
6763 		for_each_mddev(mddev2, tmp) {
6764 			if (mddev2 == mddev)
6765 				continue;
6766 			if (!mddev->parallel_resync
6767 			&&  mddev2->curr_resync
6768 			&&  match_mddev_units(mddev, mddev2)) {
6769 				DEFINE_WAIT(wq);
6770 				if (mddev < mddev2 && mddev->curr_resync == 2) {
6771 					/* arbitrarily yield */
6772 					mddev->curr_resync = 1;
6773 					wake_up(&resync_wait);
6774 				}
6775 				if (mddev > mddev2 && mddev->curr_resync == 1)
6776 					/* no need to wait here, we can wait the next
6777 					 * time 'round when curr_resync == 2
6778 					 */
6779 					continue;
6780 				/* We need to wait 'interruptible' so as not to
6781 				 * contribute to the load average, and not to
6782 				 * be caught by 'softlockup'
6783 				 */
6784 				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
6785 				if (!kthread_should_stop() &&
6786 				    mddev2->curr_resync >= mddev->curr_resync) {
6787 					printk(KERN_INFO "md: delaying %s of %s"
6788 					       " until %s has finished (they"
6789 					       " share one or more physical units)\n",
6790 					       desc, mdname(mddev), mdname(mddev2));
6791 					mddev_put(mddev2);
6792 					if (signal_pending(current))
6793 						flush_signals(current);
6794 					schedule();
6795 					finish_wait(&resync_wait, &wq);
6796 					goto try_again;
6797 				}
6798 				finish_wait(&resync_wait, &wq);
6799 			}
6800 		}
6801 	} while (mddev->curr_resync < 2);
6802 
6803 	j = 0;
6804 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6805 		/* resync follows the size requested by the personality,
6806 		 * which defaults to physical size, but can be virtual size
6807 		 */
6808 		max_sectors = mddev->resync_max_sectors;
6809 		mddev->resync_mismatches = 0;
6810 		/* we don't use the checkpoint if there's a bitmap */
6811 		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6812 			j = mddev->resync_min;
6813 		else if (!mddev->bitmap)
6814 			j = mddev->recovery_cp;
6815 
6816 	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6817 		max_sectors = mddev->dev_sectors;
6818 	else {
6819 		/* recovery follows the physical size of devices */
6820 		max_sectors = mddev->dev_sectors;
6821 		j = MaxSector;
6822 		rcu_read_lock();
6823 		list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
6824 			if (rdev->raid_disk >= 0 &&
6825 			    !test_bit(Faulty, &rdev->flags) &&
6826 			    !test_bit(In_sync, &rdev->flags) &&
6827 			    rdev->recovery_offset < j)
6828 				j = rdev->recovery_offset;
6829 		rcu_read_unlock();
6830 	}
6831 
6832 	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
6833 	printk(KERN_INFO "md: minimum _guaranteed_  speed:"
6834 		" %d KB/sec/disk.\n", speed_min(mddev));
6835 	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
6836 	       "(but not more than %d KB/sec) for %s.\n",
6837 	       speed_max(mddev), desc);
6838 
6839 	is_mddev_idle(mddev, 1); /* this initializes IO event counters */
6840 
6841 	io_sectors = 0;
6842 	for (m = 0; m < SYNC_MARKS; m++) {
6843 		mark[m] = jiffies;
6844 		mark_cnt[m] = io_sectors;
6845 	}
6846 	last_mark = 0;
6847 	mddev->resync_mark = mark[last_mark];
6848 	mddev->resync_mark_cnt = mark_cnt[last_mark];
6849 
6850 	/*
6851 	 * Tune reconstruction:
6852 	 */
6853 	window = 32*(PAGE_SIZE/512);
6854 	printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
6855 		window/2,(unsigned long long) max_sectors/2);
6856 
6857 	atomic_set(&mddev->recovery_active, 0);
6858 	last_check = 0;
6859 
6860 	if (j>2) {
6861 		printk(KERN_INFO
6862 		       "md: resuming %s of %s from checkpoint.\n",
6863 		       desc, mdname(mddev));
6864 		mddev->curr_resync = j;
6865 	}
6866 	mddev->curr_resync_completed = j;
6867 
6868 	while (j < max_sectors) {
6869 		sector_t sectors;
6870 
6871 		skipped = 0;
6872 
6873 		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
6874 		    ((mddev->curr_resync > mddev->curr_resync_completed &&
6875 		      (mddev->curr_resync - mddev->curr_resync_completed)
6876 		      > (max_sectors >> 4)) ||
6877 		     (j - mddev->curr_resync_completed)*2
6878 		     >= mddev->resync_max - mddev->curr_resync_completed
6879 			    )) {
6880 			/* time to update curr_resync_completed */
6881 			wait_event(mddev->recovery_wait,
6882 				   atomic_read(&mddev->recovery_active) == 0);
6883 			mddev->curr_resync_completed = j;
6884 			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6885 			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6886 		}
6887 
6888 		while (j >= mddev->resync_max && !kthread_should_stop()) {
6889 			/* As this condition is controlled by user-space,
6890 			 * we can block indefinitely, so use '_interruptible'
6891 			 * to avoid triggering warnings.
6892 			 */
6893 			flush_signals(current); /* just in case */
6894 			wait_event_interruptible(mddev->recovery_wait,
6895 						 mddev->resync_max > j
6896 						 || kthread_should_stop());
6897 		}
6898 
6899 		if (kthread_should_stop())
6900 			goto interrupted;
6901 
6902 		sectors = mddev->pers->sync_request(mddev, j, &skipped,
6903 						  currspeed < speed_min(mddev));
6904 		if (sectors == 0) {
6905 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6906 			goto out;
6907 		}
6908 
6909 		if (!skipped) { /* actual IO requested */
6910 			io_sectors += sectors;
6911 			atomic_add(sectors, &mddev->recovery_active);
6912 		}
6913 
6914 		j += sectors;
6915 		if (j>1) mddev->curr_resync = j;
6916 		mddev->curr_mark_cnt = io_sectors;
6917 		if (last_check == 0)
6918 			/* this is the earliest that rebuild will be
6919 			 * visible in /proc/mdstat
6920 			 */
6921 			md_new_event(mddev);
6922 
6923 		if (last_check + window > io_sectors || j == max_sectors)
6924 			continue;
6925 
6926 		last_check = io_sectors;
6927 
6928 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6929 			break;
6930 
6931 	repeat:
6932 		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
6933 			/* step marks */
6934 			int next = (last_mark+1) % SYNC_MARKS;
6935 
6936 			mddev->resync_mark = mark[next];
6937 			mddev->resync_mark_cnt = mark_cnt[next];
6938 			mark[next] = jiffies;
6939 			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
6940 			last_mark = next;
6941 		}
6942 
6943 
6944 		if (kthread_should_stop())
6945 			goto interrupted;
6946 
6947 
6948 		/*
6949 		 * this loop exits only when either we are slower than
6950 		 * the 'hard' speed limit, or the system was IO-idle for
6951 		 * a jiffy.
6952 		 * the system might be non-idle CPU-wise, but we only care
6953 		 * about not overloading the IO subsystem. (things like an
6954 		 * e2fsck being done on the RAID array should execute fast)
6955 		 */
6956 		cond_resched();
6957 
6958 		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
6959 			/((jiffies-mddev->resync_mark)/HZ +1) +1;
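		/*
		 * Worked example (illustrative numbers): if 204800 sectors
		 * completed since resync_mark was taken ~9 seconds ago,
		 * currspeed = (204800/2) / (9 + 1) + 1 = 10241 KB/sec,
		 * which the test below keeps between speed_min() and
		 * speed_max().
		 */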
6960 
6961 		if (currspeed > speed_min(mddev)) {
6962 			if ((currspeed > speed_max(mddev)) ||
6963 					!is_mddev_idle(mddev, 0)) {
6964 				msleep(500);
6965 				goto repeat;
6966 			}
6967 		}
6968 	}
6969 	printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
6970 	/*
6971 	 * this also signals 'finished resyncing' to md_stop
6972 	 */
6973  out:
6974 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
6975 
6976 	/* tell personality that we are finished */
6977 	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
6978 
6979 	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
6980 	    mddev->curr_resync > 2) {
6981 		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6982 			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
6983 				if (mddev->curr_resync >= mddev->recovery_cp) {
6984 					printk(KERN_INFO
6985 					       "md: checkpointing %s of %s.\n",
6986 					       desc, mdname(mddev));
6987 					mddev->recovery_cp = mddev->curr_resync;
6988 				}
6989 			} else
6990 				mddev->recovery_cp = MaxSector;
6991 		} else {
6992 			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6993 				mddev->curr_resync = MaxSector;
6994 			rcu_read_lock();
6995 			list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
6996 				if (rdev->raid_disk >= 0 &&
6997 				    mddev->delta_disks >= 0 &&
6998 				    !test_bit(Faulty, &rdev->flags) &&
6999 				    !test_bit(In_sync, &rdev->flags) &&
7000 				    rdev->recovery_offset < mddev->curr_resync)
7001 					rdev->recovery_offset = mddev->curr_resync;
7002 			rcu_read_unlock();
7003 		}
7004 	}
7005 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
7006 
7007  skip:
7008 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7009 		/* We completed so min/max setting can be forgotten if used. */
7010 		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7011 			mddev->resync_min = 0;
7012 		mddev->resync_max = MaxSector;
7013 	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7014 		mddev->resync_min = mddev->curr_resync_completed;
7015 	mddev->curr_resync = 0;
7016 	wake_up(&resync_wait);
7017 	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
7018 	md_wakeup_thread(mddev->thread);
7019 	return;
7020 
7021  interrupted:
7022 	/*
7023 	 * got a signal, exit.
7024 	 */
7025 	printk(KERN_INFO
7026 	       "md: md_do_sync() got signal ... exiting\n");
7027 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7028 	goto out;
7029 
7030 }
7031 EXPORT_SYMBOL_GPL(md_do_sync);
7032 
7033 
7034 static int remove_and_add_spares(mddev_t *mddev)
7035 {
7036 	mdk_rdev_t *rdev;
7037 	int spares = 0;
7038 
7039 	mddev->curr_resync_completed = 0;
7040 
7041 	list_for_each_entry(rdev, &mddev->disks, same_set)
7042 		if (rdev->raid_disk >= 0 &&
7043 		    !test_bit(Blocked, &rdev->flags) &&
7044 		    (test_bit(Faulty, &rdev->flags) ||
7045 		     ! test_bit(In_sync, &rdev->flags)) &&
7046 		    atomic_read(&rdev->nr_pending)==0) {
7047 			if (mddev->pers->hot_remove_disk(
7048 				    mddev, rdev->raid_disk)==0) {
7049 				char nm[20];
7050 				sprintf(nm,"rd%d", rdev->raid_disk);
7051 				sysfs_remove_link(&mddev->kobj, nm);
7052 				rdev->raid_disk = -1;
7053 			}
7054 		}
7055 
7056 	if (mddev->degraded && !mddev->recovery_disabled) {
7057 		list_for_each_entry(rdev, &mddev->disks, same_set) {
7058 			if (rdev->raid_disk >= 0 &&
7059 			    !test_bit(In_sync, &rdev->flags) &&
7060 			    !test_bit(Blocked, &rdev->flags))
7061 				spares++;
7062 			if (rdev->raid_disk < 0
7063 			    && !test_bit(Faulty, &rdev->flags)) {
7064 				rdev->recovery_offset = 0;
7065 				if (mddev->pers->
7066 				    hot_add_disk(mddev, rdev) == 0) {
7067 					char nm[20];
7068 					sprintf(nm, "rd%d", rdev->raid_disk);
7069 					if (sysfs_create_link(&mddev->kobj,
7070 							      &rdev->kobj, nm))
7071 						/* failure here is OK */;
7072 					spares++;
7073 					md_new_event(mddev);
7074 					set_bit(MD_CHANGE_DEVS, &mddev->flags);
7075 				} else
7076 					break;
7077 			}
7078 		}
7079 	}
7080 	return spares;
7081 }
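/*
 * Example of the sysfs side-effects above (device names illustrative):
 * remove_and_add_spares() maintains an "rd<slot>" symlink per active
 * member under the array's md directory, so after a spare is promoted
 * into slot 1 of md0 one would expect something like
 *
 *	/sys/block/md0/md/rd1 -> dev-sdb1
 *
 * and the link disappears again once hot_remove_disk() succeeds for
 * that slot.
 */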
7082 
7083 static void reap_sync_thread(mddev_t *mddev)
7084 {
7085 	mdk_rdev_t *rdev;
7086 
7087 	/* resync has finished, collect result */
7088 	md_unregister_thread(mddev->sync_thread);
7089 	mddev->sync_thread = NULL;
7090 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7091 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7092 		/* success...*/
7093 		/* activate any spares */
7094 		if (mddev->pers->spare_active(mddev))
7095 			sysfs_notify(&mddev->kobj, NULL,
7096 				     "degraded");
7097 	}
7098 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7099 	    mddev->pers->finish_reshape)
7100 		mddev->pers->finish_reshape(mddev);
7101 	md_update_sb(mddev, 1);
7102 
7103 	/* if the array is no longer degraded, then any saved_raid_disk
7104 	 * information must be scrapped
7105 	 */
7106 	if (!mddev->degraded)
7107 		list_for_each_entry(rdev, &mddev->disks, same_set)
7108 			rdev->saved_raid_disk = -1;
7109 
7110 	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7111 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7112 	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7113 	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
7114 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7115 	/* flag recovery as needed, just to double-check */
7116 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7117 	sysfs_notify_dirent_safe(mddev->sysfs_action);
7118 	md_new_event(mddev);
7119 }
7120 
7121 /*
7122  * This routine is regularly called by all per-raid-array threads to
7123  * deal with generic issues like resync and super-block update.
7124  * Raid personalities that don't have a thread (linear/raid0) do not
7125  * need this as they never do any recovery or update the superblock.
7126  *
7127  * It does not do any resync itself, but rather "forks" off other threads
7128  * to do that as needed.
7129  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
7130  * "->recovery" and create a thread at ->sync_thread.
7131  * When the thread finishes it sets MD_RECOVERY_DONE
7132  * and wakes up this thread, which will reap the sync thread and finish up.
7133  * This thread also removes any faulty devices (with nr_pending == 0).
7134  *
7135  * The overall approach is:
7136  *  1/ if the superblock needs updating, update it.
7137  *  2/ If a recovery thread is running, don't do anything else.
7138  *  3/ If recovery has finished, clean up, possibly marking spares active.
7139  *  4/ If there are any faulty devices, remove them.
7140  *  5/ If the array is degraded, try to add spare devices.
7141  *  6/ If array has spares or is not in-sync, start a resync thread.
7142  */
7143 void md_check_recovery(mddev_t *mddev)
7144 {
7145 	if (mddev->bitmap)
7146 		bitmap_daemon_work(mddev);
7147 
7148 	if (mddev->ro)
7149 		return;
7150 
7151 	if (signal_pending(current)) {
7152 		if (mddev->pers->sync_request && !mddev->external) {
7153 			printk(KERN_INFO "md: %s in immediate safe mode\n",
7154 			       mdname(mddev));
7155 			mddev->safemode = 2;
7156 		}
7157 		flush_signals(current);
7158 	}
7159 
7160 	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
7161 		return;
7162 	if (!(
7163 		(mddev->flags & ~(1<<MD_CHANGE_PENDING)) ||
7164 		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
7165 		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
7166 		(mddev->external == 0 && mddev->safemode == 1) ||
7167 		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
7168 		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
7169 		))
7170 		return;
7171 
7172 	if (mddev_trylock(mddev)) {
7173 		int spares = 0;
7174 
7175 		if (mddev->ro) {
7176 			/* Only thing we do on a ro array is remove
7177 			 * failed devices.
7178 			 */
7179 			mdk_rdev_t *rdev;
7180 			list_for_each_entry(rdev, &mddev->disks, same_set)
7181 				if (rdev->raid_disk >= 0 &&
7182 				    !test_bit(Blocked, &rdev->flags) &&
7183 				    test_bit(Faulty, &rdev->flags) &&
7184 				    atomic_read(&rdev->nr_pending)==0) {
7185 					if (mddev->pers->hot_remove_disk(
7186 						    mddev, rdev->raid_disk)==0) {
7187 						char nm[20];
7188 						sprintf(nm,"rd%d", rdev->raid_disk);
7189 						sysfs_remove_link(&mddev->kobj, nm);
7190 						rdev->raid_disk = -1;
7191 					}
7192 				}
7193 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7194 			goto unlock;
7195 		}
7196 
7197 		if (!mddev->external) {
7198 			int did_change = 0;
7199 			spin_lock_irq(&mddev->write_lock);
7200 			if (mddev->safemode &&
7201 			    !atomic_read(&mddev->writes_pending) &&
7202 			    !mddev->in_sync &&
7203 			    mddev->recovery_cp == MaxSector) {
7204 				mddev->in_sync = 1;
7205 				did_change = 1;
7206 				set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7207 			}
7208 			if (mddev->safemode == 1)
7209 				mddev->safemode = 0;
7210 			spin_unlock_irq(&mddev->write_lock);
7211 			if (did_change)
7212 				sysfs_notify_dirent_safe(mddev->sysfs_state);
7213 		}
7214 
7215 		if (mddev->flags)
7216 			md_update_sb(mddev, 0);
7217 
7218 		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
7219 		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
7220 			/* resync/recovery still happening */
7221 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7222 			goto unlock;
7223 		}
7224 		if (mddev->sync_thread) {
7225 			reap_sync_thread(mddev);
7226 			goto unlock;
7227 		}
7228 		/* Set RUNNING before clearing NEEDED to avoid
7229 		 * any transients in the value of "sync_action".
7230 		 */
7231 		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7232 		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7233 		/* Clear some bits that don't mean anything, but
7234 		 * might be left set
7235 		 */
7236 		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
7237 		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
7238 
7239 		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
7240 			goto unlock;
7241 		/* no recovery is running.
7242 		 * remove any failed drives, then
7243 		 * add spares if possible.
7244 		 * Spares are also removed and re-added, to allow
7245 		 * the personality to fail the re-add.
7246 		 */
7247 
7248 		if (mddev->reshape_position != MaxSector) {
7249 			if (mddev->pers->check_reshape == NULL ||
7250 			    mddev->pers->check_reshape(mddev) != 0)
7251 				/* Cannot proceed */
7252 				goto unlock;
7253 			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7254 			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7255 		} else if ((spares = remove_and_add_spares(mddev))) {
7256 			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7257 			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7258 			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
7259 			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7260 		} else if (mddev->recovery_cp < MaxSector) {
7261 			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7262 			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7263 		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
7264 			/* nothing to be done ... */
7265 			goto unlock;
7266 
7267 		if (mddev->pers->sync_request) {
7268 			if (spares && mddev->bitmap && !mddev->bitmap->file) {
7269 				/* We are adding a device or devices to an array
7270 				 * which has the bitmap stored on all devices.
7271 				 * So make sure all bitmap pages get written
7272 				 */
7273 				bitmap_write_all(mddev->bitmap);
7274 			}
7275 			mddev->sync_thread = md_register_thread(md_do_sync,
7276 								mddev,
7277 								"resync");
7278 			if (!mddev->sync_thread) {
7279 				printk(KERN_ERR "%s: could not start resync"
7280 					" thread...\n",
7281 					mdname(mddev));
7282 				/* leave the spares where they are, it shouldn't hurt */
7283 				clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7284 				clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7285 				clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7286 				clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
7287 				clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7288 			} else
7289 				md_wakeup_thread(mddev->sync_thread);
7290 			sysfs_notify_dirent_safe(mddev->sysfs_action);
7291 			md_new_event(mddev);
7292 		}
7293 	unlock:
7294 		if (!mddev->sync_thread) {
7295 			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7296 			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
7297 					       &mddev->recovery))
7298 				if (mddev->sysfs_action)
7299 					sysfs_notify_dirent_safe(mddev->sysfs_action);
7300 		}
7301 		mddev_unlock(mddev);
7302 	}
7303 }
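/*
 * Illustrative example of how work arrives here: md_error() (earlier in
 * this file) funnels device failures into this routine by doing, in
 * essence,
 *
 *	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 *	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 *	md_wakeup_thread(mddev->thread);
 *
 * so the next run of the per-array thread calls md_check_recovery(),
 * which reaps any finished sync thread and starts recovery onto a
 * spare if one is available.
 */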
7304 
7305 void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
7306 {
7307 	sysfs_notify_dirent_safe(rdev->sysfs_state);
7308 	wait_event_timeout(rdev->blocked_wait,
7309 			   !test_bit(Blocked, &rdev->flags),
7310 			   msecs_to_jiffies(5000));
7311 	rdev_dec_pending(rdev, mddev);
7312 }
7313 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
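/*
 * Usage sketch (modelled on how the raid personalities call this): the
 * caller must hold a reference on the rdev, which this function drops
 * via rdev_dec_pending().  A typical call site looks roughly like:
 *
 *	if (test_bit(Blocked, &rdev->flags)) {
 *		atomic_inc(&rdev->nr_pending);
 *		md_wait_for_blocked_rdev(rdev, mddev);
 *	}
 */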
7314 
7315 static int md_notify_reboot(struct notifier_block *this,
7316 			    unsigned long code, void *x)
7317 {
7318 	struct list_head *tmp;
7319 	mddev_t *mddev;
7320 
7321 	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
7322 
7323 		printk(KERN_INFO "md: stopping all md devices.\n");
7324 
7325 		for_each_mddev(mddev, tmp)
7326 			if (mddev_trylock(mddev)) {
7327 				/* Force a switch to readonly even if the
7328 				 * array appears to still be in use.  Hence
7329 				 * the '100'.
7330 				 */
7331 				md_set_readonly(mddev, 100);
7332 				mddev_unlock(mddev);
7333 			}
7334 		/*
7335 		 * certain more exotic SCSI devices are known to be
7336 		 * volatile wrt too-early system reboots. While the
7337 		 * right place to handle this issue is the individual
7338 		 * driver, we do want the RAID driver to be safe ...
7339 		 */
7340 		mdelay(1000*1);
7341 	}
7342 	return NOTIFY_DONE;
7343 }
7344 
7345 static struct notifier_block md_notifier = {
7346 	.notifier_call	= md_notify_reboot,
7347 	.next		= NULL,
7348 	.priority	= INT_MAX, /* before any real devices */
7349 };
7350 
7351 static void md_geninit(void)
7352 {
7353 	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
7354 
7355 	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
7356 }
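/*
 * The "mdstat" file registered above is the usual quick status check.
 * A rough example of its output for a healthy two-disk RAID1 (values
 * illustrative):
 *
 *	Personalities : [raid1]
 *	md0 : active raid1 sdb1[1] sda1[0]
 *	      1048512 blocks [2/2] [UU]
 *
 * The formatting is done by the md_seq_fops seq_file code elsewhere in
 * this file.
 */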
7357 
7358 static int __init md_init(void)
7359 {
7360 	int ret = -ENOMEM;
7361 
7362 	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
7363 	if (!md_wq)
7364 		goto err_wq;
7365 
7366 	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
7367 	if (!md_misc_wq)
7368 		goto err_misc_wq;
7369 
7370 	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
7371 		goto err_md;
7372 
7373 	if ((ret = register_blkdev(0, "mdp")) < 0)
7374 		goto err_mdp;
7375 	mdp_major = ret;
7376 
7377 	blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
7378 			    md_probe, NULL, NULL);
7379 	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
7380 			    md_probe, NULL, NULL);
7381 
7382 	register_reboot_notifier(&md_notifier);
7383 	raid_table_header = register_sysctl_table(raid_root_table);
7384 
7385 	md_geninit();
7386 	return 0;
7387 
7388 err_mdp:
7389 	unregister_blkdev(MD_MAJOR, "md");
7390 err_md:
7391 	destroy_workqueue(md_misc_wq);
7392 err_misc_wq:
7393 	destroy_workqueue(md_wq);
7394 err_wq:
7395 	return ret;
7396 }
7397 
7398 #ifndef MODULE
7399 
7400 /*
7401  * Searches all registered partitions for autorun RAID arrays
7402  * at boot time.
7403  */
7404 
7405 static LIST_HEAD(all_detected_devices);
7406 struct detected_devices_node {
7407 	struct list_head list;
7408 	dev_t dev;
7409 };
7410 
7411 void md_autodetect_dev(dev_t dev)
7412 {
7413 	struct detected_devices_node *node_detected_dev;
7414 
7415 	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
7416 	if (node_detected_dev) {
7417 		node_detected_dev->dev = dev;
7418 		list_add_tail(&node_detected_dev->list, &all_detected_devices);
7419 	} else {
7420 		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
7421 			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
7422 	}
7423 }
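/*
 * Hedged sketch of the expected caller: the partition-scanning code hands
 * over partitions carrying the Linux RAID autodetect type (0xfd), roughly
 *
 *	if (part_flags & ADDPART_FLAG_RAID)
 *		md_autodetect_dev(part_devt);
 *
 * (identifiers illustrative; see fs/partitions/check.c).  The accumulated
 * list is then consumed, once, by autostart_arrays() below.
 */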
7424 
7425 
7426 static void autostart_arrays(int part)
7427 {
7428 	mdk_rdev_t *rdev;
7429 	struct detected_devices_node *node_detected_dev;
7430 	dev_t dev;
7431 	int i_scanned, i_passed;
7432 
7433 	i_scanned = 0;
7434 	i_passed = 0;
7435 
7436 	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
7437 
7438 	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
7439 		i_scanned++;
7440 		node_detected_dev = list_entry(all_detected_devices.next,
7441 					struct detected_devices_node, list);
7442 		list_del(&node_detected_dev->list);
7443 		dev = node_detected_dev->dev;
7444 		kfree(node_detected_dev);
7445 		rdev = md_import_device(dev, 0, 90);
7446 		if (IS_ERR(rdev))
7447 			continue;
7448 
7449 		if (test_bit(Faulty, &rdev->flags)) {
7450 			MD_BUG();
7451 			continue;
7452 		}
7453 		set_bit(AutoDetected, &rdev->flags);
7454 		list_add(&rdev->same_set, &pending_raid_disks);
7455 		i_passed++;
7456 	}
7457 
7458 	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
7459 						i_scanned, i_passed);
7460 
7461 	autorun_devices(part);
7462 }
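/*
 * Note: this function runs only for built-in md (see the #ifndef MODULE
 * above); it is reached via the RAID_AUTORUN ioctl issued by the
 * early-boot code in init/do_mounts_md.c, and booting with
 * "raid=noautodetect" skips that call.  A rough sketch of the trigger
 * (paraphrased; details may differ by kernel version):
 *
 *	fd = sys_open("/dev/md0", 0, 0);
 *	if (fd >= 0) {
 *		sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
 *		sys_close(fd);
 *	}
 */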
7463 
7464 #endif /* !MODULE */
7465 
7466 static __exit void md_exit(void)
7467 {
7468 	mddev_t *mddev;
7469 	struct list_head *tmp;
7470 
7471 	blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
7472 	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
7473 
7474 	unregister_blkdev(MD_MAJOR,"md");
7475 	unregister_blkdev(mdp_major, "mdp");
7476 	unregister_reboot_notifier(&md_notifier);
7477 	unregister_sysctl_table(raid_table_header);
7478 	remove_proc_entry("mdstat", NULL);
7479 	for_each_mddev(mddev, tmp) {
7480 		export_array(mddev);
7481 		mddev->hold_active = 0;
7482 	}
7483 	destroy_workqueue(md_misc_wq);
7484 	destroy_workqueue(md_wq);
7485 }
7486 
7487 subsys_initcall(md_init);
7488 module_exit(md_exit)
7489 
7490 static int get_ro(char *buffer, struct kernel_param *kp)
7491 {
7492 	return sprintf(buffer, "%d", start_readonly);
7493 }
7494 static int set_ro(const char *val, struct kernel_param *kp)
7495 {
7496 	char *e;
7497 	int num = simple_strtoul(val, &e, 10);
7498 	if (*val && (*e == '\0' || *e == '\n')) {
7499 		start_readonly = num;
7500 		return 0;
7501 	}
7502 	return -EINVAL;
7503 }
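/*
 * Userspace view of the two handlers above (assuming md is loaded as
 * md_mod): "start_ro" appears as a writable module parameter, so e.g.
 *
 *	echo 1 > /sys/module/md_mod/parameters/start_ro
 *
 * makes newly assembled arrays come up auto-read-only until the first
 * write arrives.
 */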
7504 
7505 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
7506 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
7507 
7508 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
7509 
7510 EXPORT_SYMBOL(register_md_personality);
7511 EXPORT_SYMBOL(unregister_md_personality);
7512 EXPORT_SYMBOL(md_error);
7513 EXPORT_SYMBOL(md_done_sync);
7514 EXPORT_SYMBOL(md_write_start);
7515 EXPORT_SYMBOL(md_write_end);
7516 EXPORT_SYMBOL(md_register_thread);
7517 EXPORT_SYMBOL(md_unregister_thread);
7518 EXPORT_SYMBOL(md_wakeup_thread);
7519 EXPORT_SYMBOL(md_check_recovery);
7520 MODULE_LICENSE("GPL");
7521 MODULE_DESCRIPTION("MD RAID framework");
7522 MODULE_ALIAS("md");
7523 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
7524