1 /*
2  * raid10.c : Multiple Devices driver for Linux
3  *
4  * Copyright (C) 2000-2004 Neil Brown
5  *
6  * RAID-10 support for md.
7  *
8  * Based on code in raid1.c.  See raid1.c for further copyright information.
9  *
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  * You should have received a copy of the GNU General Public License
17  * (for example /usr/src/linux/COPYING); if not, write to the Free
18  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19  */
20 
21 #include <linux/slab.h>
22 #include <linux/delay.h>
23 #include <linux/blkdev.h>
24 #include <linux/seq_file.h>
25 #include "md.h"
26 #include "raid10.h"
27 #include "raid0.h"
28 #include "bitmap.h"
29 
30 /*
31  * RAID10 provides a combination of RAID0 and RAID1 functionality.
32  * The layout of data is defined by
33  *    chunk_size
34  *    raid_disks
35  *    near_copies (stored in low byte of layout)
36  *    far_copies (stored in second byte of layout)
37  *    far_offset (stored in bit 16 of layout )
38  *
39  * The data to be stored is divided into chunks using chunksize.
40  * Each device is divided into far_copies sections.
41  * In each section, chunks are laid out in a style similar to raid0, but
42  * near_copies copies of each chunk are stored (each on a different drive).
43  * The starting device for each section is offset near_copies from the starting
44  * device of the previous section.
45  * Thus there are (near_copies*far_copies) copies of each chunk, and each is on a different
46  * drive.
47  * near_copies and far_copies must be at least one, and their product is at most
48  * raid_disks.
49  *
50  * If far_offset is true, then the far_copies are handled a bit differently.
51  * The copies are still in different stripes, but instead of being very far
52  * apart on disk, they are in adjacent stripes.
53  */
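/*
 * Illustrative example, not part of the original comment: with raid_disks=4,
 * near_copies=2, far_copies=1 and far_offset=0, chunks are laid out as
 *
 *    device:   0   1   2   3
 *    stripe 0: A   A   B   B
 *    stripe 1: C   C   D   D
 *
 * Chunk A is mirrored on devices 0 and 1, chunk B on devices 2 and 3, and the
 * pattern continues down the devices (near_copies*far_copies == 2 copies of
 * every chunk, each on a different drive).
 */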
54 
55 /*
56  * Number of guaranteed r10bios in case of extreme VM load:
57  */
58 #define	NR_RAID10_BIOS 256
59 
60 static void allow_barrier(conf_t *conf);
61 static void lower_barrier(conf_t *conf);
62 
63 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
64 {
65 	conf_t *conf = data;
66 	int size = offsetof(struct r10bio_s, devs[conf->copies]);
67 
68 	/* allocate a r10bio with room for conf->copies entries in the devs array */
69 	return kzalloc(size, gfp_flags);
70 }
71 
72 static void r10bio_pool_free(void *r10_bio, void *data)
73 {
74 	kfree(r10_bio);
75 }
76 
77 /* Maximum size of each resync request */
78 #define RESYNC_BLOCK_SIZE (64*1024)
79 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
80 /* amount of memory to reserve for resync requests */
81 #define RESYNC_WINDOW (1024*1024)
82 /* maximum number of concurrent requests, memory permitting */
83 #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
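/* i.e. 32M / 64K == 512 concurrent resync requests with the block size above */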
84 
85 /*
86  * When performing a resync, we need to read and compare, so
87  * we need as many pages as there are copies.
88  * When performing a recovery, we need 2 bios, one for read,
89  * one for write (we recover only one drive per r10buf)
90  *
91  */
92 static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
93 {
94 	conf_t *conf = data;
95 	struct page *page;
96 	r10bio_t *r10_bio;
97 	struct bio *bio;
98 	int i, j;
99 	int nalloc;
100 
101 	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
102 	if (!r10_bio)
103 		return NULL;
104 
105 	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
106 		nalloc = conf->copies; /* resync */
107 	else
108 		nalloc = 2; /* recovery */
109 
110 	/*
111 	 * Allocate bios.
112 	 */
113 	for (j = nalloc ; j-- ; ) {
114 		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
115 		if (!bio)
116 			goto out_free_bio;
117 		r10_bio->devs[j].bio = bio;
118 	}
119 	/*
120 	 * Allocate RESYNC_PAGES data pages and attach them
121 	 * where needed.
122 	 */
123 	for (j = 0 ; j < nalloc; j++) {
124 		bio = r10_bio->devs[j].bio;
125 		for (i = 0; i < RESYNC_PAGES; i++) {
126 			page = alloc_page(gfp_flags);
127 			if (unlikely(!page))
128 				goto out_free_pages;
129 
130 			bio->bi_io_vec[i].bv_page = page;
131 		}
132 	}
133 
134 	return r10_bio;
135 
136 out_free_pages:
137 	for ( ; i > 0 ; i--)
138 		safe_put_page(bio->bi_io_vec[i-1].bv_page);
139 	while (j--)
140 		for (i = 0; i < RESYNC_PAGES ; i++)
141 			safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
142 	j = -1;
143 out_free_bio:
144 	while ( ++j < nalloc )
145 		bio_put(r10_bio->devs[j].bio);
146 	r10bio_pool_free(r10_bio, conf);
147 	return NULL;
148 }
149 
150 static void r10buf_pool_free(void *__r10_bio, void *data)
151 {
152 	int i;
153 	conf_t *conf = data;
154 	r10bio_t *r10bio = __r10_bio;
155 	int j;
156 
157 	for (j=0; j < conf->copies; j++) {
158 		struct bio *bio = r10bio->devs[j].bio;
159 		if (bio) {
160 			for (i = 0; i < RESYNC_PAGES; i++) {
161 				safe_put_page(bio->bi_io_vec[i].bv_page);
162 				bio->bi_io_vec[i].bv_page = NULL;
163 			}
164 			bio_put(bio);
165 		}
166 	}
167 	r10bio_pool_free(r10bio, conf);
168 }
169 
170 static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
171 {
172 	int i;
173 
174 	for (i = 0; i < conf->copies; i++) {
175 		struct bio **bio = & r10_bio->devs[i].bio;
176 		if (*bio && *bio != IO_BLOCKED)
177 			bio_put(*bio);
178 		*bio = NULL;
179 	}
180 }
181 
182 static void free_r10bio(r10bio_t *r10_bio)
183 {
184 	conf_t *conf = r10_bio->mddev->private;
185 
186 	/*
187 	 * Wake up any possible resync thread that waits for the device
188 	 * to go idle.
189 	 */
190 	allow_barrier(conf);
191 
192 	put_all_bios(conf, r10_bio);
193 	mempool_free(r10_bio, conf->r10bio_pool);
194 }
195 
196 static void put_buf(r10bio_t *r10_bio)
197 {
198 	conf_t *conf = r10_bio->mddev->private;
199 
200 	mempool_free(r10_bio, conf->r10buf_pool);
201 
202 	lower_barrier(conf);
203 }
204 
205 static void reschedule_retry(r10bio_t *r10_bio)
206 {
207 	unsigned long flags;
208 	mddev_t *mddev = r10_bio->mddev;
209 	conf_t *conf = mddev->private;
210 
211 	spin_lock_irqsave(&conf->device_lock, flags);
212 	list_add(&r10_bio->retry_list, &conf->retry_list);
213 	conf->nr_queued ++;
214 	spin_unlock_irqrestore(&conf->device_lock, flags);
215 
216 	/* wake up frozen array... */
217 	wake_up(&conf->wait_barrier);
218 
219 	md_wakeup_thread(mddev->thread);
220 }
221 
222 /*
223  * raid_end_bio_io() is called when we have finished servicing a mirrored
224  * operation and are ready to return a success/failure code to the buffer
225  * cache layer.
226  */
227 static void raid_end_bio_io(r10bio_t *r10_bio)
228 {
229 	struct bio *bio = r10_bio->master_bio;
230 
231 	bio_endio(bio,
232 		test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
233 	free_r10bio(r10_bio);
234 }
235 
236 /*
237  * Update disk head position estimator based on IRQ completion info.
238  */
239 static inline void update_head_pos(int slot, r10bio_t *r10_bio)
240 {
241 	conf_t *conf = r10_bio->mddev->private;
242 
243 	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
244 		r10_bio->devs[slot].addr + (r10_bio->sectors);
245 }
246 
247 static void raid10_end_read_request(struct bio *bio, int error)
248 {
249 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
250 	r10bio_t *r10_bio = bio->bi_private;
251 	int slot, dev;
252 	conf_t *conf = r10_bio->mddev->private;
253 
254 
255 	slot = r10_bio->read_slot;
256 	dev = r10_bio->devs[slot].devnum;
257 	/*
258 	 * this branch is our 'one mirror IO has finished' event handler:
259 	 */
260 	update_head_pos(slot, r10_bio);
261 
262 	if (uptodate) {
263 		/*
264 		 * Set R10BIO_Uptodate in our master bio, so that
265 		 * we will return a good error code to the higher
266 		 * levels even if IO on some other mirrored buffer fails.
267 		 *
268 		 * The 'master' represents the composite IO operation to
269 		 * user-side. So if something waits for IO, then it will
270 		 * wait for the 'master' bio.
271 		 */
272 		set_bit(R10BIO_Uptodate, &r10_bio->state);
273 		raid_end_bio_io(r10_bio);
274 	} else {
275 		/*
276 		 * oops, read error:
277 		 */
278 		char b[BDEVNAME_SIZE];
279 		if (printk_ratelimit())
280 			printk(KERN_ERR "md/raid10:%s: %s: rescheduling sector %llu\n",
281 			       mdname(conf->mddev),
282 			       bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector);
283 		reschedule_retry(r10_bio);
284 	}
285 
286 	rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
287 }
288 
289 static void raid10_end_write_request(struct bio *bio, int error)
290 {
291 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
292 	r10bio_t *r10_bio = bio->bi_private;
293 	int slot, dev;
294 	conf_t *conf = r10_bio->mddev->private;
295 
296 	for (slot = 0; slot < conf->copies; slot++)
297 		if (r10_bio->devs[slot].bio == bio)
298 			break;
299 	dev = r10_bio->devs[slot].devnum;
300 
301 	/*
302 	 * this branch is our 'one mirror IO has finished' event handler:
303 	 */
304 	if (!uptodate) {
305 		md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
306 		/* an I/O failed, we can't clear the bitmap */
307 		set_bit(R10BIO_Degraded, &r10_bio->state);
308 	} else
309 		/*
310 		 * Set R10BIO_Uptodate in our master bio, so that
311 		 * we will return a good error code to the higher
312 		 * levels even if IO on some other mirrored buffer fails.
313 		 *
314 		 * The 'master' represents the composite IO operation to
315 		 * user-side. So if something waits for IO, then it will
316 		 * wait for the 'master' bio.
317 		 */
318 		set_bit(R10BIO_Uptodate, &r10_bio->state);
319 
320 	update_head_pos(slot, r10_bio);
321 
322 	/*
323 	 *
324 	 * Let's see if all mirrored write operations have finished
325 	 * already.
326 	 */
327 	if (atomic_dec_and_test(&r10_bio->remaining)) {
328 		/* clear the bitmap if all writes complete successfully */
329 		bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
330 				r10_bio->sectors,
331 				!test_bit(R10BIO_Degraded, &r10_bio->state),
332 				0);
333 		md_write_end(r10_bio->mddev);
334 		raid_end_bio_io(r10_bio);
335 	}
336 
337 	rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
338 }
339 
340 
341 /*
342  * RAID10 layout manager
343  * As well as the chunksize and raid_disks count, there are two
344  * parameters: near_copies and far_copies.
345  * near_copies * far_copies must be <= raid_disks.
346  * Normally one of these will be 1.
347  * If both are 1, we get raid0.
348  * If near_copies == raid_disks, we get raid1.
349  *
350  * Chunks are laid out in raid0 style with near_copies copies of the
351  * first chunk, followed by near_copies copies of the next chunk and
352  * so on.
353  * If far_copies > 1, then after 1/far_copies of the array has been assigned
354  * as described above, we start again with a device offset of near_copies.
355  * So we effectively have another copy of the whole array further down all
356  * the drives, but with blocks on different drives.
357  * With this layout, a block is never stored twice on the same device.
358  *
359  * raid10_find_phys finds the sector offset of a given virtual sector
360  * on each device that it is on.
361  *
362  * raid10_find_virt does the reverse mapping, from a device and a
363  * sector offset to a virtual address
364  */
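/*
 * Illustrative example, not part of the original comment: with raid_disks=4,
 * near_copies=1 and far_copies=2 (far_offset=0), the first section of each
 * device holds a plain raid0 layout and the second section repeats it shifted
 * by near_copies (one) device:
 *
 *    device:          0   1   2   3
 *    first section:   A   B   C   D
 *    second section:  D   A   B   C
 *
 * Each chunk therefore still exists on two different devices, as
 * raid10_find_phys() below computes.
 */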
365 
366 static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
367 {
368 	int n,f;
369 	sector_t sector;
370 	sector_t chunk;
371 	sector_t stripe;
372 	int dev;
373 
374 	int slot = 0;
375 
376 	/* now calculate first sector/dev */
377 	chunk = r10bio->sector >> conf->chunk_shift;
378 	sector = r10bio->sector & conf->chunk_mask;
379 
380 	chunk *= conf->near_copies;
381 	stripe = chunk;
382 	dev = sector_div(stripe, conf->raid_disks);
383 	if (conf->far_offset)
384 		stripe *= conf->far_copies;
385 
386 	sector += stripe << conf->chunk_shift;
387 
388 	/* and calculate all the others */
389 	for (n=0; n < conf->near_copies; n++) {
390 		int d = dev;
391 		sector_t s = sector;
392 		r10bio->devs[slot].addr = sector;
393 		r10bio->devs[slot].devnum = d;
394 		slot++;
395 
396 		for (f = 1; f < conf->far_copies; f++) {
397 			d += conf->near_copies;
398 			if (d >= conf->raid_disks)
399 				d -= conf->raid_disks;
400 			s += conf->stride;
401 			r10bio->devs[slot].devnum = d;
402 			r10bio->devs[slot].addr = s;
403 			slot++;
404 		}
405 		dev++;
406 		if (dev >= conf->raid_disks) {
407 			dev = 0;
408 			sector += (conf->chunk_mask + 1);
409 		}
410 	}
411 	BUG_ON(slot != conf->copies);
412 }
413 
414 static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
415 {
416 	sector_t offset, chunk, vchunk;
417 
418 	offset = sector & conf->chunk_mask;
419 	if (conf->far_offset) {
420 		int fc;
421 		chunk = sector >> conf->chunk_shift;
422 		fc = sector_div(chunk, conf->far_copies);
423 		dev -= fc * conf->near_copies;
424 		if (dev < 0)
425 			dev += conf->raid_disks;
426 	} else {
427 		while (sector >= conf->stride) {
428 			sector -= conf->stride;
429 			if (dev < conf->near_copies)
430 				dev += conf->raid_disks - conf->near_copies;
431 			else
432 				dev -= conf->near_copies;
433 		}
434 		chunk = sector >> conf->chunk_shift;
435 	}
436 	vchunk = chunk * conf->raid_disks + dev;
437 	sector_div(vchunk, conf->near_copies);
438 	return (vchunk << conf->chunk_shift) + offset;
439 }
440 
441 /**
442  *	raid10_mergeable_bvec -- tell bio layer if two requests can be merged
443  *	@q: request queue
444  *	@bvm: properties of new bio
445  *	@biovec: the request that could be merged to it.
446  *
447  *	Return amount of bytes we can accept at this offset
448  *      If near_copies == raid_disks, there are no striping issues,
449  *      but in that case, the function isn't called at all.
450  */
451 static int raid10_mergeable_bvec(struct request_queue *q,
452 				 struct bvec_merge_data *bvm,
453 				 struct bio_vec *biovec)
454 {
455 	mddev_t *mddev = q->queuedata;
456 	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
457 	int max;
458 	unsigned int chunk_sectors = mddev->chunk_sectors;
459 	unsigned int bio_sectors = bvm->bi_size >> 9;
460 
461 	max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
462 	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
463 	if (max <= biovec->bv_len && bio_sectors == 0)
464 		return biovec->bv_len;
465 	else
466 		return max;
467 }
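/*
 * Worked example for raid10_mergeable_bvec() above (illustrative numbers
 * only): with 64K chunks (chunk_sectors == 128), a bio that starts 120
 * sectors into a chunk and already carries 4 sectors can accept at most
 * (128 - (120 + 4)) << 9 == 2048 more bytes before crossing the chunk
 * boundary.
 */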
468 
469 /*
470  * This routine returns the disk from which the requested read should
471  * be done. There is a per-array 'next expected sequential IO' sector
472  * number - if this matches on the next IO then we use the last disk.
473  * There is also a per-disk 'last known head position' sector that is
474  * maintained from IRQ contexts, both the normal and the resync IO
475  * completion handlers update this position correctly. If there is no
476  * perfect sequential match then we pick the disk whose head is closest.
477  *
478  * If there are 2 mirrors in the same 2 devices, performance degrades
479  * because position is mirror, not device based.
480  *
481  * The rdev for the device selected will have nr_pending incremented.
482  */
483 
484 /*
485  * FIXME: possibly should rethink readbalancing and do it differently
486  * depending on near_copies / far_copies geometry.
487  */
488 static int read_balance(conf_t *conf, r10bio_t *r10_bio)
489 {
490 	const sector_t this_sector = r10_bio->sector;
491 	int disk, slot, nslot;
492 	const int sectors = r10_bio->sectors;
493 	sector_t new_distance, current_distance;
494 	mdk_rdev_t *rdev;
495 
496 	raid10_find_phys(conf, r10_bio);
497 	rcu_read_lock();
498 	/*
499 	 * Check if we can balance. We can balance on the whole
500 	 * device if no resync is going on (recovery is ok), or below
501 	 * the resync window. We take the first readable disk when
502 	 * above the resync window.
503 	 */
504 	if (conf->mddev->recovery_cp < MaxSector
505 	    && (this_sector + sectors >= conf->next_resync)) {
506 		/* make sure that disk is operational */
507 		slot = 0;
508 		disk = r10_bio->devs[slot].devnum;
509 
510 		while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
511 		       r10_bio->devs[slot].bio == IO_BLOCKED ||
512 		       !test_bit(In_sync, &rdev->flags)) {
513 			slot++;
514 			if (slot == conf->copies) {
515 				slot = 0;
516 				disk = -1;
517 				break;
518 			}
519 			disk = r10_bio->devs[slot].devnum;
520 		}
521 		goto rb_out;
522 	}
523 
524 
525 	/* make sure the disk is operational */
526 	slot = 0;
527 	disk = r10_bio->devs[slot].devnum;
528 	while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
529 	       r10_bio->devs[slot].bio == IO_BLOCKED ||
530 	       !test_bit(In_sync, &rdev->flags)) {
531 		slot ++;
532 		if (slot == conf->copies) {
533 			disk = -1;
534 			goto rb_out;
535 		}
536 		disk = r10_bio->devs[slot].devnum;
537 	}
538 
539 
540 	current_distance = abs(r10_bio->devs[slot].addr -
541 			       conf->mirrors[disk].head_position);
542 
543 	/* Find the disk whose head is closest,
544 	 * or - for far > 1 - find the closest to partition beginning */
545 
546 	for (nslot = slot; nslot < conf->copies; nslot++) {
547 		int ndisk = r10_bio->devs[nslot].devnum;
548 
549 
550 		if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
551 		    r10_bio->devs[nslot].bio == IO_BLOCKED ||
552 		    !test_bit(In_sync, &rdev->flags))
553 			continue;
554 
555 		/* This optimisation is debatable, and completely destroys
556 		 * sequential read speed for 'far copies' arrays.  So only
557 		 * keep it for 'near' arrays, and review those later.
558 		 */
559 		if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending)) {
560 			disk = ndisk;
561 			slot = nslot;
562 			break;
563 		}
564 
565 		/* for far > 1 always use the lowest address */
566 		if (conf->far_copies > 1)
567 			new_distance = r10_bio->devs[nslot].addr;
568 		else
569 			new_distance = abs(r10_bio->devs[nslot].addr -
570 					   conf->mirrors[ndisk].head_position);
571 		if (new_distance < current_distance) {
572 			current_distance = new_distance;
573 			disk = ndisk;
574 			slot = nslot;
575 		}
576 	}
577 
578 rb_out:
579 	r10_bio->read_slot = slot;
580 /*	conf->next_seq_sect = this_sector + sectors;*/
581 
582 	if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL)
583 		atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
584 	else
585 		disk = -1;
586 	rcu_read_unlock();
587 
588 	return disk;
589 }
590 
591 static int raid10_congested(void *data, int bits)
592 {
593 	mddev_t *mddev = data;
594 	conf_t *conf = mddev->private;
595 	int i, ret = 0;
596 
597 	if (mddev_congested(mddev, bits))
598 		return 1;
599 	rcu_read_lock();
600 	for (i = 0; i < conf->raid_disks && ret == 0; i++) {
601 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
602 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
603 			struct request_queue *q = bdev_get_queue(rdev->bdev);
604 
605 			ret |= bdi_congested(&q->backing_dev_info, bits);
606 		}
607 	}
608 	rcu_read_unlock();
609 	return ret;
610 }
611 
612 static void flush_pending_writes(conf_t *conf)
613 {
614 	/* Any writes that have been queued but are awaiting
615 	 * bitmap updates get flushed here.
616 	 */
617 	spin_lock_irq(&conf->device_lock);
618 
619 	if (conf->pending_bio_list.head) {
620 		struct bio *bio;
621 		bio = bio_list_get(&conf->pending_bio_list);
622 		spin_unlock_irq(&conf->device_lock);
623 		/* flush any pending bitmap writes to disk
624 		 * before proceeding w/ I/O */
625 		bitmap_unplug(conf->mddev->bitmap);
626 
627 		while (bio) { /* submit pending writes */
628 			struct bio *next = bio->bi_next;
629 			bio->bi_next = NULL;
630 			generic_make_request(bio);
631 			bio = next;
632 		}
633 	} else
634 		spin_unlock_irq(&conf->device_lock);
635 }
636 
637 /* Barriers....
638  * Sometimes we need to suspend IO while we do something else,
639  * either some resync/recovery, or reconfigure the array.
640  * To do this we raise a 'barrier'.
641  * The 'barrier' is a counter that can be raised multiple times
642  * to count how many activities are happening which preclude
643  * normal IO.
644  * We can only raise the barrier if there is no pending IO.
645  * i.e. if nr_pending == 0.
646  * We choose only to raise the barrier if no-one is waiting for the
647  * barrier to go down.  This means that as soon as an IO request
648  * is ready, no other operations which require a barrier will start
649  * until the IO request has had a chance.
650  *
651  * So: regular IO calls 'wait_barrier'.  When that returns there
652  *    is no background IO happening.  It must arrange to call
653  *    allow_barrier when it has finished its IO.
654  * background IO calls must call raise_barrier.  Once that returns
655  *    there is no normal IO happening.  It must arrange to call
656  *    lower_barrier when the particular background IO completes.
657  */
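/*
 * Sketch of the resulting call pattern (illustration only; the helpers below
 * do the real locking and accounting):
 *
 *    regular IO path                    resync/recovery path
 *      wait_barrier(conf);                raise_barrier(conf, force);
 *      ...submit the IO...                ...do the background IO...
 *      allow_barrier(conf);               lower_barrier(conf);
 *        (when the IO completes)            (when the background IO completes)
 */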
658 
659 static void raise_barrier(conf_t *conf, int force)
660 {
661 	BUG_ON(force && !conf->barrier);
662 	spin_lock_irq(&conf->resync_lock);
663 
664 	/* Wait until no block IO is waiting (unless 'force') */
665 	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
666 			    conf->resync_lock, );
667 
668 	/* block any new IO from starting */
669 	conf->barrier++;
670 
671 	/* Now wait for all pending IO to complete */
672 	wait_event_lock_irq(conf->wait_barrier,
673 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
674 			    conf->resync_lock, );
675 
676 	spin_unlock_irq(&conf->resync_lock);
677 }
678 
679 static void lower_barrier(conf_t *conf)
680 {
681 	unsigned long flags;
682 	spin_lock_irqsave(&conf->resync_lock, flags);
683 	conf->barrier--;
684 	spin_unlock_irqrestore(&conf->resync_lock, flags);
685 	wake_up(&conf->wait_barrier);
686 }
687 
688 static void wait_barrier(conf_t *conf)
689 {
690 	spin_lock_irq(&conf->resync_lock);
691 	if (conf->barrier) {
692 		conf->nr_waiting++;
693 		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
694 				    conf->resync_lock,
695 				    );
696 		conf->nr_waiting--;
697 	}
698 	conf->nr_pending++;
699 	spin_unlock_irq(&conf->resync_lock);
700 }
701 
702 static void allow_barrier(conf_t *conf)
703 {
704 	unsigned long flags;
705 	spin_lock_irqsave(&conf->resync_lock, flags);
706 	conf->nr_pending--;
707 	spin_unlock_irqrestore(&conf->resync_lock, flags);
708 	wake_up(&conf->wait_barrier);
709 }
710 
711 static void freeze_array(conf_t *conf)
712 {
713 	/* stop syncio and normal IO and wait for everything to
714 	 * go quiet.
715 	 * We increment barrier and nr_waiting, and then
716 	 * wait until nr_pending matches nr_queued+1
717 	 * This is called in the context of one normal IO request
718 	 * that has failed. Thus any sync request that might be pending
719 	 * will be blocked by nr_pending, and we need to wait for
720 	 * pending IO requests to complete or be queued for re-try.
721 	 * Thus the number queued (nr_queued) plus this request (1)
722 	 * must match the number of pending IOs (nr_pending) before
723 	 * we continue.
724 	 */
725 	spin_lock_irq(&conf->resync_lock);
726 	conf->barrier++;
727 	conf->nr_waiting++;
728 	wait_event_lock_irq(conf->wait_barrier,
729 			    conf->nr_pending == conf->nr_queued+1,
730 			    conf->resync_lock,
731 			    flush_pending_writes(conf));
732 
733 	spin_unlock_irq(&conf->resync_lock);
734 }
735 
736 static void unfreeze_array(conf_t *conf)
737 {
738 	/* reverse the effect of the freeze */
739 	spin_lock_irq(&conf->resync_lock);
740 	conf->barrier--;
741 	conf->nr_waiting--;
742 	wake_up(&conf->wait_barrier);
743 	spin_unlock_irq(&conf->resync_lock);
744 }
745 
746 static int make_request(mddev_t *mddev, struct bio * bio)
747 {
748 	conf_t *conf = mddev->private;
749 	mirror_info_t *mirror;
750 	r10bio_t *r10_bio;
751 	struct bio *read_bio;
752 	int i;
753 	int chunk_sects = conf->chunk_mask + 1;
754 	const int rw = bio_data_dir(bio);
755 	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
756 	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
757 	unsigned long flags;
758 	mdk_rdev_t *blocked_rdev;
759 	int plugged;
760 
761 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
762 		md_flush_request(mddev, bio);
763 		return 0;
764 	}
765 
766 	/* If this request crosses a chunk boundary, we need to
767 	 * split it.  This will only happen for 1 PAGE (or less) requests.
768 	 */
769 	if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
770 		      > chunk_sects &&
771 		    conf->near_copies < conf->raid_disks)) {
772 		struct bio_pair *bp;
773 		/* Sanity check -- queue functions should prevent this happening */
774 		if (bio->bi_vcnt != 1 ||
775 		    bio->bi_idx != 0)
776 			goto bad_map;
777 		/* This is a one page bio that upper layers
778 		 * refuse to split for us, so we need to split it.
779 		 */
780 		bp = bio_split(bio,
781 			       chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
782 
783 		/* Each of these 'make_request' calls will call 'wait_barrier'.
784 		 * If the first succeeds but the second blocks due to the resync
785 		 * thread raising the barrier, we will deadlock because the
786 		 * IO to the underlying device will be queued in generic_make_request
787 		 * and will never complete, so will never reduce nr_pending.
788 		 * So increment nr_waiting here so no new raise_barriers will
789 		 * succeed, and so the second wait_barrier cannot block.
790 		 */
791 		spin_lock_irq(&conf->resync_lock);
792 		conf->nr_waiting++;
793 		spin_unlock_irq(&conf->resync_lock);
794 
795 		if (make_request(mddev, &bp->bio1))
796 			generic_make_request(&bp->bio1);
797 		if (make_request(mddev, &bp->bio2))
798 			generic_make_request(&bp->bio2);
799 
800 		spin_lock_irq(&conf->resync_lock);
801 		conf->nr_waiting--;
802 		wake_up(&conf->wait_barrier);
803 		spin_unlock_irq(&conf->resync_lock);
804 
805 		bio_pair_release(bp);
806 		return 0;
807 	bad_map:
808 		printk("md/raid10:%s: make_request bug: can't convert block across chunks"
809 		       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
810 		       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
811 
812 		bio_io_error(bio);
813 		return 0;
814 	}
815 
816 	md_write_start(mddev, bio);
817 
818 	/*
819 	 * Register the new request and wait if the reconstruction
820 	 * thread has put up a bar for new requests.
821 	 * Continue immediately if no resync is active currently.
822 	 */
823 	wait_barrier(conf);
824 
825 	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
826 
827 	r10_bio->master_bio = bio;
828 	r10_bio->sectors = bio->bi_size >> 9;
829 
830 	r10_bio->mddev = mddev;
831 	r10_bio->sector = bio->bi_sector;
832 	r10_bio->state = 0;
833 
834 	if (rw == READ) {
835 		/*
836 		 * read balancing logic:
837 		 */
838 		int disk = read_balance(conf, r10_bio);
839 		int slot = r10_bio->read_slot;
840 		if (disk < 0) {
841 			raid_end_bio_io(r10_bio);
842 			return 0;
843 		}
844 		mirror = conf->mirrors + disk;
845 
846 		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
847 
848 		r10_bio->devs[slot].bio = read_bio;
849 
850 		read_bio->bi_sector = r10_bio->devs[slot].addr +
851 			mirror->rdev->data_offset;
852 		read_bio->bi_bdev = mirror->rdev->bdev;
853 		read_bio->bi_end_io = raid10_end_read_request;
854 		read_bio->bi_rw = READ | do_sync;
855 		read_bio->bi_private = r10_bio;
856 
857 		generic_make_request(read_bio);
858 		return 0;
859 	}
860 
861 	/*
862 	 * WRITE:
863 	 */
864 	/* first select target devices under rcu_lock and
865 	 * inc refcount on their rdev.  Record them by setting
866 	 * bios[x] to bio
867 	 */
868 	plugged = mddev_check_plugged(mddev);
869 
870 	raid10_find_phys(conf, r10_bio);
871  retry_write:
872 	blocked_rdev = NULL;
873 	rcu_read_lock();
874 	for (i = 0;  i < conf->copies; i++) {
875 		int d = r10_bio->devs[i].devnum;
876 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
877 		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
878 			atomic_inc(&rdev->nr_pending);
879 			blocked_rdev = rdev;
880 			break;
881 		}
882 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
883 			atomic_inc(&rdev->nr_pending);
884 			r10_bio->devs[i].bio = bio;
885 		} else {
886 			r10_bio->devs[i].bio = NULL;
887 			set_bit(R10BIO_Degraded, &r10_bio->state);
888 		}
889 	}
890 	rcu_read_unlock();
891 
892 	if (unlikely(blocked_rdev)) {
893 		/* Have to wait for this device to get unblocked, then retry */
894 		int j;
895 		int d;
896 
897 		for (j = 0; j < i; j++)
898 			if (r10_bio->devs[j].bio) {
899 				d = r10_bio->devs[j].devnum;
900 				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
901 			}
902 		allow_barrier(conf);
903 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
904 		wait_barrier(conf);
905 		goto retry_write;
906 	}
907 
908 	atomic_set(&r10_bio->remaining, 1);
909 	bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
910 
911 	for (i = 0; i < conf->copies; i++) {
912 		struct bio *mbio;
913 		int d = r10_bio->devs[i].devnum;
914 		if (!r10_bio->devs[i].bio)
915 			continue;
916 
917 		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
918 		r10_bio->devs[i].bio = mbio;
919 
920 		mbio->bi_sector	= r10_bio->devs[i].addr+
921 			conf->mirrors[d].rdev->data_offset;
922 		mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
923 		mbio->bi_end_io	= raid10_end_write_request;
924 		mbio->bi_rw = WRITE | do_sync | do_fua;
925 		mbio->bi_private = r10_bio;
926 
927 		atomic_inc(&r10_bio->remaining);
928 		spin_lock_irqsave(&conf->device_lock, flags);
929 		bio_list_add(&conf->pending_bio_list, mbio);
930 		spin_unlock_irqrestore(&conf->device_lock, flags);
931 	}
932 
933 	if (atomic_dec_and_test(&r10_bio->remaining)) {
934 		/* This matches the end of raid10_end_write_request() */
935 		bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
936 				r10_bio->sectors,
937 				!test_bit(R10BIO_Degraded, &r10_bio->state),
938 				0);
939 		md_write_end(mddev);
940 		raid_end_bio_io(r10_bio);
941 	}
942 
943 	/* In case raid10d snuck in to freeze_array */
944 	wake_up(&conf->wait_barrier);
945 
946 	if (do_sync || !mddev->bitmap || !plugged)
947 		md_wakeup_thread(mddev->thread);
948 	return 0;
949 }
950 
951 static void status(struct seq_file *seq, mddev_t *mddev)
952 {
953 	conf_t *conf = mddev->private;
954 	int i;
955 
956 	if (conf->near_copies < conf->raid_disks)
957 		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
958 	if (conf->near_copies > 1)
959 		seq_printf(seq, " %d near-copies", conf->near_copies);
960 	if (conf->far_copies > 1) {
961 		if (conf->far_offset)
962 			seq_printf(seq, " %d offset-copies", conf->far_copies);
963 		else
964 			seq_printf(seq, " %d far-copies", conf->far_copies);
965 	}
966 	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
967 					conf->raid_disks - mddev->degraded);
968 	for (i = 0; i < conf->raid_disks; i++)
969 		seq_printf(seq, "%s",
970 			      conf->mirrors[i].rdev &&
971 			      test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
972 	seq_printf(seq, "]");
973 }
974 
975 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
976 {
977 	char b[BDEVNAME_SIZE];
978 	conf_t *conf = mddev->private;
979 
980 	/*
981 	 * If it is not operational, then we have already marked it as dead
982 	 * else if it is the last working disk, ignore the error, let the
983 	 * next level up know.
984 	 * else mark the drive as failed
985 	 */
986 	if (test_bit(In_sync, &rdev->flags)
987 	    && conf->raid_disks-mddev->degraded == 1)
988 		/*
989 		 * Don't fail the drive, just return an IO error.
990 		 * The test should really be more sophisticated than
991 		 * "working_disks == 1", but it isn't critical, and
992 		 * can wait until we do more sophisticated "is the drive
993 		 * really dead" tests...
994 		 */
995 		return;
996 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
997 		unsigned long flags;
998 		spin_lock_irqsave(&conf->device_lock, flags);
999 		mddev->degraded++;
1000 		spin_unlock_irqrestore(&conf->device_lock, flags);
1001 		/*
1002 		 * if recovery is running, make sure it aborts.
1003 		 */
1004 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1005 	}
1006 	set_bit(Faulty, &rdev->flags);
1007 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
1008 	printk(KERN_ALERT
1009 	       "md/raid10:%s: Disk failure on %s, disabling device.\n"
1010 	       "md/raid10:%s: Operation continuing on %d devices.\n",
1011 	       mdname(mddev), bdevname(rdev->bdev, b),
1012 	       mdname(mddev), conf->raid_disks - mddev->degraded);
1013 }
1014 
1015 static void print_conf(conf_t *conf)
1016 {
1017 	int i;
1018 	mirror_info_t *tmp;
1019 
1020 	printk(KERN_DEBUG "RAID10 conf printout:\n");
1021 	if (!conf) {
1022 		printk(KERN_DEBUG "(!conf)\n");
1023 		return;
1024 	}
1025 	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1026 		conf->raid_disks);
1027 
1028 	for (i = 0; i < conf->raid_disks; i++) {
1029 		char b[BDEVNAME_SIZE];
1030 		tmp = conf->mirrors + i;
1031 		if (tmp->rdev)
1032 			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1033 				i, !test_bit(In_sync, &tmp->rdev->flags),
1034 			        !test_bit(Faulty, &tmp->rdev->flags),
1035 				bdevname(tmp->rdev->bdev,b));
1036 	}
1037 }
1038 
1039 static void close_sync(conf_t *conf)
1040 {
1041 	wait_barrier(conf);
1042 	allow_barrier(conf);
1043 
1044 	mempool_destroy(conf->r10buf_pool);
1045 	conf->r10buf_pool = NULL;
1046 }
1047 
1048 /* check if there are enough drives for
1049  * every block to appear on at least one
1050  */
1051 static int enough(conf_t *conf)
1052 {
1053 	int first = 0;
1054 
1055 	do {
1056 		int n = conf->copies;
1057 		int cnt = 0;
1058 		while (n--) {
1059 			if (conf->mirrors[first].rdev)
1060 				cnt++;
1061 			first = (first+1) % conf->raid_disks;
1062 		}
1063 		if (cnt == 0)
1064 			return 0;
1065 	} while (first != 0);
1066 	return 1;
1067 }
1068 
1069 static int raid10_spare_active(mddev_t *mddev)
1070 {
1071 	int i;
1072 	conf_t *conf = mddev->private;
1073 	mirror_info_t *tmp;
1074 	int count = 0;
1075 	unsigned long flags;
1076 
1077 	/*
1078 	 * Find all non-in_sync disks within the RAID10 configuration
1079 	 * and mark them in_sync
1080 	 */
1081 	for (i = 0; i < conf->raid_disks; i++) {
1082 		tmp = conf->mirrors + i;
1083 		if (tmp->rdev
1084 		    && !test_bit(Faulty, &tmp->rdev->flags)
1085 		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1086 			count++;
1087 			sysfs_notify_dirent(tmp->rdev->sysfs_state);
1088 		}
1089 	}
1090 	spin_lock_irqsave(&conf->device_lock, flags);
1091 	mddev->degraded -= count;
1092 	spin_unlock_irqrestore(&conf->device_lock, flags);
1093 
1094 	print_conf(conf);
1095 	return count;
1096 }
1097 
1098 
1099 static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1100 {
1101 	conf_t *conf = mddev->private;
1102 	int err = -EEXIST;
1103 	int mirror;
1104 	mirror_info_t *p;
1105 	int first = 0;
1106 	int last = conf->raid_disks - 1;
1107 
1108 	if (mddev->recovery_cp < MaxSector)
1109 		/* only hot-add to in-sync arrays, as recovery is
1110 		 * very different from resync
1111 		 */
1112 		return -EBUSY;
1113 	if (!enough(conf))
1114 		return -EINVAL;
1115 
1116 	if (rdev->raid_disk >= 0)
1117 		first = last = rdev->raid_disk;
1118 
1119 	if (rdev->saved_raid_disk >= 0 &&
1120 	    rdev->saved_raid_disk >= first &&
1121 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1122 		mirror = rdev->saved_raid_disk;
1123 	else
1124 		mirror = first;
1125 	for ( ; mirror <= last ; mirror++)
1126 		if ( !(p=conf->mirrors+mirror)->rdev) {
1127 
1128 			disk_stack_limits(mddev->gendisk, rdev->bdev,
1129 					  rdev->data_offset << 9);
1130 			/* as we don't honour merge_bvec_fn, we must
1131 			 * never risk violating it, so limit
1132 			 * ->max_segments to one lying within a single
1133 			 * page, as a one page request is never in
1134 			 * violation.
1135 			 */
1136 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
1137 				blk_queue_max_segments(mddev->queue, 1);
1138 				blk_queue_segment_boundary(mddev->queue,
1139 							   PAGE_CACHE_SIZE - 1);
1140 			}
1141 
1142 			p->head_position = 0;
1143 			rdev->raid_disk = mirror;
1144 			err = 0;
1145 			if (rdev->saved_raid_disk != mirror)
1146 				conf->fullsync = 1;
1147 			rcu_assign_pointer(p->rdev, rdev);
1148 			break;
1149 		}
1150 
1151 	md_integrity_add_rdev(rdev, mddev);
1152 	print_conf(conf);
1153 	return err;
1154 }
1155 
1156 static int raid10_remove_disk(mddev_t *mddev, int number)
1157 {
1158 	conf_t *conf = mddev->private;
1159 	int err = 0;
1160 	mdk_rdev_t *rdev;
1161 	mirror_info_t *p = conf->mirrors+ number;
1162 
1163 	print_conf(conf);
1164 	rdev = p->rdev;
1165 	if (rdev) {
1166 		if (test_bit(In_sync, &rdev->flags) ||
1167 		    atomic_read(&rdev->nr_pending)) {
1168 			err = -EBUSY;
1169 			goto abort;
1170 		}
1171 		/* Only remove faulty devices if recovery
1172 		 * is not possible.
1173 		 */
1174 		if (!test_bit(Faulty, &rdev->flags) &&
1175 		    enough(conf)) {
1176 			err = -EBUSY;
1177 			goto abort;
1178 		}
1179 		p->rdev = NULL;
1180 		synchronize_rcu();
1181 		if (atomic_read(&rdev->nr_pending)) {
1182 			/* lost the race, try later */
1183 			err = -EBUSY;
1184 			p->rdev = rdev;
1185 			goto abort;
1186 		}
1187 		err = md_integrity_register(mddev);
1188 	}
1189 abort:
1190 
1191 	print_conf(conf);
1192 	return err;
1193 }
1194 
1195 
1196 static void end_sync_read(struct bio *bio, int error)
1197 {
1198 	r10bio_t *r10_bio = bio->bi_private;
1199 	conf_t *conf = r10_bio->mddev->private;
1200 	int i,d;
1201 
1202 	for (i=0; i<conf->copies; i++)
1203 		if (r10_bio->devs[i].bio == bio)
1204 			break;
1205 	BUG_ON(i == conf->copies);
1206 	update_head_pos(i, r10_bio);
1207 	d = r10_bio->devs[i].devnum;
1208 
1209 	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1210 		set_bit(R10BIO_Uptodate, &r10_bio->state);
1211 	else {
1212 		atomic_add(r10_bio->sectors,
1213 			   &conf->mirrors[d].rdev->corrected_errors);
1214 		if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
1215 			md_error(r10_bio->mddev,
1216 				 conf->mirrors[d].rdev);
1217 	}
1218 
1219 	/* for reconstruct, we always reschedule after a read.
1220 	 * for resync, only after all reads
1221 	 */
1222 	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1223 	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1224 	    atomic_dec_and_test(&r10_bio->remaining)) {
1225 		/* we have read all the blocks,
1226 		 * do the comparison in process context in raid10d
1227 		 */
1228 		reschedule_retry(r10_bio);
1229 	}
1230 }
1231 
1232 static void end_sync_write(struct bio *bio, int error)
1233 {
1234 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1235 	r10bio_t *r10_bio = bio->bi_private;
1236 	mddev_t *mddev = r10_bio->mddev;
1237 	conf_t *conf = mddev->private;
1238 	int i,d;
1239 
1240 	for (i = 0; i < conf->copies; i++)
1241 		if (r10_bio->devs[i].bio == bio)
1242 			break;
1243 	d = r10_bio->devs[i].devnum;
1244 
1245 	if (!uptodate)
1246 		md_error(mddev, conf->mirrors[d].rdev);
1247 
1248 	update_head_pos(i, r10_bio);
1249 
1250 	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1251 	while (atomic_dec_and_test(&r10_bio->remaining)) {
1252 		if (r10_bio->master_bio == NULL) {
1253 			/* the primary of several recovery bios */
1254 			sector_t s = r10_bio->sectors;
1255 			put_buf(r10_bio);
1256 			md_done_sync(mddev, s, 1);
1257 			break;
1258 		} else {
1259 			r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
1260 			put_buf(r10_bio);
1261 			r10_bio = r10_bio2;
1262 		}
1263 	}
1264 }
1265 
1266 /*
1267  * Note: sync and recovery are handled very differently for raid10.
1268  * This code is for resync.
1269  * For resync, we read through virtual addresses and read all blocks.
1270  * If there is any error, we schedule a write.  The lowest numbered
1271  * drive is authoritative.
1272  * However requests come in for physical addresses, so we need to map.
1273  * For every physical address there are raid_disks/copies virtual addresses,
1274  * which is always at least one, but is not necessarily an integer.
1275  * This means that a physical address can span multiple chunks, so we may
1276  * have to submit multiple io requests for a single sync request.
1277  */
1278 /*
1279  * We check if all blocks are in-sync and only write to blocks that
1280  * aren't in sync
1281  */
1282 static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1283 {
1284 	conf_t *conf = mddev->private;
1285 	int i, first;
1286 	struct bio *tbio, *fbio;
1287 
1288 	atomic_set(&r10_bio->remaining, 1);
1289 
1290 	/* find the first device with a block */
1291 	for (i=0; i<conf->copies; i++)
1292 		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
1293 			break;
1294 
1295 	if (i == conf->copies)
1296 		goto done;
1297 
1298 	first = i;
1299 	fbio = r10_bio->devs[i].bio;
1300 
1301 	/* now find blocks with errors */
1302 	for (i=0 ; i < conf->copies ; i++) {
1303 		int  j, d;
1304 		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
1305 
1306 		tbio = r10_bio->devs[i].bio;
1307 
1308 		if (tbio->bi_end_io != end_sync_read)
1309 			continue;
1310 		if (i == first)
1311 			continue;
1312 		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
1313 			/* We know that the bi_io_vec layout is the same for
1314 			 * both 'first' and 'i', so we just compare them.
1315 			 * All vec entries are PAGE_SIZE;
1316 			 */
1317 			for (j = 0; j < vcnt; j++)
1318 				if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
1319 					   page_address(tbio->bi_io_vec[j].bv_page),
1320 					   PAGE_SIZE))
1321 					break;
1322 			if (j == vcnt)
1323 				continue;
1324 			mddev->resync_mismatches += r10_bio->sectors;
1325 		}
1326 		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1327 			/* Don't fix anything. */
1328 			continue;
1329 		/* Ok, we need to write this bio
1330 		 * First we need to fixup bv_offset, bv_len and
1331 		 * bi_vecs, as the read request might have corrupted these
1332 		 */
1333 		tbio->bi_vcnt = vcnt;
1334 		tbio->bi_size = r10_bio->sectors << 9;
1335 		tbio->bi_idx = 0;
1336 		tbio->bi_phys_segments = 0;
1337 		tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1338 		tbio->bi_flags |= 1 << BIO_UPTODATE;
1339 		tbio->bi_next = NULL;
1340 		tbio->bi_rw = WRITE;
1341 		tbio->bi_private = r10_bio;
1342 		tbio->bi_sector = r10_bio->devs[i].addr;
1343 
1344 		for (j=0; j < vcnt ; j++) {
1345 			tbio->bi_io_vec[j].bv_offset = 0;
1346 			tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
1347 
1348 			memcpy(page_address(tbio->bi_io_vec[j].bv_page),
1349 			       page_address(fbio->bi_io_vec[j].bv_page),
1350 			       PAGE_SIZE);
1351 		}
1352 		tbio->bi_end_io = end_sync_write;
1353 
1354 		d = r10_bio->devs[i].devnum;
1355 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1356 		atomic_inc(&r10_bio->remaining);
1357 		md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
1358 
1359 		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
1360 		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1361 		generic_make_request(tbio);
1362 	}
1363 
1364 done:
1365 	if (atomic_dec_and_test(&r10_bio->remaining)) {
1366 		md_done_sync(mddev, r10_bio->sectors, 1);
1367 		put_buf(r10_bio);
1368 	}
1369 }
1370 
1371 /*
1372  * Now for the recovery code.
1373  * Recovery happens across physical sectors.
1374  * We recover all non-in_sync drives by finding the virtual address of
1375  * each, and then choose a working drive that also has that virt address.
1376  * There is a separate r10_bio for each non-in_sync drive.
1377  * Only the first two slots are in use. The first for reading,
1378  * The second for writing.
1379  *
1380  */
1381 
1382 static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1383 {
1384 	conf_t *conf = mddev->private;
1385 	int i, d;
1386 	struct bio *bio, *wbio;
1387 
1388 
1389 	/* move the pages across to the second bio
1390 	 * and submit the write request
1391 	 */
1392 	bio = r10_bio->devs[0].bio;
1393 	wbio = r10_bio->devs[1].bio;
1394 	for (i=0; i < wbio->bi_vcnt; i++) {
1395 		struct page *p = bio->bi_io_vec[i].bv_page;
1396 		bio->bi_io_vec[i].bv_page = wbio->bi_io_vec[i].bv_page;
1397 		wbio->bi_io_vec[i].bv_page = p;
1398 	}
1399 	d = r10_bio->devs[1].devnum;
1400 
1401 	atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1402 	md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
1403 	if (test_bit(R10BIO_Uptodate, &r10_bio->state))
1404 		generic_make_request(wbio);
1405 	else
1406 		bio_endio(wbio, -EIO);
1407 }
1408 
1409 
1410 /*
1411  * Used by fix_read_error() to decay the per rdev read_errors.
1412  * We halve the read error count for every hour that has elapsed
1413  * since the last recorded read error.
1414  *
1415  */
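/*
 * Worked example (illustrative numbers only): if rdev->read_errors is 40 and
 * three full hours have passed since the last recorded error, the count
 * decays to 40 >> 3 == 5 before the caller adds the new error.
 */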
1416 static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
1417 {
1418 	struct timespec cur_time_mon;
1419 	unsigned long hours_since_last;
1420 	unsigned int read_errors = atomic_read(&rdev->read_errors);
1421 
1422 	ktime_get_ts(&cur_time_mon);
1423 
1424 	if (rdev->last_read_error.tv_sec == 0 &&
1425 	    rdev->last_read_error.tv_nsec == 0) {
1426 		/* first time we've seen a read error */
1427 		rdev->last_read_error = cur_time_mon;
1428 		return;
1429 	}
1430 
1431 	hours_since_last = (cur_time_mon.tv_sec -
1432 			    rdev->last_read_error.tv_sec) / 3600;
1433 
1434 	rdev->last_read_error = cur_time_mon;
1435 
1436 	/*
1437 	 * if hours_since_last is > the number of bits in read_errors
1438 	 * just set read errors to 0. We do this to avoid
1439 	 * overflowing the shift of read_errors by hours_since_last.
1440 	 */
1441 	if (hours_since_last >= 8 * sizeof(read_errors))
1442 		atomic_set(&rdev->read_errors, 0);
1443 	else
1444 		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
1445 }
1446 
1447 /*
1448  * This is a kernel thread which:
1449  *
1450  *	1.	Retries failed read operations on working mirrors.
1451  *	2.	Updates the raid superblock when problems are encountered.
1452  *	3.	Performs writes following reads for array synchronising.
1453  */
1454 
1455 static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
1456 {
1457 	int sect = 0; /* Offset from r10_bio->sector */
1458 	int sectors = r10_bio->sectors;
1459 	mdk_rdev_t*rdev;
1460 	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
1461 	int d = r10_bio->devs[r10_bio->read_slot].devnum;
1462 
1463 	rcu_read_lock();
1464 	rdev = rcu_dereference(conf->mirrors[d].rdev);
1465 	if (rdev) { /* If rdev is not NULL */
1466 		char b[BDEVNAME_SIZE];
1467 		int cur_read_error_count = 0;
1468 
1469 		bdevname(rdev->bdev, b);
1470 
1471 		if (test_bit(Faulty, &rdev->flags)) {
1472 			rcu_read_unlock();
1473 			/* drive has already been failed, just ignore any
1474 			   more fix_read_error() attempts */
1475 			return;
1476 		}
1477 
1478 		check_decay_read_errors(mddev, rdev);
1479 		atomic_inc(&rdev->read_errors);
1480 		cur_read_error_count = atomic_read(&rdev->read_errors);
1481 		if (cur_read_error_count > max_read_errors) {
1482 			rcu_read_unlock();
1483 			printk(KERN_NOTICE
1484 			       "md/raid10:%s: %s: Raid device exceeded "
1485 			       "read_error threshold "
1486 			       "[cur %d:max %d]\n",
1487 			       mdname(mddev),
1488 			       b, cur_read_error_count, max_read_errors);
1489 			printk(KERN_NOTICE
1490 			       "md/raid10:%s: %s: Failing raid "
1491 			       "device\n", mdname(mddev), b);
1492 			md_error(mddev, conf->mirrors[d].rdev);
1493 			return;
1494 		}
1495 	}
1496 	rcu_read_unlock();
1497 
1498 	while(sectors) {
1499 		int s = sectors;
1500 		int sl = r10_bio->read_slot;
1501 		int success = 0;
1502 		int start;
1503 
1504 		if (s > (PAGE_SIZE>>9))
1505 			s = PAGE_SIZE >> 9;
1506 
1507 		rcu_read_lock();
1508 		do {
1509 			d = r10_bio->devs[sl].devnum;
1510 			rdev = rcu_dereference(conf->mirrors[d].rdev);
1511 			if (rdev &&
1512 			    test_bit(In_sync, &rdev->flags)) {
1513 				atomic_inc(&rdev->nr_pending);
1514 				rcu_read_unlock();
1515 				success = sync_page_io(rdev,
1516 						       r10_bio->devs[sl].addr +
1517 						       sect,
1518 						       s<<9,
1519 						       conf->tmppage, READ, false);
1520 				rdev_dec_pending(rdev, mddev);
1521 				rcu_read_lock();
1522 				if (success)
1523 					break;
1524 			}
1525 			sl++;
1526 			if (sl == conf->copies)
1527 				sl = 0;
1528 		} while (!success && sl != r10_bio->read_slot);
1529 		rcu_read_unlock();
1530 
1531 		if (!success) {
1532 			/* Cannot read from anywhere -- bye bye array */
1533 			int dn = r10_bio->devs[r10_bio->read_slot].devnum;
1534 			md_error(mddev, conf->mirrors[dn].rdev);
1535 			break;
1536 		}
1537 
1538 		start = sl;
1539 		/* write it back and re-read */
1540 		rcu_read_lock();
1541 		while (sl != r10_bio->read_slot) {
1542 			char b[BDEVNAME_SIZE];
1543 
1544 			if (sl==0)
1545 				sl = conf->copies;
1546 			sl--;
1547 			d = r10_bio->devs[sl].devnum;
1548 			rdev = rcu_dereference(conf->mirrors[d].rdev);
1549 			if (rdev &&
1550 			    test_bit(In_sync, &rdev->flags)) {
1551 				atomic_inc(&rdev->nr_pending);
1552 				rcu_read_unlock();
1553 				atomic_add(s, &rdev->corrected_errors);
1554 				if (sync_page_io(rdev,
1555 						 r10_bio->devs[sl].addr +
1556 						 sect,
1557 						 s<<9, conf->tmppage, WRITE, false)
1558 				    == 0) {
1559 					/* Well, this device is dead */
1560 					printk(KERN_NOTICE
1561 					       "md/raid10:%s: read correction "
1562 					       "write failed"
1563 					       " (%d sectors at %llu on %s)\n",
1564 					       mdname(mddev), s,
1565 					       (unsigned long long)(sect+
1566 					       rdev->data_offset),
1567 					       bdevname(rdev->bdev, b));
1568 					printk(KERN_NOTICE "md/raid10:%s: %s: failing "
1569 					       "drive\n",
1570 					       mdname(mddev),
1571 					       bdevname(rdev->bdev, b));
1572 					md_error(mddev, rdev);
1573 				}
1574 				rdev_dec_pending(rdev, mddev);
1575 				rcu_read_lock();
1576 			}
1577 		}
1578 		sl = start;
1579 		while (sl != r10_bio->read_slot) {
1580 
1581 			if (sl==0)
1582 				sl = conf->copies;
1583 			sl--;
1584 			d = r10_bio->devs[sl].devnum;
1585 			rdev = rcu_dereference(conf->mirrors[d].rdev);
1586 			if (rdev &&
1587 			    test_bit(In_sync, &rdev->flags)) {
1588 				char b[BDEVNAME_SIZE];
1589 				atomic_inc(&rdev->nr_pending);
1590 				rcu_read_unlock();
1591 				if (sync_page_io(rdev,
1592 						 r10_bio->devs[sl].addr +
1593 						 sect,
1594 						 s<<9, conf->tmppage,
1595 						 READ, false) == 0) {
1596 					/* Well, this device is dead */
1597 					printk(KERN_NOTICE
1598 					       "md/raid10:%s: unable to read back "
1599 					       "corrected sectors"
1600 					       " (%d sectors at %llu on %s)\n",
1601 					       mdname(mddev), s,
1602 					       (unsigned long long)(sect+
1603 						    rdev->data_offset),
1604 					       bdevname(rdev->bdev, b));
1605 					printk(KERN_NOTICE "md/raid10:%s: %s: failing drive\n",
1606 					       mdname(mddev),
1607 					       bdevname(rdev->bdev, b));
1608 
1609 					md_error(mddev, rdev);
1610 				} else {
1611 					printk(KERN_INFO
1612 					       "md/raid10:%s: read error corrected"
1613 					       " (%d sectors at %llu on %s)\n",
1614 					       mdname(mddev), s,
1615 					       (unsigned long long)(sect+
1616 					            rdev->data_offset),
1617 					       bdevname(rdev->bdev, b));
1618 				}
1619 
1620 				rdev_dec_pending(rdev, mddev);
1621 				rcu_read_lock();
1622 			}
1623 		}
1624 		rcu_read_unlock();
1625 
1626 		sectors -= s;
1627 		sect += s;
1628 	}
1629 }
1630 
1631 static void raid10d(mddev_t *mddev)
1632 {
1633 	r10bio_t *r10_bio;
1634 	struct bio *bio;
1635 	unsigned long flags;
1636 	conf_t *conf = mddev->private;
1637 	struct list_head *head = &conf->retry_list;
1638 	mdk_rdev_t *rdev;
1639 	struct blk_plug plug;
1640 
1641 	md_check_recovery(mddev);
1642 
1643 	blk_start_plug(&plug);
1644 	for (;;) {
1645 		char b[BDEVNAME_SIZE];
1646 
1647 		flush_pending_writes(conf);
1648 
1649 		spin_lock_irqsave(&conf->device_lock, flags);
1650 		if (list_empty(head)) {
1651 			spin_unlock_irqrestore(&conf->device_lock, flags);
1652 			break;
1653 		}
1654 		r10_bio = list_entry(head->prev, r10bio_t, retry_list);
1655 		list_del(head->prev);
1656 		conf->nr_queued--;
1657 		spin_unlock_irqrestore(&conf->device_lock, flags);
1658 
1659 		mddev = r10_bio->mddev;
1660 		conf = mddev->private;
1661 		if (test_bit(R10BIO_IsSync, &r10_bio->state))
1662 			sync_request_write(mddev, r10_bio);
1663 		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
1664 			recovery_request_write(mddev, r10_bio);
1665 		else {
1666 			int mirror;
1667 			/* we got a read error. Maybe the drive is bad.  Maybe just
1668 			 * the block and we can fix it.
1669 			 * We freeze all other IO, and try reading the block from
1670 			 * other devices.  When we find one, we re-write
1671 			 * and check whether that fixes the read error.
1672 			 * This is all done synchronously while the array is
1673 			 * frozen.
1674 			 */
1675 			if (mddev->ro == 0) {
1676 				freeze_array(conf);
1677 				fix_read_error(conf, mddev, r10_bio);
1678 				unfreeze_array(conf);
1679 			}
1680 
1681 			bio = r10_bio->devs[r10_bio->read_slot].bio;
1682 			r10_bio->devs[r10_bio->read_slot].bio =
1683 				mddev->ro ? IO_BLOCKED : NULL;
1684 			mirror = read_balance(conf, r10_bio);
1685 			if (mirror == -1) {
1686 				printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
1687 				       " read error for block %llu\n",
1688 				       mdname(mddev),
1689 				       bdevname(bio->bi_bdev,b),
1690 				       (unsigned long long)r10_bio->sector);
1691 				raid_end_bio_io(r10_bio);
1692 				bio_put(bio);
1693 			} else {
1694 				const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
1695 				bio_put(bio);
1696 				rdev = conf->mirrors[mirror].rdev;
1697 				if (printk_ratelimit())
1698 					printk(KERN_ERR "md/raid10:%s: %s: redirecting sector %llu to"
1699 					       " another mirror\n",
1700 					       mdname(mddev),
1701 					       bdevname(rdev->bdev,b),
1702 					       (unsigned long long)r10_bio->sector);
1703 				bio = bio_clone_mddev(r10_bio->master_bio,
1704 						      GFP_NOIO, mddev);
1705 				r10_bio->devs[r10_bio->read_slot].bio = bio;
1706 				bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
1707 					+ rdev->data_offset;
1708 				bio->bi_bdev = rdev->bdev;
1709 				bio->bi_rw = READ | do_sync;
1710 				bio->bi_private = r10_bio;
1711 				bio->bi_end_io = raid10_end_read_request;
1712 				generic_make_request(bio);
1713 			}
1714 		}
1715 		cond_resched();
1716 	}
1717 	blk_finish_plug(&plug);
1718 }
1719 
1720 
1721 static int init_resync(conf_t *conf)
1722 {
1723 	int buffs;
1724 
1725 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
1726 	BUG_ON(conf->r10buf_pool);
1727 	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
1728 	if (!conf->r10buf_pool)
1729 		return -ENOMEM;
1730 	conf->next_resync = 0;
1731 	return 0;
1732 }
1733 
1734 /*
1735  * perform a "sync" on one "block"
1736  *
1737  * We need to make sure that no normal I/O request - particularly write
1738  * requests - conflict with active sync requests.
1739  *
1740  * This is achieved by tracking pending requests and a 'barrier' concept
1741  * that can be installed to exclude normal IO requests.
1742  *
1743  * Resync and recovery are handled very differently.
1744  * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
1745  *
1746  * For resync, we iterate over virtual addresses, read all copies,
1747  * and update if there are differences.  If only one copy is live,
1748  * skip it.
1749  * For recovery, we iterate over physical addresses, read a good
1750  * value for each non-in_sync drive, and over-write.
1751  *
1752  * So, for recovery we may have several outstanding complex requests for a
1753  * given address, one for each out-of-sync device.  We model this by allocating
1754  * a number of r10_bio structures, one for each out-of-sync device.
1755  * As we set up these structures, we collect all the bios together into a list
1756  * which we then process collectively to add pages, and then process again
1757  * to pass to generic_make_request.
1758  *
1759  * The r10_bio structures are linked using a borrowed master_bio pointer.
1760  * This link is counted in ->remaining.  When the r10_bio that points to NULL
1761  * has its remaining count decremented to 0, the whole complex operation
1762  * is complete.
1763  *
1764  */
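/*
 * For example (illustrative only): recovering two out-of-sync devices at
 * the same virtual address yields two r10_bio structures A and B, where
 * B->master_bio = (struct bio *)A and A->master_bio = NULL.  A's
 * ->remaining is incremented for the link from B, so A - the r10_bio that
 * points to NULL - cannot reach zero until B's writes have completed too.
 */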
1765 
1766 static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1767 {
1768 	conf_t *conf = mddev->private;
1769 	r10bio_t *r10_bio;
1770 	struct bio *biolist = NULL, *bio;
1771 	sector_t max_sector, nr_sectors;
1772 	int disk;
1773 	int i;
1774 	int max_sync;
1775 	sector_t sync_blocks;
1776 
1777 	sector_t sectors_skipped = 0;
1778 	int chunks_skipped = 0;
1779 
1780 	if (!conf->r10buf_pool)
1781 		if (init_resync(conf))
1782 			return 0;
1783 
1784  skipped:
1785 	max_sector = mddev->dev_sectors;
1786 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1787 		max_sector = mddev->resync_max_sectors;
1788 	if (sector_nr >= max_sector) {
1789 		/* If we aborted, we need to abort the
1790 		 * sync on the 'current' bitmap chunks (there can
1791 		 * be several when recovering multiple devices),
1792 		 * as we may have started syncing them but not finished.
1793 		 * We can find the current address in
1794 		 * mddev->curr_resync, but for recovery,
1795 		 * we need to convert that to several
1796 		 * virtual addresses.
1797 		 */
1798 		if (mddev->curr_resync < max_sector) { /* aborted */
1799 			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1800 				bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1801 						&sync_blocks, 1);
1802 			else for (i=0; i<conf->raid_disks; i++) {
1803 				sector_t sect =
1804 					raid10_find_virt(conf, mddev->curr_resync, i);
1805 				bitmap_end_sync(mddev->bitmap, sect,
1806 						&sync_blocks, 1);
1807 			}
1808 		} else /* completed sync */
1809 			conf->fullsync = 0;
1810 
1811 		bitmap_close_sync(mddev->bitmap);
1812 		close_sync(conf);
1813 		*skipped = 1;
1814 		return sectors_skipped;
1815 	}
1816 	if (chunks_skipped >= conf->raid_disks) {
1817 		/* if there has been nothing to do on any drive,
1818 		 * then there is nothing to do at all..
1819 		 */
1820 		*skipped = 1;
1821 		return (max_sector - sector_nr) + sectors_skipped;
1822 	}
1823 
1824 	if (max_sector > mddev->resync_max)
1825 		max_sector = mddev->resync_max; /* Don't do IO beyond here */
1826 
1827 	/* make sure the whole request will fit in a chunk - if chunks
1828 	 * are meaningful
1829 	 */
1830 	if (conf->near_copies < conf->raid_disks &&
1831 	    max_sector > (sector_nr | conf->chunk_mask))
1832 		max_sector = (sector_nr | conf->chunk_mask) + 1;
1833 	/*
1834 	 * If there is non-resync activity waiting for us then
1835 	 * put in a delay to throttle resync.
1836 	 */
1837 	if (!go_faster && conf->nr_waiting)
1838 		msleep_interruptible(1000);
1839 
1840 	/* Again, very different code for resync and recovery.
1841 	 * Both must result in an r10bio with a list of bios that
1842 	 * have bi_end_io, bi_sector, bi_bdev set,
1843 	 * and bi_private set to the r10bio.
1844 	 * For recovery, we may actually create several r10bios
1845 	 * with 2 bios in each, that correspond to the bios in the main one.
1846 	 * In this case, the subordinate r10bios link back through a
1847 	 * borrowed master_bio pointer, and the counter in the master
1848 	 * includes a ref from each subordinate.
1849 	 */
1850 	/* First, we decide what to do and set ->bi_end_io
1851 	 * To end_sync_read if we want to read, and
1852 	 * end_sync_write if we will want to write.
1853 	 */
1854 
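	/* max_sync is the most sectors we try to handle in one pass: one
	 * resync buffer's worth (RESYNC_PAGES pages) expressed in 512-byte
	 * sectors.
	 */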
1855 	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
1856 	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1857 		/* recovery... the complicated one */
1858 		int j, k;
1859 		r10_bio = NULL;
1860 
1861 		for (i=0 ; i<conf->raid_disks; i++)
1862 			if (conf->mirrors[i].rdev &&
1863 			    !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) {
1864 				int still_degraded = 0;
1865 				/* want to reconstruct this device */
1866 				r10bio_t *rb2 = r10_bio;
1867 				sector_t sect = raid10_find_virt(conf, sector_nr, i);
1868 				int must_sync;
1869 				/* Unless we are doing a full sync, we only need
1870 				 * to recover the block if it is set in the bitmap
1871 				 */
1872 				must_sync = bitmap_start_sync(mddev->bitmap, sect,
1873 							      &sync_blocks, 1);
1874 				if (sync_blocks < max_sync)
1875 					max_sync = sync_blocks;
1876 				if (!must_sync &&
1877 				    !conf->fullsync) {
1878 					/* yep, skip the sync_blocks here, but don't assume
1879 					 * that there will never be anything to do here
1880 					 */
1881 					chunks_skipped = -1;
1882 					continue;
1883 				}
1884 
1885 				r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
1886 				raise_barrier(conf, rb2 != NULL);
1887 				atomic_set(&r10_bio->remaining, 0);
1888 
1889 				r10_bio->master_bio = (struct bio*)rb2;
1890 				if (rb2)
1891 					atomic_inc(&rb2->remaining);
1892 				r10_bio->mddev = mddev;
1893 				set_bit(R10BIO_IsRecover, &r10_bio->state);
1894 				r10_bio->sector = sect;
1895 
1896 				raid10_find_phys(conf, r10_bio);
1897 
1898 				/* Need to check if the array will still be
1899 				 * degraded
1900 				 */
1901 				for (j=0; j<conf->raid_disks; j++)
1902 					if (conf->mirrors[j].rdev == NULL ||
1903 					    test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
1904 						still_degraded = 1;
1905 						break;
1906 					}
1907 
1908 				must_sync = bitmap_start_sync(mddev->bitmap, sect,
1909 							      &sync_blocks, still_degraded);
1910 
1911 				for (j=0; j<conf->copies;j++) {
1912 					int d = r10_bio->devs[j].devnum;
1913 					if (conf->mirrors[d].rdev &&
1914 					    test_bit(In_sync, &conf->mirrors[d].rdev->flags)) {
1915 						/* This is where we read from */
1916 						bio = r10_bio->devs[0].bio;
1917 						bio->bi_next = biolist;
1918 						biolist = bio;
1919 						bio->bi_private = r10_bio;
1920 						bio->bi_end_io = end_sync_read;
1921 						bio->bi_rw = READ;
1922 						bio->bi_sector = r10_bio->devs[j].addr +
1923 							conf->mirrors[d].rdev->data_offset;
1924 						bio->bi_bdev = conf->mirrors[d].rdev->bdev;
1925 						atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1926 						atomic_inc(&r10_bio->remaining);
1927 						/* and we write to 'i' */
1928 
1929 						for (k=0; k<conf->copies; k++)
1930 							if (r10_bio->devs[k].devnum == i)
1931 								break;
1932 						BUG_ON(k == conf->copies);
1933 						bio = r10_bio->devs[1].bio;
1934 						bio->bi_next = biolist;
1935 						biolist = bio;
1936 						bio->bi_private = r10_bio;
1937 						bio->bi_end_io = end_sync_write;
1938 						bio->bi_rw = WRITE;
1939 						bio->bi_sector = r10_bio->devs[k].addr +
1940 							conf->mirrors[i].rdev->data_offset;
1941 						bio->bi_bdev = conf->mirrors[i].rdev->bdev;
1942 
1943 						r10_bio->devs[0].devnum = d;
1944 						r10_bio->devs[1].devnum = i;
1945 
1946 						break;
1947 					}
1948 				}
1949 				if (j == conf->copies) {
1950 					/* Cannot recover, so abort the recovery */
1951 					put_buf(r10_bio);
1952 					if (rb2)
1953 						atomic_dec(&rb2->remaining);
1954 					r10_bio = rb2;
1955 					if (!test_and_set_bit(MD_RECOVERY_INTR,
1956 							      &mddev->recovery))
1957 						printk(KERN_INFO "md/raid10:%s: insufficient "
1958 						       "working devices for recovery.\n",
1959 						       mdname(mddev));
1960 					break;
1961 				}
1962 			}
1963 		if (biolist == NULL) {
1964 			while (r10_bio) {
1965 				r10bio_t *rb2 = r10_bio;
1966 				r10_bio = (r10bio_t*) rb2->master_bio;
1967 				rb2->master_bio = NULL;
1968 				put_buf(rb2);
1969 			}
1970 			goto giveup;
1971 		}
1972 	} else {
1973 		/* resync. Schedule a read for every block at this virt offset */
1974 		int count = 0;
1975 
1976 		bitmap_cond_end_sync(mddev->bitmap, sector_nr);
1977 
1978 		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
1979 				       &sync_blocks, mddev->degraded) &&
1980 		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1981 			/* We can skip this block */
1982 			*skipped = 1;
1983 			return sync_blocks + sectors_skipped;
1984 		}
1985 		if (sync_blocks < max_sync)
1986 			max_sync = sync_blocks;
1987 		r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
1988 
1989 		r10_bio->mddev = mddev;
1990 		atomic_set(&r10_bio->remaining, 0);
1991 		raise_barrier(conf, 0);
1992 		conf->next_resync = sector_nr;
1993 
1994 		r10_bio->master_bio = NULL;
1995 		r10_bio->sector = sector_nr;
1996 		set_bit(R10BIO_IsSync, &r10_bio->state);
1997 		raid10_find_phys(conf, r10_bio);
1998 		r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1;
1999 
2000 		for (i=0; i<conf->copies; i++) {
2001 			int d = r10_bio->devs[i].devnum;
2002 			bio = r10_bio->devs[i].bio;
2003 			bio->bi_end_io = NULL;
2004 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
2005 			if (conf->mirrors[d].rdev == NULL ||
2006 			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
2007 				continue;
2008 			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2009 			atomic_inc(&r10_bio->remaining);
2010 			bio->bi_next = biolist;
2011 			biolist = bio;
2012 			bio->bi_private = r10_bio;
2013 			bio->bi_end_io = end_sync_read;
2014 			bio->bi_rw = READ;
2015 			bio->bi_sector = r10_bio->devs[i].addr +
2016 				conf->mirrors[d].rdev->data_offset;
2017 			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
2018 			count++;
2019 		}
2020 
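		/* A resync read is only useful if at least two live copies were
		 * scheduled above - with fewer there is nothing to compare
		 * against, so skip this block.
		 */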
2021 		if (count < 2) {
2022 			for (i=0; i<conf->copies; i++) {
2023 				int d = r10_bio->devs[i].devnum;
2024 				if (r10_bio->devs[i].bio->bi_end_io)
2025 					rdev_dec_pending(conf->mirrors[d].rdev, mddev);
2026 			}
2027 			put_buf(r10_bio);
2028 			biolist = NULL;
2029 			goto giveup;
2030 		}
2031 	}
2032 
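	/* The bios on biolist come from the r10buf mempool and may carry
	 * state from a previous use, so reset their fields before pages are
	 * added below.
	 */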
2033 	for (bio = biolist; bio ; bio=bio->bi_next) {
2034 
2035 		bio->bi_flags &= ~(BIO_POOL_MASK - 1);
2036 		if (bio->bi_end_io)
2037 			bio->bi_flags |= 1 << BIO_UPTODATE;
2038 		bio->bi_vcnt = 0;
2039 		bio->bi_idx = 0;
2040 		bio->bi_phys_segments = 0;
2041 		bio->bi_size = 0;
2042 	}
2043 
2044 	nr_sectors = 0;
2045 	if (sector_nr + max_sync < max_sector)
2046 		max_sector = sector_nr + max_sync;
2047 	do {
2048 		struct page *page;
2049 		int len = PAGE_SIZE;
2050 		disk = 0;
2051 		if (sector_nr + (len>>9) > max_sector)
2052 			len = (max_sector - sector_nr) << 9;
2053 		if (len == 0)
2054 			break;
2055 		for (bio= biolist ; bio ; bio=bio->bi_next) {
2056 			page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
2057 			if (bio_add_page(bio, page, len, 0) == 0) {
2058 				/* stop here */
2059 				struct bio *bio2;
2060 				bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
2061 				for (bio2 = biolist; bio2 && bio2 != bio; bio2 = bio2->bi_next) {
2062 					/* remove last page from this bio */
2063 					bio2->bi_vcnt--;
2064 					bio2->bi_size -= len;
2065 					bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
2066 				}
2067 				goto bio_full;
2068 			}
2069 			disk = i;
2070 		}
2071 		nr_sectors += len>>9;
2072 		sector_nr += len>>9;
2073 	} while (biolist->bi_vcnt < RESYNC_PAGES);
2074  bio_full:
2075 	r10_bio->sectors = nr_sectors;
2076 
2077 	while (biolist) {
2078 		bio = biolist;
2079 		biolist = biolist->bi_next;
2080 
2081 		bio->bi_next = NULL;
2082 		r10_bio = bio->bi_private;
2083 		r10_bio->sectors = nr_sectors;
2084 
2085 		if (bio->bi_end_io == end_sync_read) {
2086 			md_sync_acct(bio->bi_bdev, nr_sectors);
2087 			generic_make_request(bio);
2088 		}
2089 	}
2090 
2091 	if (sectors_skipped)
2092 		/* pretend they weren't skipped, it makes
2093 		 * no important difference in this case
2094 		 */
2095 		md_done_sync(mddev, sectors_skipped, 1);
2096 
2097 	return sectors_skipped + nr_sectors;
2098  giveup:
2099 	/* There is nowhere to write, so all non-sync
2100 	 * drives must be failed, so try the next chunk...
2101 	 */
2102 	if (sector_nr + max_sync < max_sector)
2103 		max_sector = sector_nr + max_sync;
2104 
2105 	sectors_skipped += (max_sector - sector_nr);
2106 	chunks_skipped ++;
2107 	sector_nr = max_sector;
2108 	goto skipped;
2109 }
2110 
2111 static sector_t
2112 raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
2113 {
2114 	sector_t size;
2115 	conf_t *conf = mddev->private;
2116 
2117 	if (!raid_disks)
2118 		raid_disks = conf->raid_disks;
2119 	if (!sectors)
2120 		sectors = conf->dev_sectors;
2121 
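	/* Worked example (illustrative numbers only): 4 disks of 1024 chunks
	 * each with near_copies=2 and far_copies=1 give
	 * 1024 / 1 * 4 / 2 = 2048 chunks of usable array capacity.
	 */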
2122 	size = sectors >> conf->chunk_shift;
2123 	sector_div(size, conf->far_copies);
2124 	size = size * raid_disks;
2125 	sector_div(size, conf->near_copies);
2126 
2127 	return size << conf->chunk_shift;
2128 }
2129 
2130 
2131 static conf_t *setup_conf(mddev_t *mddev)
2132 {
2133 	conf_t *conf = NULL;
2134 	int nc, fc, fo;
2135 	sector_t stride, size;
2136 	int err = -EINVAL;
2137 
2138 	if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
2139 	    !is_power_of_2(mddev->new_chunk_sectors)) {
2140 		printk(KERN_ERR "md/raid10:%s: chunk size must be "
2141 		       "at least PAGE_SIZE(%ld) and be a power of 2.\n",
2142 		       mdname(mddev), PAGE_SIZE);
2143 		goto out;
2144 	}
2145 
2146 	nc = mddev->new_layout & 255;
2147 	fc = (mddev->new_layout >> 8) & 255;
2148 	fo = mddev->new_layout & (1<<16);
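	/* For example, a layout value of 0x102 (the common 'n2' arrangement)
	 * decodes to near_copies=2, far_copies=1, far_offset=0.
	 */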
2149 
2150 	if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
2151 	    (mddev->new_layout >> 17)) {
2152 		printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
2153 		       mdname(mddev), mddev->new_layout);
2154 		goto out;
2155 	}
2156 
2157 	err = -ENOMEM;
2158 	conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
2159 	if (!conf)
2160 		goto out;
2161 
2162 	conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
2163 				GFP_KERNEL);
2164 	if (!conf->mirrors)
2165 		goto out;
2166 
2167 	conf->tmppage = alloc_page(GFP_KERNEL);
2168 	if (!conf->tmppage)
2169 		goto out;
2170 
2171 
2172 	conf->raid_disks = mddev->raid_disks;
2173 	conf->near_copies = nc;
2174 	conf->far_copies = fc;
2175 	conf->copies = nc*fc;
2176 	conf->far_offset = fo;
2177 	conf->chunk_mask = mddev->new_chunk_sectors - 1;
2178 	conf->chunk_shift = ffz(~mddev->new_chunk_sectors);
2179 
2180 	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
2181 					   r10bio_pool_free, conf);
2182 	if (!conf->r10bio_pool)
2183 		goto out;
2184 
2185 	size = mddev->dev_sectors >> conf->chunk_shift;
2186 	sector_div(size, fc);
2187 	size = size * conf->raid_disks;
2188 	sector_div(size, nc);
2189 	/* 'size' is now the number of chunks in the array */
2190 	/* calculate "used chunks per device" in 'stride' */
2191 	stride = size * conf->copies;
2192 
2193 	/* We need to round up when dividing by raid_disks to
2194 	 * get the stride size.
2195 	 */
2196 	stride += conf->raid_disks - 1;
2197 	sector_div(stride, conf->raid_disks);
2198 
2199 	conf->dev_sectors = stride << conf->chunk_shift;
2200 
2201 	if (fo)
2202 		stride = 1;
2203 	else
2204 		sector_div(stride, fc);
2205 	conf->stride = stride << conf->chunk_shift;
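	/* Worked example (illustrative numbers only): 4 disks, near_copies=2,
	 * far_copies=1 and 1024 chunks per device give
	 * size = 1024 * 4 / 2 = 2048 data chunks and
	 * stride = 2048 * 2 / 4 = 1024 chunks actually used per device.
	 */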
2206 
2207 
2208 	spin_lock_init(&conf->device_lock);
2209 	INIT_LIST_HEAD(&conf->retry_list);
2210 
2211 	spin_lock_init(&conf->resync_lock);
2212 	init_waitqueue_head(&conf->wait_barrier);
2213 
2214 	conf->thread = md_register_thread(raid10d, mddev, NULL);
2215 	if (!conf->thread)
2216 		goto out;
2217 
2218 	conf->mddev = mddev;
2219 	return conf;
2220 
2221  out:
2222 	printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
2223 	       mdname(mddev));
2224 	if (conf) {
2225 		if (conf->r10bio_pool)
2226 			mempool_destroy(conf->r10bio_pool);
2227 		kfree(conf->mirrors);
2228 		safe_put_page(conf->tmppage);
2229 		kfree(conf);
2230 	}
2231 	return ERR_PTR(err);
2232 }
2233 
2234 static int run(mddev_t *mddev)
2235 {
2236 	conf_t *conf;
2237 	int i, disk_idx, chunk_size;
2238 	mirror_info_t *disk;
2239 	mdk_rdev_t *rdev;
2240 	sector_t size;
2241 
2242 	/*
2243 	 * copy the already verified devices into our private RAID10
2244 	 * bookkeeping area. [whatever we allocate in run()
2245 	 * should be freed in stop()]
2246 	 */
2247 
2248 	if (mddev->private == NULL) {
2249 		conf = setup_conf(mddev);
2250 		if (IS_ERR(conf))
2251 			return PTR_ERR(conf);
2252 		mddev->private = conf;
2253 	}
2254 	conf = mddev->private;
2255 	if (!conf)
2256 		goto out;
2257 
2258 	mddev->thread = conf->thread;
2259 	conf->thread = NULL;
2260 
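	/* Advertise a full chunk as the minimum I/O size, and a full stripe
	 * of data (raid_disks / near_copies chunks) as the optimal I/O size
	 * when the disks divide evenly into near_copies groups, otherwise
	 * raid_disks chunks.
	 */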
2261 	chunk_size = mddev->chunk_sectors << 9;
2262 	blk_queue_io_min(mddev->queue, chunk_size);
2263 	if (conf->raid_disks % conf->near_copies)
2264 		blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
2265 	else
2266 		blk_queue_io_opt(mddev->queue, chunk_size *
2267 				 (conf->raid_disks / conf->near_copies));
2268 
2269 	list_for_each_entry(rdev, &mddev->disks, same_set) {
2270 		disk_idx = rdev->raid_disk;
2271 		if (disk_idx >= conf->raid_disks
2272 		    || disk_idx < 0)
2273 			continue;
2274 		disk = conf->mirrors + disk_idx;
2275 
2276 		disk->rdev = rdev;
2277 		disk_stack_limits(mddev->gendisk, rdev->bdev,
2278 				  rdev->data_offset << 9);
2279 		/* as we don't honour merge_bvec_fn, we must never risk
2280 		 * violating it, so limit max_segments to 1, lying
2281 		 * within a single page.
2282 		 */
2283 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
2284 			blk_queue_max_segments(mddev->queue, 1);
2285 			blk_queue_segment_boundary(mddev->queue,
2286 						   PAGE_CACHE_SIZE - 1);
2287 		}
2288 
2289 		disk->head_position = 0;
2290 	}
2291 	/* need to check that every block has at least one working mirror */
2292 	if (!enough(conf)) {
2293 		printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
2294 		       mdname(mddev));
2295 		goto out_free_conf;
2296 	}
2297 
2298 	mddev->degraded = 0;
2299 	for (i = 0; i < conf->raid_disks; i++) {
2300 
2301 		disk = conf->mirrors + i;
2302 
2303 		if (!disk->rdev ||
2304 		    !test_bit(In_sync, &disk->rdev->flags)) {
2305 			disk->head_position = 0;
2306 			mddev->degraded++;
2307 			if (disk->rdev)
2308 				conf->fullsync = 1;
2309 		}
2310 	}
2311 
2312 	if (mddev->recovery_cp != MaxSector)
2313 		printk(KERN_NOTICE "md/raid10:%s: not clean"
2314 		       " -- starting background reconstruction\n",
2315 		       mdname(mddev));
2316 	printk(KERN_INFO
2317 		"md/raid10:%s: active with %d out of %d devices\n",
2318 		mdname(mddev), conf->raid_disks - mddev->degraded,
2319 		conf->raid_disks);
2320 	/*
2321 	 * Ok, everything is just fine now
2322 	 */
2323 	mddev->dev_sectors = conf->dev_sectors;
2324 	size = raid10_size(mddev, 0, 0);
2325 	md_set_array_sectors(mddev, size);
2326 	mddev->resync_max_sectors = size;
2327 
2328 	mddev->queue->backing_dev_info.congested_fn = raid10_congested;
2329 	mddev->queue->backing_dev_info.congested_data = mddev;
2330 
2331 	/* Calculate max read-ahead size.
2332 	 * We need to readahead at least twice a whole stripe....
2333 	 * maybe...
2334 	 */
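	/* For example (illustrative numbers only): 4 drives, 512KiB chunks,
	 * near_copies=2 and 4KiB pages give stripe = 4 * 128 / 2 = 256 pages,
	 * so ra_pages is raised to at least 512 pages (2MiB).
	 */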
2335 	{
2336 		int stripe = conf->raid_disks *
2337 			((mddev->chunk_sectors << 9) / PAGE_SIZE);
2338 		stripe /= conf->near_copies;
2339 		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
2340 			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
2341 	}
2342 
2343 	if (conf->near_copies < conf->raid_disks)
2344 		blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
2345 
2346 	if (md_integrity_register(mddev))
2347 		goto out_free_conf;
2348 
2349 	return 0;
2350 
2351 out_free_conf:
2352 	md_unregister_thread(mddev->thread);
2353 	if (conf->r10bio_pool)
2354 		mempool_destroy(conf->r10bio_pool);
2355 	safe_put_page(conf->tmppage);
2356 	kfree(conf->mirrors);
2357 	kfree(conf);
2358 	mddev->private = NULL;
2359 out:
2360 	return -EIO;
2361 }
2362 
2363 static int stop(mddev_t *mddev)
2364 {
2365 	conf_t *conf = mddev->private;
2366 
2367 	raise_barrier(conf, 0);
2368 	lower_barrier(conf);
2369 
2370 	md_unregister_thread(mddev->thread);
2371 	mddev->thread = NULL;
2372 	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2373 	if (conf->r10bio_pool)
2374 		mempool_destroy(conf->r10bio_pool);
2375 	kfree(conf->mirrors);
2376 	kfree(conf);
2377 	mddev->private = NULL;
2378 	return 0;
2379 }
2380 
2381 static void raid10_quiesce(mddev_t *mddev, int state)
2382 {
2383 	conf_t *conf = mddev->private;
2384 
2385 	switch(state) {
2386 	case 1:
2387 		raise_barrier(conf, 0);
2388 		break;
2389 	case 0:
2390 		lower_barrier(conf);
2391 		break;
2392 	}
2393 }
2394 
2395 static void *raid10_takeover_raid0(mddev_t *mddev)
2396 {
2397 	mdk_rdev_t *rdev;
2398 	conf_t *conf;
2399 
2400 	if (mddev->degraded > 0) {
2401 		printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
2402 		       mdname(mddev));
2403 		return ERR_PTR(-EINVAL);
2404 	}
2405 
2406 	/* Set new parameters */
2407 	mddev->new_level = 10;
2408 	/* new layout: far_copies = 1, near_copies = 2 */
2409 	mddev->new_layout = (1<<8) + 2;
2410 	mddev->new_chunk_sectors = mddev->chunk_sectors;
2411 	mddev->delta_disks = mddev->raid_disks;
2412 	mddev->raid_disks *= 2;
2413 	/* make sure it will not be marked as dirty */
2414 	mddev->recovery_cp = MaxSector;
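	/* The takeover turns an N-drive raid0 into a degraded 2N-drive
	 * near-2 raid10: the original drives occupy the even slots (set
	 * below) and the odd slots start out empty, so every block still
	 * has exactly one copy until new drives are added.
	 */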
2415 
2416 	conf = setup_conf(mddev);
2417 	if (!IS_ERR(conf)) {
2418 		list_for_each_entry(rdev, &mddev->disks, same_set)
2419 			if (rdev->raid_disk >= 0)
2420 				rdev->new_raid_disk = rdev->raid_disk * 2;
2421 		conf->barrier = 1;
2422 	}
2423 
2424 	return conf;
2425 }
2426 
2427 static void *raid10_takeover(mddev_t *mddev)
2428 {
2429 	struct raid0_private_data *raid0_priv;
2430 
2431 	/* raid10 can take over:
2432 	 *  raid0 - provided it uses only a single zone (all devices the same size)
2433 	 */
2434 	if (mddev->level == 0) {
2435 		/* for raid0 takeover only one zone is supported */
2436 		raid0_priv = mddev->private;
2437 		if (raid0_priv->nr_strip_zones > 1) {
2438 			printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
2439 			       " with more than one zone.\n",
2440 			       mdname(mddev));
2441 			return ERR_PTR(-EINVAL);
2442 		}
2443 		return raid10_takeover_raid0(mddev);
2444 	}
2445 	return ERR_PTR(-EINVAL);
2446 }
2447 
2448 static struct mdk_personality raid10_personality =
2449 {
2450 	.name		= "raid10",
2451 	.level		= 10,
2452 	.owner		= THIS_MODULE,
2453 	.make_request	= make_request,
2454 	.run		= run,
2455 	.stop		= stop,
2456 	.status		= status,
2457 	.error_handler	= error,
2458 	.hot_add_disk	= raid10_add_disk,
2459 	.hot_remove_disk= raid10_remove_disk,
2460 	.spare_active	= raid10_spare_active,
2461 	.sync_request	= sync_request,
2462 	.quiesce	= raid10_quiesce,
2463 	.size		= raid10_size,
2464 	.takeover	= raid10_takeover,
2465 };
2466 
2467 static int __init raid_init(void)
2468 {
2469 	return register_md_personality(&raid10_personality);
2470 }
2471 
2472 static void raid_exit(void)
2473 {
2474 	unregister_md_personality(&raid10_personality);
2475 }
2476 
2477 module_init(raid_init);
2478 module_exit(raid_exit);
2479 MODULE_LICENSE("GPL");
2480 MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
2481 MODULE_ALIAS("md-personality-9"); /* RAID10 */
2482 MODULE_ALIAS("md-raid10");
2483 MODULE_ALIAS("md-level-10");
2484