1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Partial Parity Log for closing the RAID5 write hole
4 * Copyright (c) 2017, Intel Corporation.
5 */
6
7 #include <linux/kernel.h>
8 #include <linux/blkdev.h>
9 #include <linux/slab.h>
10 #include <linux/crc32c.h>
11 #include <linux/async_tx.h>
12 #include <linux/raid/md_p.h>
13 #include "md.h"
14 #include "raid5.h"
15 #include "raid5-log.h"
16
17 /*
18 * PPL consists of a 4KB header (struct ppl_header) and at least 128KB for
19 * partial parity data. The header contains an array of entries
20 * (struct ppl_header_entry) which describe the logged write requests.
21 * Partial parity for the entries comes after the header, written in the same
22 * sequence as the entries:
23 *
24 * Header
25 * entry0
26 * ...
27 * entryN
28 * PP data
29 * PP for entry0
30 * ...
31 * PP for entryN
32 *
33 * An entry describes one or more consecutive stripe_heads, up to a full
34 * stripe. The modified raid data chunks form an m-by-n matrix, where m is the
35 * number of stripe_heads in the entry and n is the number of modified data
36 * disks. Every stripe_head in the entry must write to the same data disks.
37 * An example of a valid case described by a single entry (writes to the first
38 * stripe of a 4 disk array, 16k chunk size):
39 *
40 * sh->sector dd0 dd1 dd2 ppl
41 * +-----+-----+-----+
42 * 0 | --- | --- | --- | +----+
43 * 8 | -W- | -W- | --- | | pp | data_sector = 8
44 * 16 | -W- | -W- | --- | | pp | data_size = 3 * 2 * 4k
45 * 24 | -W- | -W- | --- | | pp | pp_size = 3 * 4k
46 * +-----+-----+-----+ +----+
47 *
48 * data_sector is the first raid sector of the modified data, data_size is the
49 * total size of modified data and pp_size is the size of partial parity for
50 * this entry. Entries for full stripe writes contain no partial parity
51 * (pp_size = 0); they only mark the stripes for which parity should be
52 * recalculated after an unclean shutdown. Every entry holds a checksum of its
53 * partial parity, and the header also has a checksum of the header itself.
54 *
55 * A write request is always logged to the PPL instance stored on the parity
56 * disk of the corresponding stripe. For each member disk there is one ppl_log
57 * used to handle logging for this disk, independently of the others. The logs
58 * are grouped in the child_logs array in struct ppl_conf, which is assigned to
59 * r5conf->log_private.
60 *
61 * ppl_io_unit represents a full PPL write; its header_page contains the ppl_header.
62 * PPL entries for logged stripes are added in ppl_log_stripe(). A stripe_head
63 * can be appended to the last entry if it meets the conditions for a valid
64 * entry described above; otherwise a new entry is added. Checksums of entries
65 * are calculated incrementally as stripes containing partial parity are being
66 * added. ppl_submit_iounit() calculates the checksum of the header and submits
67 * a bio containing the header page and partial parity pages (sh->ppl_page) for
68 * all stripes of the io_unit. When the PPL write completes, the stripes
69 * associated with the io_unit are released and raid5d starts writing their data
70 * and parity. When all stripes are written, the io_unit is freed and the next
71 * can be submitted.
72 *
73 * An io_unit is used to gather stripes until it is submitted or becomes full
74 * (if the maximum number of entries or size of PPL is reached). Another io_unit
75 * can't be submitted until the previous has completed (PPL and stripe
76 * data+parity is written). The log->io_list tracks all io_units of a log
77 * (for a single member disk). New io_units are added to the end of the list
78 * and the first io_unit is submitted, if it is not submitted already.
79 * The current io_unit accepting new stripes is always at the end of the list.
80 *
81 * If write-back cache is enabled for any of the disks in the array, its data
82 * must be flushed before the next io_unit is submitted.
83 */
84
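/*
 * Spelled out for the example above: the entry covers 3 stripe_heads writing
 * to 2 of the 3 data disks with 4k pages, so
 *
 *	data_sector = 8			first modified raid sector
 *	data_size   = 3 * 2 * 4096	one page per modified data chunk
 *	pp_size     = 3 * 4096		one partial parity page per stripe_head
 *
 * A full stripe write of the same rows would instead log pp_size = 0 and
 * data_size = 3 * 3 * 4096, because all three data disks are modified.
 */
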
85 #define PPL_SPACE_SIZE (128 * 1024)
86
87 struct ppl_conf {
88 struct mddev *mddev;
89
90 /* array of child logs, one for each raid disk */
91 struct ppl_log *child_logs;
92 int count;
93
94 int block_size; /* the logical block size used for data_sector
95 * in ppl_header_entry */
96 u32 signature; /* raid array identifier */
97 atomic64_t seq; /* current log write sequence number */
98
99 struct kmem_cache *io_kc;
100 mempool_t io_pool;
101 struct bio_set bs;
102 struct bio_set flush_bs;
103
104 /* used only for recovery */
105 int recovered_entries;
106 int mismatch_count;
107
108 /* stripes to retry if failed to allocate io_unit */
109 struct list_head no_mem_stripes;
110 spinlock_t no_mem_stripes_lock;
111
112 unsigned short write_hint;
113 };
114
115 struct ppl_log {
116 struct ppl_conf *ppl_conf; /* shared between all log instances */
117
118 struct md_rdev *rdev; /* array member disk associated with
119 * this log instance */
120 struct mutex io_mutex;
121 struct ppl_io_unit *current_io; /* current io_unit accepting new data
122 * always at the end of io_list */
123 spinlock_t io_list_lock;
124 struct list_head io_list; /* all io_units of this log */
125
126 sector_t next_io_sector;
127 unsigned int entry_space;
128 bool use_multippl;
129 bool wb_cache_on;
130 unsigned long disk_flush_bitmap;
131 };
132
133 #define PPL_IO_INLINE_BVECS 32
134
135 struct ppl_io_unit {
136 struct ppl_log *log;
137
138 struct page *header_page; /* for ppl_header */
139
140 unsigned int entries_count; /* number of entries in ppl_header */
141 unsigned int pp_size; /* current total size of partial parity */
142
143 u64 seq; /* sequence number of this log write */
144 struct list_head log_sibling; /* log->io_list */
145
146 struct list_head stripe_list; /* stripes added to the io_unit */
147 atomic_t pending_stripes; /* how many stripes not written to raid */
148 atomic_t pending_flushes; /* how many disk flushes are in progress */
149
150 bool submitted; /* true if write to log started */
151
152 /* inline bio and its biovec for submitting the iounit */
153 struct bio bio;
154 struct bio_vec biovec[PPL_IO_INLINE_BVECS];
155 };
156
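/*
 * Minimal illustrative sketch (not used by the driver) of the header checksum
 * scheme described in the comment at the top of this file, mirroring what
 * ppl_submit_iounit() and ppl_load_distributed() below do: crc32c is taken
 * over the whole 4k header with the checksum field zeroed, and the result is
 * stored inverted.
 */
static inline bool ppl_header_csum_ok_example(struct ppl_header *pplhdr)
{
	u32 crc_stored = le32_to_cpu(pplhdr->checksum);
	u32 crc;

	/* checksum field must be zero while the crc is calculated */
	pplhdr->checksum = 0;
	crc = ~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE);
	pplhdr->checksum = cpu_to_le32(crc_stored);

	return crc == crc_stored;
}
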
157 struct dma_async_tx_descriptor *
158 ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
159 struct dma_async_tx_descriptor *tx)
160 {
161 int disks = sh->disks;
162 struct page **srcs = percpu->scribble;
163 int count = 0, pd_idx = sh->pd_idx, i;
164 struct async_submit_ctl submit;
165
166 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
167
168 /*
169 * Partial parity is the XOR of stripe data chunks that are not changed
170 * during the write request. Depending on available data
171 * (read-modify-write vs. reconstruct-write case) we calculate it
172 * differently.
173 */
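/*
 * Illustrative note (not from the code): for a 4 disk array with data disks
 * d0..d2 and a write to d0 and d1, both paths yield the same partial parity,
 * the old contents of d2: rmw gives old parity ^ old d0 ^ old d1 (computed by
 * ops_run_prexor5()), rcw xors the only unmodified, up to date disk d2.
 */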
174 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
175 /*
176 * rmw: xor old data and parity from updated disks
177 * This is calculated earlier by ops_run_prexor5() so just copy
178 * the parity dev page.
179 */
180 srcs[count++] = sh->dev[pd_idx].page;
181 } else if (sh->reconstruct_state == reconstruct_state_drain_run) {
182 /* rcw: xor data from all not updated disks */
183 for (i = disks; i--;) {
184 struct r5dev *dev = &sh->dev[i];
185 if (test_bit(R5_UPTODATE, &dev->flags))
186 srcs[count++] = dev->page;
187 }
188 } else {
189 return tx;
190 }
191
192 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx,
193 NULL, sh, (void *) (srcs + sh->disks + 2));
194
195 if (count == 1)
196 tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE,
197 &submit);
198 else
199 tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE,
200 &submit);
201
202 return tx;
203 }
204
205 static void *ppl_io_pool_alloc(gfp_t gfp_mask, void *pool_data)
206 {
207 struct kmem_cache *kc = pool_data;
208 struct ppl_io_unit *io;
209
210 io = kmem_cache_alloc(kc, gfp_mask);
211 if (!io)
212 return NULL;
213
214 io->header_page = alloc_page(gfp_mask);
215 if (!io->header_page) {
216 kmem_cache_free(kc, io);
217 return NULL;
218 }
219
220 return io;
221 }
222
223 static void ppl_io_pool_free(void *element, void *pool_data)
224 {
225 struct kmem_cache *kc = pool_data;
226 struct ppl_io_unit *io = element;
227
228 __free_page(io->header_page);
229 kmem_cache_free(kc, io);
230 }
231
232 static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
233 struct stripe_head *sh)
234 {
235 struct ppl_conf *ppl_conf = log->ppl_conf;
236 struct ppl_io_unit *io;
237 struct ppl_header *pplhdr;
238 struct page *header_page;
239
240 io = mempool_alloc(&ppl_conf->io_pool, GFP_NOWAIT);
241 if (!io)
242 return NULL;
243
244 header_page = io->header_page;
245 memset(io, 0, sizeof(*io));
246 io->header_page = header_page;
247
248 io->log = log;
249 INIT_LIST_HEAD(&io->log_sibling);
250 INIT_LIST_HEAD(&io->stripe_list);
251 atomic_set(&io->pending_stripes, 0);
252 atomic_set(&io->pending_flushes, 0);
253 bio_init(&io->bio, log->rdev->bdev, io->biovec, PPL_IO_INLINE_BVECS,
254 REQ_OP_WRITE | REQ_FUA);
255
256 pplhdr = page_address(io->header_page);
257 clear_page(pplhdr);
258 memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
259 pplhdr->signature = cpu_to_le32(ppl_conf->signature);
260
261 io->seq = atomic64_add_return(1, &ppl_conf->seq);
262 pplhdr->generation = cpu_to_le64(io->seq);
263
264 return io;
265 }
266
267 static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
268 {
269 struct ppl_io_unit *io = log->current_io;
270 struct ppl_header_entry *e = NULL;
271 struct ppl_header *pplhdr;
272 int i;
273 sector_t data_sector = 0;
274 int data_disks = 0;
275 struct r5conf *conf = sh->raid_conf;
276
277 pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector);
278
279 /* check if current io_unit is full */
280 if (io && (io->pp_size == log->entry_space ||
281 io->entries_count == PPL_HDR_MAX_ENTRIES)) {
282 pr_debug("%s: add io_unit blocked by seq: %llu\n",
283 __func__, io->seq);
284 io = NULL;
285 }
286
287 /* add a new unit if there is none or the current is full */
288 if (!io) {
289 io = ppl_new_iounit(log, sh);
290 if (!io)
291 return -ENOMEM;
292 spin_lock_irq(&log->io_list_lock);
293 list_add_tail(&io->log_sibling, &log->io_list);
294 spin_unlock_irq(&log->io_list_lock);
295
296 log->current_io = io;
297 }
298
299 for (i = 0; i < sh->disks; i++) {
300 struct r5dev *dev = &sh->dev[i];
301
302 if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) {
303 if (!data_disks || dev->sector < data_sector)
304 data_sector = dev->sector;
305 data_disks++;
306 }
307 }
308 BUG_ON(!data_disks);
309
310 pr_debug("%s: seq: %llu data_sector: %llu data_disks: %d\n", __func__,
311 io->seq, (unsigned long long)data_sector, data_disks);
312
313 pplhdr = page_address(io->header_page);
314
315 if (io->entries_count > 0) {
316 struct ppl_header_entry *last =
317 &pplhdr->entries[io->entries_count - 1];
318 struct stripe_head *sh_last = list_last_entry(
319 &io->stripe_list, struct stripe_head, log_list);
320 u64 data_sector_last = le64_to_cpu(last->data_sector);
321 u32 data_size_last = le32_to_cpu(last->data_size);
322
323 /*
324 * Check if we can append the stripe to the last entry. It must
325 * be just after the last logged stripe and write to the same
326 * disks. Use bit shift and logarithm to avoid 64-bit division.
327 */
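/*
 * Illustrative note (assumed numbers): with 16k chunks (chunk_sectors = 32)
 * and 4k stripes, an entry whose last stripe has data_sector_last = 8,
 * data_size_last = 2 * 4k and data_disks = 2 accepts the next consecutive
 * stripe at data_sector = 16: both sectors map to the same chunk
 * (8 >> 5 == 16 >> 5) and (16 - 8) * 2 == data_size_last >> 9 == 16.
 */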
328 if ((sh->sector == sh_last->sector + RAID5_STRIPE_SECTORS(conf)) &&
329 (data_sector >> ilog2(conf->chunk_sectors) ==
330 data_sector_last >> ilog2(conf->chunk_sectors)) &&
331 ((data_sector - data_sector_last) * data_disks ==
332 data_size_last >> 9))
333 e = last;
334 }
335
336 if (!e) {
337 e = &pplhdr->entries[io->entries_count++];
338 e->data_sector = cpu_to_le64(data_sector);
339 e->parity_disk = cpu_to_le32(sh->pd_idx);
340 e->checksum = cpu_to_le32(~0);
341 }
342
343 le32_add_cpu(&e->data_size, data_disks << PAGE_SHIFT);
344
345 /* don't write any PP if full stripe write */
346 if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) {
347 le32_add_cpu(&e->pp_size, PAGE_SIZE);
348 io->pp_size += PAGE_SIZE;
349 e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum),
350 page_address(sh->ppl_page),
351 PAGE_SIZE));
352 }
353
354 list_add_tail(&sh->log_list, &io->stripe_list);
355 atomic_inc(&io->pending_stripes);
356 sh->ppl_io = io;
357
358 return 0;
359 }
360
361 int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh)
362 {
363 struct ppl_conf *ppl_conf = conf->log_private;
364 struct ppl_io_unit *io = sh->ppl_io;
365 struct ppl_log *log;
366
367 if (io || test_bit(STRIPE_SYNCING, &sh->state) || !sh->ppl_page ||
368 !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
369 !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) {
370 clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
371 return -EAGAIN;
372 }
373
374 log = &ppl_conf->child_logs[sh->pd_idx];
375
376 mutex_lock(&log->io_mutex);
377
378 if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
379 mutex_unlock(&log->io_mutex);
380 return -EAGAIN;
381 }
382
383 set_bit(STRIPE_LOG_TRAPPED, &sh->state);
384 clear_bit(STRIPE_DELAYED, &sh->state);
385 atomic_inc(&sh->count);
386
387 if (ppl_log_stripe(log, sh)) {
388 spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
389 list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
390 spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
391 }
392
393 mutex_unlock(&log->io_mutex);
394
395 return 0;
396 }
397
398 static void ppl_log_endio(struct bio *bio)
399 {
400 struct ppl_io_unit *io = bio->bi_private;
401 struct ppl_log *log = io->log;
402 struct ppl_conf *ppl_conf = log->ppl_conf;
403 struct stripe_head *sh, *next;
404
405 pr_debug("%s: seq: %llu\n", __func__, io->seq);
406
407 if (bio->bi_status)
408 md_error(ppl_conf->mddev, log->rdev);
409
410 list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
411 list_del_init(&sh->log_list);
412
413 set_bit(STRIPE_HANDLE, &sh->state);
414 raid5_release_stripe(sh);
415 }
416 }
417
418 static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
419 {
420 pr_debug("%s: seq: %llu size: %u sector: %llu dev: %pg\n",
421 __func__, io->seq, bio->bi_iter.bi_size,
422 (unsigned long long)bio->bi_iter.bi_sector,
423 bio->bi_bdev);
424
425 submit_bio(bio);
426 }
427
428 static void ppl_submit_iounit(struct ppl_io_unit *io)
429 {
430 struct ppl_log *log = io->log;
431 struct ppl_conf *ppl_conf = log->ppl_conf;
432 struct ppl_header *pplhdr = page_address(io->header_page);
433 struct bio *bio = &io->bio;
434 struct stripe_head *sh;
435 int i;
436
437 bio->bi_private = io;
438
439 if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
440 ppl_log_endio(bio);
441 return;
442 }
443
444 for (i = 0; i < io->entries_count; i++) {
445 struct ppl_header_entry *e = &pplhdr->entries[i];
446
447 pr_debug("%s: seq: %llu entry: %d data_sector: %llu pp_size: %u data_size: %u\n",
448 __func__, io->seq, i, le64_to_cpu(e->data_sector),
449 le32_to_cpu(e->pp_size), le32_to_cpu(e->data_size));
450
451 e->data_sector = cpu_to_le64(le64_to_cpu(e->data_sector) >>
452 ilog2(ppl_conf->block_size >> 9));
453 e->checksum = cpu_to_le32(~le32_to_cpu(e->checksum));
454 }
455
456 pplhdr->entries_count = cpu_to_le32(io->entries_count);
457 pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE));
458
459 /* Rewind the buffer if the current PPL is larger than the remaining space */
460 if (log->use_multippl &&
461 log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
462 (PPL_HEADER_SIZE + io->pp_size) >> 9)
463 log->next_io_sector = log->rdev->ppl.sector;
464
465
466 bio->bi_end_io = ppl_log_endio;
467 bio->bi_iter.bi_sector = log->next_io_sector;
468 bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
469
470 pr_debug("%s: log->current_io_sector: %llu\n", __func__,
471 (unsigned long long)log->next_io_sector);
472
473 if (log->use_multippl)
474 log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;
475
476 WARN_ON(log->disk_flush_bitmap != 0);
477
478 list_for_each_entry(sh, &io->stripe_list, log_list) {
479 for (i = 0; i < sh->disks; i++) {
480 struct r5dev *dev = &sh->dev[i];
481
482 if ((ppl_conf->child_logs[i].wb_cache_on) &&
483 (test_bit(R5_Wantwrite, &dev->flags))) {
484 set_bit(i, &log->disk_flush_bitmap);
485 }
486 }
487
488 /* entries for full stripe writes have no partial parity */
489 if (test_bit(STRIPE_FULL_WRITE, &sh->state))
490 continue;
491
492 if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
493 struct bio *prev = bio;
494
495 bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_VECS,
496 prev->bi_opf, GFP_NOIO,
497 &ppl_conf->bs);
498 bio->bi_iter.bi_sector = bio_end_sector(prev);
499 bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
500
501 bio_chain(bio, prev);
502 ppl_submit_iounit_bio(io, prev);
503 }
504 }
505
506 ppl_submit_iounit_bio(io, bio);
507 }
508
509 static void ppl_submit_current_io(struct ppl_log *log)
510 {
511 struct ppl_io_unit *io;
512
513 spin_lock_irq(&log->io_list_lock);
514
515 io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
516 log_sibling);
517 if (io && io->submitted)
518 io = NULL;
519
520 spin_unlock_irq(&log->io_list_lock);
521
522 if (io) {
523 io->submitted = true;
524
525 if (io == log->current_io)
526 log->current_io = NULL;
527
528 ppl_submit_iounit(io);
529 }
530 }
531
532 void ppl_write_stripe_run(struct r5conf *conf)
533 {
534 struct ppl_conf *ppl_conf = conf->log_private;
535 struct ppl_log *log;
536 int i;
537
538 for (i = 0; i < ppl_conf->count; i++) {
539 log = &ppl_conf->child_logs[i];
540
541 mutex_lock(&log->io_mutex);
542 ppl_submit_current_io(log);
543 mutex_unlock(&log->io_mutex);
544 }
545 }
546
547 static void ppl_io_unit_finished(struct ppl_io_unit *io)
548 {
549 struct ppl_log *log = io->log;
550 struct ppl_conf *ppl_conf = log->ppl_conf;
551 struct r5conf *conf = ppl_conf->mddev->private;
552 unsigned long flags;
553
554 pr_debug("%s: seq: %llu\n", __func__, io->seq);
555
556 local_irq_save(flags);
557
558 spin_lock(&log->io_list_lock);
559 list_del(&io->log_sibling);
560 spin_unlock(&log->io_list_lock);
561
562 mempool_free(io, &ppl_conf->io_pool);
563
564 spin_lock(&ppl_conf->no_mem_stripes_lock);
565 if (!list_empty(&ppl_conf->no_mem_stripes)) {
566 struct stripe_head *sh;
567
568 sh = list_first_entry(&ppl_conf->no_mem_stripes,
569 struct stripe_head, log_list);
570 list_del_init(&sh->log_list);
571 set_bit(STRIPE_HANDLE, &sh->state);
572 raid5_release_stripe(sh);
573 }
574 spin_unlock(&ppl_conf->no_mem_stripes_lock);
575
576 local_irq_restore(flags);
577
578 wake_up(&conf->wait_for_quiescent);
579 }
580
581 static void ppl_flush_endio(struct bio *bio)
582 {
583 struct ppl_io_unit *io = bio->bi_private;
584 struct ppl_log *log = io->log;
585 struct ppl_conf *ppl_conf = log->ppl_conf;
586 struct r5conf *conf = ppl_conf->mddev->private;
587
588 pr_debug("%s: dev: %pg\n", __func__, bio->bi_bdev);
589
590 if (bio->bi_status) {
591 struct md_rdev *rdev;
592
593 rcu_read_lock();
594 rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
595 if (rdev)
596 md_error(rdev->mddev, rdev);
597 rcu_read_unlock();
598 }
599
600 bio_put(bio);
601
602 if (atomic_dec_and_test(&io->pending_flushes)) {
603 ppl_io_unit_finished(io);
604 md_wakeup_thread(conf->mddev->thread);
605 }
606 }
607
608 static void ppl_do_flush(struct ppl_io_unit *io)
609 {
610 struct ppl_log *log = io->log;
611 struct ppl_conf *ppl_conf = log->ppl_conf;
612 struct r5conf *conf = ppl_conf->mddev->private;
613 int raid_disks = conf->raid_disks;
614 int flushed_disks = 0;
615 int i;
616
617 atomic_set(&io->pending_flushes, raid_disks);
618
619 for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
620 struct md_rdev *rdev;
621 struct block_device *bdev = NULL;
622
623 rcu_read_lock();
624 rdev = rcu_dereference(conf->disks[i].rdev);
625 if (rdev && !test_bit(Faulty, &rdev->flags))
626 bdev = rdev->bdev;
627 rcu_read_unlock();
628
629 if (bdev) {
630 struct bio *bio;
631
632 bio = bio_alloc_bioset(bdev, 0,
633 REQ_OP_WRITE | REQ_PREFLUSH,
634 GFP_NOIO, &ppl_conf->flush_bs);
635 bio->bi_private = io;
636 bio->bi_end_io = ppl_flush_endio;
637
638 pr_debug("%s: dev: %ps\n", __func__, bio->bi_bdev);
639
640 submit_bio(bio);
641 flushed_disks++;
642 }
643 }
644
645 log->disk_flush_bitmap = 0;
646
647 for (i = flushed_disks ; i < raid_disks; i++) {
648 if (atomic_dec_and_test(&io->pending_flushes))
649 ppl_io_unit_finished(io);
650 }
651 }
652
653 static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
654 struct ppl_log *log)
655 {
656 struct ppl_io_unit *io;
657
658 io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
659 log_sibling);
660
661 return !io || !io->submitted;
662 }
663
664 void ppl_quiesce(struct r5conf *conf, int quiesce)
665 {
666 struct ppl_conf *ppl_conf = conf->log_private;
667 int i;
668
669 if (quiesce) {
670 for (i = 0; i < ppl_conf->count; i++) {
671 struct ppl_log *log = &ppl_conf->child_logs[i];
672
673 spin_lock_irq(&log->io_list_lock);
674 wait_event_lock_irq(conf->wait_for_quiescent,
675 ppl_no_io_unit_submitted(conf, log),
676 log->io_list_lock);
677 spin_unlock_irq(&log->io_list_lock);
678 }
679 }
680 }
681
682 int ppl_handle_flush_request(struct bio *bio)
683 {
684 if (bio->bi_iter.bi_size == 0) {
685 bio_endio(bio);
686 return 0;
687 }
688 bio->bi_opf &= ~REQ_PREFLUSH;
689 return -EAGAIN;
690 }
691
692 void ppl_stripe_write_finished(struct stripe_head *sh)
693 {
694 struct ppl_io_unit *io;
695
696 io = sh->ppl_io;
697 sh->ppl_io = NULL;
698
699 if (io && atomic_dec_and_test(&io->pending_stripes)) {
700 if (io->log->disk_flush_bitmap)
701 ppl_do_flush(io);
702 else
703 ppl_io_unit_finished(io);
704 }
705 }
706
707 static void ppl_xor(int size, struct page *page1, struct page *page2)
708 {
709 struct async_submit_ctl submit;
710 struct dma_async_tx_descriptor *tx;
711 struct page *xor_srcs[] = { page1, page2 };
712
713 init_async_submit(&submit, ASYNC_TX_ACK|ASYNC_TX_XOR_DROP_DST,
714 NULL, NULL, NULL, NULL);
715 tx = async_xor(page1, xor_srcs, 0, 2, size, &submit);
716
717 async_tx_quiesce(&tx);
718 }
719
720 /*
721 * PPL recovery strategy: xor partial parity and data from all modified data
722 * disks within a stripe and write the result as the new stripe parity. If all
723 * stripe data disks are modified (full stripe write), no partial parity is
724 * available, so just xor the data disks.
725 *
726 * Recovery of a PPL entry shall occur only if all modified data disks are
727 * available and reading from all of them succeeds.
728 *
729 * A PPL entry applies to a stripe; the partial parity size for an entry is at most
730 * the size of the chunk. Examples of possible cases for a single entry:
731 *
732 * case 0: single data disk write:
733 * data0 data1 data2 ppl parity
734 * +--------+--------+--------+ +--------------------+
735 * | ------ | ------ | ------ | +----+ | (no change) |
736 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp |
737 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp |
738 * | ------ | ------ | ------ | +----+ | (no change) |
739 * +--------+--------+--------+ +--------------------+
740 * pp_size = data_size
741 *
742 * case 1: more than one data disk write:
743 * data0 data1 data2 ppl parity
744 * +--------+--------+--------+ +--------------------+
745 * | ------ | ------ | ------ | +----+ | (no change) |
746 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
747 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
748 * | ------ | ------ | ------ | +----+ | (no change) |
749 * +--------+--------+--------+ +--------------------+
750 * pp_size = data_size / modified_data_disks
751 *
752 * case 2: write to all data disks (also full stripe write):
753 * data0 data1 data2 parity
754 * +--------+--------+--------+ +--------------------+
755 * | ------ | ------ | ------ | | (no change) |
756 * | -data- | -data- | -data- | --------> | xor all data |
757 * | ------ | ------ | ------ | --------> | (no change) |
758 * | ------ | ------ | ------ | | (no change) |
759 * +--------+--------+--------+ +--------------------+
760 * pp_size = 0
761 *
762 * The following cases are possible only in other implementations. The recovery
763 * code can handle them, but they are not generated at runtime because they can
764 * be reduced to cases 0, 1 and 2:
765 *
766 * case 3:
767 * data0 data1 data2 ppl parity
768 * +--------+--------+--------+ +----+ +--------------------+
769 * | ------ | -data- | -data- | | pp | | data1 ^ data2 ^ pp |
770 * | ------ | -data- | -data- | | pp | -> | data1 ^ data2 ^ pp |
771 * | -data- | -data- | -data- | | -- | -> | xor all data |
772 * | -data- | -data- | ------ | | pp | | data0 ^ data1 ^ pp |
773 * +--------+--------+--------+ +----+ +--------------------+
774 * pp_size = chunk_size
775 *
776 * case 4:
777 * data0 data1 data2 ppl parity
778 * +--------+--------+--------+ +----+ +--------------------+
779 * | ------ | -data- | ------ | | pp | | data1 ^ pp |
780 * | ------ | ------ | ------ | | -- | -> | (no change) |
781 * | ------ | ------ | ------ | | -- | -> | (no change) |
782 * | -data- | ------ | ------ | | pp | | data0 ^ pp |
783 * +--------+--------+--------+ +----+ +--------------------+
784 * pp_size = chunk_size
785 */
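/*
 * Illustrative note (assumed numbers) for the geometry computed below: with
 * 16k chunks (chunk_sectors = 32), a case 1 entry with pp_size = 8k and
 * data_size = 16k gives data_disks = 16k / 8k = 2, strip_sectors = 16 and
 * r_sector_last = r_sector_first + (2 - 1) * 32 + 16. For a full-chunk entry
 * (pp_size = chunk_size, cases 3 and 4) the span is simply
 * r_sector_last = r_sector_first + (data_size >> 9).
 */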
786 static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
787 sector_t ppl_sector)
788 {
789 struct ppl_conf *ppl_conf = log->ppl_conf;
790 struct mddev *mddev = ppl_conf->mddev;
791 struct r5conf *conf = mddev->private;
792 int block_size = ppl_conf->block_size;
793 struct page *page1;
794 struct page *page2;
795 sector_t r_sector_first;
796 sector_t r_sector_last;
797 int strip_sectors;
798 int data_disks;
799 int i;
800 int ret = 0;
801 unsigned int pp_size = le32_to_cpu(e->pp_size);
802 unsigned int data_size = le32_to_cpu(e->data_size);
803
804 page1 = alloc_page(GFP_KERNEL);
805 page2 = alloc_page(GFP_KERNEL);
806
807 if (!page1 || !page2) {
808 ret = -ENOMEM;
809 goto out;
810 }
811
812 r_sector_first = le64_to_cpu(e->data_sector) * (block_size >> 9);
813
814 if ((pp_size >> 9) < conf->chunk_sectors) {
815 if (pp_size > 0) {
816 data_disks = data_size / pp_size;
817 strip_sectors = pp_size >> 9;
818 } else {
819 data_disks = conf->raid_disks - conf->max_degraded;
820 strip_sectors = (data_size >> 9) / data_disks;
821 }
822 r_sector_last = r_sector_first +
823 (data_disks - 1) * conf->chunk_sectors +
824 strip_sectors;
825 } else {
826 data_disks = conf->raid_disks - conf->max_degraded;
827 strip_sectors = conf->chunk_sectors;
828 r_sector_last = r_sector_first + (data_size >> 9);
829 }
830
831 pr_debug("%s: array sector first: %llu last: %llu\n", __func__,
832 (unsigned long long)r_sector_first,
833 (unsigned long long)r_sector_last);
834
835 /* if start and end are 4k aligned, use a 4k block */
836 if (block_size == 512 &&
837 (r_sector_first & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0 &&
838 (r_sector_last & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0)
839 block_size = RAID5_STRIPE_SIZE(conf);
840
841 /* iterate through blocks in strip */
842 for (i = 0; i < strip_sectors; i += (block_size >> 9)) {
843 bool update_parity = false;
844 sector_t parity_sector;
845 struct md_rdev *parity_rdev;
846 struct stripe_head sh;
847 int disk;
848 int indent = 0;
849
850 pr_debug("%s:%*s iter %d start\n", __func__, indent, "", i);
851 indent += 2;
852
853 memset(page_address(page1), 0, PAGE_SIZE);
854
855 /* iterate through data member disks */
856 for (disk = 0; disk < data_disks; disk++) {
857 int dd_idx;
858 struct md_rdev *rdev;
859 sector_t sector;
860 sector_t r_sector = r_sector_first + i +
861 (disk * conf->chunk_sectors);
862
863 pr_debug("%s:%*s data member disk %d start\n",
864 __func__, indent, "", disk);
865 indent += 2;
866
867 if (r_sector >= r_sector_last) {
868 pr_debug("%s:%*s array sector %llu doesn't need parity update\n",
869 __func__, indent, "",
870 (unsigned long long)r_sector);
871 indent -= 2;
872 continue;
873 }
874
875 update_parity = true;
876
877 /* map raid sector to member disk */
878 sector = raid5_compute_sector(conf, r_sector, 0,
879 &dd_idx, NULL);
880 pr_debug("%s:%*s processing array sector %llu => data member disk %d, sector %llu\n",
881 __func__, indent, "",
882 (unsigned long long)r_sector, dd_idx,
883 (unsigned long long)sector);
884
885 /* Array has not started so rcu dereference is safe */
886 rdev = rcu_dereference_protected(
887 conf->disks[dd_idx].rdev, 1);
888 if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
889 sector >= rdev->recovery_offset)) {
890 pr_debug("%s:%*s data member disk %d missing\n",
891 __func__, indent, "", dd_idx);
892 update_parity = false;
893 break;
894 }
895
896 pr_debug("%s:%*s reading data member disk %pg sector %llu\n",
897 __func__, indent, "", rdev->bdev,
898 (unsigned long long)sector);
899 if (!sync_page_io(rdev, sector, block_size, page2,
900 REQ_OP_READ, false)) {
901 md_error(mddev, rdev);
902 pr_debug("%s:%*s read failed!\n", __func__,
903 indent, "");
904 ret = -EIO;
905 goto out;
906 }
907
908 ppl_xor(block_size, page1, page2);
909
910 indent -= 2;
911 }
912
913 if (!update_parity)
914 continue;
915
916 if (pp_size > 0) {
917 pr_debug("%s:%*s reading pp disk sector %llu\n",
918 __func__, indent, "",
919 (unsigned long long)(ppl_sector + i));
920 if (!sync_page_io(log->rdev,
921 ppl_sector - log->rdev->data_offset + i,
922 block_size, page2, REQ_OP_READ,
923 false)) {
924 pr_debug("%s:%*s read failed!\n", __func__,
925 indent, "");
926 md_error(mddev, log->rdev);
927 ret = -EIO;
928 goto out;
929 }
930
931 ppl_xor(block_size, page1, page2);
932 }
933
934 /* map raid sector to parity disk */
935 parity_sector = raid5_compute_sector(conf, r_sector_first + i,
936 0, &disk, &sh);
937 BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));
938
939 /* Array has not started so rcu dereference is safe */
940 parity_rdev = rcu_dereference_protected(
941 conf->disks[sh.pd_idx].rdev, 1);
942
943 BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
944 pr_debug("%s:%*s write parity at sector %llu, disk %pg\n",
945 __func__, indent, "",
946 (unsigned long long)parity_sector,
947 parity_rdev->bdev);
948 if (!sync_page_io(parity_rdev, parity_sector, block_size,
949 page1, REQ_OP_WRITE, false)) {
950 pr_debug("%s:%*s parity write error!\n", __func__,
951 indent, "");
952 md_error(mddev, parity_rdev);
953 ret = -EIO;
954 goto out;
955 }
956 }
957 out:
958 if (page1)
959 __free_page(page1);
960 if (page2)
961 __free_page(page2);
962 return ret;
963 }
964
965 static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
966 sector_t offset)
967 {
968 struct ppl_conf *ppl_conf = log->ppl_conf;
969 struct md_rdev *rdev = log->rdev;
970 struct mddev *mddev = rdev->mddev;
971 sector_t ppl_sector = rdev->ppl.sector + offset +
972 (PPL_HEADER_SIZE >> 9);
973 struct page *page;
974 int i;
975 int ret = 0;
976
977 page = alloc_page(GFP_KERNEL);
978 if (!page)
979 return -ENOMEM;
980
981 /* iterate through all PPL entries saved */
982 for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++) {
983 struct ppl_header_entry *e = &pplhdr->entries[i];
984 u32 pp_size = le32_to_cpu(e->pp_size);
985 sector_t sector = ppl_sector;
986 int ppl_entry_sectors = pp_size >> 9;
987 u32 crc, crc_stored;
988
989 pr_debug("%s: disk: %d entry: %d ppl_sector: %llu pp_size: %u\n",
990 __func__, rdev->raid_disk, i,
991 (unsigned long long)ppl_sector, pp_size);
992
993 crc = ~0;
994 crc_stored = le32_to_cpu(e->checksum);
995
996 /* read partial parity for this entry and calculate its checksum */
997 while (pp_size) {
998 int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;
999
1000 if (!sync_page_io(rdev, sector - rdev->data_offset,
1001 s, page, REQ_OP_READ, false)) {
1002 md_error(mddev, rdev);
1003 ret = -EIO;
1004 goto out;
1005 }
1006
1007 crc = crc32c_le(crc, page_address(page), s);
1008
1009 pp_size -= s;
1010 sector += s >> 9;
1011 }
1012
1013 crc = ~crc;
1014
1015 if (crc != crc_stored) {
1016 /*
1017 * Don't recover this entry if the checksum does not
1018 * match, but keep going and try to recover other
1019 * entries.
1020 */
1021 pr_debug("%s: ppl entry crc does not match: stored: 0x%x calculated: 0x%x\n",
1022 __func__, crc_stored, crc);
1023 ppl_conf->mismatch_count++;
1024 } else {
1025 ret = ppl_recover_entry(log, e, ppl_sector);
1026 if (ret)
1027 goto out;
1028 ppl_conf->recovered_entries++;
1029 }
1030
1031 ppl_sector += ppl_entry_sectors;
1032 }
1033
1034 /* flush the disk cache after recovery if necessary */
1035 ret = blkdev_issue_flush(rdev->bdev);
1036 out:
1037 __free_page(page);
1038 return ret;
1039 }
1040
1041 static int ppl_write_empty_header(struct ppl_log *log)
1042 {
1043 struct page *page;
1044 struct ppl_header *pplhdr;
1045 struct md_rdev *rdev = log->rdev;
1046 int ret = 0;
1047
1048 pr_debug("%s: disk: %d ppl_sector: %llu\n", __func__,
1049 rdev->raid_disk, (unsigned long long)rdev->ppl.sector);
1050
1051 page = alloc_page(GFP_NOIO | __GFP_ZERO);
1052 if (!page)
1053 return -ENOMEM;
1054
1055 pplhdr = page_address(page);
1056 /* zero out PPL space to avoid collision with old PPLs */
1057 blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
1058 log->rdev->ppl.size, GFP_NOIO, 0);
1059 memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
1060 pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
1061 pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));
1062
1063 if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
1064 PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
1065 REQ_FUA, false)) {
1066 md_error(rdev->mddev, rdev);
1067 ret = -EIO;
1068 }
1069
1070 __free_page(page);
1071 return ret;
1072 }
1073
1074 static int ppl_load_distributed(struct ppl_log *log)
1075 {
1076 struct ppl_conf *ppl_conf = log->ppl_conf;
1077 struct md_rdev *rdev = log->rdev;
1078 struct mddev *mddev = rdev->mddev;
1079 struct page *page, *page2;
1080 struct ppl_header *pplhdr = NULL, *prev_pplhdr = NULL;
1081 u32 crc, crc_stored;
1082 u32 signature;
1083 int ret = 0, i;
1084 sector_t pplhdr_offset = 0, prev_pplhdr_offset = 0;
1085
1086 pr_debug("%s: disk: %d\n", __func__, rdev->raid_disk);
1087 /* read PPL headers, find the most recent one */
1088 page = alloc_page(GFP_KERNEL);
1089 if (!page)
1090 return -ENOMEM;
1091
1092 page2 = alloc_page(GFP_KERNEL);
1093 if (!page2) {
1094 __free_page(page);
1095 return -ENOMEM;
1096 }
1097
1098 /* searching ppl area for latest ppl */
1099 while (pplhdr_offset < rdev->ppl.size - (PPL_HEADER_SIZE >> 9)) {
1100 if (!sync_page_io(rdev,
1101 rdev->ppl.sector - rdev->data_offset +
1102 pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
1103 false)) {
1104 md_error(mddev, rdev);
1105 ret = -EIO;
1106 /* if not able to read - don't recover any PPL */
1107 pplhdr = NULL;
1108 break;
1109 }
1110 pplhdr = page_address(page);
1111
1112 /* check header validity */
1113 crc_stored = le32_to_cpu(pplhdr->checksum);
1114 pplhdr->checksum = 0;
1115 crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE);
1116
1117 if (crc_stored != crc) {
1118 pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n",
1119 __func__, crc_stored, crc,
1120 (unsigned long long)pplhdr_offset);
1121 pplhdr = prev_pplhdr;
1122 pplhdr_offset = prev_pplhdr_offset;
1123 break;
1124 }
1125
1126 signature = le32_to_cpu(pplhdr->signature);
1127
1128 if (mddev->external) {
1129 /*
1130 * For external metadata the header signature is set and
1131 * validated in userspace.
1132 */
1133 ppl_conf->signature = signature;
1134 } else if (ppl_conf->signature != signature) {
1135 pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x (offset: %llu)\n",
1136 __func__, signature, ppl_conf->signature,
1137 (unsigned long long)pplhdr_offset);
1138 pplhdr = prev_pplhdr;
1139 pplhdr_offset = prev_pplhdr_offset;
1140 break;
1141 }
1142
1143 if (prev_pplhdr && le64_to_cpu(prev_pplhdr->generation) >
1144 le64_to_cpu(pplhdr->generation)) {
1145 /* previous was newest */
1146 pplhdr = prev_pplhdr;
1147 pplhdr_offset = prev_pplhdr_offset;
1148 break;
1149 }
1150
1151 prev_pplhdr_offset = pplhdr_offset;
1152 prev_pplhdr = pplhdr;
1153
1154 swap(page, page2);
1155
1156 /* calculate next potential ppl offset */
1157 for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++)
1158 pplhdr_offset +=
1159 le32_to_cpu(pplhdr->entries[i].pp_size) >> 9;
1160 pplhdr_offset += PPL_HEADER_SIZE >> 9;
1161 }
1162
1163 /* no valid ppl found */
1164 if (!pplhdr)
1165 ppl_conf->mismatch_count++;
1166 else
1167 pr_debug("%s: latest PPL found at offset: %llu, with generation: %llu\n",
1168 __func__, (unsigned long long)pplhdr_offset,
1169 le64_to_cpu(pplhdr->generation));
1170
1171 /* attempt to recover from log if we are starting a dirty array */
1172 if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
1173 ret = ppl_recover(log, pplhdr, pplhdr_offset);
1174
1175 /* write empty header if we are starting the array */
1176 if (!ret && !mddev->pers)
1177 ret = ppl_write_empty_header(log);
1178
1179 __free_page(page);
1180 __free_page(page2);
1181
1182 pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
1183 __func__, ret, ppl_conf->mismatch_count,
1184 ppl_conf->recovered_entries);
1185 return ret;
1186 }
1187
1188 static int ppl_load(struct ppl_conf *ppl_conf)
1189 {
1190 int ret = 0;
1191 u32 signature = 0;
1192 bool signature_set = false;
1193 int i;
1194
1195 for (i = 0; i < ppl_conf->count; i++) {
1196 struct ppl_log *log = &ppl_conf->child_logs[i];
1197
1198 /* skip missing drive */
1199 if (!log->rdev)
1200 continue;
1201
1202 ret = ppl_load_distributed(log);
1203 if (ret)
1204 break;
1205
1206 /*
1207 * For external metadata we can't check if the signature is
1208 * correct on a single drive, but we can check if it is the same
1209 * on all drives.
1210 */
1211 if (ppl_conf->mddev->external) {
1212 if (!signature_set) {
1213 signature = ppl_conf->signature;
1214 signature_set = true;
1215 } else if (signature != ppl_conf->signature) {
1216 pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n",
1217 mdname(ppl_conf->mddev));
1218 ret = -EINVAL;
1219 break;
1220 }
1221 }
1222 }
1223
1224 pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
1225 __func__, ret, ppl_conf->mismatch_count,
1226 ppl_conf->recovered_entries);
1227 return ret;
1228 }
1229
1230 static void __ppl_exit_log(struct ppl_conf *ppl_conf)
1231 {
1232 clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
1233 clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);
1234
1235 kfree(ppl_conf->child_logs);
1236
1237 bioset_exit(&ppl_conf->bs);
1238 bioset_exit(&ppl_conf->flush_bs);
1239 mempool_exit(&ppl_conf->io_pool);
1240 kmem_cache_destroy(ppl_conf->io_kc);
1241
1242 kfree(ppl_conf);
1243 }
1244
1245 void ppl_exit_log(struct r5conf *conf)
1246 {
1247 struct ppl_conf *ppl_conf = conf->log_private;
1248
1249 if (ppl_conf) {
1250 __ppl_exit_log(ppl_conf);
1251 conf->log_private = NULL;
1252 }
1253 }
1254
1255 static int ppl_validate_rdev(struct md_rdev *rdev)
1256 {
1257 int ppl_data_sectors;
1258 int ppl_size_new;
1259
1260 /*
1261 * The configured PPL size must be enough to store
1262 * the header and (at the very least) partial parity
1263 * for one stripe. Round it down to ensure the data
1264 * space is cleanly divisible by stripe size.
1265 */
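/*
 * For example, a 1 MiB PPL area is 2048 sectors: with 4k stripes
 * ppl_data_sectors = 2048 - 8 = 2040, which is already a multiple of
 * RAID5_STRIPE_SECTORS, so rdev->ppl.size stays 2048 sectors.
 */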
1266 ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9);
1267
1268 if (ppl_data_sectors > 0)
1269 ppl_data_sectors = rounddown(ppl_data_sectors,
1270 RAID5_STRIPE_SECTORS((struct r5conf *)rdev->mddev->private));
1271
1272 if (ppl_data_sectors <= 0) {
1273 pr_warn("md/raid:%s: PPL space too small on %pg\n",
1274 mdname(rdev->mddev), rdev->bdev);
1275 return -ENOSPC;
1276 }
1277
1278 ppl_size_new = ppl_data_sectors + (PPL_HEADER_SIZE >> 9);
1279
1280 if ((rdev->ppl.sector < rdev->data_offset &&
1281 rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
1282 (rdev->ppl.sector >= rdev->data_offset &&
1283 rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
1284 pr_warn("md/raid:%s: PPL space overlaps with data on %pg\n",
1285 mdname(rdev->mddev), rdev->bdev);
1286 return -EINVAL;
1287 }
1288
1289 if (!rdev->mddev->external &&
1290 ((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
1291 (rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
1292 pr_warn("md/raid:%s: PPL space overlaps with superblock on %pg\n",
1293 mdname(rdev->mddev), rdev->bdev);
1294 return -EINVAL;
1295 }
1296
1297 rdev->ppl.size = ppl_size_new;
1298
1299 return 0;
1300 }
1301
1302 static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
1303 {
1304 struct request_queue *q;
1305
1306 if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
1307 PPL_HEADER_SIZE) * 2) {
1308 log->use_multippl = true;
1309 set_bit(MD_HAS_MULTIPLE_PPLS,
1310 &log->ppl_conf->mddev->flags);
1311 log->entry_space = PPL_SPACE_SIZE;
1312 } else {
1313 log->use_multippl = false;
1314 log->entry_space = (log->rdev->ppl.size << 9) -
1315 PPL_HEADER_SIZE;
1316 }
1317 log->next_io_sector = rdev->ppl.sector;
1318
1319 q = bdev_get_queue(rdev->bdev);
1320 if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
1321 log->wb_cache_on = true;
1322 }
1323
1324 int ppl_init_log(struct r5conf *conf)
1325 {
1326 struct ppl_conf *ppl_conf;
1327 struct mddev *mddev = conf->mddev;
1328 int ret = 0;
1329 int max_disks;
1330 int i;
1331
1332 pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
1333 mdname(conf->mddev));
1334
1335 if (PAGE_SIZE != 4096)
1336 return -EINVAL;
1337
1338 if (mddev->level != 5) {
1339 pr_warn("md/raid:%s PPL is not compatible with raid level %d\n",
1340 mdname(mddev), mddev->level);
1341 return -EINVAL;
1342 }
1343
1344 if (mddev->bitmap_info.file || mddev->bitmap_info.offset) {
1345 pr_warn("md/raid:%s PPL is not compatible with bitmap\n",
1346 mdname(mddev));
1347 return -EINVAL;
1348 }
1349
1350 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
1351 pr_warn("md/raid:%s PPL is not compatible with journal\n",
1352 mdname(mddev));
1353 return -EINVAL;
1354 }
1355
1356 max_disks = sizeof_field(struct ppl_log, disk_flush_bitmap) *
1357 BITS_PER_BYTE;
1358 if (conf->raid_disks > max_disks) {
1359 pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
1360 mdname(mddev), max_disks);
1361 return -EINVAL;
1362 }
1363
1364 ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
1365 if (!ppl_conf)
1366 return -ENOMEM;
1367
1368 ppl_conf->mddev = mddev;
1369
1370 ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
1371 if (!ppl_conf->io_kc) {
1372 ret = -ENOMEM;
1373 goto err;
1374 }
1375
1376 ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc,
1377 ppl_io_pool_free, ppl_conf->io_kc);
1378 if (ret)
1379 goto err;
1380
1381 ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS);
1382 if (ret)
1383 goto err;
1384
1385 ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0);
1386 if (ret)
1387 goto err;
1388
1389 ppl_conf->count = conf->raid_disks;
1390 ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
1391 GFP_KERNEL);
1392 if (!ppl_conf->child_logs) {
1393 ret = -ENOMEM;
1394 goto err;
1395 }
1396
1397 atomic64_set(&ppl_conf->seq, 0);
1398 INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
1399 spin_lock_init(&ppl_conf->no_mem_stripes_lock);
1400
1401 if (!mddev->external) {
1402 ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
1403 ppl_conf->block_size = 512;
1404 } else {
1405 ppl_conf->block_size = queue_logical_block_size(mddev->queue);
1406 }
1407
1408 for (i = 0; i < ppl_conf->count; i++) {
1409 struct ppl_log *log = &ppl_conf->child_logs[i];
1410 /* Array has not started so rcu dereference is safe */
1411 struct md_rdev *rdev =
1412 rcu_dereference_protected(conf->disks[i].rdev, 1);
1413
1414 mutex_init(&log->io_mutex);
1415 spin_lock_init(&log->io_list_lock);
1416 INIT_LIST_HEAD(&log->io_list);
1417
1418 log->ppl_conf = ppl_conf;
1419 log->rdev = rdev;
1420
1421 if (rdev) {
1422 ret = ppl_validate_rdev(rdev);
1423 if (ret)
1424 goto err;
1425
1426 ppl_init_child_log(log, rdev);
1427 }
1428 }
1429
1430 /* load and possibly recover the logs from the member disks */
1431 ret = ppl_load(ppl_conf);
1432
1433 if (ret) {
1434 goto err;
1435 } else if (!mddev->pers && mddev->recovery_cp == 0 &&
1436 ppl_conf->recovered_entries > 0 &&
1437 ppl_conf->mismatch_count == 0) {
1438 /*
1439 * If we are starting a dirty array and the recovery succeeds
1440 * without any issues, set the array as clean.
1441 */
1442 mddev->recovery_cp = MaxSector;
1443 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
1444 } else if (mddev->pers && ppl_conf->mismatch_count > 0) {
1445 /* no mismatch allowed when enabling PPL for a running array */
1446 ret = -EINVAL;
1447 goto err;
1448 }
1449
1450 conf->log_private = ppl_conf;
1451 set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
1452
1453 return 0;
1454 err:
1455 __ppl_exit_log(ppl_conf);
1456 return ret;
1457 }
1458
1459 int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
1460 {
1461 struct ppl_conf *ppl_conf = conf->log_private;
1462 struct ppl_log *log;
1463 int ret = 0;
1464
1465 if (!rdev)
1466 return -EINVAL;
1467
1468 pr_debug("%s: disk: %d operation: %s dev: %pg\n",
1469 __func__, rdev->raid_disk, add ? "add" : "remove",
1470 rdev->bdev);
1471
1472 if (rdev->raid_disk < 0)
1473 return 0;
1474
1475 if (rdev->raid_disk >= ppl_conf->count)
1476 return -ENODEV;
1477
1478 log = &ppl_conf->child_logs[rdev->raid_disk];
1479
1480 mutex_lock(&log->io_mutex);
1481 if (add) {
1482 ret = ppl_validate_rdev(rdev);
1483 if (!ret) {
1484 log->rdev = rdev;
1485 ret = ppl_write_empty_header(log);
1486 ppl_init_child_log(log, rdev);
1487 }
1488 } else {
1489 log->rdev = NULL;
1490 }
1491 mutex_unlock(&log->io_mutex);
1492
1493 return ret;
1494 }
1495
1496 static ssize_t
1497 ppl_write_hint_show(struct mddev *mddev, char *buf)
1498 {
1499 return sprintf(buf, "%d\n", 0);
1500 }
1501
1502 static ssize_t
1503 ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
1504 {
1505 struct r5conf *conf;
1506 int err = 0;
1507 unsigned short new;
1508
1509 if (len >= PAGE_SIZE)
1510 return -EINVAL;
1511 if (kstrtou16(page, 10, &new))
1512 return -EINVAL;
1513
1514 err = mddev_lock(mddev);
1515 if (err)
1516 return err;
1517
1518 conf = mddev->private;
1519 if (!conf)
1520 err = -ENODEV;
1521 else if (!raid5_has_ppl(conf) || !conf->log_private)
1522 err = -EINVAL;
1523
1524 mddev_unlock(mddev);
1525
1526 return err ?: len;
1527 }
1528
1529 struct md_sysfs_entry
1530 ppl_write_hint = __ATTR(ppl_write_hint, S_IRUGO | S_IWUSR,
1531 ppl_write_hint_show,
1532 ppl_write_hint_store);
1533