1 /*
2 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
16 *
17 */
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/bio.h>
21 #include <linux/blkdev.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24 #include <linux/kernel.h>
25 #include <linux/export.h>
26 #include <linux/mempool.h>
27 #include <linux/workqueue.h>
28 #include <scsi/sg.h> /* for struct sg_iovec */
29
30 #include <trace/events/block.h>
31
32 /*
33 * Test patch to inline a certain number of bi_io_vec's inside the bio
34 * itself, to shrink a bio data allocation from two mempool calls to one
35 */
36 #define BIO_INLINE_VECS 4
37
38 static mempool_t *bio_split_pool __read_mostly;
39
40 /*
41 * if you change this list, also change bvec_alloc or things will
42 * break badly! cannot be bigger than what you can fit into an
43 * unsigned short
44 */
45 #define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
46 static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
47 BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
48 };
49 #undef BV
50
51 /*
52 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
53 * IO code that does not need private memory pools.
54 */
55 struct bio_set *fs_bio_set;
56
57 /*
58 * Our slab pool management
59 */
60 struct bio_slab {
61 struct kmem_cache *slab;
62 unsigned int slab_ref;
63 unsigned int slab_size;
64 char name[8];
65 };
66 static DEFINE_MUTEX(bio_slab_lock);
67 static struct bio_slab *bio_slabs;
68 static unsigned int bio_slab_nr, bio_slab_max;
69
70 static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
71 {
72 unsigned int sz = sizeof(struct bio) + extra_size;
73 struct kmem_cache *slab = NULL;
74 struct bio_slab *bslab;
75 unsigned int i, entry = -1;
76
77 mutex_lock(&bio_slab_lock);
78
79 i = 0;
80 while (i < bio_slab_nr) {
81 bslab = &bio_slabs[i];
82
83 if (!bslab->slab && entry == -1)
84 entry = i;
85 else if (bslab->slab_size == sz) {
86 slab = bslab->slab;
87 bslab->slab_ref++;
88 break;
89 }
90 i++;
91 }
92
93 if (slab)
94 goto out_unlock;
95
96 if (bio_slab_nr == bio_slab_max && entry == -1) {
97 bio_slab_max <<= 1;
98 bio_slabs = krealloc(bio_slabs,
99 bio_slab_max * sizeof(struct bio_slab),
100 GFP_KERNEL);
101 if (!bio_slabs)
102 goto out_unlock;
103 }
104 if (entry == -1)
105 entry = bio_slab_nr++;
106
107 bslab = &bio_slabs[entry];
108
109 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
110 slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
111 if (!slab)
112 goto out_unlock;
113
114 printk(KERN_INFO "bio: create slab <%s> at %d\n", bslab->name, entry);
115 bslab->slab = slab;
116 bslab->slab_ref = 1;
117 bslab->slab_size = sz;
118 out_unlock:
119 mutex_unlock(&bio_slab_lock);
120 return slab;
121 }
122
123 static void bio_put_slab(struct bio_set *bs)
124 {
125 struct bio_slab *bslab = NULL;
126 unsigned int i;
127
128 mutex_lock(&bio_slab_lock);
129
130 for (i = 0; i < bio_slab_nr; i++) {
131 if (bs->bio_slab == bio_slabs[i].slab) {
132 bslab = &bio_slabs[i];
133 break;
134 }
135 }
136
137 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
138 goto out;
139
140 WARN_ON(!bslab->slab_ref);
141
142 if (--bslab->slab_ref)
143 goto out;
144
145 kmem_cache_destroy(bslab->slab);
146 bslab->slab = NULL;
147
148 out:
149 mutex_unlock(&bio_slab_lock);
150 }
151
152 unsigned int bvec_nr_vecs(unsigned short idx)
153 {
154 return bvec_slabs[idx].nr_vecs;
155 }
156
157 void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx)
158 {
159 BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
160
161 if (idx == BIOVEC_MAX_IDX)
162 mempool_free(bv, bs->bvec_pool);
163 else {
164 struct biovec_slab *bvs = bvec_slabs + idx;
165
166 kmem_cache_free(bvs->slab, bv);
167 }
168 }
169
170 struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
171 struct bio_set *bs)
172 {
173 struct bio_vec *bvl;
174
175 /*
176 * see comment near bvec_array define!
177 */
178 switch (nr) {
179 case 1:
180 *idx = 0;
181 break;
182 case 2 ... 4:
183 *idx = 1;
184 break;
185 case 5 ... 16:
186 *idx = 2;
187 break;
188 case 17 ... 64:
189 *idx = 3;
190 break;
191 case 65 ... 128:
192 *idx = 4;
193 break;
194 case 129 ... BIO_MAX_PAGES:
195 *idx = 5;
196 break;
197 default:
198 return NULL;
199 }
200
201 /*
202 	 * idx now points to the pool we want to allocate from. Only the
203 	 * BIOVEC_MAX_IDX pool is mempool backed.
204 */
205 if (*idx == BIOVEC_MAX_IDX) {
206 fallback:
207 bvl = mempool_alloc(bs->bvec_pool, gfp_mask);
208 } else {
209 struct biovec_slab *bvs = bvec_slabs + *idx;
210 gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
211
212 /*
213 * Make this allocation restricted and don't dump info on
214 * allocation failures, since we'll fallback to the mempool
215 * in case of failure.
216 */
217 __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
218
219 /*
220 * Try a slab allocation. If this fails and __GFP_WAIT
221 		 * is set, retry with the BIOVEC_MAX_IDX mempool
222 */
223 bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
224 if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
225 *idx = BIOVEC_MAX_IDX;
226 goto fallback;
227 }
228 }
229
230 return bvl;
231 }
232
233 void bio_free(struct bio *bio, struct bio_set *bs)
234 {
235 void *p;
236
237 if (bio_has_allocated_vec(bio))
238 bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
239
240 if (bio_integrity(bio))
241 bio_integrity_free(bio, bs);
242
243 /*
244 * If we have front padding, adjust the bio pointer before freeing
245 */
246 p = bio;
247 if (bs->front_pad)
248 p -= bs->front_pad;
249
250 mempool_free(p, bs->bio_pool);
251 }
252 EXPORT_SYMBOL(bio_free);
253
254 void bio_init(struct bio *bio)
255 {
256 memset(bio, 0, sizeof(*bio));
257 bio->bi_flags = 1 << BIO_UPTODATE;
258 atomic_set(&bio->bi_cnt, 1);
259 }
260 EXPORT_SYMBOL(bio_init);
261
262 /**
263 * bio_alloc_bioset - allocate a bio for I/O
264 * @gfp_mask: the GFP_ mask given to the slab allocator
265 * @nr_iovecs: number of iovecs to pre-allocate
266 * @bs: the bio_set to allocate from.
267 *
268 * Description:
269 * bio_alloc_bioset will try its own mempool to satisfy the allocation.
270 * If %__GFP_WAIT is set then we will block on the internal pool waiting
271 * for a &struct bio to become free.
272 *
273 * Note that the caller must set ->bi_destructor on successful return
274 * of a bio, to do the appropriate freeing of the bio once the reference
275 * count drops to zero.
276 **/
277 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
278 {
279 unsigned long idx = BIO_POOL_NONE;
280 struct bio_vec *bvl = NULL;
281 struct bio *bio;
282 void *p;
283
284 p = mempool_alloc(bs->bio_pool, gfp_mask);
285 if (unlikely(!p))
286 return NULL;
287 bio = p + bs->front_pad;
288
289 bio_init(bio);
290
291 if (unlikely(!nr_iovecs))
292 goto out_set;
293
294 if (nr_iovecs <= BIO_INLINE_VECS) {
295 bvl = bio->bi_inline_vecs;
296 nr_iovecs = BIO_INLINE_VECS;
297 } else {
298 bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
299 if (unlikely(!bvl))
300 goto err_free;
301
302 nr_iovecs = bvec_nr_vecs(idx);
303 }
304 out_set:
305 bio->bi_flags |= idx << BIO_POOL_OFFSET;
306 bio->bi_max_vecs = nr_iovecs;
307 bio->bi_io_vec = bvl;
308 return bio;
309
310 err_free:
311 mempool_free(p, bs->bio_pool);
312 return NULL;
313 }
314 EXPORT_SYMBOL(bio_alloc_bioset);
315
316 static void bio_fs_destructor(struct bio *bio)
317 {
318 bio_free(bio, fs_bio_set);
319 }
320
321 /**
322 * bio_alloc - allocate a new bio, memory pool backed
323 * @gfp_mask: allocation mask to use
324 * @nr_iovecs: number of iovecs
325 *
326 * bio_alloc will allocate a bio and associated bio_vec array that can hold
327 * at least @nr_iovecs entries. Allocations will be done from the
328 * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
329 *
330 * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
331 * a bio. This is due to the mempool guarantees. To make this work, callers
332 * must never allocate more than 1 bio at a time from this pool. Callers
333 * that need to allocate more than 1 bio must always submit the previously
334 * allocated bio for IO before attempting to allocate a new one. Failure to
335 * do so can cause livelocks under memory pressure.
336 *
337 * RETURNS:
338 * Pointer to new bio on success, NULL on failure.
339 */
340 struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
341 {
342 struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
343
344 if (bio)
345 bio->bi_destructor = bio_fs_destructor;
346
347 return bio;
348 }
349 EXPORT_SYMBOL(bio_alloc);
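/*
 * Illustrative sketch (not taken from this file) of the rule described above:
 * with __GFP_WAIT the mempool only guarantees progress if a caller holds at
 * most one bio from fs_bio_set at a time, so each bio must be submitted before
 * the next one is allocated.
 *
 *	while (more_io_to_issue) {		// hypothetical condition
 *		struct bio *bio = bio_alloc(GFP_NOIO, nr_vecs);
 *
 *		setup_bio_pages(bio);		// hypothetical helper
 *		submit_bio(rw, bio);		// hand it back before the next alloc
 *	}
 */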
350
351 static void bio_kmalloc_destructor(struct bio *bio)
352 {
353 if (bio_integrity(bio))
354 bio_integrity_free(bio, fs_bio_set);
355 kfree(bio);
356 }
357
358 /**
359 * bio_kmalloc - allocate a bio for I/O using kmalloc()
360 * @gfp_mask: the GFP_ mask given to the slab allocator
361 * @nr_iovecs: number of iovecs to pre-allocate
362 *
363 * Description:
364 * Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask contains
365 * %__GFP_WAIT, the allocation is guaranteed to succeed.
366 *
367 **/
368 struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
369 {
370 struct bio *bio;
371
372 if (nr_iovecs > UIO_MAXIOV)
373 return NULL;
374
375 bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
376 gfp_mask);
377 if (unlikely(!bio))
378 return NULL;
379
380 bio_init(bio);
381 bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
382 bio->bi_max_vecs = nr_iovecs;
383 bio->bi_io_vec = bio->bi_inline_vecs;
384 bio->bi_destructor = bio_kmalloc_destructor;
385
386 return bio;
387 }
388 EXPORT_SYMBOL(bio_kmalloc);
389
390 void zero_fill_bio(struct bio *bio)
391 {
392 unsigned long flags;
393 struct bio_vec *bv;
394 int i;
395
396 bio_for_each_segment(bv, bio, i) {
397 char *data = bvec_kmap_irq(bv, &flags);
398 memset(data, 0, bv->bv_len);
399 flush_dcache_page(bv->bv_page);
400 bvec_kunmap_irq(data, &flags);
401 }
402 }
403 EXPORT_SYMBOL(zero_fill_bio);
404
405 /**
406 * bio_put - release a reference to a bio
407 * @bio: bio to release reference to
408 *
409 * Description:
410 * Put a reference to a &struct bio, either one you have gotten with
411 * bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
412 **/
413 void bio_put(struct bio *bio)
414 {
415 BIO_BUG_ON(!atomic_read(&bio->bi_cnt));
416
417 /*
418 * last put frees it
419 */
420 if (atomic_dec_and_test(&bio->bi_cnt)) {
421 bio->bi_next = NULL;
422 bio->bi_destructor(bio);
423 }
424 }
425 EXPORT_SYMBOL(bio_put);
426
427 inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
428 {
429 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
430 blk_recount_segments(q, bio);
431
432 return bio->bi_phys_segments;
433 }
434 EXPORT_SYMBOL(bio_phys_segments);
435
436 /**
437 * __bio_clone - clone a bio
438 * @bio: destination bio
439 * @bio_src: bio to clone
440 *
441 * Clone a &bio. Caller will own the returned bio, but not
442 * the actual data it points to. Reference count of returned
443 * bio will be one.
444 */
445 void __bio_clone(struct bio *bio, struct bio *bio_src)
446 {
447 memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
448 bio_src->bi_max_vecs * sizeof(struct bio_vec));
449
450 /*
451 * most users will be overriding ->bi_bdev with a new target,
452 * so we don't set nor calculate new physical/hw segment counts here
453 */
454 bio->bi_sector = bio_src->bi_sector;
455 bio->bi_bdev = bio_src->bi_bdev;
456 bio->bi_flags |= 1 << BIO_CLONED;
457 bio->bi_rw = bio_src->bi_rw;
458 bio->bi_vcnt = bio_src->bi_vcnt;
459 bio->bi_size = bio_src->bi_size;
460 bio->bi_idx = bio_src->bi_idx;
461 }
462 EXPORT_SYMBOL(__bio_clone);
463
464 /**
465 * bio_clone - clone a bio
466 * @bio: bio to clone
467 * @gfp_mask: allocation priority
468 *
469 * Like __bio_clone, only also allocates the returned bio
470 */
471 struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
472 {
473 struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
474
475 if (!b)
476 return NULL;
477
478 b->bi_destructor = bio_fs_destructor;
479 __bio_clone(b, bio);
480
481 if (bio_integrity(bio)) {
482 int ret;
483
484 ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set);
485
486 if (ret < 0) {
487 bio_put(b);
488 return NULL;
489 }
490 }
491
492 return b;
493 }
494 EXPORT_SYMBOL(bio_clone);
495
496 /**
497 * bio_get_nr_vecs - return approx number of vecs
498 * @bdev: I/O target
499 *
500 * Return the approximate number of pages we can send to this target.
501 * There's no guarantee that you will be able to fit this number of pages
502  * into a bio; it does not account for dynamic restrictions that vary
503  * with the offset.
504 */
505 int bio_get_nr_vecs(struct block_device *bdev)
506 {
507 struct request_queue *q = bdev_get_queue(bdev);
508 int nr_pages;
509
510 nr_pages = min_t(unsigned,
511 queue_max_segments(q),
512 queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
513
514 return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
515
516 }
517 EXPORT_SYMBOL(bio_get_nr_vecs);
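/*
 * A typical pairing, sketched here as an assumption rather than a rule from
 * this file: use bio_get_nr_vecs() as an upper bound when sizing a new bio,
 * and still treat bio_add_page() refusals as "submit and start a fresh bio".
 *
 *	int nr_vecs = bio_get_nr_vecs(bdev);
 *	struct bio *bio = bio_alloc(GFP_NOFS, nr_vecs);
 */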
518
519 static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
520 *page, unsigned int len, unsigned int offset,
521 unsigned short max_sectors)
522 {
523 int retried_segments = 0;
524 struct bio_vec *bvec;
525
526 /*
527 * cloned bio must not modify vec list
528 */
529 if (unlikely(bio_flagged(bio, BIO_CLONED)))
530 return 0;
531
532 if (((bio->bi_size + len) >> 9) > max_sectors)
533 return 0;
534
535 /*
536 * For filesystems with a blocksize smaller than the pagesize
537 * we will often be called with the same page as last time and
538 * a consecutive offset. Optimize this special case.
539 */
540 if (bio->bi_vcnt > 0) {
541 struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
542
543 if (page == prev->bv_page &&
544 offset == prev->bv_offset + prev->bv_len) {
545 unsigned int prev_bv_len = prev->bv_len;
546 prev->bv_len += len;
547
548 if (q->merge_bvec_fn) {
549 struct bvec_merge_data bvm = {
550 /* prev_bvec is already charged in
551 bi_size, discharge it in order to
552 simulate merging updated prev_bvec
553 as new bvec. */
554 .bi_bdev = bio->bi_bdev,
555 .bi_sector = bio->bi_sector,
556 .bi_size = bio->bi_size - prev_bv_len,
557 .bi_rw = bio->bi_rw,
558 };
559
560 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {
561 prev->bv_len -= len;
562 return 0;
563 }
564 }
565
566 goto done;
567 }
568 }
569
570 if (bio->bi_vcnt >= bio->bi_max_vecs)
571 return 0;
572
573 /*
574 * we might lose a segment or two here, but rather that than
575 * make this too complex.
576 */
577
578 while (bio->bi_phys_segments >= queue_max_segments(q)) {
579
580 if (retried_segments)
581 return 0;
582
583 retried_segments = 1;
584 blk_recount_segments(q, bio);
585 }
586
587 /*
588 * setup the new entry, we might clear it again later if we
589 * cannot add the page
590 */
591 bvec = &bio->bi_io_vec[bio->bi_vcnt];
592 bvec->bv_page = page;
593 bvec->bv_len = len;
594 bvec->bv_offset = offset;
595
596 /*
597 * if queue has other restrictions (eg varying max sector size
598 * depending on offset), it can specify a merge_bvec_fn in the
599 * queue to get further control
600 */
601 if (q->merge_bvec_fn) {
602 struct bvec_merge_data bvm = {
603 .bi_bdev = bio->bi_bdev,
604 .bi_sector = bio->bi_sector,
605 .bi_size = bio->bi_size,
606 .bi_rw = bio->bi_rw,
607 };
608
609 /*
610 * merge_bvec_fn() returns number of bytes it can accept
611 * at this offset
612 */
613 if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) {
614 bvec->bv_page = NULL;
615 bvec->bv_len = 0;
616 bvec->bv_offset = 0;
617 return 0;
618 }
619 }
620
621 /* If we may be able to merge these biovecs, force a recount */
622 if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
623 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
624
625 bio->bi_vcnt++;
626 bio->bi_phys_segments++;
627 done:
628 bio->bi_size += len;
629 return len;
630 }
631
632 /**
633 * bio_add_pc_page - attempt to add page to bio
634 * @q: the target queue
635 * @bio: destination bio
636 * @page: page to add
637 * @len: vec entry length
638 * @offset: vec entry offset
639 *
640 * Attempt to add a page to the bio_vec maplist. This can fail for a
641 * number of reasons, such as the bio being full or target block device
642  * limitations. The target block device must allow bios up to PAGE_SIZE,
643 * so it is always possible to add a single page to an empty bio.
644 *
645 * This should only be used by REQ_PC bios.
646 */
647 int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
648 unsigned int len, unsigned int offset)
649 {
650 return __bio_add_page(q, bio, page, len, offset,
651 queue_max_hw_sectors(q));
652 }
653 EXPORT_SYMBOL(bio_add_pc_page);
654
655 /**
656 * bio_add_page - attempt to add page to bio
657 * @bio: destination bio
658 * @page: page to add
659 * @len: vec entry length
660 * @offset: vec entry offset
661 *
662 * Attempt to add a page to the bio_vec maplist. This can fail for a
663 * number of reasons, such as the bio being full or target block device
664  * limitations. The target block device must allow bios up to PAGE_SIZE,
665 * so it is always possible to add a single page to an empty bio.
666 */
667 int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
668 unsigned int offset)
669 {
670 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
671 return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
672 }
673 EXPORT_SYMBOL(bio_add_page);
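/*
 * Hedged usage sketch: bio_add_page() returns the number of bytes actually
 * added, so the common filesystem pattern is to submit the current bio and
 * retry the page in a fresh one whenever it no longer fits.
 *
 *	if (bio_add_page(bio, page, len, offset) < len) {
 *		submit_bio(rw, bio);
 *		bio = bio_alloc(GFP_NOFS, bio_get_nr_vecs(bdev));
 *		// ... add the same page to the new bio instead ...
 *	}
 */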
674
675 struct bio_map_data {
676 struct bio_vec *iovecs;
677 struct sg_iovec *sgvecs;
678 int nr_sgvecs;
679 int is_our_pages;
680 };
681
682 static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
683 struct sg_iovec *iov, int iov_count,
684 int is_our_pages)
685 {
686 memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
687 memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
688 bmd->nr_sgvecs = iov_count;
689 bmd->is_our_pages = is_our_pages;
690 bio->bi_private = bmd;
691 }
692
693 static void bio_free_map_data(struct bio_map_data *bmd)
694 {
695 kfree(bmd->iovecs);
696 kfree(bmd->sgvecs);
697 kfree(bmd);
698 }
699
700 static struct bio_map_data *bio_alloc_map_data(int nr_segs,
701 unsigned int iov_count,
702 gfp_t gfp_mask)
703 {
704 struct bio_map_data *bmd;
705
706 if (iov_count > UIO_MAXIOV)
707 return NULL;
708
709 bmd = kmalloc(sizeof(*bmd), gfp_mask);
710 if (!bmd)
711 return NULL;
712
713 bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
714 if (!bmd->iovecs) {
715 kfree(bmd);
716 return NULL;
717 }
718
719 bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
720 if (bmd->sgvecs)
721 return bmd;
722
723 kfree(bmd->iovecs);
724 kfree(bmd);
725 return NULL;
726 }
727
728 static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
729 struct sg_iovec *iov, int iov_count,
730 int to_user, int from_user, int do_free_page)
731 {
732 int ret = 0, i;
733 struct bio_vec *bvec;
734 int iov_idx = 0;
735 unsigned int iov_off = 0;
736
737 __bio_for_each_segment(bvec, bio, i, 0) {
738 char *bv_addr = page_address(bvec->bv_page);
739 unsigned int bv_len = iovecs[i].bv_len;
740
741 while (bv_len && iov_idx < iov_count) {
742 unsigned int bytes;
743 char __user *iov_addr;
744
745 bytes = min_t(unsigned int,
746 iov[iov_idx].iov_len - iov_off, bv_len);
747 iov_addr = iov[iov_idx].iov_base + iov_off;
748
749 if (!ret) {
750 if (to_user)
751 ret = copy_to_user(iov_addr, bv_addr,
752 bytes);
753
754 if (from_user)
755 ret = copy_from_user(bv_addr, iov_addr,
756 bytes);
757
758 if (ret)
759 ret = -EFAULT;
760 }
761
762 bv_len -= bytes;
763 bv_addr += bytes;
764 iov_addr += bytes;
765 iov_off += bytes;
766
767 if (iov[iov_idx].iov_len == iov_off) {
768 iov_idx++;
769 iov_off = 0;
770 }
771 }
772
773 if (do_free_page)
774 __free_page(bvec->bv_page);
775 }
776
777 return ret;
778 }
779
780 /**
781 * bio_uncopy_user - finish previously mapped bio
782 * @bio: bio being terminated
783 *
784 * Free pages allocated from bio_copy_user() and write back data
785 * to user space in case of a read.
786 */
787 int bio_uncopy_user(struct bio *bio)
788 {
789 struct bio_map_data *bmd = bio->bi_private;
790 struct bio_vec *bvec;
791 int ret = 0, i;
792
793 if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
794 /*
795 * if we're in a workqueue, the request is orphaned, so
796 * don't copy into a random user address space, just free.
797 */
798 if (current->mm)
799 ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
800 bmd->nr_sgvecs, bio_data_dir(bio) == READ,
801 0, bmd->is_our_pages);
802 else if (bmd->is_our_pages)
803 __bio_for_each_segment(bvec, bio, i, 0)
804 __free_page(bvec->bv_page);
805 }
806 bio_free_map_data(bmd);
807 bio_put(bio);
808 return ret;
809 }
810 EXPORT_SYMBOL(bio_uncopy_user);
811
812 /**
813 * bio_copy_user_iov - copy user data to bio
814 * @q: destination block queue
815 * @map_data: pointer to the rq_map_data holding pages (if necessary)
816 * @iov: the iovec.
817 * @iov_count: number of elements in the iovec
818 * @write_to_vm: bool indicating writing to pages or not
819 * @gfp_mask: memory allocation flags
820 *
821 * Prepares and returns a bio for indirect user io, bouncing data
822  * to/from kernel pages as necessary. Must be paired with a
823  * call to bio_uncopy_user() on io completion.
824 */
825 struct bio *bio_copy_user_iov(struct request_queue *q,
826 struct rq_map_data *map_data,
827 struct sg_iovec *iov, int iov_count,
828 int write_to_vm, gfp_t gfp_mask)
829 {
830 struct bio_map_data *bmd;
831 struct bio_vec *bvec;
832 struct page *page;
833 struct bio *bio;
834 int i, ret;
835 int nr_pages = 0;
836 unsigned int len = 0;
837 unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;
838
839 for (i = 0; i < iov_count; i++) {
840 unsigned long uaddr;
841 unsigned long end;
842 unsigned long start;
843
844 uaddr = (unsigned long)iov[i].iov_base;
845 end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
846 start = uaddr >> PAGE_SHIFT;
847
848 /*
849 * Overflow, abort
850 */
851 if (end < start)
852 return ERR_PTR(-EINVAL);
853
854 nr_pages += end - start;
855 len += iov[i].iov_len;
856 }
857
858 if (offset)
859 nr_pages++;
860
861 bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
862 if (!bmd)
863 return ERR_PTR(-ENOMEM);
864
865 ret = -ENOMEM;
866 bio = bio_kmalloc(gfp_mask, nr_pages);
867 if (!bio)
868 goto out_bmd;
869
870 if (!write_to_vm)
871 bio->bi_rw |= REQ_WRITE;
872
873 ret = 0;
874
875 if (map_data) {
876 nr_pages = 1 << map_data->page_order;
877 i = map_data->offset / PAGE_SIZE;
878 }
879 while (len) {
880 unsigned int bytes = PAGE_SIZE;
881
882 bytes -= offset;
883
884 if (bytes > len)
885 bytes = len;
886
887 if (map_data) {
888 if (i == map_data->nr_entries * nr_pages) {
889 ret = -ENOMEM;
890 break;
891 }
892
893 page = map_data->pages[i / nr_pages];
894 page += (i % nr_pages);
895
896 i++;
897 } else {
898 page = alloc_page(q->bounce_gfp | gfp_mask);
899 if (!page) {
900 ret = -ENOMEM;
901 break;
902 }
903 }
904
905 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
906 break;
907
908 len -= bytes;
909 offset = 0;
910 }
911
912 if (ret)
913 goto cleanup;
914
915 /*
916 * success
917 */
918 if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
919 (map_data && map_data->from_user)) {
920 ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0);
921 if (ret)
922 goto cleanup;
923 }
924
925 bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
926 return bio;
927 cleanup:
928 if (!map_data)
929 bio_for_each_segment(bvec, bio, i)
930 __free_page(bvec->bv_page);
931
932 bio_put(bio);
933 out_bmd:
934 bio_free_map_data(bmd);
935 return ERR_PTR(ret);
936 }
937
938 /**
939 * bio_copy_user - copy user data to bio
940 * @q: destination block queue
941 * @map_data: pointer to the rq_map_data holding pages (if necessary)
942 * @uaddr: start of user address
943 * @len: length in bytes
944 * @write_to_vm: bool indicating writing to pages or not
945 * @gfp_mask: memory allocation flags
946 *
947 * Prepares and returns a bio for indirect user io, bouncing data
948  * to/from kernel pages as necessary. Must be paired with a
949  * call to bio_uncopy_user() on io completion.
950 */
951 struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
952 unsigned long uaddr, unsigned int len,
953 int write_to_vm, gfp_t gfp_mask)
954 {
955 struct sg_iovec iov;
956
957 iov.iov_base = (void __user *)uaddr;
958 iov.iov_len = len;
959
960 return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
961 }
962 EXPORT_SYMBOL(bio_copy_user);
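/*
 * Minimal sketch, assuming a caller that waits for completion: a bio built by
 * bio_copy_user() must always be finished with bio_uncopy_user(), which copies
 * bounce pages back to user space for reads and frees them.
 *
 *	bio = bio_copy_user(q, NULL, uaddr, len, write_to_vm, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	// submit the bio and wait for it to complete (driver-specific)
 *	ret = bio_uncopy_user(bio);
 */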
963
964 static struct bio *__bio_map_user_iov(struct request_queue *q,
965 struct block_device *bdev,
966 struct sg_iovec *iov, int iov_count,
967 int write_to_vm, gfp_t gfp_mask)
968 {
969 int i, j;
970 int nr_pages = 0;
971 struct page **pages;
972 struct bio *bio;
973 int cur_page = 0;
974 int ret, offset;
975
976 for (i = 0; i < iov_count; i++) {
977 unsigned long uaddr = (unsigned long)iov[i].iov_base;
978 unsigned long len = iov[i].iov_len;
979 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
980 unsigned long start = uaddr >> PAGE_SHIFT;
981
982 /*
983 * Overflow, abort
984 */
985 if (end < start)
986 return ERR_PTR(-EINVAL);
987
988 nr_pages += end - start;
989 /*
990 * buffer must be aligned to at least hardsector size for now
991 */
992 if (uaddr & queue_dma_alignment(q))
993 return ERR_PTR(-EINVAL);
994 }
995
996 if (!nr_pages)
997 return ERR_PTR(-EINVAL);
998
999 bio = bio_kmalloc(gfp_mask, nr_pages);
1000 if (!bio)
1001 return ERR_PTR(-ENOMEM);
1002
1003 ret = -ENOMEM;
1004 pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
1005 if (!pages)
1006 goto out;
1007
1008 for (i = 0; i < iov_count; i++) {
1009 unsigned long uaddr = (unsigned long)iov[i].iov_base;
1010 unsigned long len = iov[i].iov_len;
1011 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1012 unsigned long start = uaddr >> PAGE_SHIFT;
1013 const int local_nr_pages = end - start;
1014 const int page_limit = cur_page + local_nr_pages;
1015
1016 ret = get_user_pages_fast(uaddr, local_nr_pages,
1017 write_to_vm, &pages[cur_page]);
1018 if (ret < local_nr_pages) {
1019 ret = -EFAULT;
1020 goto out_unmap;
1021 }
1022
1023 offset = uaddr & ~PAGE_MASK;
1024 for (j = cur_page; j < page_limit; j++) {
1025 unsigned int bytes = PAGE_SIZE - offset;
1026
1027 if (len <= 0)
1028 break;
1029
1030 if (bytes > len)
1031 bytes = len;
1032
1033 /*
1034 * sorry...
1035 */
1036 if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
1037 bytes)
1038 break;
1039
1040 len -= bytes;
1041 offset = 0;
1042 }
1043
1044 cur_page = j;
1045 /*
1046 * release the pages we didn't map into the bio, if any
1047 */
1048 while (j < page_limit)
1049 page_cache_release(pages[j++]);
1050 }
1051
1052 kfree(pages);
1053
1054 /*
1055 * set data direction, and check if mapped pages need bouncing
1056 */
1057 if (!write_to_vm)
1058 bio->bi_rw |= REQ_WRITE;
1059
1060 bio->bi_bdev = bdev;
1061 bio->bi_flags |= (1 << BIO_USER_MAPPED);
1062 return bio;
1063
1064 out_unmap:
1065 for (i = 0; i < nr_pages; i++) {
1066 if(!pages[i])
1067 break;
1068 page_cache_release(pages[i]);
1069 }
1070 out:
1071 kfree(pages);
1072 bio_put(bio);
1073 return ERR_PTR(ret);
1074 }
1075
1076 /**
1077 * bio_map_user - map user address into bio
1078 * @q: the struct request_queue for the bio
1079 * @bdev: destination block device
1080 * @uaddr: start of user address
1081 * @len: length in bytes
1082 * @write_to_vm: bool indicating writing to pages or not
1083 * @gfp_mask: memory allocation flags
1084 *
1085 * Map the user space address into a bio suitable for io to a block
1086 * device. Returns an error pointer in case of error.
1087 */
1088 struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
1089 unsigned long uaddr, unsigned int len, int write_to_vm,
1090 gfp_t gfp_mask)
1091 {
1092 struct sg_iovec iov;
1093
1094 iov.iov_base = (void __user *)uaddr;
1095 iov.iov_len = len;
1096
1097 return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
1098 }
1099 EXPORT_SYMBOL(bio_map_user);
1100
1101 /**
1102 * bio_map_user_iov - map user sg_iovec table into bio
1103 * @q: the struct request_queue for the bio
1104 * @bdev: destination block device
1105 * @iov: the iovec.
1106 * @iov_count: number of elements in the iovec
1107 * @write_to_vm: bool indicating writing to pages or not
1108 * @gfp_mask: memory allocation flags
1109 *
1110 * Map the user space address into a bio suitable for io to a block
1111 * device. Returns an error pointer in case of error.
1112 */
1113 struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
1114 struct sg_iovec *iov, int iov_count,
1115 int write_to_vm, gfp_t gfp_mask)
1116 {
1117 struct bio *bio;
1118
1119 bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
1120 gfp_mask);
1121 if (IS_ERR(bio))
1122 return bio;
1123
1124 /*
1125 * subtle -- if __bio_map_user() ended up bouncing a bio,
1126 * it would normally disappear when its bi_end_io is run.
1127 * however, we need it for the unmap, so grab an extra
1128 * reference to it
1129 */
1130 bio_get(bio);
1131
1132 return bio;
1133 }
1134
1135 static void __bio_unmap_user(struct bio *bio)
1136 {
1137 struct bio_vec *bvec;
1138 int i;
1139
1140 /*
1141 * make sure we dirty pages we wrote to
1142 */
1143 __bio_for_each_segment(bvec, bio, i, 0) {
1144 if (bio_data_dir(bio) == READ)
1145 set_page_dirty_lock(bvec->bv_page);
1146
1147 page_cache_release(bvec->bv_page);
1148 }
1149
1150 bio_put(bio);
1151 }
1152
1153 /**
1154 * bio_unmap_user - unmap a bio
1155 * @bio: the bio being unmapped
1156 *
1157  * Unmap a bio previously mapped by bio_map_user(). Must be called from
1158  * process context.
1159 *
1160 * bio_unmap_user() may sleep.
1161 */
1162 void bio_unmap_user(struct bio *bio)
1163 {
1164 __bio_unmap_user(bio);
1165 bio_put(bio);
1166 }
1167 EXPORT_SYMBOL(bio_unmap_user);
1168
1169 static void bio_map_kern_endio(struct bio *bio, int err)
1170 {
1171 bio_put(bio);
1172 }
1173
1174 static struct bio *__bio_map_kern(struct request_queue *q, void *data,
1175 unsigned int len, gfp_t gfp_mask)
1176 {
1177 unsigned long kaddr = (unsigned long)data;
1178 unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1179 unsigned long start = kaddr >> PAGE_SHIFT;
1180 const int nr_pages = end - start;
1181 int offset, i;
1182 struct bio *bio;
1183
1184 bio = bio_kmalloc(gfp_mask, nr_pages);
1185 if (!bio)
1186 return ERR_PTR(-ENOMEM);
1187
1188 offset = offset_in_page(kaddr);
1189 for (i = 0; i < nr_pages; i++) {
1190 unsigned int bytes = PAGE_SIZE - offset;
1191
1192 if (len <= 0)
1193 break;
1194
1195 if (bytes > len)
1196 bytes = len;
1197
1198 if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
1199 offset) < bytes)
1200 break;
1201
1202 data += bytes;
1203 len -= bytes;
1204 offset = 0;
1205 }
1206
1207 bio->bi_end_io = bio_map_kern_endio;
1208 return bio;
1209 }
1210
1211 /**
1212 * bio_map_kern - map kernel address into bio
1213 * @q: the struct request_queue for the bio
1214 * @data: pointer to buffer to map
1215 * @len: length in bytes
1216 * @gfp_mask: allocation flags for bio allocation
1217 *
1218 * Map the kernel address into a bio suitable for io to a block
1219 * device. Returns an error pointer in case of error.
1220 */
1221 struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1222 gfp_t gfp_mask)
1223 {
1224 struct bio *bio;
1225
1226 bio = __bio_map_kern(q, data, len, gfp_mask);
1227 if (IS_ERR(bio))
1228 return bio;
1229
1230 if (bio->bi_size == len)
1231 return bio;
1232
1233 /*
1234 * Don't support partial mappings.
1235 */
1236 bio_put(bio);
1237 return ERR_PTR(-EINVAL);
1238 }
1239 EXPORT_SYMBOL(bio_map_kern);
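/*
 * Illustrative use with a driver-owned kernel buffer (names are assumptions):
 * partial mappings are rejected, so only the error pointer needs checking.
 *
 *	bio = bio_map_kern(q, buf, buf_len, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	bio->bi_end_io = my_end_io;		// hypothetical completion handler
 *	submit_bio(WRITE, bio);
 */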
1240
1241 static void bio_copy_kern_endio(struct bio *bio, int err)
1242 {
1243 struct bio_vec *bvec;
1244 const int read = bio_data_dir(bio) == READ;
1245 struct bio_map_data *bmd = bio->bi_private;
1246 int i;
1247 char *p = bmd->sgvecs[0].iov_base;
1248
1249 __bio_for_each_segment(bvec, bio, i, 0) {
1250 char *addr = page_address(bvec->bv_page);
1251 int len = bmd->iovecs[i].bv_len;
1252
1253 if (read)
1254 memcpy(p, addr, len);
1255
1256 __free_page(bvec->bv_page);
1257 p += len;
1258 }
1259
1260 bio_free_map_data(bmd);
1261 bio_put(bio);
1262 }
1263
1264 /**
1265 * bio_copy_kern - copy kernel address into bio
1266 * @q: the struct request_queue for the bio
1267 * @data: pointer to buffer to copy
1268 * @len: length in bytes
1269 * @gfp_mask: allocation flags for bio and page allocation
1270 * @reading: data direction is READ
1271 *
1272 * copy the kernel address into a bio suitable for io to a block
1273 * device. Returns an error pointer in case of error.
1274 */
1275 struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1276 gfp_t gfp_mask, int reading)
1277 {
1278 struct bio *bio;
1279 struct bio_vec *bvec;
1280 int i;
1281
1282 bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
1283 if (IS_ERR(bio))
1284 return bio;
1285
1286 if (!reading) {
1287 void *p = data;
1288
1289 bio_for_each_segment(bvec, bio, i) {
1290 char *addr = page_address(bvec->bv_page);
1291
1292 memcpy(addr, p, bvec->bv_len);
1293 p += bvec->bv_len;
1294 }
1295 }
1296
1297 bio->bi_end_io = bio_copy_kern_endio;
1298
1299 return bio;
1300 }
1301 EXPORT_SYMBOL(bio_copy_kern);
1302
1303 /*
1304 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1305 * for performing direct-IO in BIOs.
1306 *
1307 * The problem is that we cannot run set_page_dirty() from interrupt context
1308 * because the required locks are not interrupt-safe. So what we can do is to
1309 * mark the pages dirty _before_ performing IO. And in interrupt context,
1310 * check that the pages are still dirty. If so, fine. If not, redirty them
1311 * in process context.
1312 *
1313 * We special-case compound pages here: normally this means reads into hugetlb
1314 * pages. The logic in here doesn't really work right for compound pages
1315 * because the VM does not uniformly chase down the head page in all cases.
1316 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1317 * handle them at all. So we skip compound pages here at an early stage.
1318 *
1319 * Note that this code is very hard to test under normal circumstances because
1320 * direct-io pins the pages with get_user_pages(). This makes
1321 * is_page_cache_freeable return false, and the VM will not clean the pages.
1322 * But other code (eg, pdflush) could clean the pages if they are mapped
1323 * pagecache.
1324 *
1325 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1326 * deferred bio dirtying paths.
1327 */
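/*
 * Hedged sketch of the direct-IO read flow described above: dirty the pages
 * before submission, and let the completion path re-check them, deferring any
 * redirtying to process context via the workqueue below.
 *
 *	bio_set_pages_dirty(bio);		// before submit_bio()
 *	submit_bio(READ, bio);
 *
 *	static void my_dio_end_io(struct bio *bio, int error)	// hypothetical
 *	{
 *		bio_check_pages_dirty(bio);	// takes ownership of bio and pages
 *	}
 */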
1328
1329 /*
1330 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1331 */
1332 void bio_set_pages_dirty(struct bio *bio)
1333 {
1334 struct bio_vec *bvec = bio->bi_io_vec;
1335 int i;
1336
1337 for (i = 0; i < bio->bi_vcnt; i++) {
1338 struct page *page = bvec[i].bv_page;
1339
1340 if (page && !PageCompound(page))
1341 set_page_dirty_lock(page);
1342 }
1343 }
1344
1345 static void bio_release_pages(struct bio *bio)
1346 {
1347 struct bio_vec *bvec = bio->bi_io_vec;
1348 int i;
1349
1350 for (i = 0; i < bio->bi_vcnt; i++) {
1351 struct page *page = bvec[i].bv_page;
1352
1353 if (page)
1354 put_page(page);
1355 }
1356 }
1357
1358 /*
1359 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1360 * If they are, then fine. If, however, some pages are clean then they must
1361 * have been written out during the direct-IO read. So we take another ref on
1362 * the BIO and the offending pages and re-dirty the pages in process context.
1363 *
1364 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1365 * here on. It will run one page_cache_release() against each page and will
1366 * run one bio_put() against the BIO.
1367 */
1368
1369 static void bio_dirty_fn(struct work_struct *work);
1370
1371 static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1372 static DEFINE_SPINLOCK(bio_dirty_lock);
1373 static struct bio *bio_dirty_list;
1374
1375 /*
1376 * This runs in process context
1377 */
1378 static void bio_dirty_fn(struct work_struct *work)
1379 {
1380 unsigned long flags;
1381 struct bio *bio;
1382
1383 spin_lock_irqsave(&bio_dirty_lock, flags);
1384 bio = bio_dirty_list;
1385 bio_dirty_list = NULL;
1386 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1387
1388 while (bio) {
1389 struct bio *next = bio->bi_private;
1390
1391 bio_set_pages_dirty(bio);
1392 bio_release_pages(bio);
1393 bio_put(bio);
1394 bio = next;
1395 }
1396 }
1397
1398 void bio_check_pages_dirty(struct bio *bio)
1399 {
1400 struct bio_vec *bvec = bio->bi_io_vec;
1401 int nr_clean_pages = 0;
1402 int i;
1403
1404 for (i = 0; i < bio->bi_vcnt; i++) {
1405 struct page *page = bvec[i].bv_page;
1406
1407 if (PageDirty(page) || PageCompound(page)) {
1408 page_cache_release(page);
1409 bvec[i].bv_page = NULL;
1410 } else {
1411 nr_clean_pages++;
1412 }
1413 }
1414
1415 if (nr_clean_pages) {
1416 unsigned long flags;
1417
1418 spin_lock_irqsave(&bio_dirty_lock, flags);
1419 bio->bi_private = bio_dirty_list;
1420 bio_dirty_list = bio;
1421 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1422 schedule_work(&bio_dirty_work);
1423 } else {
1424 bio_put(bio);
1425 }
1426 }
1427
1428 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1429 void bio_flush_dcache_pages(struct bio *bi)
1430 {
1431 int i;
1432 struct bio_vec *bvec;
1433
1434 bio_for_each_segment(bvec, bi, i)
1435 flush_dcache_page(bvec->bv_page);
1436 }
1437 EXPORT_SYMBOL(bio_flush_dcache_pages);
1438 #endif
1439
1440 /**
1441 * bio_endio - end I/O on a bio
1442 * @bio: bio
1443 * @error: error, if any
1444 *
1445 * Description:
1446 * bio_endio() will end I/O on the whole bio. bio_endio() is the
1447 * preferred way to end I/O on a bio, it takes care of clearing
1448  *   BIO_UPTODATE on error. @error is 0 on success, and one of the
1449 * established -Exxxx (-EIO, for instance) error values in case
1450 * something went wrong. No one should call bi_end_io() directly on a
1451 * bio unless they own it and thus know that it has an end_io
1452 * function.
1453 **/
1454 void bio_endio(struct bio *bio, int error)
1455 {
1456 if (error)
1457 clear_bit(BIO_UPTODATE, &bio->bi_flags);
1458 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1459 error = -EIO;
1460
1461 if (bio->bi_end_io)
1462 bio->bi_end_io(bio, error);
1463 }
1464 EXPORT_SYMBOL(bio_endio);
1465
1466 void bio_pair_release(struct bio_pair *bp)
1467 {
1468 if (atomic_dec_and_test(&bp->cnt)) {
1469 struct bio *master = bp->bio1.bi_private;
1470
1471 bio_endio(master, bp->error);
1472 mempool_free(bp, bp->bio2.bi_private);
1473 }
1474 }
1475 EXPORT_SYMBOL(bio_pair_release);
1476
1477 static void bio_pair_end_1(struct bio *bi, int err)
1478 {
1479 struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
1480
1481 if (err)
1482 bp->error = err;
1483
1484 bio_pair_release(bp);
1485 }
1486
1487 static void bio_pair_end_2(struct bio *bi, int err)
1488 {
1489 struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
1490
1491 if (err)
1492 bp->error = err;
1493
1494 bio_pair_release(bp);
1495 }
1496
1497 /*
1498 * split a bio - only worry about a bio with a single page in its iovec
1499 */
1500 struct bio_pair *bio_split(struct bio *bi, int first_sectors)
1501 {
1502 struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);
1503
1504 if (!bp)
1505 return bp;
1506
1507 trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
1508 bi->bi_sector + first_sectors);
1509
1510 BUG_ON(bi->bi_vcnt != 1);
1511 BUG_ON(bi->bi_idx != 0);
1512 atomic_set(&bp->cnt, 3);
1513 bp->error = 0;
1514 bp->bio1 = *bi;
1515 bp->bio2 = *bi;
1516 bp->bio2.bi_sector += first_sectors;
1517 bp->bio2.bi_size -= first_sectors << 9;
1518 bp->bio1.bi_size = first_sectors << 9;
1519
1520 bp->bv1 = bi->bi_io_vec[0];
1521 bp->bv2 = bi->bi_io_vec[0];
1522 bp->bv2.bv_offset += first_sectors << 9;
1523 bp->bv2.bv_len -= first_sectors << 9;
1524 bp->bv1.bv_len = first_sectors << 9;
1525
1526 bp->bio1.bi_io_vec = &bp->bv1;
1527 bp->bio2.bi_io_vec = &bp->bv2;
1528
1529 bp->bio1.bi_max_vecs = 1;
1530 bp->bio2.bi_max_vecs = 1;
1531
1532 bp->bio1.bi_end_io = bio_pair_end_1;
1533 bp->bio2.bi_end_io = bio_pair_end_2;
1534
1535 bp->bio1.bi_private = bi;
1536 bp->bio2.bi_private = bio_split_pool;
1537
1538 if (bio_integrity(bi))
1539 bio_integrity_split(bi, bp, first_sectors);
1540
1541 return bp;
1542 }
1543 EXPORT_SYMBOL(bio_split);
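/*
 * Assumed usage sketch (md/raid-style boundary split): both halves are issued
 * and the caller's reference on the pair is dropped; the pair itself goes away
 * once bio1 and bio2 have both completed.
 *
 *	struct bio_pair *bp = bio_split(bio, first_sectors);
 *
 *	generic_make_request(&bp->bio1);
 *	generic_make_request(&bp->bio2);
 *	bio_pair_release(bp);
 */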
1544
1545 /**
1546 * bio_sector_offset - Find hardware sector offset in bio
1547 * @bio: bio to inspect
1548 * @index: bio_vec index
1549 * @offset: offset in bv_page
1550 *
1551 * Return the number of hardware sectors between beginning of bio
1552 * and an end point indicated by a bio_vec index and an offset
1553 * within that vector's page.
1554 */
1555 sector_t bio_sector_offset(struct bio *bio, unsigned short index,
1556 unsigned int offset)
1557 {
1558 unsigned int sector_sz;
1559 struct bio_vec *bv;
1560 sector_t sectors;
1561 int i;
1562
1563 sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
1564 sectors = 0;
1565
1566 if (index >= bio->bi_idx)
1567 index = bio->bi_vcnt - 1;
1568
1569 __bio_for_each_segment(bv, bio, i, 0) {
1570 if (i == index) {
1571 if (offset > bv->bv_offset)
1572 sectors += (offset - bv->bv_offset) / sector_sz;
1573 break;
1574 }
1575
1576 sectors += bv->bv_len / sector_sz;
1577 }
1578
1579 return sectors;
1580 }
1581 EXPORT_SYMBOL(bio_sector_offset);
1582
1583 /*
1584 * create memory pools for biovec's in a bio_set.
1585 * use the global biovec slabs created for general use.
1586 */
1587 static int biovec_create_pools(struct bio_set *bs, int pool_entries)
1588 {
1589 struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
1590
1591 bs->bvec_pool = mempool_create_slab_pool(pool_entries, bp->slab);
1592 if (!bs->bvec_pool)
1593 return -ENOMEM;
1594
1595 return 0;
1596 }
1597
1598 static void biovec_free_pools(struct bio_set *bs)
1599 {
1600 mempool_destroy(bs->bvec_pool);
1601 }
1602
1603 void bioset_free(struct bio_set *bs)
1604 {
1605 if (bs->bio_pool)
1606 mempool_destroy(bs->bio_pool);
1607
1608 bioset_integrity_free(bs);
1609 biovec_free_pools(bs);
1610 bio_put_slab(bs);
1611
1612 kfree(bs);
1613 }
1614 EXPORT_SYMBOL(bioset_free);
1615
1616 /**
1617 * bioset_create - Create a bio_set
1618 * @pool_size: Number of bio and bio_vecs to cache in the mempool
1619 * @front_pad: Number of bytes to allocate in front of the returned bio
1620 *
1621 * Description:
1622 * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1623 * to ask for a number of bytes to be allocated in front of the bio.
1624 * Front pad allocation is useful for embedding the bio inside
1625 * another structure, to avoid allocating extra data to go with the bio.
1626 * Note that the bio must be embedded at the END of that structure always,
1627 * or things will break badly.
1628 */
1629 struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
1630 {
1631 unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1632 struct bio_set *bs;
1633
1634 bs = kzalloc(sizeof(*bs), GFP_KERNEL);
1635 if (!bs)
1636 return NULL;
1637
1638 bs->front_pad = front_pad;
1639
1640 bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1641 if (!bs->bio_slab) {
1642 kfree(bs);
1643 return NULL;
1644 }
1645
1646 bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
1647 if (!bs->bio_pool)
1648 goto bad;
1649
1650 if (!biovec_create_pools(bs, pool_size))
1651 return bs;
1652
1653 bad:
1654 bioset_free(bs);
1655 return NULL;
1656 }
1657 EXPORT_SYMBOL(bioset_create);
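/*
 * Illustrative front_pad use (struct and names are hypothetical): embed the
 * bio at the end of a per-io structure and recover the container from the bio
 * in the completion path.
 *
 *	struct my_io {
 *		void		*private;
 *		struct bio	bio;		// must remain the last member
 *	};
 *
 *	bs = bioset_create(BIO_POOL_SIZE, offsetof(struct my_io, bio));
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);
 *	io = container_of(bio, struct my_io, bio);
 */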
1658
1659 static void __init biovec_init_slabs(void)
1660 {
1661 int i;
1662
1663 for (i = 0; i < BIOVEC_NR_POOLS; i++) {
1664 int size;
1665 struct biovec_slab *bvs = bvec_slabs + i;
1666
1667 if (bvs->nr_vecs <= BIO_INLINE_VECS) {
1668 bvs->slab = NULL;
1669 continue;
1670 }
1671
1672 size = bvs->nr_vecs * sizeof(struct bio_vec);
1673 bvs->slab = kmem_cache_create(bvs->name, size, 0,
1674 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1675 }
1676 }
1677
1678 static int __init init_bio(void)
1679 {
1680 bio_slab_max = 2;
1681 bio_slab_nr = 0;
1682 bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
1683 if (!bio_slabs)
1684 panic("bio: can't allocate bios\n");
1685
1686 bio_integrity_init();
1687 biovec_init_slabs();
1688
1689 fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
1690 if (!fs_bio_set)
1691 panic("bio: can't allocate bios\n");
1692
1693 if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
1694 panic("bio: can't create integrity pool\n");
1695
1696 bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
1697 sizeof(struct bio_pair));
1698 if (!bio_split_pool)
1699 panic("bio: can't create split pool\n");
1700
1701 return 0;
1702 }
1703 subsys_initcall(init_bio);
1704