// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/module.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
        return file->f_mapping->host;
}

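/*
 * On a block device the blocks are addressed linearly: logical block
 * iblock of the inode maps 1:1 to device block iblock, so no lookup is
 * needed; just point the buffer_head at the bdev and mark it mapped.
 */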
static int blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}

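/*
 * Build the request flags for a direct I/O write.  O_DSYNC writes are
 * issued with REQ_FUA so the data is durable once the bio completes.
 */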
static unsigned int dio_bio_write_op(struct kiocb *iocb)
{
        unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

        /* avoid the need for an I/O completion work item */
        if (iocb->ki_flags & IOCB_DSYNC)
                op |= REQ_FUA;
        return op;
}

#define DIO_INLINE_BIO_VECS 4

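/*
 * Fast path for small synchronous direct I/O: a single bio on the stack,
 * submitted with submit_bio_wait().  Only used when the request fits in
 * one bio and the caller is going to block for completion anyway.
 */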
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
                struct iov_iter *iter, unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
        loff_t pos = iocb->ki_pos;
        bool should_dirty = false;
        struct bio bio;
        ssize_t ret;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        if (nr_pages <= DIO_INLINE_BIO_VECS)
                vecs = inline_vecs;
        else {
                vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
                                     GFP_KERNEL);
                if (!vecs)
                        return -ENOMEM;
        }

        if (iov_iter_rw(iter) == READ) {
                bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
                if (iter_is_iovec(iter))
                        should_dirty = true;
        } else {
                bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
        }
        bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio.bi_ioprio = iocb->ki_ioprio;

        ret = bio_iov_iter_get_pages(&bio, iter);
        if (unlikely(ret))
                goto out;
        ret = bio.bi_iter.bi_size;

        if (iov_iter_rw(iter) == WRITE)
                task_io_account_write(ret);

        if (iocb->ki_flags & IOCB_NOWAIT)
                bio.bi_opf |= REQ_NOWAIT;

        submit_bio_wait(&bio);

        bio_release_pages(&bio, should_dirty);
        if (unlikely(bio.bi_status))
                ret = blk_status_to_errno(bio.bi_status);

out:
        if (vecs != inline_vecs)
                kfree(vecs);

        bio_uninit(&bio);

        return ret;
}

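/*
 * Multi-bio direct I/O state.  The dio is embedded in the first bio and
 * kept alive by a reference count; dio->size accumulates the bytes queued
 * across all bios so the final completion can report the full result.
 */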
enum {
        DIO_SHOULD_DIRTY        = 1,
        DIO_IS_SYNC             = 2,
};

struct blkdev_dio {
        union {
                struct kiocb            *iocb;
                struct task_struct      *waiter;
        };
        size_t                  size;
        atomic_t                ref;
        unsigned int            flags;
        struct bio              bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

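/*
 * Completion handler for the multi-bio path.  Each bio drops a reference
 * on the dio; the last one either completes the iocb (async) or wakes the
 * submitter (sync).  The first I/O error seen is stashed in the parent bio.
 */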
static void blkdev_bio_end_io(struct bio *bio)
{
        struct blkdev_dio *dio = bio->bi_private;
        bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

        if (bio->bi_status && !dio->bio.bi_status)
                dio->bio.bi_status = bio->bi_status;

        if (atomic_dec_and_test(&dio->ref)) {
                if (!(dio->flags & DIO_IS_SYNC)) {
                        struct kiocb *iocb = dio->iocb;
                        ssize_t ret;

                        WRITE_ONCE(iocb->private, NULL);

                        if (likely(!dio->bio.bi_status)) {
                                ret = dio->size;
                                iocb->ki_pos += ret;
                        } else {
                                ret = blk_status_to_errno(dio->bio.bi_status);
                        }

                        dio->iocb->ki_complete(iocb, ret);
                        bio_put(&dio->bio);
                } else {
                        struct task_struct *waiter = dio->waiter;

                        WRITE_ONCE(dio->waiter, NULL);
                        blk_wake_io_task(waiter);
                }
        }

        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}

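/*
 * Direct I/O that needs more than one bio: submit bios in a loop under a
 * plug, then either return -EIOCBQUEUED (async) or sleep until the last
 * completion wakes us (sync).
 */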
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
        bool is_read = (iov_iter_rw(iter) == READ), is_sync;
        unsigned int opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        if (iocb->ki_flags & IOCB_ALLOC_CACHE)
                opf |= REQ_ALLOC_CACHE;
        bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
                               &blkdev_dio_pool);
        dio = container_of(bio, struct blkdev_dio, bio);
        atomic_set(&dio->ref, 1);
        /*
         * Grab an extra reference to ensure the dio structure which is
         * embedded into the first bio stays around.
         */
        bio_get(bio);

        is_sync = is_sync_kiocb(iocb);
        if (is_sync) {
                dio->flags = DIO_IS_SYNC;
                dio->waiter = current;
        } else {
                dio->flags = 0;
                dio->iocb = iocb;
        }

        dio->size = 0;
        if (is_read && iter_is_iovec(iter))
                dio->flags |= DIO_SHOULD_DIRTY;

        blk_start_plug(&plug);

        for (;;) {
                bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
                bio->bi_private = dio;
                bio->bi_end_io = blkdev_bio_end_io;
                bio->bi_ioprio = iocb->ki_ioprio;

                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                        break;
                }

                if (is_read) {
                        if (dio->flags & DIO_SHOULD_DIRTY)
                                bio_set_pages_dirty(bio);
                } else {
                        task_io_account_write(bio->bi_iter.bi_size);
                }
                if (iocb->ki_flags & IOCB_NOWAIT)
                        bio->bi_opf |= REQ_NOWAIT;

                dio->size += bio->bi_iter.bi_size;
                pos += bio->bi_iter.bi_size;

                nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
                if (!nr_pages) {
                        submit_bio(bio);
                        break;
                }
                atomic_inc(&dio->ref);
                submit_bio(bio);
                bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
        }

        blk_finish_plug(&plug);

        if (!is_sync)
                return -EIOCBQUEUED;

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(dio->waiter))
                        break;
                blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (!ret)
                ret = blk_status_to_errno(dio->bio.bi_status);
        if (likely(!ret))
                ret = dio->size;

        bio_put(&dio->bio);
        return ret;
}

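/*
 * Completion handler for the single-bio async path: the dio is embedded in
 * the one and only bio, so no reference counting is needed.
 */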
static void blkdev_bio_end_io_async(struct bio *bio)
{
        struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
        struct kiocb *iocb = dio->iocb;
        ssize_t ret;

        WRITE_ONCE(iocb->private, NULL);

        if (likely(!bio->bi_status)) {
                ret = dio->size;
                iocb->ki_pos += ret;
        } else {
                ret = blk_status_to_errno(bio->bi_status);
        }

        iocb->ki_complete(iocb, ret);

        if (dio->flags & DIO_SHOULD_DIRTY) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}

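/*
 * Single-bio async direct I/O fast path.  For bvec iterators the bio is
 * pointed at the caller's bvec array directly, skipping
 * bio_iov_iter_get_pages(); polled I/O (IOCB_HIPRI) stashes the bio in
 * iocb->private for iocb_bio_iopoll().
 */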
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
                struct iov_iter *iter, unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        bool is_read = iov_iter_rw(iter) == READ;
        unsigned int opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
        struct blkdev_dio *dio;
        struct bio *bio;
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        if (iocb->ki_flags & IOCB_ALLOC_CACHE)
                opf |= REQ_ALLOC_CACHE;
        bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
                               &blkdev_dio_pool);
        dio = container_of(bio, struct blkdev_dio, bio);
        dio->flags = 0;
        dio->iocb = iocb;
        bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio->bi_end_io = blkdev_bio_end_io_async;
        bio->bi_ioprio = iocb->ki_ioprio;

        if (iov_iter_is_bvec(iter)) {
                /*
                 * Users don't rely on the iterator being in any particular
                 * state for async I/O returning -EIOCBQUEUED, hence we can
                 * avoid expensive iov_iter_advance(). Bypass
                 * bio_iov_iter_get_pages() and set the bvec directly.
                 */
                bio_iov_bvec_set(bio, iter);
        } else {
                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio_put(bio);
                        return ret;
                }
        }
        dio->size = bio->bi_iter.bi_size;

        if (is_read) {
                if (iter_is_iovec(iter)) {
                        dio->flags |= DIO_SHOULD_DIRTY;
                        bio_set_pages_dirty(bio);
                }
        } else {
                task_io_account_write(bio->bi_iter.bi_size);
        }

        if (iocb->ki_flags & IOCB_HIPRI) {
                bio->bi_opf |= REQ_POLLED | REQ_NOWAIT;
                submit_bio(bio);
                WRITE_ONCE(iocb->private, bio);
        } else {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        bio->bi_opf |= REQ_NOWAIT;
                submit_bio(bio);
        }
        return -EIOCBQUEUED;
}

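/*
 * Dispatch to one of the three direct I/O implementations based on the
 * request size and whether the iocb is synchronous.
 */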
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        unsigned int nr_pages;

        if (!iov_iter_count(iter))
                return 0;

        nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
        if (likely(nr_pages <= BIO_MAX_VECS)) {
                if (is_sync_kiocb(iocb))
                        return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
                return __blkdev_direct_IO_async(iocb, iter, nr_pages);
        }
        return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}

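/*
 * Buffered I/O address_space operations, implemented on top of the
 * buffer_head and mpage helpers using the trivial 1:1 block mapping.
 */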
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
        return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
        mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
        return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned copied, struct page *page,
                void *fsdata)
{
        int ret;

        ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

        unlock_page(page);
        put_page(page);

        return ret;
}

static int blkdev_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        return generic_writepages(mapping, wbc);
}

const struct address_space_operations def_blk_aops = {
        .dirty_folio        = block_dirty_folio,
        .invalidate_folio   = block_invalidate_folio,
        .read_folio         = blkdev_read_folio,
        .readahead          = blkdev_readahead,
        .writepage          = blkdev_writepage,
        .write_begin        = blkdev_write_begin,
        .write_end          = blkdev_write_end,
        .writepages         = blkdev_writepages,
        .direct_IO          = blkdev_direct_IO,
        .migratepage        = buffer_migrate_page_norefs,
        .is_dirty_writeback = buffer_check_dirty_writeback,
};

/*
 * For a block special file, file_inode(file)->i_size is zero, so compute
 * the size by hand (just as in block_read/write above).
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *bd_inode = bdev_file_inode(file);
        loff_t retval;

        inode_lock(bd_inode);
        retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
        inode_unlock(bd_inode);
        return retval;
}

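/*
 * Flush dirty pagecache for the range, then send a cache flush to the
 * device so that completed writes are durable.
 */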
static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
                int datasync)
{
        struct block_device *bdev = filp->private_data;
        int error;

        error = file_write_and_wait_range(filp, start, end);
        if (error)
                return error;

        /*
         * There is no need to serialise calls to blkdev_issue_flush with
         * i_mutex and doing so causes performance issues with concurrent
         * O_SYNC writers to a block device.
         */
        error = blkdev_issue_flush(bdev);
        if (error == -EOPNOTSUPP)
                error = 0;

        return error;
}

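/*
 * Open a block device: translate the O_* open flags into FMODE_* bits and
 * take a reference on the bdev via blkdev_get_by_dev().
 */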
static int blkdev_open(struct inode *inode, struct file *filp)
{
        struct block_device *bdev;

        /*
         * Preserve backwards compatibility and allow large file access
         * even if userspace doesn't ask for it explicitly. Some mkfs
         * binaries need it. We might want to drop this workaround
         * during an unstable branch.
         */
        filp->f_flags |= O_LARGEFILE;
        filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;

        if (filp->f_flags & O_NDELAY)
                filp->f_mode |= FMODE_NDELAY;
        if (filp->f_flags & O_EXCL)
                filp->f_mode |= FMODE_EXCL;
        if ((filp->f_flags & O_ACCMODE) == 3)
                filp->f_mode |= FMODE_WRITE_IOCTL;

        bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        filp->private_data = bdev;
        filp->f_mapping = bdev->bd_inode->i_mapping;
        filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
        return 0;
}

static int blkdev_close(struct inode *inode, struct file *filp)
{
        struct block_device *bdev = filp->private_data;

        blkdev_put(bdev, filp->f_mode);
        return 0;
}

/*
 * Write data to the block device. Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct inode *bd_inode = bdev->bd_inode;
        loff_t size = bdev_nr_bytes(bdev);
        struct blk_plug plug;
        size_t shorted = 0;
        ssize_t ret;

        if (bdev_read_only(bdev))
                return -EPERM;

        if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
                return -ETXTBSY;

        if (!iov_iter_count(from))
                return 0;

        if (iocb->ki_pos >= size)
                return -ENOSPC;

        if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
                return -EOPNOTSUPP;

        size -= iocb->ki_pos;
        if (iov_iter_count(from) > size) {
                shorted = iov_iter_count(from) - size;
                iov_iter_truncate(from, size);
        }

        blk_start_plug(&plug);
        ret = __generic_file_write_iter(iocb, from);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        iov_iter_reexpand(from, iov_iter_count(from) + shorted);
        blk_finish_plug(&plug);
        return ret;
}

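/*
 * Read from the block device, clamping the request to the device size.
 * IOCB_DIRECT reads first sync any overlapping dirty pagecache, then fall
 * back to buffered reads for any tail the direct path did not complete.
 */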
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        loff_t size = bdev_nr_bytes(bdev);
        loff_t pos = iocb->ki_pos;
        size_t shorted = 0;
        ssize_t ret = 0;
        size_t count;

        if (unlikely(pos + iov_iter_count(to) > size)) {
                if (pos >= size)
                        return 0;
                size -= pos;
                shorted = iov_iter_count(to) - size;
                iov_iter_truncate(to, size);
        }

        count = iov_iter_count(to);
        if (!count)
                goto reexpand; /* skip atime */

        if (iocb->ki_flags & IOCB_DIRECT) {
                struct address_space *mapping = iocb->ki_filp->f_mapping;

                if (iocb->ki_flags & IOCB_NOWAIT) {
                        if (filemap_range_needs_writeback(mapping, pos,
                                                          pos + count - 1)) {
                                ret = -EAGAIN;
                                goto reexpand;
                        }
                } else {
                        ret = filemap_write_and_wait_range(mapping, pos,
                                                           pos + count - 1);
                        if (ret < 0)
                                goto reexpand;
                }

                file_accessed(iocb->ki_filp);

                ret = blkdev_direct_IO(iocb, to);
                if (ret >= 0) {
                        iocb->ki_pos += ret;
                        count -= ret;
                }
                iov_iter_revert(to, count - iov_iter_count(to));
                if (ret < 0 || !count)
                        goto reexpand;
        }

        ret = filemap_read(iocb, to, ret);

reexpand:
        if (unlikely(shorted))
                iov_iter_reexpand(to, iov_iter_count(to) + shorted);
        return ret;
}

#define BLKDEV_FALLOC_FL_SUPPORTED                                      \
                (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |           \
                 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

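/*
 * fallocate() on a block device: zero or discard a block-aligned range
 * after kicking the affected pages out of the pagecache.
 */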
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
                loff_t len)
{
        struct inode *inode = bdev_file_inode(file);
        struct block_device *bdev = I_BDEV(inode);
        loff_t end = start + len - 1;
        loff_t isize;
        int error;

        /* Fail if we don't recognize the flags. */
        if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
                return -EOPNOTSUPP;

        /* Don't go off the end of the device. */
        isize = bdev_nr_bytes(bdev);
        if (start >= isize)
                return -EINVAL;
        if (end >= isize) {
                if (mode & FALLOC_FL_KEEP_SIZE) {
                        len = isize - start;
                        end = start + len - 1;
                } else
                        return -EINVAL;
        }

        /*
         * Don't allow IO that isn't aligned to logical block size.
         */
        if ((start | len) & (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        filemap_invalidate_lock(inode->i_mapping);

        /* Invalidate the page cache, including dirty pages. */
        error = truncate_bdev_range(bdev, file->f_mode, start, end);
        if (error)
                goto fail;

        switch (mode) {
        case FALLOC_FL_ZERO_RANGE:
        case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOUNMAP);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOFALLBACK);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
                error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL);
                break;
        default:
                error = -EOPNOTSUPP;
        }

fail:
        filemap_invalidate_unlock(inode->i_mapping);
        return error;
}

const struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_close,
        .llseek         = blkdev_llseek,
        .read_iter      = blkdev_read_iter,
        .write_iter     = blkdev_write_iter,
        .iopoll         = iocb_bio_iopoll,
        .mmap           = generic_file_mmap,
        .fsync          = blkdev_fsync,
        .unlocked_ioctl = blkdev_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_blkdev_ioctl,
#endif
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = blkdev_fallocate,
};

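/*
 * The pool reserves four bios worth of blkdev_dio + bvecs so the direct
 * I/O path can still make forward progress under memory pressure.
 */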
static __init int blkdev_init(void)
{
        return bioset_init(&blkdev_dio_pool, 4,
                           offsetof(struct blkdev_dio, bio),
                           BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);