/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2022 Red Hat, Inc.
 */

#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/blk-integrity.h>

#include "dm-core.h"

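/*
 * Rewind @iter by @bytes, walking backwards through the bio_vec array @bv.
 * This mirrors bvec_iter_advance(): the fast path only shrinks
 * bi_bvec_done; otherwise we step back across whole bvecs until the
 * remainder fits inside one of them.  Returns false (after a one-time
 * warning) if asked to rewind past the start of the bvec array.
 */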
static inline bool dm_bvec_iter_rewind(const struct bio_vec *bv,
                                       struct bvec_iter *iter,
                                       unsigned int bytes)
{
        int idx;

        iter->bi_size += bytes;
        if (bytes <= iter->bi_bvec_done) {
                iter->bi_bvec_done -= bytes;
                return true;
        }

        bytes -= iter->bi_bvec_done;
        idx = iter->bi_idx - 1;

        while (idx >= 0 && bytes && bytes > bv[idx].bv_len) {
                bytes -= bv[idx].bv_len;
                idx--;
        }

        if (WARN_ONCE(idx < 0 && bytes,
                      "Attempted to rewind iter beyond bvec's boundaries\n")) {
                iter->bi_size -= bytes;
                iter->bi_bvec_done = 0;
                iter->bi_idx = 0;
                return false;
        }

        iter->bi_idx = idx;
        iter->bi_bvec_done = bv[idx].bv_len - bytes;
        return true;
}

#if defined(CONFIG_BLK_DEV_INTEGRITY)

/**
 * dm_bio_integrity_rewind - Rewind integrity vector
 * @bio: bio whose integrity vector to update
 * @bytes_done: number of data bytes to rewind
 *
 * Description: This function calculates how many integrity bytes the
 * given number of completed data bytes corresponds to and rewinds the
 * integrity vector accordingly.
 */
static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);
        struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
        unsigned int bytes = bio_integrity_bytes(bi, bytes_done >> 9);

        bip->bip_iter.bi_sector -= bio_integrity_intervals(bi, bytes_done >> 9);
        dm_bvec_iter_rewind(bip->bip_vec, &bip->bip_iter, bytes);
}
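/*
 * Illustrative example (assuming a 512-byte protection interval with an
 * 8-byte tuple per interval, as with T10 PI): rewinding bytes_done = 4096
 * data bytes corresponds to 8 protection intervals, so bip_iter.bi_sector
 * moves back by 8 and the bvec iterator is rewound by 8 * 8 = 64
 * integrity bytes.
 */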

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void dm_bio_integrity_rewind(struct bio *bio,
                                           unsigned int bytes_done)
{
        return;
}

#endif

#if defined(CONFIG_BLK_INLINE_ENCRYPTION)

/* Decrements @dun by @dec, treating @dun as a multi-limb integer. */
static void dm_bio_crypt_dun_decrement(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
                                       unsigned int dec)
{
        int i;

        for (i = 0; dec && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
                u64 prev = dun[i];

                dun[i] -= dec;
                if (dun[i] > prev)
                        dec = 1;
                else
                        dec = 0;
        }
}
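/*
 * Illustrative example: with dun[0] = 0, dun[1] = 1 and dec = 1, limb 0
 * underflows from 0 to U64_MAX; the wraparound is detected because the new
 * value is larger than the old one, so a borrow of 1 is subtracted from
 * limb 1, leaving dun[0] = U64_MAX and dun[1] = 0.  Once the borrow is
 * consumed (dec == 0), the loop stops and higher limbs are untouched.
 */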

static void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
{
        struct bio_crypt_ctx *bc = bio->bi_crypt_context;

        dm_bio_crypt_dun_decrement(bc->bc_dun,
                                   bytes >> bc->bc_key->data_unit_size_bits);
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
{
        return;
}

#endif

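/*
 * Rewind @iter of @bio by @bytes.  The starting sector always moves back;
 * for bio types whose iterator is not advanced through the bvecs (see
 * bio_no_advance_iter(), e.g. discard and write-zeroes), only bi_size is
 * restored, otherwise the bvec iterator is rewound as well.
 */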
static inline void dm_bio_rewind_iter(const struct bio *bio,
                                      struct bvec_iter *iter, unsigned int bytes)
{
        iter->bi_sector -= bytes >> 9;

        /* No advance means no rewind */
        if (bio_no_advance_iter(bio))
                iter->bi_size += bytes;
        else
                dm_bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
}

/**
 * dm_bio_rewind - update ->bi_iter of @bio by rewinding @bytes.
 * @bio: bio to rewind
 * @bytes: how many bytes to rewind
 *
 * WARNING:
 * Caller must ensure that @bio has a fixed end sector, to allow
 * rewinding from the end of the bio and restoring its original position.
 * Caller is also responsible for restoring the bio's size.
 */
static void dm_bio_rewind(struct bio *bio, unsigned int bytes)
{
        if (bio_integrity(bio))
                dm_bio_integrity_rewind(bio, bytes);

        if (bio_has_crypt_ctx(bio))
                dm_bio_crypt_rewind(bio, bytes);

        dm_bio_rewind_iter(bio, &bio->bi_iter, bytes);
}

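/*
 * Re-clone the original bio, rewind the clone back to this io's original
 * position, trim it to io->sectors, then chain it to the original and
 * install it as the new io->orig_bio so the io can be resubmitted (e.g. on
 * requeue).  io->sector_offset records the offset from this io's start to
 * the end of the original bio (see struct dm_io in dm-core.h), so
 * (io->sector_offset << 9) - orig->bi_iter.bi_size is how many bytes the
 * original bio has advanced past that start and therefore how far the
 * clone must be rewound.
 */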
void dm_io_rewind(struct dm_io *io, struct bio_set *bs)
{
        struct bio *orig = io->orig_bio;
        struct bio *new_orig = bio_alloc_clone(orig->bi_bdev, orig,
                                               GFP_NOIO, bs);
        /*
         * dm_bio_rewind can restore the clone to its previous position
         * since the end sector is fixed for the original bio, but we
         * still need to restore the bio's size manually (using
         * io->sectors).
         */
        dm_bio_rewind(new_orig, ((io->sector_offset << 9) -
                                 orig->bi_iter.bi_size));
        bio_trim(new_orig, 0, io->sectors);

        bio_chain(new_orig, orig);
        /*
         * __bi_remaining was increased (by dm_split_and_process_bio),
         * so we must drop the one added by bio_chain.
         */
        atomic_dec(&orig->__bi_remaining);
        io->orig_bio = new_orig;
}