// SPDX-License-Identifier: GPL-2.0
/*
 * random utility code, for bcache but in theory not specific to bcache
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <linux/sched/clock.h>

#include "util.h"

#define simple_strtoint(c, end, base)	simple_strtol(c, end, base)
#define simple_strtouint(c, end, base)	simple_strtoul(c, end, base)

#define STRTO_H(name, type)					\
int bch_ ## name ## _h(const char *cp, type *res)		\
{								\
	int u = 0;						\
	char *e;						\
	type i = simple_ ## name(cp, &e, 10);			\
								\
	switch (tolower(*e)) {					\
	default:						\
		return -EINVAL;					\
	case 'y':						\
	case 'z':						\
		u++;						\
		fallthrough;					\
	case 'e':						\
		u++;						\
		fallthrough;					\
	case 'p':						\
		u++;						\
		fallthrough;					\
	case 't':						\
		u++;						\
		fallthrough;					\
	case 'g':						\
		u++;						\
		fallthrough;					\
	case 'm':						\
		u++;						\
		fallthrough;					\
	case 'k':						\
		u++;						\
		if (e++ == cp)					\
			return -EINVAL;				\
		fallthrough;					\
	case '\n':						\
	case '\0':						\
		if (*e == '\n')					\
			e++;					\
	}							\
								\
	if (*e)							\
		return -EINVAL;					\
								\
	while (u--) {						\
		if ((type) ~0 > 0 &&				\
		    (type) ~0 / 1024 <= i)			\
			return -EINVAL;				\
		if ((i > 0 && ANYSINT_MAX(type) / 1024 < i) ||	\
		    (i < 0 && -ANYSINT_MAX(type) / 1024 > i))	\
			return -EINVAL;				\
		i *= 1024;					\
	}							\
								\
	*res = i;						\
	return 0;						\
}								\

STRTO_H(strtoint, int)
STRTO_H(strtouint, unsigned int)
STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)

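/*
 * Illustrative use of the generated helpers (a sketch, not taken from a
 * specific caller): the unit suffix is case-insensitive and each step
 * scales by 1024, and overflow of the destination type is rejected with
 * -EINVAL. For example:
 *
 *	unsigned long long bytes;
 *
 *	if (bch_strtoull_h("64k", &bytes))
 *		return -EINVAL;
 *	(bytes is now 65536)
 */
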
/**
 * bch_hprint - formats @v to human readable string for sysfs.
 * @buf: the (at least 8 byte) buffer to format the result into.
 * @v: signed 64 bit integer
 *
 * Returns the number of bytes used by format.
 */
ssize_t bch_hprint(char *buf, int64_t v)
{
	static const char units[] = "?kMGTPEZY";
	int u = 0, t;

	uint64_t q;

	if (v < 0)
		q = -v;
	else
		q = v;

	/* For as long as the number is more than 3 digits, but at least
	 * once, shift right / divide by 1024. Keep the remainder for
	 * a digit after the decimal point.
	 */
	do {
		u++;

		t = q & ~(~0 << 10);
		q >>= 10;
	} while (q >= 1000);

	if (v < 0)
		/* '-', up to 3 digits, '.', 1 digit, 1 character, null;
		 * yields 8 bytes.
		 */
		return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]);
	else
		return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
}

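/*
 * Example (illustrative, not part of the original file): with v == 1536
 * the loop runs once, leaving q == 1 and t == 512, so @buf ends up holding
 * "1.5k"; with v == 2097152 it runs twice and yields "2.0M".
 */
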
bool bch_is_zero(const char *p, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (p[i])
			return false;
	return true;
}

int bch_parse_uuid(const char *s, char *uuid)
{
	size_t i, j, x;

	memset(uuid, 0, 16);

	for (i = 0, j = 0;
	     i < strspn(s, "-0123456789:ABCDEFabcdef") && j < 32;
	     i++) {
		x = s[i] | 32;

		switch (x) {
		case '0'...'9':
			x -= '0';
			break;
		case 'a'...'f':
			x -= 'a' - 10;
			break;
		default:
			continue;
		}

		if (!(j & 1))
			x <<= 4;
		uuid[j++ >> 1] |= x;
	}
	return i;
}

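/*
 * Example (illustrative): bch_parse_uuid() packs the hex digits of a
 * textual UUID such as "00112233-4455-6677-8899-aabbccddeeff" into the
 * 16-byte binary @uuid buffer, skipping '-' and ':' separators, and
 * returns how many input characters it consumed.
 */
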
void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
{
	uint64_t now, duration, last;

	spin_lock(&stats->lock);

	now = local_clock();
	duration = time_after64(now, start_time)
		? now - start_time : 0;
	last = time_after64(now, stats->last)
		? now - stats->last : 0;

	stats->max_duration = max(stats->max_duration, duration);

	if (stats->last) {
		ewma_add(stats->average_duration, duration, 8, 8);

		if (stats->average_frequency)
			ewma_add(stats->average_frequency, last, 8, 8);
		else
			stats->average_frequency = last << 8;
	} else {
		stats->average_duration = duration << 8;
	}

	stats->last = now ?: 1;

	spin_unlock(&stats->lock);
}

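/*
 * Typical usage pattern (a sketch, not lifted from a specific caller):
 * sample local_clock() before the operation being measured and pass that
 * timestamp in afterwards, e.g.
 *
 *	uint64_t start = local_clock();
 *
 *	do_work();
 *	bch_time_stats_update(&stats, start);
 *
 * average_duration and average_frequency are exponentially weighted moving
 * averages kept with 8 fractional bits (via ewma_add()), which is why the
 * very first sample is stored shifted left by 8.
 */
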
/**
 * bch_next_delay() - update ratelimiting statistics and calculate next delay
 * @d: the struct bch_ratelimit to update
 * @done: the amount of work done, in arbitrary units
 *
 * Increment @d by the amount of work done, and return how long to delay in
 * jiffies until the next time to do some work.
 */
uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
{
	uint64_t now = local_clock();

	d->next += div_u64(done * NSEC_PER_SEC, atomic_long_read(&d->rate));

	/* Bound the time. Don't let us fall further than 2 seconds behind
	 * (this prevents unnecessary backlog that would make it impossible
	 * to catch up). If we're ahead of the desired writeback rate,
	 * don't let us sleep more than 2.5 seconds (so we can notice/respond
	 * if the control system tells us to speed up!).
	 */
	if (time_before64(now + NSEC_PER_SEC * 5LLU / 2LLU, d->next))
		d->next = now + NSEC_PER_SEC * 5LLU / 2LLU;

	if (time_after64(now - NSEC_PER_SEC * 2, d->next))
		d->next = now - NSEC_PER_SEC * 2;

	return time_after64(d->next, now)
		? div_u64(d->next - now, NSEC_PER_SEC / HZ)
		: 0;
}

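/*
 * Sketch of how a caller might drive this (variable names are illustrative,
 * not a specific bcache call site): account the work just issued, then sleep
 * for the returned number of jiffies before issuing more.
 *
 *	uint64_t delay = bch_next_delay(&dc->writeback_rate, sectors);
 *
 *	if (delay)
 *		schedule_timeout_interruptible(delay);
 */
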
/*
 * Generally it isn't good to access .bi_io_vec and .bi_vcnt directly;
 * the preferred way is bio_add_page(). But bch_bio_map() assumes that
 * the bvec table is empty, so it is safe to access .bi_vcnt and
 * .bi_io_vec in this way even after multipage bvecs are supported.
 */
void bch_bio_map(struct bio *bio, void *base)
{
	size_t size = bio->bi_iter.bi_size;
	struct bio_vec *bv = bio->bi_io_vec;

	BUG_ON(!bio->bi_iter.bi_size);
	BUG_ON(bio->bi_vcnt);

	bv->bv_offset = base ? offset_in_page(base) : 0;
	goto start;

	for (; size; bio->bi_vcnt++, bv++) {
		bv->bv_offset = 0;
start:		bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
				   size);
		if (base) {
			bv->bv_page = is_vmalloc_addr(base)
				? vmalloc_to_page(base)
				: virt_to_page(base);

			base += bv->bv_len;
		}

		size -= bv->bv_len;
	}
}

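/*
 * Illustrative use (a sketch, not a specific call site): with bi_vcnt still
 * zero, set bi_iter.bi_size to the length of a kmalloc'd or vmalloc'd buffer
 * and let bch_bio_map() fill in one bvec per page:
 *
 *	bio->bi_iter.bi_size = buf_size;
 *	bch_bio_map(bio, data);
 *
 * Passing a NULL @base only sets up the bvec lengths and offsets, leaving
 * the pages to be attached later (e.g. by bch_bio_alloc_pages()).
 */
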
/**
 * bch_bio_alloc_pages - allocates a single page for each bvec in a bio
 * @bio: bio to allocate pages for
 * @gfp_mask: flags for allocation
 *
 * Allocates pages up to @bio->bi_vcnt.
 *
 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages
 * are freed.
 */
int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
	int i;
	struct bio_vec *bv;

	/*
	 * This is called on a freshly allocated bio, so it is safe to access
	 * its bvec table directly.
	 */
	for (i = 0, bv = bio->bi_io_vec; i < bio->bi_vcnt; bv++, i++) {
		bv->bv_page = alloc_page(gfp_mask);
		if (!bv->bv_page) {
			while (--bv >= bio->bi_io_vec)
				__free_page(bv->bv_page);
			return -ENOMEM;
		}
	}

	return 0;
}

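/*
 * Sketch of the usual pairing with bch_bio_map() (illustrative only; the
 * size and gfp flags are placeholders): describe the bio's size with a NULL
 * base so the bvec lengths are set up, then back each bvec with a freshly
 * allocated page.
 *
 *	bio->bi_iter.bi_size = nr_pages << PAGE_SHIFT;
 *	bch_bio_map(bio, NULL);
 *	if (bch_bio_alloc_pages(bio, GFP_KERNEL))
 *		goto err;
 */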