/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_IOVEC,
	ITER_KVEC,
	ITER_BVEC,
	ITER_PIPE,
	ITER_XARRAY,
	ITER_DISCARD,
};

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool nofault;
	bool data_source;
	size_t iov_offset;
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct xarray *xarray;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;
		struct {
			unsigned int head;
			unsigned int start_head;
		};
		loff_t xarray_start;
	};
};

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}
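
/*
 * Illustrative sketch (not part of this header): iov_iter_save_state()
 * pairs with iov_iter_restore(), declared further down, so a caller can
 * rewind a partially-consumed iterator and retry.  Assuming a
 * hypothetical do_transfer() helper:
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = do_transfer(iter);
 *	if (ret == -EAGAIN)
 *		iov_iter_restore(iter, &state);	// back to the saved position
 */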

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
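
/*
 * Illustrative sketch (not part of this header): per the note above,
 * validate the segment lengths before trusting iov_length().  The
 * import_iovec() helpers declared at the end of this header do such
 * validation when copying the array in from userspace; open-coded, a
 * bounded check might look like:
 *
 *	size_t total = 0;
 *
 *	for (seg = 0; seg < nr_segs; seg++) {
 *		if (iov[seg].iov_len > MAX_RW_COUNT - total)
 *			return -EINVAL;		// sum would overflow the cap
 *		total += iov[seg].iov_len;
 *	}
 */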

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}
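
/*
 * Illustrative sketch (not part of this header): iov_iter_iovec()
 * returns the current segment of an ITER_IOVEC iterator, clamped to the
 * bytes remaining, so a caller can walk the iterator one segment at a
 * time.  Assuming a hypothetical process() consumer:
 *
 *	while (iov_iter_count(i)) {
 *		struct iovec v = iov_iter_iovec(i);
 *
 *		process(v.iov_base, v.iov_len);
 *		iov_iter_advance(i, v.iov_len);
 *	}
 */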

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
		size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
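
/*
 * Illustrative sketch (not part of this header): copy_from_iter_full()
 * is all-or-nothing - on a short copy it reverts the iterator, so a
 * caller that needs a complete fixed-size object can bail out cleanly.
 * Assuming a hypothetical on-wire header struct:
 *
 *	struct my_req_hdr hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), from))
 *		return -EFAULT;		// iterator left where it started
 */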

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users like pmem that depend on the stricter semantics of
 * _copy_from_iter_flushcache() over _copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
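
/*
 * Illustrative sketch (not part of this header): per the note above, a
 * pmem-style user must confirm that flush-on-copy semantics are really
 * available before relying on them:
 *
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		return -EOPNOTSUPP;	// or fall back to explicit flushes
 *	copied = _copy_from_iter_flushcache(addr, bytes, i);
 */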

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
			size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
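
/*
 * Illustrative sketch (not part of this header): wrapping a kernel
 * buffer in an ITER_KVEC iterator.  The direction is READ when the
 * iterator is the destination of the transfer and WRITE when it is the
 * source:
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, len);
 */
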
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing it a value
 * greater than the amount of data in iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}

struct csum_state {
	__wsum csum;
	size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i)
{
	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
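
/*
 * Illustrative sketch (not part of this header): a networking-style
 * receive path folding the payload into a checksum while copying it out
 * of the iterator; the _full variant leaves the iterator untouched on
 * failure:
 *
 *	__wsum csum = 0;	// running checksum, updated in place
 *
 *	if (!csum_and_copy_from_iter_full(dst, len, &csum, from))
 *		return -EFAULT;
 */
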
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);
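
/*
 * Illustrative sketch (not part of this header): the usual readv()-style
 * pattern for turning a userspace iovec array into an iov_iter.  On
 * success, *iovp is either a heap copy that must be kfree()d or NULL
 * when the fast array sufficed, so an unconditional kfree() is safe.
 * Assuming a hypothetical do_read() consumer:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_read(&iter);
 *	kfree(iov);
 */
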
#endif