/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
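
/*
 * All XDR quantities are quad-aligned: an object of 'len' bytes
 * occupies XDR_QUADLEN(len) == ((len + 3) >> 2) words on the wire.
 * For example, the 5-byte opaque 01 02 03 04 05 is encoded as
 *
 *	00 00 00 05  01 02 03 04  05 00 00 00
 *
 * i.e. a big-endian length word followed by the data, zero-padded to
 * the next 4-byte boundary.
 */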
u32 *
xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + quadlen;
}

u32 *
xdr_decode_netobj_fixed(u32 *p, void *obj, unsigned int len)
{
	if (ntohl(*p++) != len)
		return NULL;
	memcpy(obj, p, len);
	return p + XDR_QUADLEN(len);
}

u32 *
xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}

u32 *
xdr_encode_array(u32 *p, const char *array, unsigned int len)
{
	int quadlen = XDR_QUADLEN(len);

	p[quadlen] = 0;
	*p++ = htonl(len);
	memcpy(p, array, len);
	return p + quadlen;
}

u32 *
xdr_encode_string(u32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}

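/*
 * Decode a string and NUL-terminate it in place.  If the length is not
 * a multiple of four, the XDR pad bytes leave room for the NUL;
 * otherwise the string is moved back over the (already consumed)
 * length word to make room for it.
 */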
u32 *
xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;
	char		*string;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	if (lenp)
		*lenp = len;
	if ((len % 4) != 0) {
		string = (char *) p;
	} else {
		string = (char *) (p - 1);
		memmove(string, p, len);
	}
	string[len] = '\0';
	*sp = string;
	return p + XDR_QUADLEN(len);
}

u32 *
xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}


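/*
 * A struct xdr_buf carries its data in three consecutive regions: the
 * head iovec, a vector of pages starting at page_base, and the tail
 * iovec.  xdr_encode_pages() below attaches a page vector as the
 * middle region; if the page data is not a multiple of four bytes
 * long, the tail is pointed at a static pad so that the total length
 * stays quad-aligned on the wire.
 */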
void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	if (len & 3) {
		struct iovec *iov = xdr->tail;
		unsigned int pad = 4 - (len & 3);

		iov->iov_base = (void *) "\0\0\0";
		iov->iov_len  = pad;
		len += pad;
	}
	xdr->len += len;
}

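/*
 * Insert the page data between head and tail: the head is truncated
 * to 'offset' bytes, and the remainder of the original head buffer
 * becomes the tail.
 */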
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct iovec *head = xdr->head;
	struct iovec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->len += len;
}

/*
 * Realign the iovec if the server left out some reply elements
 * (such as post-op attributes, ...).
 * Note: This is a simple implementation that assumes that
 *            len <= iov->iov_len !!!
 *       The RPC header (assumed to be the 1st element in the iov array)
 *            is not shifted.
 */
void xdr_shift_iovec(struct iovec *iov, int nr, size_t len)
{
	struct iovec *pvec;

	for (pvec = iov + nr - 1; nr > 1; nr--, pvec--) {
		struct iovec *svec = pvec - 1;

		if (len > pvec->iov_len) {
			printk(KERN_DEBUG "RPC: Urk! Large shift of short iovec.\n");
			return;
		}
		memmove((char *)pvec->iov_base + len, pvec->iov_base,
			pvec->iov_len - len);

		if (len > svec->iov_len) {
			printk(KERN_DEBUG "RPC: Urk! Large shift of short iovec.\n");
			return;
		}
		memcpy(pvec->iov_base,
		       (char *)svec->iov_base + svec->iov_len - len, len);
	}
}
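
/*
 * An illustration of the shift above: with iov = { hdr, A, B } and
 * len = 4, B's contents are shifted right by 4 bytes and its first 4
 * bytes are refilled from the last 4 bytes of A; A is then shifted the
 * same way from the tail of hdr.  hdr itself (the RPC header) is never
 * moved.
 */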

/*
 * Map a struct xdr_buf into an iovec array.  Returns the number of
 * iovecs filled in, or 0 if one of the pages could not be mapped.
 */
int xdr_kmap(struct iovec *iov_base, struct xdr_buf *xdr, unsigned int base)
{
	struct iovec	*iov = iov_base;
	struct page	**ppage = xdr->pages;
	struct page	**first_kmap = NULL;
	unsigned int	len, pglen = xdr->page_len;

	len = xdr->head[0].iov_len;
	if (base < len) {
		iov->iov_len = len - base;
		iov->iov_base = (char *)xdr->head[0].iov_base + base;
		iov++;
		base = 0;
	} else
		base -= len;

	if (pglen == 0)
		goto map_tail;
	if (base >= pglen) {
		base -= pglen;
		goto map_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base  += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}
	do {
		len = PAGE_CACHE_SIZE;
		if (!first_kmap) {
			first_kmap = ppage;
			iov->iov_base = kmap(*ppage);
		} else {
			iov->iov_base = kmap_nonblock(*ppage);
			if (!iov->iov_base)
				goto out_err;
		}
		if (base) {
			iov->iov_base += base;
			len -= base;
			base = 0;
		}
		if (pglen < len)
			len = pglen;
		iov->iov_len = len;
		iov++;
		ppage++;
	} while ((pglen -= len) != 0);
map_tail:
	if (xdr->tail[0].iov_len) {
		iov->iov_len = xdr->tail[0].iov_len - base;
		iov->iov_base = (char *)xdr->tail[0].iov_base + base;
		iov++;
	}
	return (iov - iov_base);
out_err:
	for (; first_kmap != ppage; first_kmap++)
		kunmap(*first_kmap);
	return 0;
}
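
/*
 * A typical caller (an illustrative sketch, not a fixed API contract)
 * pairs the two functions:
 *
 *	n = xdr_kmap(iov, xdr, base);
 *	if (n) {
 *		... transmit or receive through the iovec array ...
 *		xdr_kunmap(xdr, base, n);
 *	}
 *
 * xdr_kmap() may fail partway through (kmap_nonblock() returning
 * NULL), in which case it unmaps whatever it mapped and returns 0.
 */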

void xdr_kunmap(struct xdr_buf *xdr, unsigned int base, int niov)
{
	struct page	**ppage = xdr->pages;
	unsigned int	pglen = xdr->page_len;

	if (!pglen)
		return;
	if (base >= xdr->head[0].iov_len)
		base -= xdr->head[0].iov_len;
	else {
		niov--;
		base = 0;
	}

	if (base >= pglen)
		return;
	if (base || xdr->page_base) {
		pglen -= base;
		base  += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		/* Note: The offset means that the length of the first
		 * page is really (PAGE_CACHE_SIZE - (base & ~PAGE_CACHE_MASK)).
		 * In order to avoid an extra test inside the loop,
		 * we bump pglen here, and just subtract PAGE_CACHE_SIZE... */
		pglen += base & ~PAGE_CACHE_MASK;
	}
	/*
	 * In case we could only do a partial xdr_kmap, all remaining iovecs
	 * refer to pages. Otherwise we detect the end through pglen.
	 */
	for (; niov; niov--) {
		flush_dcache_page(*ppage);
		kunmap(*ppage);
		if (pglen <= PAGE_CACHE_SIZE)
			break;
		pglen -= PAGE_CACHE_SIZE;
		ppage++;
	}
}

void
xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
			  skb_reader_t *desc,
			  skb_read_actor_t copy_actor)
{
	struct page	**ppage = xdr->pages;
	unsigned int	len, pglen = xdr->page_len;
	int		ret;

	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		if (ret != len || !desc->count)
			return;
		base = 0;
	} else
		base -= len;

	if (pglen == 0)
		goto copy_tail;
	if (base >= pglen) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base  += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}
	do {
		char *kaddr;

		len = PAGE_CACHE_SIZE;
		kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
		if (base) {
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
		if (ret != len || !desc->count)
			return;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	/* 'base' may still point into the tail; copy only what remains */
	if (base < len)
		copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
}

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 * 	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
}
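
/*
 * Worked example of the above (4096-byte pages, hypothetical numbers):
 * shifting len = 4 bytes from pgfrom_base = 4094 to pgto_base = 4098
 * first copies the two bytes that land in pages[1] (source offsets 0-1
 * to offsets 4-5), then wraps back into pages[0] and copies source
 * offsets 4094-4095 to pages[1] offsets 2-3.  Walking from the end of
 * the area backwards is what makes overlapping ranges safe, just as
 * with memmove().
 */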

/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgto++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: page vector address of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header iovec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct iovec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON(len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->len -= len;
}
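
/*
 * Sketch of the data flow above: removing 'len' bytes from the head
 * ripples the buffer contents to the right.  The tail's existing
 * contents are shifted right by 'len', the freed leading bytes are
 * refilled from the end of the page data (and, when len > page_len,
 * from the end of the head), the page data itself is shifted right,
 * and finally the trailing bytes of the head are copied into the
 * start of the page area.
 */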

/*
 * Currently just a wrapper around xdr_shrink_bufhead().
 */
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}