1 /*
2  * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  */
15 
16 #include <linux/module.h>
17 #include <linux/crypto.h>
18 #include <linux/fs.h>
19 #include <linux/jhash.h>
20 #include <linux/hash.h>
21 #include <linux/ktime.h>
22 #include <linux/mempool.h>
23 #include <linux/mm.h>
24 #include <linux/mount.h>
25 #include <linux/pagemap.h>
26 #include <linux/parser.h>
27 #include <linux/poll.h>
28 #include <linux/swap.h>
29 #include <linux/slab.h>
30 #include <linux/statfs.h>
31 #include <linux/writeback.h>
32 
33 #include "netfs.h"
34 
/* Slab cache and mempool backing struct netfs_trans_dst allocations. */
static struct kmem_cache *netfs_trans_dst;
static mempool_t *netfs_trans_dst_pool;
37 
netfs_trans_init_static(struct netfs_trans * t,int num,int size)38 static void netfs_trans_init_static(struct netfs_trans *t, int num, int size)
39 {
40 	t->page_num = num;
41 	t->total_size = size;
42 	atomic_set(&t->refcnt, 1);
43 
44 	spin_lock_init(&t->dst_lock);
45 	INIT_LIST_HEAD(&t->dst_list);
46 }
47 
/*
 * Send every attached page of transaction @t over state @st's socket.
 * Each page is preceded by its own NETFS_WRITE_PAGE command header;
 * the per-page payload length was stashed in page_private() by the
 * code that attached the page.
 *
 * When a crypto engine is attached (t->eng), its shadow pages
 * (t->eng->pages) are sent in place of the plain ones, while
 * t->pages is still scanned to find which slots are populated.
 *
 * Returns 0 on success or a negative error; on a send failure the
 * network state is torn down via netfs_state_exit().
 */
static int netfs_trans_send_pages(struct netfs_trans *t, struct netfs_state *st)
{
	int err = 0;
	unsigned int i, attached_pages = t->attached_pages, ci;
	struct msghdr msg;
	struct page **pages = (t->eng)?t->eng->pages:t->pages;
	struct page *p;
	unsigned int size;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = MSG_WAITALL | MSG_MORE;

	/* ci indexes the (possibly encrypted) send array and only advances
	 * for populated slots; i walks all page slots of the transaction. */
	ci = 0;
	for (i=0; i<t->page_num; ++i) {
		struct page *page = pages[ci];
		struct netfs_cmd cmd;
		struct iovec io;

		p = t->pages[i];

		/* Empty slot: no page was attached at this index. */
		if (!p)
			continue;

		size = page_private(p);

		io.iov_base = &cmd;
		io.iov_len = sizeof(struct netfs_cmd);

		cmd.cmd = NETFS_WRITE_PAGE;
		cmd.ext = 0;
		cmd.id = 0;
		cmd.size = size;
		cmd.start = p->index;
		cmd.start <<= PAGE_CACHE_SHIFT;
		cmd.csize = 0;
		cmd.cpad = 0;
		cmd.iv = pohmelfs_gen_iv(t);

		/* Convert the header to on-wire byte order. */
		netfs_convert_cmd(&cmd);

		msg.msg_iov = &io;
		msg.msg_iovlen = 1;
		msg.msg_flags = MSG_WAITALL | MSG_MORE;

		err = kernel_sendmsg(st->socket, &msg, (struct kvec *)msg.msg_iov, 1, sizeof(struct netfs_cmd));
		if (err <= 0) {
			printk("%s: %d/%d failed to send transaction header: t: %p, gen: %u, err: %d.\n",
					__func__, i, t->page_num, t, t->gen, err);
			if (err == 0)
				err = -ECONNRESET;
			goto err_out;
		}

		/* Drop MSG_MORE on the last remaining page so the socket
		 * flushes the whole transaction. */
		msg.msg_flags = MSG_WAITALL | (attached_pages == 1 ? 0 :
				MSG_MORE);

		err = kernel_sendpage(st->socket, page, 0, size, msg.msg_flags);
		if (err <= 0) {
			printk("%s: %d/%d failed to send transaction page: t: %p, gen: %u, size: %u, err: %d.\n",
					__func__, i, t->page_num, t, t->gen, size, err);
			if (err == 0)
				err = -ECONNRESET;
			goto err_out;
		}

		dprintk("%s: %d/%d sent t: %p, gen: %u, page: %p/%p, size: %u.\n",
			__func__, i, t->page_num, t, t->gen, page, p, size);

		err = 0;
		attached_pages--;
		if (!attached_pages)
			break;
		ci++;

		continue;

err_out:
		/* Send failed: log, reset the connection and bail out. */
		printk("%s: t: %p, gen: %u, err: %d.\n", __func__, t, t->gen, err);
		netfs_state_exit(st);
		break;
	}

	return err;
}
135 
/*
 * Send transaction @t to network state @st: first the contiguous
 * iovec part (headers plus inline payload), then any attached pages
 * via netfs_trans_send_pages().
 *
 * Runs under the state's send lock; (re)establishes the connection
 * if the socket is not yet set up. The result is stored in
 * t->result and also returned (0 or negative error).
 */
int netfs_trans_send(struct netfs_trans *t, struct netfs_state *st)
{
	int err;
	struct msghdr msg;

	BUG_ON(!t->iovec.iov_len);
	/* Sanity cap: contiguous part must stay below 1 GiB. */
	BUG_ON(t->iovec.iov_len > 1024*1024*1024);

	netfs_state_lock_send(st);
	if (!st->socket) {
		/* Lazily (re)connect if there is no live socket. */
		err = netfs_state_init(st);
		if (err)
			goto err_out_unlock_return;
	}

	msg.msg_iov = &t->iovec;
	msg.msg_iovlen = 1;
	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = MSG_WAITALL;

	/* More data (the pages) follows: keep the socket from flushing. */
	if (t->attached_pages)
		msg.msg_flags |= MSG_MORE;

	err = kernel_sendmsg(st->socket, &msg, (struct kvec *)msg.msg_iov, 1, t->iovec.iov_len);
	if (err <= 0) {
		printk("%s: failed to send contig transaction: t: %p, gen: %u, size: %zu, err: %d.\n",
				__func__, t, t->gen, t->iovec.iov_len, err);
		if (err == 0)
			err = -ECONNRESET;
		goto err_out_unlock_return;
	}

	dprintk("%s: sent %s transaction: t: %p, gen: %u, size: %zu, page_num: %u.\n",
			__func__, (t->page_num)?"partial":"full",
			t, t->gen, t->iovec.iov_len, t->page_num);

	err = 0;
	if (t->attached_pages)
		err = netfs_trans_send_pages(t, st);

err_out_unlock_return:

	/* A failed send may have flagged the state for reconnection. */
	if (st->need_reset)
		netfs_state_exit(st);

	netfs_state_unlock_send(st);

	dprintk("%s: t: %p, gen: %u, err: %d.\n",
		__func__, t, t->gen, err);

	t->result = err;
	return err;
}
192 
/*
 * Three-way comparison of transaction generation numbers, used as the
 * rb-tree ordering key: 1 when @gen is less than @new, -1 when it is
 * greater, 0 on an exact match.
 */
static inline int netfs_trans_cmp(unsigned int gen, unsigned int new)
{
	return (gen < new) - (gen > new);
}
201 
/*
 * Look up the destination entry for generation @gen in state @st's
 * rb-tree of in-flight transactions. Returns the entry or NULL.
 *
 * Note the comparison convention: netfs_trans_cmp() returns a
 * negative value when the node's gen is GREATER than the key, so a
 * negative result descends left (smaller keys). The tree insert path
 * (netfs_trans_insert) uses the same convention.
 *
 * Caller is expected to hold the appropriate state lock.
 */
struct netfs_trans_dst *netfs_trans_search(struct netfs_state *st, unsigned int gen)
{
	struct rb_root *root = &st->trans_root;
	struct rb_node *n = root->rb_node;
	struct netfs_trans_dst *tmp, *ret = NULL;
	struct netfs_trans *t;
	int cmp;

	while (n) {
		tmp = rb_entry(n, struct netfs_trans_dst, state_entry);
		t = tmp->trans;

		cmp = netfs_trans_cmp(t->gen, gen);
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else {
			ret = tmp;
			break;
		}
	}

	return ret;
}
227 
/*
 * Insert destination entry @ndst into state @st's transaction rb-tree,
 * keyed by the transaction generation number, and stamp its send time.
 *
 * Returns 0 on success or -EEXIST if an entry with the same generation
 * is already present (logged with both entries' details).
 *
 * Caller must hold st->trans_lock.
 */
static int netfs_trans_insert(struct netfs_trans_dst *ndst, struct netfs_state *st)
{
	struct rb_root *root = &st->trans_root;
	struct rb_node **n = &root->rb_node, *parent = NULL;
	struct netfs_trans_dst *ret = NULL, *tmp;
	struct netfs_trans *t = NULL, *new = ndst->trans;
	int cmp;

	while (*n) {
		parent = *n;

		tmp = rb_entry(parent, struct netfs_trans_dst, state_entry);
		t = tmp->trans;

		cmp = netfs_trans_cmp(t->gen, new->gen);
		if (cmp < 0)
			n = &parent->rb_left;
		else if (cmp > 0)
			n = &parent->rb_right;
		else {
			ret = tmp;
			break;
		}
	}

	if (ret) {
		/* Duplicate generation: refuse the insert. */
		printk("%s: exist: old: gen: %u, flags: %x, send_time: %lu, "
				"new: gen: %u, flags: %x, send_time: %lu.\n",
			__func__, t->gen, t->flags, ret->send_time,
			new->gen, new->flags, ndst->send_time);
		return -EEXIST;
	}

	rb_link_node(&ndst->state_entry, parent, n);
	rb_insert_color(&ndst->state_entry, root);
	ndst->send_time = jiffies;

	return 0;
}
267 
/*
 * Unlink @dst from state @st's transaction rb-tree without taking the
 * state lock (caller must hold st->trans_lock).
 *
 * The raw rb-node field rb_parent_color doubles as a "linked" marker:
 * it is non-zero for a node that is in a tree and is cleared here so a
 * second removal becomes a no-op. NOTE(review): this peeks at an
 * rbtree implementation detail rather than using an accessor.
 *
 * Returns 1 if the node was actually erased, 0 if it was not linked.
 */
int netfs_trans_remove_nolock(struct netfs_trans_dst *dst, struct netfs_state *st)
{
	if (dst && dst->state_entry.rb_parent_color) {
		rb_erase(&dst->state_entry, &st->trans_root);
		dst->state_entry.rb_parent_color = 0;
		return 1;
	}
	return 0;
}
277 
netfs_trans_remove_state(struct netfs_trans_dst * dst)278 static int netfs_trans_remove_state(struct netfs_trans_dst *dst)
279 {
280 	int ret;
281 	struct netfs_state *st = dst->state;
282 
283 	mutex_lock(&st->trans_lock);
284 	ret = netfs_trans_remove_nolock(dst, st);
285 	mutex_unlock(&st->trans_lock);
286 
287 	return ret;
288 }
289 
290 /*
291  * Create new destination for given transaction associated with given network state.
292  * Transaction's reference counter is bumped and will be dropped when either
293  * reply is received or when async timeout detection task will fail resending
294  * and drop transaction.
295  */
static int netfs_trans_push_dst(struct netfs_trans *t, struct netfs_state *st)
{
	struct netfs_trans_dst *dst;
	int err;

	dst = mempool_alloc(netfs_trans_dst_pool, GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	dst->retries = 0;
	dst->send_time = 0;
	dst->state = st;
	dst->trans = t;
	/* Reference held by this destination; dropped on reply/timeout. */
	netfs_trans_get(t);

	mutex_lock(&st->trans_lock);
	err = netfs_trans_insert(dst, st);
	mutex_unlock(&st->trans_lock);

	if (err)
		goto err_out_free;

	spin_lock(&t->dst_lock);
	list_add_tail(&dst->trans_entry, &t->dst_list);
	spin_unlock(&t->dst_lock);

	return 0;

err_out_free:
	/* Insert failed (duplicate gen): undo the reference and free. */
	t->result = err;
	netfs_trans_put(t);
	mempool_free(dst, netfs_trans_dst_pool);
	return err;
}
330 
netfs_trans_free_dst(struct netfs_trans_dst * dst)331 static void netfs_trans_free_dst(struct netfs_trans_dst *dst)
332 {
333 	netfs_trans_put(dst->trans);
334 	mempool_free(dst, netfs_trans_dst_pool);
335 }
336 
/*
 * Unlink @dst from its state tree and, only if it was still linked
 * there, free it (dropping the transaction reference).
 */
static void netfs_trans_remove_dst(struct netfs_trans_dst *dst)
{
	if (!netfs_trans_remove_state(dst))
		return;

	netfs_trans_free_dst(dst);
}
342 
343 /*
344  * Drop destination transaction entry when we know it.
345  */
netfs_trans_drop_dst(struct netfs_trans_dst * dst)346 void netfs_trans_drop_dst(struct netfs_trans_dst *dst)
347 {
348 	struct netfs_trans *t = dst->trans;
349 
350 	spin_lock(&t->dst_lock);
351 	list_del_init(&dst->trans_entry);
352 	spin_unlock(&t->dst_lock);
353 
354 	netfs_trans_remove_dst(dst);
355 }
356 
357 /*
358  * Drop destination transaction entry when we know it and when we
359  * already removed dst from state tree.
360  */
netfs_trans_drop_dst_nostate(struct netfs_trans_dst * dst)361 void netfs_trans_drop_dst_nostate(struct netfs_trans_dst *dst)
362 {
363 	struct netfs_trans *t = dst->trans;
364 
365 	spin_lock(&t->dst_lock);
366 	list_del_init(&dst->trans_entry);
367 	spin_unlock(&t->dst_lock);
368 
369 	netfs_trans_free_dst(dst);
370 }
371 
372 /*
373  * This drops destination transaction entry from appropriate network state
374  * tree and drops related reference counter. It is possible that transaction
375  * will be freed here if its reference counter hits zero.
376  * Destination transaction entry will be freed.
377  */
netfs_trans_drop_trans(struct netfs_trans * t,struct netfs_state * st)378 void netfs_trans_drop_trans(struct netfs_trans *t, struct netfs_state *st)
379 {
380 	struct netfs_trans_dst *dst, *tmp, *ret = NULL;
381 
382 	spin_lock(&t->dst_lock);
383 	list_for_each_entry_safe(dst, tmp, &t->dst_list, trans_entry) {
384 		if (dst->state == st) {
385 			ret = dst;
386 			list_del(&dst->trans_entry);
387 			break;
388 		}
389 	}
390 	spin_unlock(&t->dst_lock);
391 
392 	if (ret)
393 		netfs_trans_remove_dst(ret);
394 }
395 
396 /*
397  * This drops destination transaction entry from appropriate network state
398  * tree and drops related reference counter. It is possible that transaction
399  * will be freed here if its reference counter hits zero.
400  * Destination transaction entry will be freed.
401  */
void netfs_trans_drop_last(struct netfs_trans *t, struct netfs_state *st)
{
	struct netfs_trans_dst *dst, *tmp, *ret;

	spin_lock(&t->dst_lock);
	/* Fast path: the most recently pushed destination (list tail)
	 * usually belongs to @st, since this undoes a push that just
	 * failed to send. */
	ret = list_entry(t->dst_list.prev, struct netfs_trans_dst, trans_entry);
	if (ret->state != st) {
		/* Slow path: scan the whole list for @st's entry. */
		ret = NULL;
		list_for_each_entry_safe(dst, tmp, &t->dst_list, trans_entry) {
			if (dst->state == st) {
				ret = dst;
				list_del_init(&dst->trans_entry);
				break;
			}
		}
	} else {
		list_del(&ret->trans_entry);
	}
	spin_unlock(&t->dst_lock);

	if (ret)
		netfs_trans_remove_dst(ret);
}
425 
netfs_trans_push(struct netfs_trans * t,struct netfs_state * st)426 static int netfs_trans_push(struct netfs_trans *t, struct netfs_state *st)
427 {
428 	int err;
429 
430 	err = netfs_trans_push_dst(t, st);
431 	if (err)
432 		return err;
433 
434 	err = netfs_trans_send(t, st);
435 	if (err)
436 		goto err_out_free;
437 
438 	if (t->flags & NETFS_TRANS_SINGLE_DST)
439 		pohmelfs_switch_active(st->psb);
440 
441 	return 0;
442 
443 err_out_free:
444 	t->result = err;
445 	netfs_trans_drop_last(t, st);
446 
447 	return err;
448 }
449 
/*
 * Push transaction @t to the configured network states of superblock
 * @psb. Read-class (NETFS_TRANS_SINGLE_DST) transactions go to a
 * single readable state — redirected to the active state when its
 * priority is at least as high — while write-class transactions are
 * broadcast to every writable state.
 *
 * Returns the result of the last push attempt (-ENODEV when no state
 * matched at all); a failure is also recorded in t->result.
 */
int netfs_trans_finish_send(struct netfs_trans *t, struct pohmelfs_sb *psb)
{
	struct pohmelfs_config *c;
	int err = -ENODEV;
	struct netfs_state *st;
#if 0
	dprintk("%s: t: %p, gen: %u, size: %u, page_num: %u, active: %p.\n",
		__func__, t, t->gen, t->iovec.iov_len, t->page_num, psb->active_state);
#endif
	mutex_lock(&psb->state_lock);
	list_for_each_entry(c, &psb->state_list, config_entry) {
		st = &c->state;

		/* Skip states lacking the permission this class needs. */
		if (t->flags & NETFS_TRANS_SINGLE_DST) {
			if (!(st->ctl.perm & POHMELFS_IO_PERM_READ))
				continue;
		} else {
			if (!(st->ctl.perm & POHMELFS_IO_PERM_WRITE))
				continue;
		}

		/* Prefer the active state for single-destination sends when
		 * its priority is not lower than the candidate's. */
		if (psb->active_state && (psb->active_state->state.ctl.prio >= st->ctl.prio) &&
				(t->flags & NETFS_TRANS_SINGLE_DST))
			st = &psb->active_state->state;

		err = netfs_trans_push(t, st);
		if (!err && (t->flags & NETFS_TRANS_SINGLE_DST))
			break;
	}

	mutex_unlock(&psb->state_lock);
#if 0
	dprintk("%s: fully sent t: %p, gen: %u, size: %u, page_num: %u, err: %d.\n",
		__func__, t, t->gen, t->iovec.iov_len, t->page_num, err);
#endif
	if (err)
		t->result = err;
	return err;
}
489 
/*
 * Finalize transaction @t: assign its generation number, fill in the
 * leading NETFS_TRANS command header (size covers the inline payload,
 * attached page data and one per-page header each), then hand it to
 * the crypto/send path and drop the caller's reference.
 *
 * Returns 0 or the negative error from pohmelfs_trans_crypt().
 */
int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
{
	int err;
	struct netfs_cmd *cmd = t->iovec.iov_base;

	t->gen = atomic_inc_return(&psb->trans_gen);

	cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
		t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
	cmd->cmd = NETFS_TRANS;
	cmd->start = t->gen;
	cmd->id = 0;

	if (psb->perform_crypto) {
		cmd->ext = psb->crypto_attached_size;
		cmd->csize = psb->crypto_attached_size;
	}

	dprintk("%s: t: %u, size: %u, iov_len: %zu, attached_size: %u, attached_pages: %u.\n",
			__func__, t->gen, cmd->size, t->iovec.iov_len, t->attached_size, t->attached_pages);
	err = pohmelfs_trans_crypt(t, psb);
	if (err) {
		t->result = err;
		/* NOTE(review): presumably converts the header back to host
		 * byte order so the debug print below shows sane values —
		 * confirm against pohmelfs_trans_crypt()'s conversion. */
		netfs_convert_cmd(cmd);
		dprintk("%s: trans: %llu, crypto_attached_size: %u, attached_size: %u, attached_pages: %d, trans_size: %u, err: %d.\n",
			__func__, cmd->start, psb->crypto_attached_size, t->attached_size, t->attached_pages, cmd->size, err);
	}
	netfs_trans_put(t);
	return err;
}
520 
521 /*
522  * Resend transaction to remote server(s).
523  * If new servers were added into superblock, we can try to send data
524  * to them too.
525  *
526  * It is called under superblock's state_lock, so we can safely
527  * dereference psb->state_list. Also, transaction's reference counter is
528  * bumped, so it can not go away under us, thus we can safely access all
529  * its members. State is locked.
530  *
531  * This function returns 0 if transaction was successfully sent to at
532  * least one destination target.
533  */
int netfs_trans_resend(struct netfs_trans *t, struct pohmelfs_sb *psb)
{
	struct netfs_trans_dst *dst;
	struct netfs_state *st;
	struct pohmelfs_config *c;
	int err, exist, error = -ENODEV;

	list_for_each_entry(c, &psb->state_list, config_entry) {
		st = &c->state;

		/* Was this transaction already pushed to this state? */
		exist = 0;
		spin_lock(&t->dst_lock);
		list_for_each_entry(dst, &t->dst_list, trans_entry) {
			if (st == dst->state) {
				exist = 1;
				break;
			}
		}
		spin_unlock(&t->dst_lock);

		if (exist) {
			/* Known destination: just retransmit. For single-
			 * destination transactions only do so when this is
			 * the last configured state (no fresh target left). */
			if (!(t->flags & NETFS_TRANS_SINGLE_DST) ||
					(c->config_entry.next == &psb->state_list)) {
				dprintk("%s: resending st: %p, t: %p, gen: %u.\n",
						__func__, st, t, t->gen);
				err = netfs_trans_send(t, st);
				if (!err)
					error = 0;
			}
			continue;
		}

		/* New state (e.g. added after the original send): push the
		 * transaction there as a fresh destination. */
		dprintk("%s: pushing/resending st: %p, t: %p, gen: %u.\n",
				__func__, st, t, t->gen);
		err = netfs_trans_push(t, st);
		if (err)
			continue;
		error = 0;
		if (t->flags & NETFS_TRANS_SINGLE_DST)
			break;
	}

	t->result = error;
	return error;
}
579 
netfs_trans_add(struct netfs_trans * t,unsigned int size)580 void *netfs_trans_add(struct netfs_trans *t, unsigned int size)
581 {
582 	struct iovec *io = &t->iovec;
583 	void *ptr;
584 
585 	if (size > t->total_size) {
586 		ptr = ERR_PTR(-EINVAL);
587 		goto out;
588 	}
589 
590 	if (io->iov_len + size > t->total_size) {
591 		dprintk("%s: too big size t: %p, gen: %u, iov_len: %zu, size: %u, total: %u.\n",
592 				__func__, t, t->gen, io->iov_len, size, t->total_size);
593 		ptr = ERR_PTR(-E2BIG);
594 		goto out;
595 	}
596 
597 	ptr = io->iov_base + io->iov_len;
598 	io->iov_len += size;
599 
600 out:
601 	dprintk("%s: t: %p, gen: %u, size: %u, total: %zu.\n",
602 		__func__, t, t->gen, size, io->iov_len);
603 	return ptr;
604 }
605 
/*
 * Final destruction of transaction @t: wake the crypto thread that
 * owns the attached engine (if any) so it can be reused, then free
 * the transaction allocation itself.
 */
void netfs_trans_free(struct netfs_trans *t)
{
	if (t->eng)
		pohmelfs_crypto_thread_make_ready(t->eng->thread);
	kfree(t);
}
612 
/*
 * Allocate a transaction able to hold @size bytes of inline payload
 * (rounded up for crypto alignment) and up to @nr attached pages.
 * @nr is clamped so the whole allocation stays within one page.
 * Returns the transaction with one reference held, or NULL on OOM.
 */
struct netfs_trans *netfs_trans_alloc(struct pohmelfs_sb *psb, unsigned int size,
		unsigned int flags, unsigned int nr)
{
	struct netfs_trans *t;
	unsigned int num, cont, pad, size_no_trans;
	unsigned int crypto_added = 0;
	struct netfs_cmd *cmd;

	if (psb->perform_crypto)
		crypto_added = psb->crypto_attached_size;

	/*
	 * |sizeof(struct netfs_trans)|
	 * |sizeof(struct netfs_cmd)| - transaction header
	 * |size| - buffer with requested size
	 * |padding| - crypto padding, zero bytes
	 * |nr * sizeof(struct page *)| - array of page pointers
	 *
	 * Overall size should be less than PAGE_SIZE for guaranteed allocation.
	 */

	/* Round the payload up to the crypto block size; pad is the
	 * number of zero padding bytes this adds. */
	cont = size;
	size = ALIGN(size, psb->crypto_align_size);
	pad = size - cont;

	/* Buffer size excluding struct netfs_trans: payload plus two
	 * command headers plus any attached crypto data. */
	size_no_trans = size + sizeof(struct netfs_cmd) * 2 + crypto_added;

	cont = sizeof(struct netfs_trans) + size_no_trans;

	/* Clamp the page array so the total fits in a single page. */
	num = (PAGE_SIZE - cont)/sizeof(struct page *);

	if (nr > num)
		nr = num;

	t = kzalloc(cont + nr*sizeof(struct page *), GFP_NOIO);
	if (!t)
		goto err_out_exit;

	/* iovec buffer sits right after the struct; the page pointer
	 * array right after the buffer. */
	t->iovec.iov_base = (void *)(t + 1);
	t->pages = (struct page **)(t->iovec.iov_base + size_no_trans);

	/*
	 * Reserving space for transaction header.
	 */
	t->iovec.iov_len = sizeof(struct netfs_cmd) + crypto_added;

	netfs_trans_init_static(t, nr, size_no_trans);

	t->flags = flags;
	t->psb = psb;

	cmd = (struct netfs_cmd *)t->iovec.iov_base;

	cmd->size = size;
	cmd->cpad = pad;
	cmd->csize = crypto_added;

	dprintk("%s: t: %p, gen: %u, size: %u, padding: %u, align_size: %u, flags: %x, "
			"page_num: %u, base: %p, pages: %p.\n",
			__func__, t, t->gen, size, pad, psb->crypto_align_size, flags, nr,
			t->iovec.iov_base, t->pages);

	return t;

err_out_exit:
	return NULL;
}
680 
netfs_trans_init(void)681 int netfs_trans_init(void)
682 {
683 	int err = -ENOMEM;
684 
685 	netfs_trans_dst = kmem_cache_create("netfs_trans_dst", sizeof(struct netfs_trans_dst),
686 			0, 0, NULL);
687 	if (!netfs_trans_dst)
688 		goto err_out_exit;
689 
690 	netfs_trans_dst_pool = mempool_create_slab_pool(256, netfs_trans_dst);
691 	if (!netfs_trans_dst_pool)
692 		goto err_out_free;
693 
694 	return 0;
695 
696 err_out_free:
697 	kmem_cache_destroy(netfs_trans_dst);
698 err_out_exit:
699 	return err;
700 }
701 
/*
 * Module-level teardown: destroy the mempool before the slab cache it
 * draws from, mirroring netfs_trans_init().
 */
void netfs_trans_exit(void)
{
	mempool_destroy(netfs_trans_dst_pool);
	kmem_cache_destroy(netfs_trans_dst);
}
707