/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

/* RPC/RDMA parameters */
unsigned int svcrdma_ord = RPCRDMA_ORD;
static unsigned int min_ord = 1;
static unsigned int max_ord = 4096;
unsigned int svcrdma_max_requests = RPCRDMA_MAX_REQUESTS;
static unsigned int min_max_requests = 4;
static unsigned int max_max_requests = 16384;
unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
static unsigned int min_max_inline = 4096;
static unsigned int max_max_inline = 65536;
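
/*
 * The min/max pairs above are not used by the transport code itself;
 * they only provide the clamping bounds (extra1/extra2) for
 * proc_dointvec_minmax() in the sysctl table below.
 *
 * The atomic counters that follow are transport statistics.  Each one
 * is exported through the same sysctl table and can be reset to zero
 * by writing any value to its file (see read_reset_stat() below).
 */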

atomic_t rdma_stat_recv;
atomic_t rdma_stat_read;
atomic_t rdma_stat_write;
atomic_t rdma_stat_sq_starve;
atomic_t rdma_stat_rq_starve;
atomic_t rdma_stat_rq_poll;
atomic_t rdma_stat_rq_prod;
atomic_t rdma_stat_sq_poll;
atomic_t rdma_stat_sq_prod;

/* Temporary NFS request map and context caches */
struct kmem_cache *svc_rdma_map_cachep;
struct kmem_cache *svc_rdma_ctxt_cachep;

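/*
 * Workqueue used by the svcrdma transport; allocated in svc_rdma_init()
 * and destroyed in svc_rdma_cleanup().
 */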
struct workqueue_struct *svc_rdma_wq;

/*
 * This function implements reading and resetting an atomic_t stat
 * variable through read/write to a proc file. Any write to the file
 * resets the associated statistic to zero. Any read returns its
 * current value.
 */
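/*
 * With the sysctl tree registered below, each counter appears under
 * /proc/sys/sunrpc/svc_rdma/.  For example:
 *
 *	cat /proc/sys/sunrpc/svc_rdma/rdma_stat_recv	# read the counter
 *	echo 0 > /proc/sys/sunrpc/svc_rdma/rdma_stat_recv	# reset it
 */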
static int read_reset_stat(ctl_table *table, int write,
			   void __user *buffer, size_t *lenp,
			   loff_t *ppos)
{
	atomic_t *stat = (atomic_t *)table->data;

	if (!stat)
		return -EINVAL;

	if (write)
		atomic_set(stat, 0);
	else {
		char str_buf[32];
		char *data;
		int len = snprintf(str_buf, 32, "%d\n", atomic_read(stat));
		if (len >= 32)
			return -EFAULT;
		len = strlen(str_buf);
		if (*ppos > len) {
			*lenp = 0;
			return 0;
		}
		data = &str_buf[*ppos];
		len -= *ppos;
		if (len > *lenp)
			len = *lenp;
		/* copy from the *ppos offset computed into 'data' above */
		if (len && copy_to_user(buffer, data, len))
			return -EFAULT;
		*lenp = len;
		*ppos += len;
	}
	return 0;
}

static struct ctl_table_header *svcrdma_table_header;
static ctl_table svcrdma_parm_table[] = {
	{
		.procname = "max_requests",
		.data = &svcrdma_max_requests,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &min_max_requests,
		.extra2 = &max_max_requests
	},
	{
		.procname = "max_req_size",
		.data = &svcrdma_max_req_size,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &min_max_inline,
		.extra2 = &max_max_inline
	},
	{
		.procname = "max_outbound_read_requests",
		.data = &svcrdma_ord,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &min_ord,
		.extra2 = &max_ord,
	},

	{
		.procname = "rdma_stat_read",
		.data = &rdma_stat_read,
		.maxlen = sizeof(atomic_t),
		.mode = 0644,
		.proc_handler = read_reset_stat,
	},
	{
		.procname = "rdma_stat_recv",
		.data = &rdma_stat_recv,
		.maxlen = sizeof(atomic_t),
		.mode = 0644,
		.proc_handler = read_reset_stat,
	},
	{
		.procname = "rdma_stat_write",
		.data = &rdma_stat_write,
		.maxlen = sizeof(atomic_t),
		.mode = 0644,
		.proc_handler = read_reset_stat,
	},
	{
		.procname = "rdma_stat_sq_starve",
		.data = &rdma_stat_sq_starve,
		.maxlen = sizeof(atomic_t),
		.mode = 0644,
		.proc_handler = read_reset_stat,
	},
	{
		.procname = "rdma_stat_rq_starve",
		.data = &rdma_stat_rq_starve,
		.maxlen = sizeof(atomic_t),
		.mode = 0644,
		.proc_handler = read_reset_stat,
	},
	{
		.procname = "rdma_stat_rq_poll",
		.data = &rdma_stat_rq_poll,
		.maxlen = sizeof(atomic_t),
		.mode = 0644,
		.proc_handler = read_reset_stat,
	},
	{
		.procname = "rdma_stat_rq_prod",
		.data = &rdma_stat_rq_prod,
		.maxlen = sizeof(atomic_t),
		.mode = 0644,
		.proc_handler = read_reset_stat,
	},
	{
		.procname = "rdma_stat_sq_poll",
		.data = &rdma_stat_sq_poll,
		.maxlen = sizeof(atomic_t),
		.mode = 0644,
		.proc_handler = read_reset_stat,
	},
	{
		.procname = "rdma_stat_sq_prod",
		.data = &rdma_stat_sq_prod,
		.maxlen = sizeof(atomic_t),
		.mode = 0644,
		.proc_handler = read_reset_stat,
	},
	{ },
};

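/*
 * Chaining svcrdma_parm_table under "svc_rdma", and "svc_rdma" under
 * "sunrpc", registers the entries above as
 * /proc/sys/sunrpc/svc_rdma/<name>.
 */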
static ctl_table svcrdma_table[] = {
	{
		.procname = "svc_rdma",
		.mode = 0555,
		.child = svcrdma_parm_table
	},
	{ },
};

static ctl_table svcrdma_root_table[] = {
	{
		.procname = "sunrpc",
		.mode = 0555,
		.child = svcrdma_table
	},
	{ },
};

void svc_rdma_cleanup(void)
{
	dprintk("SVCRDMA Module Removed, deregister RPC RDMA transport\n");
	destroy_workqueue(svc_rdma_wq);
	if (svcrdma_table_header) {
		unregister_sysctl_table(svcrdma_table_header);
		svcrdma_table_header = NULL;
	}
	svc_unreg_xprt_class(&svc_rdma_class);
	kmem_cache_destroy(svc_rdma_map_cachep);
	kmem_cache_destroy(svc_rdma_ctxt_cachep);
}

int svc_rdma_init(void)
{
	dprintk("SVCRDMA Module Init, register RPC RDMA transport\n");
	dprintk("\tsvcrdma_ord : %d\n", svcrdma_ord);
	dprintk("\tmax_requests : %d\n", svcrdma_max_requests);
	dprintk("\tsq_depth : %d\n",
		svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT);
	dprintk("\tmax_inline : %d\n", svcrdma_max_req_size);

	svc_rdma_wq = alloc_workqueue("svc_rdma", 0, 0);
	if (!svc_rdma_wq)
		return -ENOMEM;

	if (!svcrdma_table_header)
		svcrdma_table_header =
			register_sysctl_table(svcrdma_root_table);

	/* Create the temporary map cache */
	svc_rdma_map_cachep = kmem_cache_create("svc_rdma_map_cache",
						sizeof(struct svc_rdma_req_map),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!svc_rdma_map_cachep) {
		printk(KERN_INFO "Could not allocate map cache.\n");
		goto err0;
	}

	/* Create the temporary context cache */
	svc_rdma_ctxt_cachep =
		kmem_cache_create("svc_rdma_ctxt_cache",
				  sizeof(struct svc_rdma_op_ctxt),
				  0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	if (!svc_rdma_ctxt_cachep) {
		printk(KERN_INFO "Could not allocate WR ctxt cache.\n");
		goto err1;
	}

	/* Register RDMA with the SVC transport switch */
	svc_reg_xprt_class(&svc_rdma_class);
	return 0;
 err1:
	kmem_cache_destroy(svc_rdma_map_cachep);
 err0:
	unregister_sysctl_table(svcrdma_table_header);
	destroy_workqueue(svc_rdma_wq);
	return -ENOMEM;
}
MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("SVC RDMA Transport");
MODULE_LICENSE("Dual BSD/GPL");
module_init(svc_rdma_init);
module_exit(svc_rdma_cleanup);