/******************************************************************************

(c) 2007 Network Appliance, Inc.  All Rights Reserved.
(c) 2009 NetApp.  All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count > 0;
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	return xprt->bc_alloc_count -= n;
}

/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
	struct xdr_buf *xbufp;

	dprintk("RPC:        free allocations for req= %p\n", req);
	BUG_ON(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	/*
	 * Free the receive page via rq_rcv_buf, not rq_private_buf:
	 * rq_private_buf only becomes a copy of rq_rcv_buf once the
	 * request has been used, so rq_rcv_buf holds the page in
	 * either case.
	 */
	xbufp = &req->rq_rcv_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	xbufp = &req->rq_snd_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	list_del(&req->rq_bc_pa_list);
	kfree(req);
}

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel.  This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt.  The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt.  Any one of these resources may be used by an
 * incoming callback request.  It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large.  For example, a pNFS server
 * using multiple deviceids.  The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests.  Each deviceID is 16 bytes, so allocate one page for the
 * arguments; a 4096-byte page leaves room for up to 256 such
 * deviceIDs.  The NFS client indicates to the pNFS server that its
 * callback requests can be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	struct page *page_rcv = NULL, *page_snd = NULL;
	struct xdr_buf *xbufp = NULL;
	struct rpc_rqst *req, *tmp;
	struct list_head tmp_list;
	int i;

	dprintk("RPC:       setup backchannel transport\n");

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers.  Once we're done building the list we splice it
	 * into the backchannel preallocation list off of the rpc_xprt
	 * struct.  This helps minimize the amount of time the list
	 * lock is held on the rpc_xprt struct.  It also makes cleanup
	 * easier in case of memory allocation errors.
	 */
	INIT_LIST_HEAD(&tmp_list);
	for (i = 0; i < min_reqs; i++) {
		/* Pre-allocate one backchannel rpc_rqst */
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (req == NULL) {
			printk(KERN_ERR "Failed to create bc rpc_rqst\n");
			goto out_free;
		}

		/* Add the allocated buffer to the tmp list */
		dprintk("RPC:       adding req= %p\n", req);
		list_add(&req->rq_bc_pa_list, &tmp_list);

		req->rq_xprt = xprt;
		INIT_LIST_HEAD(&req->rq_list);
		INIT_LIST_HEAD(&req->rq_bc_list);

		/* Preallocate one XDR receive buffer */
		page_rcv = alloc_page(GFP_KERNEL);
		if (page_rcv == NULL) {
			printk(KERN_ERR "Failed to create bc receive xbuf\n");
			goto out_free;
		}
		xbufp = &req->rq_rcv_buf;
		xbufp->head[0].iov_base = page_address(page_rcv);
		xbufp->head[0].iov_len = PAGE_SIZE;
		xbufp->tail[0].iov_base = NULL;
		xbufp->tail[0].iov_len = 0;
		xbufp->page_len = 0;
		xbufp->len = PAGE_SIZE;
		xbufp->buflen = PAGE_SIZE;

		/* Preallocate one XDR send buffer */
		page_snd = alloc_page(GFP_KERNEL);
		if (page_snd == NULL) {
			printk(KERN_ERR "Failed to create bc snd xbuf\n");
			goto out_free;
		}

		xbufp = &req->rq_snd_buf;
		xbufp->head[0].iov_base = page_address(page_snd);
		xbufp->head[0].iov_len = 0;
		xbufp->tail[0].iov_base = NULL;
		xbufp->tail[0].iov_len = 0;
		xbufp->page_len = 0;
		xbufp->len = 0;
		xbufp->buflen = PAGE_SIZE;
	}

	/*
	 * Add the temporary list to the backchannel preallocation list
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt_inc_alloc_count(xprt, min_reqs);
	spin_unlock_bh(&xprt->bc_pa_lock);

	dprintk("RPC:       setup backchannel transport done\n");
	return 0;

out_free:
	/*
	 * Memory allocation failed, free the temporary list
	 */
	list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
		xprt_free_allocation(req);

	dprintk("RPC:       setup backchannel transport failed\n");
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);

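/*
 * Usage sketch (illustrative, not part of this file): an NFSv4.1 client
 * preallocates backchannel resources when it creates a session, sizing
 * the pool to the number of callback slots it advertises to the server.
 * The caller shown below is an assumption for illustration; the actual
 * call site lives in the NFS client code, not here.
 *
 *	int rc;
 *
 *	rc = xprt_setup_backchannel(clnt->cl_xprt, NFS41_BC_MIN_CALLBACKS);
 *	if (rc)
 *		goto out_session_failed;	(rc is -ENOMEM on failure)
 */
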
/*
 * Destroys the backchannel preallocated structures.
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 * @xprt:	the transport holding the preallocated structures
 * @max_reqs:	the maximum number of preallocated structures to destroy
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	struct rpc_rqst *req = NULL, *tmp = NULL;

	dprintk("RPC:        destroy backchannel transport\n");

	BUG_ON(max_reqs == 0);
	spin_lock_bh(&xprt->bc_pa_lock);
	xprt_dec_alloc_count(xprt, max_reqs);
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC:        req=%p\n", req);
		xprt_free_allocation(req);
		if (--max_reqs == 0)
			break;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

	dprintk("RPC:        backchannel list empty= %s\n",
		list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);

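/*
 * Teardown sketch (illustrative; the caller shown is an assumption):
 * the session owner destroys the same number of slots it set up, so
 * that paired setup/destroy calls keep bc_alloc_count balanced:
 *
 *	xprt_destroy_backchannel(clnt->cl_xprt, NFS41_BC_MIN_CALLBACKS);
 */
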
/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup.  Buffer space for the send and private XDR buffers
 * has been preallocated as well.  Use xprt_alloc_bc_request to obtain
 * one of these preallocated requests; use xprt_free_bc_request to
 * return it.
 *
 * We know that we're called in soft interrupt context, so we can take
 * the plain spin_lock; there is no need for the bottom-half variant.
 *
 * Returns an available rpc_rqst, or NULL if none are available.
 */
struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;

	dprintk("RPC:       allocate a backchannel request\n");
	spin_lock(&xprt->bc_pa_lock);
	if (!list_empty(&xprt->bc_pa_list)) {
		req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
				rq_bc_pa_list);
		list_del(&req->rq_bc_pa_list);
	} else {
		req = NULL;
	}
	spin_unlock(&xprt->bc_pa_lock);

	if (req != NULL) {
		set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
		req->rq_reply_bytes_recvd = 0;
		req->rq_bytes_sent = 0;
		memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));
	}
	dprintk("RPC:       backchannel req=%p\n", req);
	return req;
}

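/*
 * Receive-path sketch (illustrative; the real caller is the transport's
 * receive code, e.g. the TCP routines in xprtsock.c): when a callback
 * request arrives on the connection, the transport grabs a preallocated
 * slot before reading the call into its receive buffer:
 *
 *	req = xprt_alloc_bc_request(xprt);
 *	if (req == NULL) {
 *		(no slot left: the server sent more concurrent callbacks
 *		 than were advertised, so the call cannot be accepted)
 *	}
 *	(copy the incoming call into req->rq_rcv_buf, then queue req
 *	 for processing by the callback service)
 */
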
/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this request.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC:       free backchannel req=%p\n", req);

	smp_mb__before_clear_bit();
	BUG_ON(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_clear_bit();

	if (!xprt_need_to_requeue(xprt)) {
		/*
		 * The last remaining session was destroyed while this
		 * entry was in use.  Free the entry and don't attempt
		 * to add it back to the list because there is no need
		 * for any more preallocated entries.
		 */
		dprintk("RPC:       Last session removed req=%p\n", req);
		xprt_free_allocation(req);
		return;
	}

	/*
	 * Return it to the list of preallocations so that it
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock_bh(&xprt->bc_pa_lock);
}
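
/*
 * Lifecycle sketch (illustrative): every request handed out by
 * xprt_alloc_bc_request must come back through xprt_free_bc_request
 * once the callback has been processed, which either requeues the slot
 * or frees it if the last session is gone:
 *
 *	req = xprt_alloc_bc_request(xprt);	(on callback arrival)
 *	(process the callback request)
 *	xprt_free_bc_request(req);		(requeue or free the slot)
 */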