1 /*
2 * fs/cifs/transport.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * Jeremy Allison (jra@samba.org) 2006.
7 *
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23 #include <linux/fs.h>
24 #include <linux/list.h>
25 #include <linux/gfp.h>
26 #include <linux/wait.h>
27 #include <linux/net.h>
28 #include <linux/delay.h>
29 #include <linux/freezer.h>
30 #include <asm/uaccess.h>
31 #include <asm/processor.h>
32 #include <linux/mempool.h>
33 #include "cifspdu.h"
34 #include "cifsglob.h"
35 #include "cifsproto.h"
36 #include "cifs_debug.h"
37
38 extern mempool_t *cifs_mid_poolp;
39
40 static void
41 wake_up_task(struct mid_q_entry *mid)
42 {
43 wake_up_process(mid->callback_data);
44 }
45
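/*
 * Allocate a mid_q_entry for @smb_buffer from the mid mempool and set it up
 * for a synchronous request (the default callback just wakes the issuing
 * task).  Returns NULL on allocation failure or if no TCP session was given.
 */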
46 struct mid_q_entry *
47 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
48 {
49 struct mid_q_entry *temp;
50
51 if (server == NULL) {
52 cERROR(1, "Null TCP session in AllocMidQEntry");
53 return NULL;
54 }
55
56 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
57 if (temp == NULL)
58 return temp;
59 else {
60 memset(temp, 0, sizeof(struct mid_q_entry));
61 temp->mid = smb_buffer->Mid; /* always LE */
62 temp->pid = current->pid;
63 temp->command = cpu_to_le16(smb_buffer->Command);
64 cFYI(1, "For smb_command %d", smb_buffer->Command);
65 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
66 /* when mid allocated can be before when sent */
67 temp->when_alloc = jiffies;
68
69 /*
70 * The default is for the mid to be synchronous, so the
71 * default callback just wakes up the current task.
72 */
73 temp->callback = wake_up_task;
74 temp->callback_data = current;
75 }
76
77 atomic_inc(&midCount);
78 temp->mid_state = MID_REQUEST_ALLOCATED;
79 return temp;
80 }
81
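/*
 * Release the response buffer attached to the mid (if any) and return the
 * entry to the mempool.  With CONFIG_CIFS_STATS2, responses that took more
 * than a second are logged when the CIFS_TIMER debug flag is set.
 */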
82 void
83 DeleteMidQEntry(struct mid_q_entry *midEntry)
84 {
85 #ifdef CONFIG_CIFS_STATS2
86 unsigned long now;
87 #endif
88 midEntry->mid_state = MID_FREE;
89 atomic_dec(&midCount);
90 if (midEntry->large_buf)
91 cifs_buf_release(midEntry->resp_buf);
92 else
93 cifs_small_buf_release(midEntry->resp_buf);
94 #ifdef CONFIG_CIFS_STATS2
95 now = jiffies;
96 /* commands taking longer than one second are indications that
97 something is wrong, unless it is quite a slow link or server */
98 if ((now - midEntry->when_alloc) > HZ) {
99 if ((cifsFYI & CIFS_TIMER) &&
100 (midEntry->command != cpu_to_le16(SMB_COM_LOCKING_ANDX))) {
101 printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
102 midEntry->command, midEntry->mid);
103 printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
104 now - midEntry->when_alloc,
105 now - midEntry->when_sent,
106 now - midEntry->when_received);
107 }
108 }
109 #endif
110 mempool_free(midEntry, cifs_mid_poolp);
111 }
112
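/* Unlink the mid from the pending queue under GlobalMid_Lock and free it. */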
113 static void
114 delete_mid(struct mid_q_entry *mid)
115 {
116 spin_lock(&GlobalMid_Lock);
117 list_del(&mid->qhead);
118 spin_unlock(&GlobalMid_Lock);
119
120 DeleteMidQEntry(mid);
121 }
122
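/*
 * Write the request described by @iov to the server's socket.  Partial sends
 * advance through the iovec array; -ENOSPC/-EAGAIN are retried with growing
 * delays for roughly 15 seconds.  If only part of an SMB could be sent, the
 * session is flagged for reconnect so the server discards the partial frame.
 */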
123 static int
124 smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
125 {
126 int rc = 0;
127 int i = 0;
128 struct msghdr smb_msg;
129 __be32 *buf_len = (__be32 *)(iov[0].iov_base);
130 unsigned int len = iov[0].iov_len;
131 unsigned int total_len;
132 int first_vec = 0;
133 unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
134 struct socket *ssocket = server->ssocket;
135
136 if (ssocket == NULL)
137 return -ENOTSOCK; /* BB eventually add reconnect code here */
138
139 smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
140 smb_msg.msg_namelen = sizeof(struct sockaddr);
141 smb_msg.msg_control = NULL;
142 smb_msg.msg_controllen = 0;
143 if (server->noblocksnd)
144 smb_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
145 else
146 smb_msg.msg_flags = MSG_NOSIGNAL;
147
148 total_len = 0;
149 for (i = 0; i < n_vec; i++)
150 total_len += iov[i].iov_len;
151
152 cFYI(1, "Sending smb: total_len %d", total_len);
153 dump_smb(iov[0].iov_base, len);
154
155 i = 0;
156 while (total_len) {
157 rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
158 n_vec - first_vec, total_len);
159 if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
160 i++;
161 /*
162 * If blocking send we try 3 times, since each can block
163 * for 5 seconds. For nonblocking we have to try more
164 * but wait increasing amounts of time allowing time for
165 * socket to clear. The overall time we wait in either
166 * case to send on the socket is about 15 seconds.
167 * Similarly we wait for 15 seconds for a response from
168 * the server in SendReceive[2] for the server to send
169 * a response back for most types of requests (except
170 * SMB Write past end of file which can be slow, and
171 * blocking lock operations). NFS waits slightly longer
172 * than CIFS, but this can make it take longer for
173 * nonresponsive servers to be detected and 15 seconds
174 * is more than enough time for modern networks to
175 * send a packet. In most cases if we fail to send
176 * after the retries we will kill the socket and
177 * reconnect which may clear the network problem.
178 */
179 if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
180 cERROR(1, "sends on sock %p stuck for 15 seconds",
181 ssocket);
182 rc = -EAGAIN;
183 break;
184 }
185 msleep(1 << i);
186 continue;
187 }
188 if (rc < 0)
189 break;
190
191 if (rc == total_len) {
192 total_len = 0;
193 break;
194 } else if (rc > total_len) {
195 cERROR(1, "sent %d requested %d", rc, total_len);
196 break;
197 }
198 if (rc == 0) {
199 /* should never happen, letting socket clear before
200 retrying is our only obvious option here */
201 cERROR(1, "tcp sent no data");
202 msleep(500);
203 continue;
204 }
205 total_len -= rc;
206 /* the line below resets i */
207 for (i = first_vec; i < n_vec; i++) {
208 if (iov[i].iov_len) {
209 if (rc > iov[i].iov_len) {
210 rc -= iov[i].iov_len;
211 iov[i].iov_len = 0;
212 } else {
213 iov[i].iov_base += rc;
214 iov[i].iov_len -= rc;
215 first_vec = i;
216 break;
217 }
218 }
219 }
220 i = 0; /* in case we get ENOSPC on the next send */
221 }
222
223 if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
224 cFYI(1, "partial send (%d remaining), terminating session",
225 total_len);
226 /* If we have only sent part of an SMB then the next SMB
227 could be taken as the remainder of this one. We need
228 to kill the socket so the server throws away the partial
229 SMB */
230 server->tcpStatus = CifsNeedReconnect;
231 }
232
233 if (rc < 0 && rc != -EINTR)
234 cERROR(1, "Error %d sending data on socket to server", rc);
235 else
236 rc = 0;
237
238 /* Don't want to modify the buffer as a side effect of this call. */
239 *buf_len = cpu_to_be32(smb_buf_length);
240
241 return rc;
242 }
243
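/* Send a single contiguous SMB, including its 4-byte RFC1002 length field. */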
244 int
245 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
246 unsigned int smb_buf_length)
247 {
248 struct kvec iov;
249
250 iov.iov_base = smb_buffer;
251 iov.iov_len = smb_buf_length + 4;
252
253 return smb_sendv(server, &iov, 1);
254 }
255
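/*
 * Wait until a send credit is available on @server.  Asynchronous requests
 * (e.g. oplock break responses) are never held up, and blocking lock
 * requests do not consume a credit since they may legitimately block on the
 * server for a long time.
 */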
256 static int
257 wait_for_free_credits(struct TCP_Server_Info *server, const int optype,
258 int *credits)
259 {
260 int rc;
261
262 spin_lock(&server->req_lock);
263 if (optype == CIFS_ASYNC_OP) {
264 /* oplock breaks must not be held up */
265 server->in_flight++;
266 *credits -= 1;
267 spin_unlock(&server->req_lock);
268 return 0;
269 }
270
271 while (1) {
272 if (*credits <= 0) {
273 spin_unlock(&server->req_lock);
274 cifs_num_waiters_inc(server);
275 rc = wait_event_killable(server->request_q,
276 has_credits(server, credits));
277 cifs_num_waiters_dec(server);
278 if (rc)
279 return rc;
280 spin_lock(&server->req_lock);
281 } else {
282 if (server->tcpStatus == CifsExiting) {
283 spin_unlock(&server->req_lock);
284 return -ENOENT;
285 }
286
287 /*
288 * Cannot count locking commands against the total
289 * as they are allowed to block on the server.
290 */
291
292 /* update # of requests on the wire to server */
293 if (optype != CIFS_BLOCKING_OP) {
294 *credits -= 1;
295 server->in_flight++;
296 }
297 spin_unlock(&server->req_lock);
298 break;
299 }
300 }
301 return 0;
302 }
303
304 static int
305 wait_for_free_request(struct TCP_Server_Info *server, const int optype)
306 {
307 return wait_for_free_credits(server, optype, get_credits_field(server));
308 }
309
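/*
 * Refuse to queue a new request if the TCP or SMB session is going away or
 * still being (re)established; otherwise allocate a mid and add it to the
 * server's pending_mid_q.
 */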
310 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
311 struct mid_q_entry **ppmidQ)
312 {
313 if (ses->server->tcpStatus == CifsExiting) {
314 return -ENOENT;
315 }
316
317 if (ses->server->tcpStatus == CifsNeedReconnect) {
318 cFYI(1, "tcp session dead - return to caller to retry");
319 return -EAGAIN;
320 }
321
322 if (ses->status != CifsGood) {
323 /* check if SMB session is bad because we are setting it up */
324 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
325 (in_buf->Command != SMB_COM_NEGOTIATE))
326 return -EAGAIN;
327 /* else ok - we are setting up session */
328 }
329 *ppmidQ = AllocMidQEntry(in_buf, ses->server);
330 if (*ppmidQ == NULL)
331 return -ENOMEM;
332 spin_lock(&GlobalMid_Lock);
333 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
334 spin_unlock(&GlobalMid_Lock);
335 return 0;
336 }
337
338 static int
339 wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
340 {
341 int error;
342
343 error = wait_event_freezekillable(server->response_q,
344 midQ->mid_state != MID_REQUEST_SUBMITTED);
345 if (error < 0)
346 return -ERESTARTSYS;
347
348 return 0;
349 }
350
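/*
 * Allocate and queue a mid for an asynchronous request, then sign the iovec.
 * If signing fails the mid is removed from the pending queue again and the
 * error is returned to the caller.
 */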
351 static int
352 cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
353 unsigned int nvec, struct mid_q_entry **ret_mid)
354 {
355 int rc;
356 struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
357 struct mid_q_entry *mid;
358
359 /* enable signing if server requires it */
360 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
361 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
362
363 mid = AllocMidQEntry(hdr, server);
364 if (mid == NULL)
365 return -ENOMEM;
366
367 /* put it on the pending_mid_q */
368 spin_lock(&GlobalMid_Lock);
369 list_add_tail(&mid->qhead, &server->pending_mid_q);
370 spin_unlock(&GlobalMid_Lock);
371
372 rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
373 if (rc)
374 delete_mid(mid);
375 *ret_mid = mid;
376 return rc;
377 }
378
379 /*
380 * Send a SMB request and set the callback function in the mid to handle
381 * the result. Caller is responsible for dealing with timeouts.
382 */
383 int
384 cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
385 unsigned int nvec, mid_receive_t *receive,
386 mid_callback_t *callback, void *cbdata, bool ignore_pend)
387 {
388 int rc;
389 struct mid_q_entry *mid;
390
391 rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
392 if (rc)
393 return rc;
394
395 mutex_lock(&server->srv_mutex);
396 rc = cifs_setup_async_request(server, iov, nvec, &mid);
397 if (rc) {
398 mutex_unlock(&server->srv_mutex);
399 cifs_add_credits(server, 1);
400 wake_up(&server->request_q);
401 return rc;
402 }
403
404 mid->receive = receive;
405 mid->callback = callback;
406 mid->callback_data = cbdata;
407 mid->mid_state = MID_REQUEST_SUBMITTED;
408
409 cifs_in_send_inc(server);
410 rc = smb_sendv(server, iov, nvec);
411 cifs_in_send_dec(server);
412 cifs_save_when_sent(mid);
413 mutex_unlock(&server->srv_mutex);
414
415 if (rc)
416 goto out_err;
417
418 return rc;
419 out_err:
420 delete_mid(mid);
421 cifs_add_credits(server, 1);
422 wake_up(&server->request_q);
423 return rc;
424 }
425
426 /*
427 *
428 * Send an SMB Request. No response info (other than return code)
429 * needs to be parsed.
430 *
431 * flags indicate the type of request buffer and how long to wait
432 * and whether to log NT STATUS code (error) before mapping it to POSIX error
433 *
434 */
435 int
436 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
437 char *in_buf, int flags)
438 {
439 int rc;
440 struct kvec iov[1];
441 int resp_buf_type;
442
443 iov[0].iov_base = in_buf;
444 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
445 flags |= CIFS_NO_RESP;
446 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
447 cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);
448
449 return rc;
450 }
451
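/*
 * Map the final state of a synchronously issued mid to an errno.  The mid is
 * freed except when a response was successfully received, in which case the
 * caller still owns resp_buf and must dispose of the mid itself.
 */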
452 static int
453 cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
454 {
455 int rc = 0;
456
457 cFYI(1, "%s: cmd=%d mid=%llu state=%d", __func__,
458 le16_to_cpu(mid->command), mid->mid, mid->mid_state);
459
460 spin_lock(&GlobalMid_Lock);
461 switch (mid->mid_state) {
462 case MID_RESPONSE_RECEIVED:
463 spin_unlock(&GlobalMid_Lock);
464 return rc;
465 case MID_RETRY_NEEDED:
466 rc = -EAGAIN;
467 break;
468 case MID_RESPONSE_MALFORMED:
469 rc = -EIO;
470 break;
471 case MID_SHUTDOWN:
472 rc = -EHOSTDOWN;
473 break;
474 default:
475 list_del_init(&mid->qhead);
476 cERROR(1, "%s: invalid mid state mid=%llu state=%d", __func__,
477 mid->mid, mid->mid_state);
478 rc = -EIO;
479 }
480 spin_unlock(&GlobalMid_Lock);
481
482 DeleteMidQEntry(mid);
483 return rc;
484 }
485
486 /*
487 * An NT cancel request header looks just like the original request except:
488 *
489 * The Command is SMB_COM_NT_CANCEL
490 * The WordCount is zeroed out
491 * The ByteCount is zeroed out
492 *
493 * This function mangles an existing request buffer into a
494 * SMB_COM_NT_CANCEL request and then sends it.
495 */
496 static int
497 send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
498 struct mid_q_entry *mid)
499 {
500 int rc = 0;
501
502 /* -4 for RFC1001 length and +2 for BCC field */
503 in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2);
504 in_buf->Command = SMB_COM_NT_CANCEL;
505 in_buf->WordCount = 0;
506 put_bcc(0, in_buf);
507
508 mutex_lock(&server->srv_mutex);
509 rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
510 if (rc) {
511 mutex_unlock(&server->srv_mutex);
512 return rc;
513 }
514
515 /*
516 * The response to this call was already factored into the sequence
517 * number when the call went out, so we must adjust it back downward
518 * after signing here.
519 */
520 --server->sequence_number;
521 rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
522 mutex_unlock(&server->srv_mutex);
523
524 cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
525 in_buf->Mid, rc);
526
527 return rc;
528 }
529
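/*
 * Verify the signature on the received SMB (when signing is in use) and map
 * its status code to a POSIX error.
 */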
530 int
531 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
532 bool log_error)
533 {
534 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
535
536 dump_smb(mid->resp_buf, min_t(u32, 92, len));
537
538 /* verify the signature if signing was negotiated with the server */
539 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
540 struct kvec iov;
541
542 iov.iov_base = mid->resp_buf;
543 iov.iov_len = len;
544 /* FIXME: add code to kill session */
545 if (cifs_verify_signature(&iov, 1, server,
546 mid->sequence_number + 1) != 0)
547 cERROR(1, "Unexpected SMB signature");
548 }
549
550 /* BB special case reconnect tid and uid here? */
551 return map_smb_to_linux_error(mid->resp_buf, log_error);
552 }
553
554 static int
555 cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
556 unsigned int nvec, struct mid_q_entry **ret_mid)
557 {
558 int rc;
559 struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
560 struct mid_q_entry *mid;
561
562 rc = allocate_mid(ses, hdr, &mid);
563 if (rc)
564 return rc;
565 rc = cifs_sign_smb2(iov, nvec, ses->server, &mid->sequence_number);
566 if (rc)
567 delete_mid(mid);
568 *ret_mid = mid;
569 return rc;
570 }
571
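/*
 * Send an SMB built from an iovec array and wait for the response.  On
 * success iov[0] is rewritten to point at the response buffer and
 * *pRespBufType reports whether it is a small or large buffer; unless
 * CIFS_NO_RESP was passed, the caller is responsible for releasing it.
 */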
572 int
573 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
574 struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
575 const int flags)
576 {
577 int rc = 0;
578 int long_op;
579 struct mid_q_entry *midQ;
580 char *buf = iov[0].iov_base;
581
582 long_op = flags & CIFS_TIMEOUT_MASK;
583
584 *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */
585
586 if ((ses == NULL) || (ses->server == NULL)) {
587 cifs_small_buf_release(buf);
588 cERROR(1, "Null session");
589 return -EIO;
590 }
591
592 if (ses->server->tcpStatus == CifsExiting) {
593 cifs_small_buf_release(buf);
594 return -ENOENT;
595 }
596
597 /*
598 * Ensure that we do not send more than 50 overlapping requests
599 * to the same server. We may make this configurable later or
600 * use ses->maxReq.
601 */
602
603 rc = wait_for_free_request(ses->server, long_op);
604 if (rc) {
605 cifs_small_buf_release(buf);
606 return rc;
607 }
608
609 /*
610 * Make sure that we sign in the same order that we send on this socket
611 * and avoid races inside tcp sendmsg code that could cause corruption
612 * of smb data.
613 */
614
615 mutex_lock(&ses->server->srv_mutex);
616
617 rc = cifs_setup_request(ses, iov, n_vec, &midQ);
618 if (rc) {
619 mutex_unlock(&ses->server->srv_mutex);
620 cifs_small_buf_release(buf);
621 /* Update # of requests on wire to server */
622 cifs_add_credits(ses->server, 1);
623 return rc;
624 }
625
626 midQ->mid_state = MID_REQUEST_SUBMITTED;
627 cifs_in_send_inc(ses->server);
628 rc = smb_sendv(ses->server, iov, n_vec);
629 cifs_in_send_dec(ses->server);
630 cifs_save_when_sent(midQ);
631
632 mutex_unlock(&ses->server->srv_mutex);
633
634 if (rc < 0) {
635 cifs_small_buf_release(buf);
636 goto out;
637 }
638
639 if (long_op == CIFS_ASYNC_OP) {
640 cifs_small_buf_release(buf);
641 goto out;
642 }
643
644 rc = wait_for_response(ses->server, midQ);
645 if (rc != 0) {
646 send_nt_cancel(ses->server, (struct smb_hdr *)buf, midQ);
647 spin_lock(&GlobalMid_Lock);
648 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
649 midQ->callback = DeleteMidQEntry;
650 spin_unlock(&GlobalMid_Lock);
651 cifs_small_buf_release(buf);
652 cifs_add_credits(ses->server, 1);
653 return rc;
654 }
655 spin_unlock(&GlobalMid_Lock);
656 }
657
658 cifs_small_buf_release(buf);
659
660 rc = cifs_sync_mid_result(midQ, ses->server);
661 if (rc != 0) {
662 cifs_add_credits(ses->server, 1);
663 return rc;
664 }
665
666 if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
667 rc = -EIO;
668 cFYI(1, "Bad MID state?");
669 goto out;
670 }
671
672 buf = (char *)midQ->resp_buf;
673 iov[0].iov_base = buf;
674 iov[0].iov_len = get_rfc1002_length(buf) + 4;
675 if (midQ->large_buf)
676 *pRespBufType = CIFS_LARGE_BUFFER;
677 else
678 *pRespBufType = CIFS_SMALL_BUFFER;
679
680 rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);
681
682 /* mark it so buf will not be freed by delete_mid */
683 if ((flags & CIFS_NO_RESP) == 0)
684 midQ->resp_buf = NULL;
685 out:
686 delete_mid(midQ);
687 cifs_add_credits(ses->server, 1);
688
689 return rc;
690 }
691
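/*
 * Synchronous send of a single pre-built SMB: transmit @in_buf, wait for the
 * reply, and copy it into @out_buf, returning the RFC1002 length of the
 * response in *pbytes_returned.
 */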
692 int
693 SendReceive(const unsigned int xid, struct cifs_ses *ses,
694 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
695 int *pbytes_returned, const int long_op)
696 {
697 int rc = 0;
698 struct mid_q_entry *midQ;
699
700 if (ses == NULL) {
701 cERROR(1, "Null smb session");
702 return -EIO;
703 }
704 if (ses->server == NULL) {
705 cERROR(1, "Null tcp session");
706 return -EIO;
707 }
708
709 if (ses->server->tcpStatus == CifsExiting)
710 return -ENOENT;
711
712 /* Ensure that we do not send more than 50 overlapping requests
713 to the same server. We may make this configurable later or
714 use ses->maxReq */
715
716 if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
717 MAX_CIFS_HDR_SIZE - 4) {
718 cERROR(1, "Illegal length, greater than maximum frame, %d",
719 be32_to_cpu(in_buf->smb_buf_length));
720 return -EIO;
721 }
722
723 rc = wait_for_free_request(ses->server, long_op);
724 if (rc)
725 return rc;
726
727 /* make sure that we sign in the same order that we send on this socket
728 and avoid races inside tcp sendmsg code that could cause corruption
729 of smb data */
730
731 mutex_lock(&ses->server->srv_mutex);
732
733 rc = allocate_mid(ses, in_buf, &midQ);
734 if (rc) {
735 mutex_unlock(&ses->server->srv_mutex);
736 /* Update # of requests on wire to server */
737 cifs_add_credits(ses->server, 1);
738 return rc;
739 }
740
741 rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
742 if (rc) {
743 mutex_unlock(&ses->server->srv_mutex);
744 goto out;
745 }
746
747 midQ->mid_state = MID_REQUEST_SUBMITTED;
748
749 cifs_in_send_inc(ses->server);
750 rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
751 cifs_in_send_dec(ses->server);
752 cifs_save_when_sent(midQ);
753 mutex_unlock(&ses->server->srv_mutex);
754
755 if (rc < 0)
756 goto out;
757
758 if (long_op == CIFS_ASYNC_OP)
759 goto out;
760
761 rc = wait_for_response(ses->server, midQ);
762 if (rc != 0) {
763 send_nt_cancel(ses->server, in_buf, midQ);
764 spin_lock(&GlobalMid_Lock);
765 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
766 /* no longer considered to be "in-flight" */
767 midQ->callback = DeleteMidQEntry;
768 spin_unlock(&GlobalMid_Lock);
769 cifs_add_credits(ses->server, 1);
770 return rc;
771 }
772 spin_unlock(&GlobalMid_Lock);
773 }
774
775 rc = cifs_sync_mid_result(midQ, ses->server);
776 if (rc != 0) {
777 cifs_add_credits(ses->server, 1);
778 return rc;
779 }
780
781 if (!midQ->resp_buf || !out_buf ||
782 midQ->mid_state != MID_RESPONSE_RECEIVED) {
783 rc = -EIO;
784 cERROR(1, "Bad MID state?");
785 goto out;
786 }
787
788 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
789 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
790 rc = cifs_check_receive(midQ, ses->server, 0);
791 out:
792 delete_mid(midQ);
793 cifs_add_credits(ses->server, 1);
794
795 return rc;
796 }
797
798 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
799 blocking lock to return. */
800
801 static int
802 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
803 struct smb_hdr *in_buf,
804 struct smb_hdr *out_buf)
805 {
806 int bytes_returned;
807 struct cifs_ses *ses = tcon->ses;
808 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
809
810 /* We just modify the current in_buf to change
811 the type of lock from LOCKING_ANDX_SHARED_LOCK
812 or LOCKING_ANDX_EXCLUSIVE_LOCK to
813 LOCKING_ANDX_CANCEL_LOCK. */
814
815 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
816 pSMB->Timeout = 0;
817 pSMB->hdr.Mid = GetNextMid(ses->server);
818
819 return SendReceive(xid, ses, in_buf, out_buf,
820 &bytes_returned, 0);
821 }
822
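/*
 * Like SendReceive(), but for blocking lock requests: the wait for the reply
 * is interruptible.  If a signal arrives, the outstanding lock is cancelled
 * (NT_CANCEL for POSIX locks sent via SMB_COM_TRANSACTION2, otherwise
 * LOCKING_ANDX_CANCEL_LOCK) and, if the cancelled request then fails with
 * -EACCES, the system call is restarted.
 */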
823 int
824 SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
825 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
826 int *pbytes_returned)
827 {
828 int rc = 0;
829 int rstart = 0;
830 struct mid_q_entry *midQ;
831 struct cifs_ses *ses;
832
833 if (tcon == NULL || tcon->ses == NULL) {
834 cERROR(1, "Null smb session");
835 return -EIO;
836 }
837 ses = tcon->ses;
838
839 if (ses->server == NULL) {
840 cERROR(1, "Null tcp session");
841 return -EIO;
842 }
843
844 if (ses->server->tcpStatus == CifsExiting)
845 return -ENOENT;
846
847 /* Ensure that we do not send more than 50 overlapping requests
848 to the same server. We may make this configurable later or
849 use ses->maxReq */
850
851 if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
852 MAX_CIFS_HDR_SIZE - 4) {
853 cERROR(1, "Illegal length, greater than maximum frame, %d",
854 be32_to_cpu(in_buf->smb_buf_length));
855 return -EIO;
856 }
857
858 rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
859 if (rc)
860 return rc;
861
862 /* make sure that we sign in the same order that we send on this socket
863 and avoid races inside tcp sendmsg code that could cause corruption
864 of smb data */
865
866 mutex_lock(&ses->server->srv_mutex);
867
868 rc = allocate_mid(ses, in_buf, &midQ);
869 if (rc) {
870 mutex_unlock(&ses->server->srv_mutex);
871 return rc;
872 }
873
874 rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
875 if (rc) {
876 delete_mid(midQ);
877 mutex_unlock(&ses->server->srv_mutex);
878 return rc;
879 }
880
881 midQ->mid_state = MID_REQUEST_SUBMITTED;
882 cifs_in_send_inc(ses->server);
883 rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
884 cifs_in_send_dec(ses->server);
885 cifs_save_when_sent(midQ);
886 mutex_unlock(&ses->server->srv_mutex);
887
888 if (rc < 0) {
889 delete_mid(midQ);
890 return rc;
891 }
892
893 /* Wait for a reply - allow signals to interrupt. */
894 rc = wait_event_interruptible(ses->server->response_q,
895 (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
896 ((ses->server->tcpStatus != CifsGood) &&
897 (ses->server->tcpStatus != CifsNew)));
898
899 /* Were we interrupted by a signal ? */
900 if ((rc == -ERESTARTSYS) &&
901 (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
902 ((ses->server->tcpStatus == CifsGood) ||
903 (ses->server->tcpStatus == CifsNew))) {
904
905 if (in_buf->Command == SMB_COM_TRANSACTION2) {
906 /* POSIX lock. We send a NT_CANCEL SMB to cause the
907 blocking lock to return. */
908 rc = send_nt_cancel(ses->server, in_buf, midQ);
909 if (rc) {
910 delete_mid(midQ);
911 return rc;
912 }
913 } else {
914 /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
915 to cause the blocking lock to return. */
916
917 rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
918
919 /* If we get -ENOLCK back the lock may have
920 already been removed. Don't exit in this case. */
921 if (rc && rc != -ENOLCK) {
922 delete_mid(midQ);
923 return rc;
924 }
925 }
926
927 rc = wait_for_response(ses->server, midQ);
928 if (rc) {
929 send_nt_cancel(ses->server, in_buf, midQ);
930 spin_lock(&GlobalMid_Lock);
931 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
932 /* no longer considered to be "in-flight" */
933 midQ->callback = DeleteMidQEntry;
934 spin_unlock(&GlobalMid_Lock);
935 return rc;
936 }
937 spin_unlock(&GlobalMid_Lock);
938 }
939
940 /* We got the response - restart system call. */
941 rstart = 1;
942 }
943
944 rc = cifs_sync_mid_result(midQ, ses->server);
945 if (rc != 0)
946 return rc;
947
948 /* rcvd frame is ok */
949 if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
950 rc = -EIO;
951 cERROR(1, "Bad MID state?");
952 goto out;
953 }
954
955 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
956 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
957 rc = cifs_check_receive(midQ, ses->server, 0);
958 out:
959 delete_mid(midQ);
960 if (rstart && rc == -EACCES)
961 return -ERESTARTSYS;
962 return rc;
963 }
964