/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 * Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

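/*
 * Default mid callback for synchronous requests: wake the task that is
 * sleeping in wait_for_response().
 */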
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

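/*
 * Allocate and initialize a mid (multiplex id) entry for the request in
 * @smb_buffer, tied to @server. The entry defaults to synchronous use:
 * its callback wakes the allocating task. Async callers override the
 * callback. Returns NULL if @server is NULL.
 */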
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* when_alloc is in jiffies; a mid can be allocated before it is sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

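/*
 * kref release callback: return the mid to the mempool once the last
 * reference is dropped.
 */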
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
					       refcount);

	mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

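/*
 * Mark a mid free, release its response buffer, record slow-response
 * statistics when CONFIG_CIFS_STATS2 is set, and drop our reference.
 */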
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/*
	 * Commands taking longer than one second (the default threshold) can
	 * indicate that something is wrong, unless it is quite a slow link or
	 * a very busy server. Note that this calculation is unlikely to wrap
	 * as long as slow_rsp_threshold is not set far above the recommended
	 * maximum (32767, i.e. 9 hours), and is generally harmless even if
	 * wrong since it only affects debug counters - so keep it a simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks.
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/* smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command */
		if ((le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS) &&
		    (le16_to_cpu(midEntry->command) >= 0))
			cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);

		trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
				    midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	cifs_mid_q_entry_release(midEntry);
}

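/*
 * Unlink a mid from the pending queue (under GlobalMid_Lock), then
 * destroy it via DeleteMidQEntry().
 */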
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del_init(&mid->qhead);
	mid->mid_flags |= MID_DELETED;
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * For a blocking send we try 3 times, since each can block
		 * for 5 seconds. For a nonblocking send we have to try more
		 * times, but wait increasing amounts of time to allow the
		 * socket to clear. In either case the overall time we wait
		 * to send on the socket is about 15 seconds. Similarly,
		 * SendReceive[2] waits about 15 seconds for the server to
		 * send a response back for most types of requests (except
		 * SMB writes past end of file, which can be slow, and
		 * blocking lock operations). NFS waits slightly longer than
		 * CIFS, but this can make it take longer for nonresponsive
		 * servers to be detected, and 15 seconds is more than enough
		 * time for modern networks to send a packet. In most cases,
		 * if we fail to send after the retries we will kill the
		 * socket and reconnect, which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/*
			 * This should never happen; letting the socket clear
			 * before retrying is our only obvious option here.
			 */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

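/*
 * Total on-the-wire length of @rqst: the iov array plus any page array.
 * On SMB2+ (no RFC1002 preamble) a leading 4-byte length iov is skipped,
 * since __smb_send_rqst() generates that marker itself.
 */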
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

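/*
 * Send @num_rqst compounded requests over the transport. Uses smbdirect
 * when RDMA is enabled; otherwise corks the TCP socket, sends the RFC1002
 * length marker (SMB2+), then each request's iovs and pages, and uncorks.
 * A partial send kills the session, since the server could otherwise
 * mistake the next SMB for the remainder of this one.
 */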
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, rqst);
		goto smbd_done;
	}
	if (ssocket == NULL)
		return -ENOTSOCK;

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB.
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}

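/*
 * Wrapper around __smb_send_rqst(): when CIFS_TRANSFORM_REQ is set, the
 * compound is first encrypted via the server's init_transform_rq hook,
 * with an smb2_transform_hdr prepended as a new first request.
 */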
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}

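/*
 * Send a single SMB whose 4-byte RFC1002 length is already stored in the
 * first four bytes of @smb_buffer.
 */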
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

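/*
 * Wait for a send credit, then consume one and bump the in-flight count
 * under req_lock. Async ops (e.g. oplock breaks) never wait; blocking
 * lock ops consume no credit since they may block on the server
 * indefinitely.
 */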
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Cannot count locking commands against the total,
			 * as they are allowed to block on the server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

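/*
 * Trivial MTU credit implementation for servers without credit-based
 * flow control: grant the full requested size and consume no credits.
 */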
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	*num = size;
	*credits = 0;
	return 0;
}

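/*
 * Allocate a mid for @in_buf and queue it on pending_mid_q, after checking
 * that the session state permits this command (only session setup or
 * negotiate while the session is new, only logoff while it is exiting).
 */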
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* only LOGOFF is valid while tearing the session down */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

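/*
 * Sleep (freezable, killable) until the demultiplex thread moves the mid
 * out of MID_REQUEST_SUBMITTED. Returns -ERESTARTSYS if interrupted by a
 * fatal signal.
 */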
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
						 midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

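/*
 * Validate the iov layout (a 4-byte RFC1002 length followed contiguously
 * by the SMB header), allocate a mid, and sign the request. The mid is
 * not queued here; cifs_call_async() does that.
 */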
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, credits, optype);
	return rc;
}

/*
 * Send an SMB request; no response info (other than the return code) needs
 * to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and whether
 * to log the NT STATUS code (error) before mapping it to a POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

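/*
 * Translate the final mid state into an errno after a synchronous wait.
 * The mid is destroyed in every case except MID_RESPONSE_RECEIVED, where
 * the caller still owns it.
 */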
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

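/*
 * Post-receive checks for a synchronous SMB1 response: verify the
 * signature when signing is active, then map the SMB status code to a
 * POSIX error.
 */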
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* verify the signature before mapping any error */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

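/*
 * No-op callback installed on all but the last mid of a compound, so the
 * demultiplex thread only wakes this thread for the final PDU.
 */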
static void
cifs_noop_callback(struct mid_q_entry *mid)
{
}

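/*
 * Synchronously send a compound chain of @num_rqst requests and wait for
 * all responses. Response buffers and types are returned through
 * @resp_iov and @resp_buf_type; credits granted by the responses are
 * handed back to the server once the mids are deleted.
 */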
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	unsigned int credits = 0;
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&ses->server->srv_mutex);
			/* Update # of requests on wire to server */
			add_credits(ses->server, 1, optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		/*
		 * We don't invoke the callback for compounds unless it is the
		 * last request.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_noop_callback;
	}
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
	cifs_in_send_dec(ses->server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(ses->server, midQ[i]);
		if (rc != 0) {
			cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(ses->server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
				midQ[i]->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				add_credits(ses->server, 1, optype);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++)
		if (midQ[i]->resp_buf)
			credits += ses->server->ops->get_credits(midQ[i]);
	if (!credits)
		credits = 1;

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], ses->server);
		if (rc != 0) {
			add_credits(ses->server, credits, optype);
			return rc;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
				      ses->server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = ses->server->ops->check_receive(midQ[i], ses->server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RESP) == 0)
			midQ[i]->resp_buf = NULL;
	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++)
		cifs_delete_mid(midQ[i]);
	add_credits(ses->server, credits, optype);

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}

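/*
 * Legacy entry point taking a flat iov array: splits the RFC1002 length
 * off the first iov into its own vector, as cifs_send_recv() expects,
 * copying to a heap array when n_vec + 1 exceeds CIFS_MAX_IOV_SIZE.
 */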
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

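/*
 * Synchronous send/receive for a single SMB1 request held in one
 * contiguous buffer. The response is copied into @out_buf and its
 * RFC1002 length returned through @pbytes_returned.
 */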
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/*
	 * We just modify the current in_buf to change
	 * the type of lock from LOCKING_ANDX_SHARED_LOCK
	 * or LOCKING_ANDX_EXCLUSIVE_LOCK to
	 * LOCKING_ANDX_CANCEL_LOCK.
	 */
	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

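/*
 * Like SendReceive(), but for blocking SMB1 lock requests: the wait is
 * interruptible, and on a signal the pending lock is cancelled (NT_CANCEL
 * for POSIX/Transaction2 locks, LOCKINGX_CANCEL_LOCK for Windows locks)
 * before waiting again for the server's reply.
 */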
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/*
			 * POSIX lock. We send a NT_CANCEL SMB to cause the
			 * blocking lock to return.
			 */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/*
			 * Windows lock. We send a LOCKINGX_CANCEL_LOCK
			 * to cause the blocking lock to return.
			 */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/*
			 * If we get -ENOLCK back the lock may have
			 * already been removed. Don't exit in this case.
			 */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}