smb3: add dynamic tracepoint for timeout waiting for credits
[linux-2.6-block.git] / fs / cifs / transport.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/transport.c
3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
79a58d1f 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
1da177e4
LT
21 */
22
23#include <linux/fs.h>
24#include <linux/list.h>
5a0e3ad6 25#include <linux/gfp.h>
1da177e4
LT
26#include <linux/wait.h>
27#include <linux/net.h>
28#include <linux/delay.h>
f06ac72e 29#include <linux/freezer.h>
b8eed283 30#include <linux/tcp.h>
2f8b5444 31#include <linux/bvec.h>
97bc00b3 32#include <linux/highmem.h>
7c0f6ba6 33#include <linux/uaccess.h>
1da177e4
LT
34#include <asm/processor.h>
35#include <linux/mempool.h>
b30c74c7 36#include <linux/signal.h>
1da177e4
LT
37#include "cifspdu.h"
38#include "cifsglob.h"
39#include "cifsproto.h"
40#include "cifs_debug.h"
8bd68c6e 41#include "smb2proto.h"
9762c2d0 42#include "smbdirect.h"
50c2f753 43
3cecf486
RS
44/* Max number of iovectors we can use off the stack when sending requests. */
45#define CIFS_MAX_IOV_SIZE 8
46
2dc7e1c0
PS
/*
 * Default mid completion callback: the mid was issued synchronously, so
 * callback_data holds the submitting task — wake it so it can collect
 * the response.
 */
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}
52
/*
 * Allocate and initialize a mid_q_entry describing the request in
 * @smb_buffer, bound to @server.
 *
 * The new entry starts with one reference (kref_init), in state
 * MID_REQUEST_ALLOCATED, and defaults to synchronous completion: the
 * callback simply wakes the current task.  Returns NULL only when
 * @server is NULL.
 */
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	/*
	 * NOTE(review): the result is used without a NULL check — mempool
	 * allocations that are allowed to sleep (GFP_NOFS) are expected not
	 * to fail; confirm against mempool_alloc() semantics.
	 */
	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
86
696e420b
LP
/*
 * kref release function: return the mid to its mempool once the last
 * reference is dropped.  Called with GlobalMid_Lock held (see
 * cifs_mid_q_entry_release()).
 */
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
					       refcount);

	mempool_free(mid, cifs_mid_poolp);
}
94
/*
 * Drop one reference on @midEntry, freeing it when the count hits zero.
 * The put is done under GlobalMid_Lock to serialize against lookups of
 * the mid on the pending queue.
 */
void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}
101
/*
 * Tear down a mid: mark it free, release its response buffer (large or
 * small pool), optionally record slow-response statistics, and drop the
 * caller's reference.  The caller must already have unlinked the mid
 * from pending_mid_q (see cifs_delete_mid()).
 */
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	/* lock commands are expected to be slow, so exclude them below */
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);

		trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
			       midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	cifs_mid_q_entry_release(midEntry);
}
152
3c1bf7e4
PS
153void
154cifs_delete_mid(struct mid_q_entry *mid)
ddc8cf8f
JL
155{
156 spin_lock(&GlobalMid_Lock);
ddf83afb
RS
157 list_del_init(&mid->qhead);
158 mid->mid_flags |= MID_DELETED;
ddc8cf8f
JL
159 spin_unlock(&GlobalMid_Lock);
160
161 DeleteMidQEntry(mid);
162}
163
6f49f46b
JL
164/*
165 * smb_send_kvec - send an array of kvecs to the server
166 * @server: Server to send the data to
3ab3f2a1 167 * @smb_msg: Message to send
6f49f46b
JL
168 * @sent: amount of data sent on socket is stored here
169 *
170 * Our basic "send data to server" function. Should be called with srv_mutex
171 * held. The caller is responsible for handling the results.
172 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	/* destination and flags are fixed for the whole message */
	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	/* loop until the iterator is drained or a hard error occurs */
	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			/* exponential backoff: 2ms, 4ms, ... */
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}
241
/*
 * Compute the on-the-wire length of @rqst: the sum of its kvec array plus
 * any attached page data.  For SMB2+ (no preamble) the first 4-byte iov is
 * the RFC1002 length marker and is excluded from the total.
 */
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	/* skip the 4-byte RFC1002 marker iov on SMB2+ requests */
	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}
285
/*
 * Transmit @num_rqst requests over the server socket (or via smbdirect
 * when RDMA is in use).  Blocks all signals for the duration of the send
 * so a partial transmission cannot be caused by an interrupt; a partial
 * send marks the session for reconnect since the server could otherwise
 * misparse the next SMB as the remainder of this one.
 *
 * Caller must hold srv_mutex.  Returns 0 on full send, negative errno
 * otherwise.
 */
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	/* RDMA transport bypasses the TCP socket path entirely */
	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	/* bail out early rather than block signals for a doomed send */
	if (signal_pending(current)) {
		cifs_dbg(FYI, "signal is pending before sending any data\n");
		return -EINTR;
	}

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -EINTR;
	}

	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
431
/*
 * Send @num_rqst requests, transparently encrypting them first when
 * CIFS_TRANSFORM_REQ is set.  For the encrypted path, slot 0 of the
 * on-stack compound array holds the transform header and the dialect's
 * init_transform_rq callback builds the encrypted copies of the caller's
 * requests into slots 1..num_rqst.
 */
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	/* one slot is reserved for the transform header */
	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback "
			 "is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	/* free only the transformed copies, not the caller's requests */
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}
471
0496e02d
JL
472int
473smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
474 unsigned int smb_buf_length)
475{
738f9de5 476 struct kvec iov[2];
7fb8986e
PS
477 struct smb_rqst rqst = { .rq_iov = iov,
478 .rq_nvec = 2 };
0496e02d 479
738f9de5
PS
480 iov[0].iov_base = smb_buffer;
481 iov[0].iov_len = 4;
482 iov[1].iov_base = (char *)smb_buffer + 4;
483 iov[1].iov_len = smb_buf_length;
0496e02d 484
07cd952f 485 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
486}
487
/*
 * Reserve @num_credits send credits from @server, sleeping (killable, up
 * to @timeout ms, forever if negative) until they become available.
 *
 * On success the credits and in-flight count are charged under req_lock
 * and *instance records the reconnect generation they were taken from.
 * Async ops (oplock breaks) are granted immediately; echo probes fail
 * fast with -EAGAIN when no credits remain; a timed-out wait returns
 * -ENOTSUPP and fires the smb3_credit_timeout tracepoint.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	int rc;
	int *credits;
	int optype;
	long int t;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		*instance = server->reconnect_instance;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			/* drop the lock while sleeping for more credits */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				trace_smb3_credit_timeout(server->CurrentMid,
					server->hostname, num_credits);
				cifs_dbg(VFS, "wait timed out after %d ms\n",
					 timeout);
				return -ENOTSUPP;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this is we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);
				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					trace_smb3_credit_timeout(
						server->CurrentMid,
						server->hostname, num_credits);
					cifs_dbg(VFS, "wait timed out after %d ms\n",
						 timeout);
					return -ENOTSUPP;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				*instance = server->reconnect_instance;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
1da177e4 600
/*
 * Convenience wrapper: block without a timeout until one credit is
 * available, recording the reconnect instance it was obtained under.
 */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags, instance);
}
608
cb7e9eab
PS
609int
610cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
335b7b62 611 unsigned int *num, struct cifs_credits *credits)
cb7e9eab
PS
612{
613 *num = size;
335b7b62
PS
614 credits->value = 0;
615 credits->instance = server->reconnect_instance;
cb7e9eab
PS
616 return 0;
617}
618
/*
 * Validate session/server state for the SMB1 request in @in_buf, then
 * allocate a mid for it and queue it on the server's pending_mid_q.
 *
 * Returns -ENOENT when the tcp session is exiting, -EAGAIN when the
 * session must be retried (dead connection, or a command not allowed in
 * the current session phase), -ENOMEM on allocation failure, 0 on
 * success with *ppmidQ set.
 */
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		/* only session-establishment commands may pass while new */
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}
653
0ade640e
JL
/*
 * Sleep (freezable, killable) until the demultiplex thread moves @midQ
 * out of MID_REQUEST_SUBMITTED.  Returns 0 once a response (or failure
 * state) is recorded, -ERESTARTSYS if the wait was interrupted by a
 * fatal signal.
 */
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}
666
fec344e3
JL
/*
 * SMB1 setup_async_request operation: validate the request's iov layout
 * (4-byte RFC1002 marker followed by a contiguous header), enable
 * signing if the server requires it, allocate a mid, and sign the
 * request.  Returns the mid or an ERR_PTR; the mid is NOT yet queued on
 * pending_mid_q (cifs_call_async() does that).
 */
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	/* iov[0] must be the 4-byte marker, immediately followed by iov[1] */
	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
133672ef 694
a6827c18
JL
695/*
696 * Send a SMB request and set the callback function in the mid to handle
697 * the result. Caller is responsible for dealing with timeouts.
698 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	/* acquire one credit unless the caller already holds some */
	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		/* undo the mid/sequence bookkeeping done during setup */
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	/* on failure, return the credit we charged and wake any waiters */
	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}
776
133672ef
SF
777/*
778 *
779 * Send an SMB Request. No response info (other than return code)
780 * needs to be parsed.
781 *
782 * flags indicate the type of request buffer and how long to wait
783 * and whether to log NT STATUS code (error) before mapping it to POSIX error
784 *
785 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	/* in_buf already carries its 4-byte RFC1002 length prefix */
	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}
803
053d5034 804static int
3c1105df 805cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
053d5034
JL
806{
807 int rc = 0;
808
f96637be
JP
809 cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
810 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
053d5034 811
74dd92a8 812 spin_lock(&GlobalMid_Lock);
7c9421e1 813 switch (mid->mid_state) {
74dd92a8 814 case MID_RESPONSE_RECEIVED:
053d5034
JL
815 spin_unlock(&GlobalMid_Lock);
816 return rc;
74dd92a8
JL
817 case MID_RETRY_NEEDED:
818 rc = -EAGAIN;
819 break;
71823baf
JL
820 case MID_RESPONSE_MALFORMED:
821 rc = -EIO;
822 break;
3c1105df
JL
823 case MID_SHUTDOWN:
824 rc = -EHOSTDOWN;
825 break;
74dd92a8 826 default:
3c1105df 827 list_del_init(&mid->qhead);
f96637be
JP
828 cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
829 __func__, mid->mid, mid->mid_state);
74dd92a8 830 rc = -EIO;
053d5034
JL
831 }
832 spin_unlock(&GlobalMid_Lock);
833
2b84a36c 834 DeleteMidQEntry(mid);
053d5034
JL
835 return rc;
836}
837
121b046a 838static inline int
fb2036d8
PS
839send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
840 struct mid_q_entry *mid)
76dcc26f 841{
121b046a 842 return server->ops->send_cancel ?
fb2036d8 843 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
844}
845
2c8f981d
JL
/*
 * Post-receive check for an SMB1 response: dump the header, verify the
 * packet signature when signing is active (failure is only logged, not
 * fatal — see FIXME), and map the SMB status code to a POSIX errno.
 */
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		/* split buffer into marker + payload for signature check */
		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}
876
fec344e3
JL
/*
 * SMB1 setup_request operation: validate the request's iov layout
 * (4-byte RFC1002 marker followed by a contiguous header), allocate a
 * mid (which also queues it on pending_mid_q), and sign the request.
 * Returns the mid or an ERR_PTR.
 */
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		/* mid is already on pending_mid_q, so delete (unlink) it */
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}
898
4e34feb5 899static void
ee258d79 900cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
901{
902 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
903 struct cifs_credits credits;
904
905 credits.value = server->ops->get_credits(mid);
906 credits.instance = server->reconnect_instance;
8a26f0f7 907
34f4deb7 908 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
909}
910
ee258d79
PS
/*
 * Completion callback for the final mid of a compound chain: return its
 * credits, then wake the task waiting on the whole chain.
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
917
/*
 * Completion callback installed when the submitter has given up on a
 * compound request: return the credits and free the mid here, since no
 * one is waiting to collect the response.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}
924
/*
 * compound_send_recv - send num_rqst SMB requests as one compound chain and
 * wait for the responses.
 *
 * @xid:           debug transaction id
 * @ses:           session to send on (ses->server must be valid)
 * @flags:         CIFS_OP_MASK operation type plus timeout/response flags
 * @num_rqst:      number of requests in @rqst (at most MAX_COMPOUND)
 * @rqst:          array of marshalled requests
 * @resp_buf_type: out: CIFS_{NO,SMALL,LARGE}_BUFFER per response
 * @resp_iov:      out: one kvec per response buffer
 *
 * Obtains one credit per request (all from the same reconnect instance),
 * submits the chain under srv_mutex so signing order matches send order,
 * then waits for every response.  Returns 0 or a negative errno; -EAGAIN
 * means a reconnect raced with credit acquisition and the caller should
 * retry, -ENOTSUPP means not enough credits and nothing in flight to wait
 * for (caller should fall back to sequential requests).
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	unsigned int first_instance = 0;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Fast path: grab all num_rqst credits atomically under req_lock if
	 * the server has enough.  first_instance != 0 then records which
	 * reconnect instance the credits belong to.
	 */
	spin_lock(&ses->server->req_lock);
	if (ses->server->credits < num_rqst) {
		/*
		 * Return immediately if not too many requests in flight since
		 * we will likely be stuck on waiting for credits.
		 */
		if (ses->server->in_flight < num_rqst - ses->server->credits) {
			spin_unlock(&ses->server->req_lock);
			return -ENOTSUPP;
		}
	} else {
		/* enough credits to send the whole compounded request */
		ses->server->credits -= num_rqst;
		ses->server->in_flight += num_rqst;
		first_instance = ses->server->reconnect_instance;
	}
	spin_unlock(&ses->server->req_lock);

	if (first_instance) {
		cifs_dbg(FYI, "Acquired %d credits at once\n", num_rqst);
		for (i = 0; i < num_rqst; i++) {
			credits[i].value = 1;
			credits[i].instance = first_instance;
		}
		goto setup_rqsts;
	}

	/*
	 * There are not enough credits to send the whole compound request but
	 * there are requests in flight that may bring credits from the server.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests. This should be fixed by returning immediately and letting
	 * a caller fallback to sequential commands instead of compounding.
	 * Ensure we obtain 1 credit per request in the compound chain.
	 */
	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_free_request(ses->server, flags, &instance);

		if (rc == 0) {
			credits[i].value = 1;
			credits[i].instance = instance;
			/*
			 * All parts of the compound chain must get credits from
			 * the same session, otherwise we may end up using more
			 * credits than the server granted. If there were
			 * reconnects in between, return -EAGAIN and let callers
			 * handle it.
			 */
			if (i == 0)
				first_instance = instance;
			else if (first_instance != instance) {
				/* i++ so the cleanup below also returns the
				   credit just obtained for this request */
				i++;
				rc = -EAGAIN;
			}
		}

		if (rc) {
			/*
			 * We haven't sent an SMB packet to the server yet but
			 * we already obtained credits for i requests in the
			 * compound chain - need to return those credits back
			 * for future use. Note that we need to call add_credits
			 * multiple times to match the way we obtained credits
			 * in the first place and to account for in flight
			 * requests correctly.
			 */
			for (j = 0; j < i; j++)
				add_credits(ses->server, &credits[j], optype);
			return rc;
		}
	}

setup_rqsts:
	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	/*
	 * All the parts of the compound chain belong obtained credits from the
	 * same session (see the appropriate checks above). In the same time
	 * there might be reconnects after those checks but before we acquired
	 * the srv_mutex. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (first_instance != ses->server->reconnect_instance) {
		mutex_unlock(&ses->server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(ses->server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* undo mids allocated so far and roll back CurrentMid */
			revert_current_mid(ses->server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&ses->server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(ses->server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
	cifs_in_send_dec(ses->server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* send failed: undo mid ids and signing sequence bump */
		revert_current_mid(ses->server, num_rqst);
		ses->server->sequence_number -= 2;
	}

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		/* Sending failed for some reason - return credits back */
		for (i = 0; i < num_rqst; i++)
			add_credits(ses->server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP)
		goto out;

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(ses->server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* cancel every mid we did not get a response for */
		for (; i < num_rqst; i++) {
			cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(ses->server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], ses->server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			ses->server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = ses->server->ops->check_receive(midQ[i], ses->server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RESP) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any futher.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}
1da177e4 1199
/*
 * cifs_send_recv - send a single (non-compounded) request and wait for its
 * response.  Thin wrapper around compound_send_recv() with num_rqst == 1.
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}
1208
738f9de5
PS
1209int
1210SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1211 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1212 const int flags, struct kvec *resp_iov)
1213{
1214 struct smb_rqst rqst;
3cecf486 1215 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
738f9de5
PS
1216 int rc;
1217
3cecf486 1218 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
6da2ec56
KC
1219 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1220 GFP_KERNEL);
117e3b7f
SF
1221 if (!new_iov) {
1222 /* otherwise cifs_send_recv below sets resp_buf_type */
1223 *resp_buf_type = CIFS_NO_BUFFER;
3cecf486 1224 return -ENOMEM;
117e3b7f 1225 }
3cecf486
RS
1226 } else
1227 new_iov = s_iov;
738f9de5
PS
1228
1229 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1230 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1231
1232 new_iov[0].iov_base = new_iov[1].iov_base;
1233 new_iov[0].iov_len = 4;
1234 new_iov[1].iov_base += 4;
1235 new_iov[1].iov_len -= 4;
1236
1237 memset(&rqst, 0, sizeof(struct smb_rqst));
1238 rqst.rq_iov = new_iov;
1239 rqst.rq_nvec = n_vec + 1;
1240
1241 rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
3cecf486
RS
1242 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1243 kfree(new_iov);
738f9de5
PS
1244 return rc;
1245}
1246
/*
 * SendReceive - legacy (SMB1) synchronous send of a pre-marshalled smb_hdr
 * and wait for the response.
 *
 * @xid:              debug transaction id
 * @ses:              session to send on
 * @in_buf:           request; smb_buf_length holds the RFC1001 length
 * @out_buf:          buffer the raw response is copied into
 * @pbytes_returned:  out: RFC1002 length of the response
 * @flags:            timeout class bits (CIFS_TIMEOUT_MASK)
 *
 * Takes exactly one credit, signs and sends under srv_mutex (so signing
 * order matches send order), then waits.  The credit is returned via
 * add_credits() on every exit path.  Returns 0 or a negative errno.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	/* send failed: undo the signing sequence number bump */
	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, &credits, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy the full frame (4-byte length prefix + payload) to caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, &credits, 0);

	return rc;
}
1da177e4 1359
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

/*
 * send_lock_cancel - rewrite the caller's LOCKING_ANDX request in place into
 * a cancel request and send it synchronously.  Note this mutates @in_buf
 * (LockType, Timeout, Mid) rather than building a new packet.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}
1384
/*
 * SendReceiveBlockingLock - send a blocking byte-range lock request and wait,
 * allowing signals to interrupt the wait.
 *
 * Uses the CIFS_BLOCKING_OP timeout class so the wait can be indefinite.
 * If a signal arrives while the request is still outstanding, a cancel is
 * sent (NT_CANCEL for POSIX/T2 locks, LOCKINGX_CANCEL_LOCK otherwise) and
 * we then wait for the server's answer before restarting the syscall
 * (-ERESTARTSYS mapped from -EACCES when rstart is set).
 *
 * Returns 0 or a negative errno; response is copied into @out_buf and its
 * RFC1002 length stored via @pbytes_returned.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	/* send failed: undo the signing sequence number bump */
	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy the full frame (4-byte length prefix + payload) to caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}