cifs: change wait_for_free_request() to take flags as argument
[linux-2.6-block.git] / fs / cifs / transport.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/transport.c
3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
79a58d1f 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
1da177e4
LT
21 */
22
23#include <linux/fs.h>
24#include <linux/list.h>
5a0e3ad6 25#include <linux/gfp.h>
1da177e4
LT
26#include <linux/wait.h>
27#include <linux/net.h>
28#include <linux/delay.h>
f06ac72e 29#include <linux/freezer.h>
b8eed283 30#include <linux/tcp.h>
2f8b5444 31#include <linux/bvec.h>
97bc00b3 32#include <linux/highmem.h>
7c0f6ba6 33#include <linux/uaccess.h>
1da177e4
LT
34#include <asm/processor.h>
35#include <linux/mempool.h>
b30c74c7 36#include <linux/signal.h>
1da177e4
LT
37#include "cifspdu.h"
38#include "cifsglob.h"
39#include "cifsproto.h"
40#include "cifs_debug.h"
8bd68c6e 41#include "smb2proto.h"
9762c2d0 42#include "smbdirect.h"
50c2f753 43
3cecf486
RS
44/* Max number of iovectors we can use off the stack when sending requests. */
45#define CIFS_MAX_IOV_SIZE 8
46
2dc7e1c0
PS
47void
48cifs_wake_up_task(struct mid_q_entry *mid)
2b84a36c
JL
49{
50 wake_up_process(mid->callback_data);
51}
52
a6827c18 53struct mid_q_entry *
24b9b06b 54AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
1da177e4
LT
55{
56 struct mid_q_entry *temp;
57
24b9b06b 58 if (server == NULL) {
f96637be 59 cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
1da177e4
LT
60 return NULL;
61 }
50c2f753 62
232087cb 63 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
a6f74e80 64 memset(temp, 0, sizeof(struct mid_q_entry));
696e420b 65 kref_init(&temp->refcount);
a6f74e80
N
66 temp->mid = get_mid(smb_buffer);
67 temp->pid = current->pid;
68 temp->command = cpu_to_le16(smb_buffer->Command);
69 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
1047abc1 70 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
a6f74e80
N
71 /* when mid allocated can be before when sent */
72 temp->when_alloc = jiffies;
73 temp->server = server;
2b84a36c 74
a6f74e80
N
75 /*
76 * The default is for the mid to be synchronous, so the
77 * default callback just wakes up the current task.
78 */
79 temp->callback = cifs_wake_up_task;
80 temp->callback_data = current;
1da177e4 81
1da177e4 82 atomic_inc(&midCount);
7c9421e1 83 temp->mid_state = MID_REQUEST_ALLOCATED;
1da177e4
LT
84 return temp;
85}
86
696e420b
LP
87static void _cifs_mid_q_entry_release(struct kref *refcount)
88{
89 struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
90 refcount);
91
92 mempool_free(mid, cifs_mid_poolp);
93}
94
95void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
96{
97 spin_lock(&GlobalMid_Lock);
98 kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
99 spin_unlock(&GlobalMid_Lock);
100}
101
766fdbb5 102void
1da177e4
LT
103DeleteMidQEntry(struct mid_q_entry *midEntry)
104{
1047abc1 105#ifdef CONFIG_CIFS_STATS2
2dc7e1c0 106 __le16 command = midEntry->server->vals->lock_cmd;
1047abc1
SF
107 unsigned long now;
108#endif
7c9421e1 109 midEntry->mid_state = MID_FREE;
8097531a 110 atomic_dec(&midCount);
7c9421e1 111 if (midEntry->large_buf)
b8643e1b
SF
112 cifs_buf_release(midEntry->resp_buf);
113 else
114 cifs_small_buf_release(midEntry->resp_buf);
1047abc1
SF
115#ifdef CONFIG_CIFS_STATS2
116 now = jiffies;
00778e22
SF
117 /*
118 * commands taking longer than one second (default) can be indications
119 * that something is wrong, unless it is quite a slow link or a very
120 * busy server. Note that this calc is unlikely or impossible to wrap
121 * as long as slow_rsp_threshold is not set way above recommended max
122 * value (32767 ie 9 hours) and is generally harmless even if wrong
123 * since only affects debug counters - so leaving the calc as simple
124 * comparison rather than doing multiple conversions and overflow
125 * checks
126 */
127 if ((slow_rsp_threshold != 0) &&
128 time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
020eec5f 129 (midEntry->command != command)) {
f5942db5
SF
130 /*
131 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
132 * NB: le16_to_cpu returns unsigned so can not be negative below
133 */
134 if (le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS)
468d6779
SF
135 cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);
136
020eec5f
SF
137 trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
138 midEntry->mid, midEntry->pid,
139 midEntry->when_sent, midEntry->when_received);
140 if (cifsFYI & CIFS_TIMER) {
0b456f04 141 pr_debug(" CIFS slow rsp: cmd %d mid %llu",
1047abc1 142 midEntry->command, midEntry->mid);
f80eaedd 143 cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
1047abc1
SF
144 now - midEntry->when_alloc,
145 now - midEntry->when_sent,
146 now - midEntry->when_received);
147 }
148 }
149#endif
696e420b 150 cifs_mid_q_entry_release(midEntry);
1da177e4
LT
151}
152
3c1bf7e4
PS
153void
154cifs_delete_mid(struct mid_q_entry *mid)
ddc8cf8f
JL
155{
156 spin_lock(&GlobalMid_Lock);
ddf83afb
RS
157 list_del_init(&mid->qhead);
158 mid->mid_flags |= MID_DELETED;
ddc8cf8f
JL
159 spin_unlock(&GlobalMid_Lock);
160
161 DeleteMidQEntry(mid);
162}
163
6f49f46b
JL
164/*
165 * smb_send_kvec - send an array of kvecs to the server
166 * @server: Server to send the data to
3ab3f2a1 167 * @smb_msg: Message to send
6f49f46b
JL
168 * @sent: amount of data sent on socket is stored here
169 *
170 * Our basic "send data to server" function. Should be called with srv_mutex
171 * held. The caller is responsible for handling the results.
172 */
d6e04ae6 173static int
3ab3f2a1
AV
174smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
175 size_t *sent)
1da177e4
LT
176{
177 int rc = 0;
3ab3f2a1 178 int retries = 0;
edf1ae40 179 struct socket *ssocket = server->ssocket;
50c2f753 180
6f49f46b
JL
181 *sent = 0;
182
3ab3f2a1
AV
183 smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
184 smb_msg->msg_namelen = sizeof(struct sockaddr);
185 smb_msg->msg_control = NULL;
186 smb_msg->msg_controllen = 0;
0496e02d 187 if (server->noblocksnd)
3ab3f2a1 188 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
edf1ae40 189 else
3ab3f2a1 190 smb_msg->msg_flags = MSG_NOSIGNAL;
1da177e4 191
3ab3f2a1 192 while (msg_data_left(smb_msg)) {
6f49f46b
JL
193 /*
194 * If blocking send, we try 3 times, since each can block
195 * for 5 seconds. For nonblocking we have to try more
196 * but wait increasing amounts of time allowing time for
197 * socket to clear. The overall time we wait in either
198 * case to send on the socket is about 15 seconds.
199 * Similarly we wait for 15 seconds for a response from
200 * the server in SendReceive[2] for the server to send
201 * a response back for most types of requests (except
202 * SMB Write past end of file which can be slow, and
203 * blocking lock operations). NFS waits slightly longer
204 * than CIFS, but this can make it take longer for
205 * nonresponsive servers to be detected and 15 seconds
206 * is more than enough time for modern networks to
207 * send a packet. In most cases if we fail to send
208 * after the retries we will kill the socket and
209 * reconnect which may clear the network problem.
210 */
3ab3f2a1 211 rc = sock_sendmsg(ssocket, smb_msg);
ce6c44e4 212 if (rc == -EAGAIN) {
3ab3f2a1
AV
213 retries++;
214 if (retries >= 14 ||
215 (!server->noblocksnd && (retries > 2))) {
f96637be
JP
216 cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
217 ssocket);
3ab3f2a1 218 return -EAGAIN;
1da177e4 219 }
3ab3f2a1 220 msleep(1 << retries);
1da177e4
LT
221 continue;
222 }
6f49f46b 223
79a58d1f 224 if (rc < 0)
3ab3f2a1 225 return rc;
6f49f46b 226
79a58d1f 227 if (rc == 0) {
3e84469d
SF
228 /* should never happen, letting socket clear before
229 retrying is our only obvious option here */
f96637be 230 cifs_dbg(VFS, "tcp sent no data\n");
3e84469d
SF
231 msleep(500);
232 continue;
d6e04ae6 233 }
6f49f46b 234
3ab3f2a1
AV
235 /* send was at least partially successful */
236 *sent += rc;
237 retries = 0; /* in case we get ENOSPC on the next send */
1da177e4 238 }
3ab3f2a1 239 return 0;
97bc00b3
JL
240}
241
35e2cc1b 242unsigned long
81f39f95 243smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
a26054d1
JL
244{
245 unsigned int i;
35e2cc1b
PA
246 struct kvec *iov;
247 int nvec;
a26054d1
JL
248 unsigned long buflen = 0;
249
81f39f95
RS
250 if (server->vals->header_preamble_size == 0 &&
251 rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
35e2cc1b
PA
252 iov = &rqst->rq_iov[1];
253 nvec = rqst->rq_nvec - 1;
254 } else {
255 iov = rqst->rq_iov;
256 nvec = rqst->rq_nvec;
257 }
258
a26054d1 259 /* total up iov array first */
35e2cc1b 260 for (i = 0; i < nvec; i++)
a26054d1
JL
261 buflen += iov[i].iov_len;
262
c06a0f2d
LL
263 /*
264 * Add in the page array if there is one. The caller needs to make
265 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
266 * multiple pages ends at page boundary, rq_tailsz needs to be set to
267 * PAGE_SIZE.
268 */
a26054d1 269 if (rqst->rq_npages) {
c06a0f2d
LL
270 if (rqst->rq_npages == 1)
271 buflen += rqst->rq_tailsz;
272 else {
273 /*
274 * If there is more than one page, calculate the
275 * buffer length based on rq_offset and rq_tailsz
276 */
277 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
278 rqst->rq_offset;
279 buflen += rqst->rq_tailsz;
280 }
a26054d1
JL
281 }
282
283 return buflen;
284}
285
6f49f46b 286static int
07cd952f
RS
287__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
288 struct smb_rqst *rqst)
6f49f46b 289{
07cd952f
RS
290 int rc = 0;
291 struct kvec *iov;
292 int n_vec;
293 unsigned int send_length = 0;
294 unsigned int i, j;
b30c74c7 295 sigset_t mask, oldmask;
3ab3f2a1 296 size_t total_len = 0, sent, size;
b8eed283 297 struct socket *ssocket = server->ssocket;
3ab3f2a1 298 struct msghdr smb_msg;
b8eed283 299 int val = 1;
c713c877
RS
300 __be32 rfc1002_marker;
301
9762c2d0 302 if (cifs_rdma_enabled(server) && server->smbd_conn) {
81f39f95 303 rc = smbd_send(server, rqst);
9762c2d0
LL
304 goto smbd_done;
305 }
afc18a6f 306
ea702b80 307 if (ssocket == NULL)
afc18a6f 308 return -EAGAIN;
ea702b80 309
b30c74c7
PS
310 if (signal_pending(current)) {
311 cifs_dbg(FYI, "signal is pending before sending any data\n");
312 return -EINTR;
313 }
314
b8eed283
JL
315 /* cork the socket */
316 kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
317 (char *)&val, sizeof(val));
318
07cd952f 319 for (j = 0; j < num_rqst; j++)
81f39f95 320 send_length += smb_rqst_len(server, &rqst[j]);
07cd952f
RS
321 rfc1002_marker = cpu_to_be32(send_length);
322
b30c74c7
PS
323 /*
324 * We should not allow signals to interrupt the network send because
325 * any partial send will cause session reconnects thus increasing
326 * latency of system calls and overload a server with unnecessary
327 * requests.
328 */
329
330 sigfillset(&mask);
331 sigprocmask(SIG_BLOCK, &mask, &oldmask);
332
c713c877
RS
333 /* Generate a rfc1002 marker for SMB2+ */
334 if (server->vals->header_preamble_size == 0) {
335 struct kvec hiov = {
336 .iov_base = &rfc1002_marker,
337 .iov_len = 4
338 };
aa563d7b 339 iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
c713c877
RS
340 rc = smb_send_kvec(server, &smb_msg, &sent);
341 if (rc < 0)
b30c74c7 342 goto unmask;
c713c877
RS
343
344 total_len += sent;
345 send_length += 4;
346 }
347
662bf5bc
PA
348 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
349
07cd952f
RS
350 for (j = 0; j < num_rqst; j++) {
351 iov = rqst[j].rq_iov;
352 n_vec = rqst[j].rq_nvec;
3ab3f2a1 353
07cd952f 354 size = 0;
662bf5bc
PA
355 for (i = 0; i < n_vec; i++) {
356 dump_smb(iov[i].iov_base, iov[i].iov_len);
07cd952f 357 size += iov[i].iov_len;
662bf5bc 358 }
97bc00b3 359
aa563d7b 360 iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
97bc00b3 361
3ab3f2a1 362 rc = smb_send_kvec(server, &smb_msg, &sent);
97bc00b3 363 if (rc < 0)
b30c74c7 364 goto unmask;
97bc00b3
JL
365
366 total_len += sent;
07cd952f
RS
367
368 /* now walk the page array and send each page in it */
369 for (i = 0; i < rqst[j].rq_npages; i++) {
370 struct bio_vec bvec;
371
372 bvec.bv_page = rqst[j].rq_pages[i];
373 rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
374 &bvec.bv_offset);
375
aa563d7b 376 iov_iter_bvec(&smb_msg.msg_iter, WRITE,
07cd952f
RS
377 &bvec, 1, bvec.bv_len);
378 rc = smb_send_kvec(server, &smb_msg, &sent);
379 if (rc < 0)
380 break;
381
382 total_len += sent;
383 }
97bc00b3 384 }
1da177e4 385
b30c74c7
PS
386unmask:
387 sigprocmask(SIG_SETMASK, &oldmask, NULL);
388
389 /*
390 * If signal is pending but we have already sent the whole packet to
391 * the server we need to return success status to allow a corresponding
392 * mid entry to be kept in the pending requests queue thus allowing
393 * to handle responses from the server by the client.
394 *
395 * If only part of the packet has been sent there is no need to hide
396 * interrupt because the session will be reconnected anyway, so there
397 * won't be any response from the server to handle.
398 */
399
400 if (signal_pending(current) && (total_len != send_length)) {
401 cifs_dbg(FYI, "signal is pending after attempt to send\n");
402 rc = -EINTR;
403 }
404
b8eed283
JL
405 /* uncork it */
406 val = 0;
407 kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
408 (char *)&val, sizeof(val));
409
c713c877 410 if ((total_len > 0) && (total_len != send_length)) {
f96637be 411 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
c713c877 412 send_length, total_len);
6f49f46b
JL
413 /*
414 * If we have only sent part of an SMB then the next SMB could
415 * be taken as the remainder of this one. We need to kill the
416 * socket so the server throws away the partial SMB
417 */
edf1ae40 418 server->tcpStatus = CifsNeedReconnect;
bf1fdeb7
SF
419 trace_smb3_partial_send_reconnect(server->CurrentMid,
420 server->hostname);
edf1ae40 421 }
9762c2d0 422smbd_done:
d804d41d 423 if (rc < 0 && rc != -EINTR)
f96637be
JP
424 cifs_dbg(VFS, "Error %d sending data on socket to server\n",
425 rc);
ee13919c 426 else if (rc > 0)
1da177e4 427 rc = 0;
1da177e4
LT
428
429 return rc;
430}
431
6f49f46b 432static int
1f3a8f5f
RS
433smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
434 struct smb_rqst *rqst, int flags)
6f49f46b 435{
b2c96de7
RS
436 struct kvec iov;
437 struct smb2_transform_hdr tr_hdr;
438 struct smb_rqst cur_rqst[MAX_COMPOUND];
7fb8986e
PS
439 int rc;
440
441 if (!(flags & CIFS_TRANSFORM_REQ))
1f3a8f5f
RS
442 return __smb_send_rqst(server, num_rqst, rqst);
443
444 if (num_rqst > MAX_COMPOUND - 1)
445 return -ENOMEM;
7fb8986e 446
b2c96de7
RS
447 memset(&cur_rqst[0], 0, sizeof(cur_rqst));
448 memset(&iov, 0, sizeof(iov));
449 memset(&tr_hdr, 0, sizeof(tr_hdr));
450
451 iov.iov_base = &tr_hdr;
452 iov.iov_len = sizeof(tr_hdr);
453 cur_rqst[0].rq_iov = &iov;
454 cur_rqst[0].rq_nvec = 1;
455
456 if (!server->ops->init_transform_rq) {
457 cifs_dbg(VFS, "Encryption requested but transform callback "
458 "is missing\n");
7fb8986e
PS
459 return -EIO;
460 }
6f49f46b 461
1f3a8f5f
RS
462 rc = server->ops->init_transform_rq(server, num_rqst + 1,
463 &cur_rqst[0], rqst);
7fb8986e
PS
464 if (rc)
465 return rc;
466
1f3a8f5f
RS
467 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
468 smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
7fb8986e 469 return rc;
6f49f46b
JL
470}
471
0496e02d
JL
472int
473smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
474 unsigned int smb_buf_length)
475{
738f9de5 476 struct kvec iov[2];
7fb8986e
PS
477 struct smb_rqst rqst = { .rq_iov = iov,
478 .rq_nvec = 2 };
0496e02d 479
738f9de5
PS
480 iov[0].iov_base = smb_buffer;
481 iov[0].iov_len = 4;
482 iov[1].iov_base = (char *)smb_buffer + 4;
483 iov[1].iov_len = smb_buf_length;
0496e02d 484
07cd952f 485 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
486}
487
fc40f9cf 488static int
a891f0f8 489wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
34f4deb7 490 int *credits, unsigned int *instance)
1da177e4 491{
5bc59498
PS
492 int rc;
493
34f4deb7
PS
494 *instance = 0;
495
fc40f9cf 496 spin_lock(&server->req_lock);
a891f0f8 497 if (timeout == CIFS_ASYNC_OP) {
1da177e4 498 /* oplock breaks must not be held up */
fc40f9cf 499 server->in_flight++;
bc205ed1 500 *credits -= 1;
34f4deb7 501 *instance = server->reconnect_instance;
fc40f9cf 502 spin_unlock(&server->req_lock);
27a97a61
VL
503 return 0;
504 }
505
27a97a61 506 while (1) {
bc205ed1 507 if (*credits <= 0) {
fc40f9cf 508 spin_unlock(&server->req_lock);
789e6661 509 cifs_num_waiters_inc(server);
5bc59498 510 rc = wait_event_killable(server->request_q,
bc205ed1 511 has_credits(server, credits));
789e6661 512 cifs_num_waiters_dec(server);
5bc59498
PS
513 if (rc)
514 return rc;
fc40f9cf 515 spin_lock(&server->req_lock);
27a97a61 516 } else {
c5797a94 517 if (server->tcpStatus == CifsExiting) {
fc40f9cf 518 spin_unlock(&server->req_lock);
27a97a61 519 return -ENOENT;
1da177e4 520 }
27a97a61 521
2d86dbc9
PS
522 /*
523 * Can not count locking commands against total
524 * as they are allowed to block on server.
525 */
27a97a61
VL
526
527 /* update # of requests on the wire to server */
a891f0f8 528 if (timeout != CIFS_BLOCKING_OP) {
bc205ed1 529 *credits -= 1;
fc40f9cf 530 server->in_flight++;
34f4deb7 531 *instance = server->reconnect_instance;
2d86dbc9 532 }
fc40f9cf 533 spin_unlock(&server->req_lock);
27a97a61 534 break;
1da177e4
LT
535 }
536 }
7ee1af76
JA
537 return 0;
538}
1da177e4 539
bc205ed1 540static int
480b1cb9
RS
541wait_for_free_request(struct TCP_Server_Info *server, const int flags,
542 unsigned int *instance)
bc205ed1 543{
eb4c7df6 544 int *val;
480b1cb9
RS
545 int timeout, optype;
546
547 timeout = flags & CIFS_TIMEOUT_MASK;
548 optype = flags & CIFS_OP_MASK;
eb4c7df6
SP
549
550 val = server->ops->get_credits_field(server, optype);
551 /* Since an echo is already inflight, no need to wait to send another */
552 if (*val <= 0 && optype == CIFS_ECHO_OP)
553 return -EAGAIN;
480b1cb9 554
34f4deb7 555 return wait_for_free_credits(server, timeout, val, instance);
bc205ed1
PS
556}
557
cb7e9eab
PS
558int
559cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
335b7b62 560 unsigned int *num, struct cifs_credits *credits)
cb7e9eab
PS
561{
562 *num = size;
335b7b62
PS
563 credits->value = 0;
564 credits->instance = server->reconnect_instance;
cb7e9eab
PS
565 return 0;
566}
567
96daf2b0 568static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
7ee1af76
JA
569 struct mid_q_entry **ppmidQ)
570{
1da177e4 571 if (ses->server->tcpStatus == CifsExiting) {
7ee1af76 572 return -ENOENT;
8fbbd365
VL
573 }
574
575 if (ses->server->tcpStatus == CifsNeedReconnect) {
f96637be 576 cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
7ee1af76 577 return -EAGAIN;
8fbbd365
VL
578 }
579
7f48558e 580 if (ses->status == CifsNew) {
79a58d1f 581 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
ad7a2926 582 (in_buf->Command != SMB_COM_NEGOTIATE))
7ee1af76 583 return -EAGAIN;
ad7a2926 584 /* else ok - we are setting up session */
1da177e4 585 }
7f48558e
SP
586
587 if (ses->status == CifsExiting) {
588 /* check if SMB session is bad because we are setting it up */
589 if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
590 return -EAGAIN;
591 /* else ok - we are shutting down session */
592 }
593
24b9b06b 594 *ppmidQ = AllocMidQEntry(in_buf, ses->server);
26f57364 595 if (*ppmidQ == NULL)
7ee1af76 596 return -ENOMEM;
ddc8cf8f
JL
597 spin_lock(&GlobalMid_Lock);
598 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
599 spin_unlock(&GlobalMid_Lock);
7ee1af76
JA
600 return 0;
601}
602
0ade640e
JL
603static int
604wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
7ee1af76 605{
0ade640e 606 int error;
7ee1af76 607
5853cc2a 608 error = wait_event_freezekillable_unsafe(server->response_q,
7c9421e1 609 midQ->mid_state != MID_REQUEST_SUBMITTED);
0ade640e
JL
610 if (error < 0)
611 return -ERESTARTSYS;
7ee1af76 612
0ade640e 613 return 0;
7ee1af76
JA
614}
615
fec344e3
JL
616struct mid_q_entry *
617cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
792af7b0
PS
618{
619 int rc;
fec344e3 620 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
621 struct mid_q_entry *mid;
622
738f9de5
PS
623 if (rqst->rq_iov[0].iov_len != 4 ||
624 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
625 return ERR_PTR(-EIO);
626
792af7b0 627 /* enable signing if server requires it */
38d77c50 628 if (server->sign)
792af7b0
PS
629 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
630
631 mid = AllocMidQEntry(hdr, server);
632 if (mid == NULL)
fec344e3 633 return ERR_PTR(-ENOMEM);
792af7b0 634
fec344e3 635 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
ffc61ccb
SP
636 if (rc) {
637 DeleteMidQEntry(mid);
fec344e3 638 return ERR_PTR(rc);
ffc61ccb
SP
639 }
640
fec344e3 641 return mid;
792af7b0 642}
133672ef 643
a6827c18
JL
644/*
645 * Send a SMB request and set the callback function in the mid to handle
646 * the result. Caller is responsible for dealing with timeouts.
647 */
648int
fec344e3 649cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
9b7c18a2 650 mid_receive_t *receive, mid_callback_t *callback,
3349c3a7
PS
651 mid_handle_t *handle, void *cbdata, const int flags,
652 const struct cifs_credits *exist_credits)
a6827c18 653{
480b1cb9 654 int rc;
a6827c18 655 struct mid_q_entry *mid;
335b7b62 656 struct cifs_credits credits = { .value = 0, .instance = 0 };
34f4deb7 657 unsigned int instance;
480b1cb9 658 int optype;
a6827c18 659
a891f0f8
PS
660 optype = flags & CIFS_OP_MASK;
661
cb7e9eab 662 if ((flags & CIFS_HAS_CREDITS) == 0) {
480b1cb9 663 rc = wait_for_free_request(server, flags, &instance);
cb7e9eab
PS
664 if (rc)
665 return rc;
335b7b62 666 credits.value = 1;
34f4deb7 667 credits.instance = instance;
3349c3a7
PS
668 } else
669 instance = exist_credits->instance;
a6827c18
JL
670
671 mutex_lock(&server->srv_mutex);
3349c3a7
PS
672
673 /*
674 * We can't use credits obtained from the previous session to send this
675 * request. Check if there were reconnects after we obtained credits and
676 * return -EAGAIN in such cases to let callers handle it.
677 */
678 if (instance != server->reconnect_instance) {
679 mutex_unlock(&server->srv_mutex);
680 add_credits_and_wake_if(server, &credits, optype);
681 return -EAGAIN;
682 }
683
fec344e3
JL
684 mid = server->ops->setup_async_request(server, rqst);
685 if (IS_ERR(mid)) {
a6827c18 686 mutex_unlock(&server->srv_mutex);
335b7b62 687 add_credits_and_wake_if(server, &credits, optype);
fec344e3 688 return PTR_ERR(mid);
a6827c18
JL
689 }
690
44d22d84 691 mid->receive = receive;
a6827c18
JL
692 mid->callback = callback;
693 mid->callback_data = cbdata;
9b7c18a2 694 mid->handle = handle;
7c9421e1 695 mid->mid_state = MID_REQUEST_SUBMITTED;
789e6661 696
ffc61ccb
SP
697 /* put it on the pending_mid_q */
698 spin_lock(&GlobalMid_Lock);
699 list_add_tail(&mid->qhead, &server->pending_mid_q);
700 spin_unlock(&GlobalMid_Lock);
701
93d2cb6c
LL
702 /*
703 * Need to store the time in mid before calling I/O. For call_async,
704 * I/O response may come back and free the mid entry on another thread.
705 */
706 cifs_save_when_sent(mid);
789e6661 707 cifs_in_send_inc(server);
1f3a8f5f 708 rc = smb_send_rqst(server, 1, rqst, flags);
789e6661 709 cifs_in_send_dec(server);
ad313cb8 710
820962dc 711 if (rc < 0) {
c781af7e 712 revert_current_mid(server, mid->credits);
ad313cb8 713 server->sequence_number -= 2;
820962dc
RV
714 cifs_delete_mid(mid);
715 }
716
a6827c18 717 mutex_unlock(&server->srv_mutex);
789e6661 718
ffc61ccb
SP
719 if (rc == 0)
720 return 0;
a6827c18 721
335b7b62 722 add_credits_and_wake_if(server, &credits, optype);
a6827c18
JL
723 return rc;
724}
725
133672ef
SF
726/*
727 *
728 * Send an SMB Request. No response info (other than return code)
729 * needs to be parsed.
730 *
731 * flags indicate the type of request buffer and how long to wait
732 * and whether to log NT STATUS code (error) before mapping it to POSIX error
733 *
734 */
735int
96daf2b0 736SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
792af7b0 737 char *in_buf, int flags)
133672ef
SF
738{
739 int rc;
740 struct kvec iov[1];
da502f7d 741 struct kvec rsp_iov;
133672ef
SF
742 int resp_buf_type;
743
792af7b0
PS
744 iov[0].iov_base = in_buf;
745 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
133672ef 746 flags |= CIFS_NO_RESP;
da502f7d 747 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
f96637be 748 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
90c81e0b 749
133672ef
SF
750 return rc;
751}
752
053d5034 753static int
3c1105df 754cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
053d5034
JL
755{
756 int rc = 0;
757
f96637be
JP
758 cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
759 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
053d5034 760
74dd92a8 761 spin_lock(&GlobalMid_Lock);
7c9421e1 762 switch (mid->mid_state) {
74dd92a8 763 case MID_RESPONSE_RECEIVED:
053d5034
JL
764 spin_unlock(&GlobalMid_Lock);
765 return rc;
74dd92a8
JL
766 case MID_RETRY_NEEDED:
767 rc = -EAGAIN;
768 break;
71823baf
JL
769 case MID_RESPONSE_MALFORMED:
770 rc = -EIO;
771 break;
3c1105df
JL
772 case MID_SHUTDOWN:
773 rc = -EHOSTDOWN;
774 break;
74dd92a8 775 default:
3c1105df 776 list_del_init(&mid->qhead);
f96637be
JP
777 cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
778 __func__, mid->mid, mid->mid_state);
74dd92a8 779 rc = -EIO;
053d5034
JL
780 }
781 spin_unlock(&GlobalMid_Lock);
782
2b84a36c 783 DeleteMidQEntry(mid);
053d5034
JL
784 return rc;
785}
786
121b046a 787static inline int
fb2036d8
PS
788send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
789 struct mid_q_entry *mid)
76dcc26f 790{
121b046a 791 return server->ops->send_cancel ?
fb2036d8 792 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
793}
794
2c8f981d
JL
795int
796cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
797 bool log_error)
798{
792af7b0 799 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
826a95e4
JL
800
801 dump_smb(mid->resp_buf, min_t(u32, 92, len));
2c8f981d
JL
802
803 /* convert the length into a more usable form */
38d77c50 804 if (server->sign) {
738f9de5 805 struct kvec iov[2];
985e4ff0 806 int rc = 0;
738f9de5
PS
807 struct smb_rqst rqst = { .rq_iov = iov,
808 .rq_nvec = 2 };
826a95e4 809
738f9de5
PS
810 iov[0].iov_base = mid->resp_buf;
811 iov[0].iov_len = 4;
812 iov[1].iov_base = (char *)mid->resp_buf + 4;
813 iov[1].iov_len = len - 4;
2c8f981d 814 /* FIXME: add code to kill session */
bf5ea0e2 815 rc = cifs_verify_signature(&rqst, server,
0124cc45 816 mid->sequence_number);
985e4ff0 817 if (rc)
f96637be
JP
818 cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
819 rc);
2c8f981d
JL
820 }
821
822 /* BB special case reconnect tid and uid here? */
823 return map_smb_to_linux_error(mid->resp_buf, log_error);
824}
825
fec344e3
JL
826struct mid_q_entry *
827cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
792af7b0
PS
828{
829 int rc;
fec344e3 830 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
831 struct mid_q_entry *mid;
832
738f9de5
PS
833 if (rqst->rq_iov[0].iov_len != 4 ||
834 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
835 return ERR_PTR(-EIO);
836
792af7b0
PS
837 rc = allocate_mid(ses, hdr, &mid);
838 if (rc)
fec344e3
JL
839 return ERR_PTR(rc);
840 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
841 if (rc) {
3c1bf7e4 842 cifs_delete_mid(mid);
fec344e3
JL
843 return ERR_PTR(rc);
844 }
845 return mid;
792af7b0
PS
846}
847
4e34feb5 848static void
ee258d79 849cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
850{
851 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
852 struct cifs_credits credits;
853
854 credits.value = server->ops->get_credits(mid);
855 credits.instance = server->reconnect_instance;
8a26f0f7 856
34f4deb7 857 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
858}
859
/*
 * Callback for the last mid in a compound chain: return credits, then
 * wake the waiting task.
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
866
/*
 * Callback for a mid whose waiter gave up: return credits and free the
 * mid, since no one will collect the response.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}
873
b8f57ee8 874int
e0bba0b8
RS
875compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
876 const int flags, const int num_rqst, struct smb_rqst *rqst,
877 int *resp_buf_type, struct kvec *resp_iov)
7ee1af76 878{
480b1cb9 879 int i, j, optype, rc = 0;
e0bba0b8 880 struct mid_q_entry *midQ[MAX_COMPOUND];
8544f4aa 881 bool cancelled_mid[MAX_COMPOUND] = {false};
34f4deb7
PS
882 struct cifs_credits credits[MAX_COMPOUND] = {
883 { .value = 0, .instance = 0 }
884 };
885 unsigned int instance;
97ea4998 886 unsigned int first_instance = 0;
738f9de5 887 char *buf;
50c2f753 888
a891f0f8 889 optype = flags & CIFS_OP_MASK;
133672ef 890
e0bba0b8
RS
891 for (i = 0; i < num_rqst; i++)
892 resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */
7ee1af76
JA
893
894 if ((ses == NULL) || (ses->server == NULL)) {
f96637be 895 cifs_dbg(VFS, "Null session\n");
7ee1af76
JA
896 return -EIO;
897 }
898
da502f7d 899 if (ses->server->tcpStatus == CifsExiting)
7ee1af76 900 return -ENOENT;
7ee1af76 901
7091bcab
PS
902 spin_lock(&ses->server->req_lock);
903 if (ses->server->credits < num_rqst) {
904 /*
905 * Return immediately if not too many requests in flight since
906 * we will likely be stuck on waiting for credits.
907 */
908 if (ses->server->in_flight < num_rqst - ses->server->credits) {
909 spin_unlock(&ses->server->req_lock);
910 return -ENOTSUPP;
911 }
912 } else {
913 /* enough credits to send the whole compounded request */
914 ses->server->credits -= num_rqst;
915 ses->server->in_flight += num_rqst;
916 first_instance = ses->server->reconnect_instance;
917 }
918 spin_unlock(&ses->server->req_lock);
919
920 if (first_instance) {
921 cifs_dbg(FYI, "Acquired %d credits at once\n", num_rqst);
922 for (i = 0; i < num_rqst; i++) {
923 credits[i].value = 1;
924 credits[i].instance = first_instance;
925 }
926 goto setup_rqsts;
927 }
928
792af7b0 929 /*
7091bcab
PS
930 * There are not enough credits to send the whole compound request but
931 * there are requests in flight that may bring credits from the server.
932 * This approach still leaves the possibility to be stuck waiting for
933 * credits if the server doesn't grant credits to the outstanding
934 * requests. This should be fixed by returning immediately and letting
935 * a caller fallback to sequential commands instead of compounding.
8544f4aa 936 * Ensure we obtain 1 credit per request in the compound chain.
792af7b0 937 */
8544f4aa 938 for (i = 0; i < num_rqst; i++) {
480b1cb9 939 rc = wait_for_free_request(ses->server, flags, &instance);
97ea4998
PS
940
941 if (rc == 0) {
942 credits[i].value = 1;
943 credits[i].instance = instance;
944 /*
945 * All parts of the compound chain must get credits from
946 * the same session, otherwise we may end up using more
947 * credits than the server granted. If there were
948 * reconnects in between, return -EAGAIN and let callers
949 * handle it.
950 */
951 if (i == 0)
952 first_instance = instance;
953 else if (first_instance != instance) {
954 i++;
955 rc = -EAGAIN;
956 }
957 }
958
8544f4aa
PS
959 if (rc) {
960 /*
961 * We haven't sent an SMB packet to the server yet but
962 * we already obtained credits for i requests in the
963 * compound chain - need to return those credits back
964 * for future use. Note that we need to call add_credits
965 * multiple times to match the way we obtained credits
966 * in the first place and to account for in flight
967 * requests correctly.
968 */
969 for (j = 0; j < i; j++)
34f4deb7 970 add_credits(ses->server, &credits[j], optype);
8544f4aa
PS
971 return rc;
972 }
8544f4aa 973 }
7ee1af76 974
7091bcab 975setup_rqsts:
792af7b0
PS
976 /*
977 * Make sure that we sign in the same order that we send on this socket
978 * and avoid races inside tcp sendmsg code that could cause corruption
979 * of smb data.
980 */
7ee1af76 981
72ca545b 982 mutex_lock(&ses->server->srv_mutex);
7ee1af76 983
97ea4998
PS
984 /*
985 * All the parts of the compound chain belong obtained credits from the
986 * same session (see the appropriate checks above). In the same time
987 * there might be reconnects after those checks but before we acquired
988 * the srv_mutex. We can not use credits obtained from the previous
989 * session to send this request. Check if there were reconnects after
990 * we obtained credits and return -EAGAIN in such cases to let callers
991 * handle it.
992 */
993 if (first_instance != ses->server->reconnect_instance) {
994 mutex_unlock(&ses->server->srv_mutex);
995 for (j = 0; j < num_rqst; j++)
996 add_credits(ses->server, &credits[j], optype);
997 return -EAGAIN;
998 }
999
e0bba0b8
RS
1000 for (i = 0; i < num_rqst; i++) {
1001 midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
1002 if (IS_ERR(midQ[i])) {
c781af7e 1003 revert_current_mid(ses->server, i);
e0bba0b8
RS
1004 for (j = 0; j < i; j++)
1005 cifs_delete_mid(midQ[j]);
1006 mutex_unlock(&ses->server->srv_mutex);
8544f4aa 1007
e0bba0b8 1008 /* Update # of requests on wire to server */
8544f4aa 1009 for (j = 0; j < num_rqst; j++)
34f4deb7 1010 add_credits(ses->server, &credits[j], optype);
e0bba0b8
RS
1011 return PTR_ERR(midQ[i]);
1012 }
1013
1014 midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
8a26f0f7 1015 midQ[i]->optype = optype;
4e34feb5 1016 /*
ee258d79
PS
1017 * Invoke callback for every part of the compound chain
1018 * to calculate credits properly. Wake up this thread only when
1019 * the last element is received.
4e34feb5
RS
1020 */
1021 if (i < num_rqst - 1)
ee258d79
PS
1022 midQ[i]->callback = cifs_compound_callback;
1023 else
1024 midQ[i]->callback = cifs_compound_last_callback;
1da177e4 1025 }
789e6661 1026 cifs_in_send_inc(ses->server);
e0bba0b8 1027 rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
789e6661 1028 cifs_in_send_dec(ses->server);
e0bba0b8
RS
1029
1030 for (i = 0; i < num_rqst; i++)
1031 cifs_save_when_sent(midQ[i]);
7ee1af76 1032
c781af7e
PS
1033 if (rc < 0) {
1034 revert_current_mid(ses->server, num_rqst);
ad313cb8 1035 ses->server->sequence_number -= 2;
c781af7e 1036 }
e0bba0b8 1037
72ca545b 1038 mutex_unlock(&ses->server->srv_mutex);
7ee1af76 1039
ee258d79
PS
1040 if (rc < 0) {
1041 /* Sending failed for some reason - return credits back */
1042 for (i = 0; i < num_rqst; i++)
34f4deb7 1043 add_credits(ses->server, &credits[i], optype);
cb5c2e63 1044 goto out;
ee258d79
PS
1045 }
1046
1047 /*
1048 * At this point the request is passed to the network stack - we assume
1049 * that any credits taken from the server structure on the client have
1050 * been spent and we can't return them back. Once we receive responses
1051 * we will collect credits granted by the server in the mid callbacks
1052 * and add those credits to the server structure.
1053 */
e0bba0b8 1054
cb5c2e63
RS
1055 /*
1056 * Compounding is never used during session establish.
1057 */
1058 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
1059 smb311_update_preauth_hash(ses, rqst[0].rq_iov,
1060 rqst[0].rq_nvec);
e0bba0b8 1061
480b1cb9 1062 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP)
cb5c2e63 1063 goto out;
e0bba0b8 1064
cb5c2e63 1065 for (i = 0; i < num_rqst; i++) {
e0bba0b8 1066 rc = wait_for_response(ses->server, midQ[i]);
8a26f0f7
PS
1067 if (rc != 0)
1068 break;
1069 }
1070 if (rc != 0) {
1071 for (; i < num_rqst; i++) {
43de1db3
SF
1072 cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
1073 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
e0bba0b8
RS
1074 send_cancel(ses->server, &rqst[i], midQ[i]);
1075 spin_lock(&GlobalMid_Lock);
1076 if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
1077 midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
8a26f0f7 1078 midQ[i]->callback = cifs_cancelled_callback;
8544f4aa 1079 cancelled_mid[i] = true;
34f4deb7 1080 credits[i].value = 0;
e0bba0b8 1081 }
1be912dd 1082 spin_unlock(&GlobalMid_Lock);
e0bba0b8 1083 }
cb5c2e63
RS
1084 }
1085
cb5c2e63
RS
1086 for (i = 0; i < num_rqst; i++) {
1087 if (rc < 0)
1088 goto out;
e0bba0b8
RS
1089
1090 rc = cifs_sync_mid_result(midQ[i], ses->server);
1091 if (rc != 0) {
8544f4aa
PS
1092 /* mark this mid as cancelled to not free it below */
1093 cancelled_mid[i] = true;
1094 goto out;
1be912dd 1095 }
2b2bdfba 1096
e0bba0b8
RS
1097 if (!midQ[i]->resp_buf ||
1098 midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
1099 rc = -EIO;
1100 cifs_dbg(FYI, "Bad MID state?\n");
1101 goto out;
1102 }
a891f0f8 1103
e0bba0b8
RS
1104 buf = (char *)midQ[i]->resp_buf;
1105 resp_iov[i].iov_base = buf;
1106 resp_iov[i].iov_len = midQ[i]->resp_buf_size +
1107 ses->server->vals->header_preamble_size;
1108
1109 if (midQ[i]->large_buf)
1110 resp_buf_type[i] = CIFS_LARGE_BUFFER;
1111 else
1112 resp_buf_type[i] = CIFS_SMALL_BUFFER;
1113
e0bba0b8
RS
1114 rc = ses->server->ops->check_receive(midQ[i], ses->server,
1115 flags & CIFS_LOG_ERROR);
1da177e4 1116
e0bba0b8
RS
1117 /* mark it so buf will not be freed by cifs_delete_mid */
1118 if ((flags & CIFS_NO_RESP) == 0)
1119 midQ[i]->resp_buf = NULL;
cb5c2e63 1120
e0bba0b8 1121 }
cb5c2e63
RS
1122
1123 /*
1124 * Compounding is never used during session establish.
1125 */
1126 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
1127 struct kvec iov = {
1128 .iov_base = resp_iov[0].iov_base,
1129 .iov_len = resp_iov[0].iov_len
1130 };
1131 smb311_update_preauth_hash(ses, &iov, 1);
1132 }
1133
7ee1af76 1134out:
4e34feb5
RS
1135 /*
1136 * This will dequeue all mids. After this it is important that the
1137 * demultiplex_thread will not process any of these mids any futher.
1138 * This is prevented above by using a noop callback that will not
1139 * wake this thread except for the very last PDU.
1140 */
8544f4aa
PS
1141 for (i = 0; i < num_rqst; i++) {
1142 if (!cancelled_mid[i])
1143 cifs_delete_mid(midQ[i]);
8544f4aa 1144 }
1da177e4 1145
d6e04ae6
SF
1146 return rc;
1147}
1da177e4 1148
e0bba0b8
RS
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	int rc;

	/*
	 * A single request is just the degenerate compound case: delegate
	 * to the compounding path with a chain length of one.
	 */
	rc = compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				resp_iov);
	return rc;
}
1157
738f9de5
PS
1158int
1159SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1160 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1161 const int flags, struct kvec *resp_iov)
1162{
1163 struct smb_rqst rqst;
3cecf486 1164 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
738f9de5
PS
1165 int rc;
1166
3cecf486 1167 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
6da2ec56
KC
1168 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1169 GFP_KERNEL);
117e3b7f
SF
1170 if (!new_iov) {
1171 /* otherwise cifs_send_recv below sets resp_buf_type */
1172 *resp_buf_type = CIFS_NO_BUFFER;
3cecf486 1173 return -ENOMEM;
117e3b7f 1174 }
3cecf486
RS
1175 } else
1176 new_iov = s_iov;
738f9de5
PS
1177
1178 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1179 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1180
1181 new_iov[0].iov_base = new_iov[1].iov_base;
1182 new_iov[0].iov_len = 4;
1183 new_iov[1].iov_base += 4;
1184 new_iov[1].iov_len -= 4;
1185
1186 memset(&rqst, 0, sizeof(struct smb_rqst));
1187 rqst.rq_iov = new_iov;
1188 rqst.rq_nvec = n_vec + 1;
1189
1190 rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
3cecf486
RS
1191 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1192 kfree(new_iov);
738f9de5
PS
1193 return rc;
1194}
1195
/*
 * Send a single legacy (SMB1) request and synchronously wait for its
 * response.
 *
 * in_buf holds the fully built request (first 4 bytes are the RFC1001
 * length); on success the response is copied into out_buf and
 * *pbytes_returned is set to the RFC1002 payload length. flags carries the
 * timeout/blocking mode consumed by wait_for_free_request() and the
 * CIFS_ASYNC_OP check below. Returns 0 or a negative errno.
 *
 * One credit is taken before sending and is returned to the server
 * structure via add_credits() on every exit path (including cancel).
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	/* blocks until a send slot/credit is available per flags */
	rc = wait_for_free_request(ses->server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* out: frees the mid and returns the credit */
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	/* undo the signing sequence bump if nothing actually went out */
	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	/* async callers do not wait for the response here */
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, &credits, 0);
			return rc;
		}
		/* response raced in after the wait failed; fall through
		   and collect the result normally */
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		/* mid already freed by cifs_sync_mid_result on this path */
		add_credits(ses->server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy the response (length field + payload) back to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, &credits, 0);

	return rc;
}
1da177e4 1308
7ee1af76
JA
1309/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1310 blocking lock to return. */
1311
1312static int
96daf2b0 1313send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
7ee1af76
JA
1314 struct smb_hdr *in_buf,
1315 struct smb_hdr *out_buf)
1316{
1317 int bytes_returned;
96daf2b0 1318 struct cifs_ses *ses = tcon->ses;
7ee1af76
JA
1319 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1320
1321 /* We just modify the current in_buf to change
1322 the type of lock from LOCKING_ANDX_SHARED_LOCK
1323 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1324 LOCKING_ANDX_CANCEL_LOCK. */
1325
1326 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1327 pSMB->Timeout = 0;
88257360 1328 pSMB->hdr.Mid = get_next_mid(ses->server);
7ee1af76
JA
1329
1330 return SendReceive(xid, ses, in_buf, out_buf,
7749981e 1331 &bytes_returned, 0);
7ee1af76
JA
1332}
1333
/*
 * Send a blocking SMB1 byte-range lock request and wait for it to
 * complete, allowing the wait to be interrupted by signals.
 *
 * If a signal arrives while the lock is still pending on the server, a
 * cancel is sent (NT_CANCEL for POSIX/T2 locks, LOCKINGX_CANCEL_LOCK for
 * Windows-style locks) and, once the server replies, the syscall is
 * restarted by mapping -EACCES to -ERESTARTSYS (see rstart below).
 *
 * Returns 0 or a negative errno; on success the response is copied to
 * out_buf and *pbytes_returned holds its RFC1002 length.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	/* set once the cancelled wait still produced a response, so that a
	   final -EACCES is translated into a syscall restart */
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	/* CIFS_BLOCKING_OP: this request may legitimately wait a long time */
	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	/* undo the signing sequence bump if the send failed */
	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		/* uninterruptible wait for the (now cancelled) lock reply */
		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			/* response raced in; fall through and collect it */
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy the response (length field + payload) back to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}