cifs: prevent starvation in wait_for_free_credits for multi-credit requests
[linux-2.6-block.git] / fs/cifs/transport.c
1da177e4
LT
1/*
2 * fs/cifs/transport.c
3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
79a58d1f 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
1da177e4
LT
21 */
22
23#include <linux/fs.h>
24#include <linux/list.h>
5a0e3ad6 25#include <linux/gfp.h>
1da177e4
LT
26#include <linux/wait.h>
27#include <linux/net.h>
28#include <linux/delay.h>
f06ac72e 29#include <linux/freezer.h>
b8eed283 30#include <linux/tcp.h>
2f8b5444 31#include <linux/bvec.h>
97bc00b3 32#include <linux/highmem.h>
7c0f6ba6 33#include <linux/uaccess.h>
1da177e4
LT
34#include <asm/processor.h>
35#include <linux/mempool.h>
b30c74c7 36#include <linux/signal.h>
1da177e4
LT
37#include "cifspdu.h"
38#include "cifsglob.h"
39#include "cifsproto.h"
40#include "cifs_debug.h"
8bd68c6e 41#include "smb2proto.h"
9762c2d0 42#include "smbdirect.h"
50c2f753 43
3cecf486
RS
44/* Max number of iovectors we can use off the stack when sending requests. */
45#define CIFS_MAX_IOV_SIZE 8
46
2dc7e1c0
PS
47void
48cifs_wake_up_task(struct mid_q_entry *mid)
2b84a36c
JL
49{
50 wake_up_process(mid->callback_data);
51}
52
a6827c18 53struct mid_q_entry *
24b9b06b 54AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
1da177e4
LT
55{
56 struct mid_q_entry *temp;
57
24b9b06b 58 if (server == NULL) {
f96637be 59 cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
1da177e4
LT
60 return NULL;
61 }
50c2f753 62
232087cb 63 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
a6f74e80 64 memset(temp, 0, sizeof(struct mid_q_entry));
696e420b 65 kref_init(&temp->refcount);
a6f74e80
N
66 temp->mid = get_mid(smb_buffer);
67 temp->pid = current->pid;
68 temp->command = cpu_to_le16(smb_buffer->Command);
69 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
1047abc1 70 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
a6f74e80
N
71 /* when mid allocated can be before when sent */
72 temp->when_alloc = jiffies;
73 temp->server = server;
2b84a36c 74
a6f74e80
N
75 /*
76 * The default is for the mid to be synchronous, so the
77 * default callback just wakes up the current task.
78 */
79 temp->callback = cifs_wake_up_task;
80 temp->callback_data = current;
1da177e4 81
1da177e4 82 atomic_inc(&midCount);
7c9421e1 83 temp->mid_state = MID_REQUEST_ALLOCATED;
1da177e4
LT
84 return temp;
85}
86
696e420b
LP
87static void _cifs_mid_q_entry_release(struct kref *refcount)
88{
89 struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
90 refcount);
91
92 mempool_free(mid, cifs_mid_poolp);
93}
94
95void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
96{
97 spin_lock(&GlobalMid_Lock);
98 kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
99 spin_unlock(&GlobalMid_Lock);
100}
101
766fdbb5 102void
1da177e4
LT
103DeleteMidQEntry(struct mid_q_entry *midEntry)
104{
1047abc1 105#ifdef CONFIG_CIFS_STATS2
2dc7e1c0 106 __le16 command = midEntry->server->vals->lock_cmd;
1047abc1
SF
107 unsigned long now;
108#endif
7c9421e1 109 midEntry->mid_state = MID_FREE;
8097531a 110 atomic_dec(&midCount);
7c9421e1 111 if (midEntry->large_buf)
b8643e1b
SF
112 cifs_buf_release(midEntry->resp_buf);
113 else
114 cifs_small_buf_release(midEntry->resp_buf);
1047abc1
SF
115#ifdef CONFIG_CIFS_STATS2
116 now = jiffies;
00778e22
SF
117 /*
118 * Commands taking longer than one second (the default) can be an
119 * indication that something is wrong, unless it is quite a slow link
120 * or a very busy server. Note that this calculation is unlikely to
121 * wrap as long as slow_rsp_threshold is not set far above the
122 * recommended maximum (32767, i.e. about 9 hours), and is harmless
123 * even if wrong since it only affects debug counters - so leave the
124 * calculation as a simple comparison rather than doing multiple
125 * conversions and overflow checks.
126 */
127 if ((slow_rsp_threshold != 0) &&
128 time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
020eec5f 129 (midEntry->command != command)) {
f5942db5
SF
130 /*
131 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
132 * NB: le16_to_cpu returns unsigned so can not be negative below
133 */
134 if (le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS)
468d6779
SF
135 cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);
136
020eec5f
SF
137 trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
138 midEntry->mid, midEntry->pid,
139 midEntry->when_sent, midEntry->when_received);
140 if (cifsFYI & CIFS_TIMER) {
0b456f04 141 pr_debug(" CIFS slow rsp: cmd %d mid %llu",
1047abc1 142 midEntry->command, midEntry->mid);
f80eaedd 143 cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
1047abc1
SF
144 now - midEntry->when_alloc,
145 now - midEntry->when_sent,
146 now - midEntry->when_received);
147 }
148 }
149#endif
696e420b 150 cifs_mid_q_entry_release(midEntry);
1da177e4
LT
151}
152
3c1bf7e4
PS
153void
154cifs_delete_mid(struct mid_q_entry *mid)
ddc8cf8f
JL
155{
156 spin_lock(&GlobalMid_Lock);
ddf83afb
RS
157 list_del_init(&mid->qhead);
158 mid->mid_flags |= MID_DELETED;
ddc8cf8f
JL
159 spin_unlock(&GlobalMid_Lock);
160
161 DeleteMidQEntry(mid);
162}
163
6f49f46b
JL
164/*
165 * smb_send_kvec - send an array of kvecs to the server
166 * @server: Server to send the data to
3ab3f2a1 167 * @smb_msg: Message to send
6f49f46b
JL
168 * @sent: amount of data sent on socket is stored here
169 *
170 * Our basic "send data to server" function. Should be called with srv_mutex
171 * held. The caller is responsible for handling the results.
172 */
d6e04ae6 173static int
3ab3f2a1
AV
174smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
175 size_t *sent)
1da177e4
LT
176{
177 int rc = 0;
3ab3f2a1 178 int retries = 0;
edf1ae40 179 struct socket *ssocket = server->ssocket;
50c2f753 180
6f49f46b
JL
181 *sent = 0;
182
3ab3f2a1
AV
183 smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
184 smb_msg->msg_namelen = sizeof(struct sockaddr);
185 smb_msg->msg_control = NULL;
186 smb_msg->msg_controllen = 0;
0496e02d 187 if (server->noblocksnd)
3ab3f2a1 188 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
edf1ae40 189 else
3ab3f2a1 190 smb_msg->msg_flags = MSG_NOSIGNAL;
1da177e4 191
3ab3f2a1 192 while (msg_data_left(smb_msg)) {
6f49f46b
JL
193 /*
194 * If blocking send, we try 3 times, since each can block
195 * for 5 seconds. For nonblocking sends we have to try more
196 * times but wait increasing amounts of time to allow the
197 * socket to clear. The overall time we wait in either
198 * case to send on the socket is about 15 seconds.
199 * Similarly we wait for 15 seconds for a response from
200 * the server in SendReceive[2] for the server to send
201 * a response back for most types of requests (except
202 * SMB Write past end of file which can be slow, and
203 * blocking lock operations). NFS waits slightly longer
204 * than CIFS, but this can make it take longer for
205 * nonresponsive servers to be detected and 15 seconds
206 * is more than enough time for modern networks to
207 * send a packet. In most cases if we fail to send
208 * after the retries we will kill the socket and
209 * reconnect which may clear the network problem.
210 */
3ab3f2a1 211 rc = sock_sendmsg(ssocket, smb_msg);
ce6c44e4 212 if (rc == -EAGAIN) {
3ab3f2a1
AV
213 retries++;
214 if (retries >= 14 ||
215 (!server->noblocksnd && (retries > 2))) {
f96637be
JP
216 cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
217 ssocket);
3ab3f2a1 218 return -EAGAIN;
1da177e4 219 }
3ab3f2a1 220 msleep(1 << retries);
1da177e4
LT
221 continue;
222 }
6f49f46b 223
79a58d1f 224 if (rc < 0)
3ab3f2a1 225 return rc;
6f49f46b 226
79a58d1f 227 if (rc == 0) {
3e84469d
SF
228 /* should never happen, letting socket clear before
229 retrying is our only obvious option here */
f96637be 230 cifs_dbg(VFS, "tcp sent no data\n");
3e84469d
SF
231 msleep(500);
232 continue;
d6e04ae6 233 }
6f49f46b 234
3ab3f2a1
AV
235 /* send was at least partially successful */
236 *sent += rc;
237 retries = 0; /* in case we get ENOSPC on the next send */
1da177e4 238 }
3ab3f2a1 239 return 0;
97bc00b3
JL
240}
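
/*
 * Rough sketch of the retry budget implied above (illustrative, not part
 * of the original source): on a nonblocking socket a send that keeps
 * returning -EAGAIN is retried after msleep(1 << retries) for retries
 * 1..13 and abandoned at retries >= 14, i.e. roughly 2 + 4 + ... + 8192 ms,
 * which is about the 15 seconds mentioned in the comment above. On a
 * blocking socket we give up after the third consecutive -EAGAIN, each
 * attempt itself being able to block for around 5 seconds.
 */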
241
35e2cc1b 242unsigned long
81f39f95 243smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
a26054d1
JL
244{
245 unsigned int i;
35e2cc1b
PA
246 struct kvec *iov;
247 int nvec;
a26054d1
JL
248 unsigned long buflen = 0;
249
81f39f95
RS
250 if (server->vals->header_preamble_size == 0 &&
251 rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
35e2cc1b
PA
252 iov = &rqst->rq_iov[1];
253 nvec = rqst->rq_nvec - 1;
254 } else {
255 iov = rqst->rq_iov;
256 nvec = rqst->rq_nvec;
257 }
258
a26054d1 259 /* total up iov array first */
35e2cc1b 260 for (i = 0; i < nvec; i++)
a26054d1
JL
261 buflen += iov[i].iov_len;
262
c06a0f2d
LL
263 /*
264 * Add in the page array if there is one. The caller needs to make
265 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
266 * multiple pages ends at page boundary, rq_tailsz needs to be set to
267 * PAGE_SIZE.
268 */
a26054d1 269 if (rqst->rq_npages) {
c06a0f2d
LL
270 if (rqst->rq_npages == 1)
271 buflen += rqst->rq_tailsz;
272 else {
273 /*
274 * If there is more than one page, calculate the
275 * buffer length based on rq_offset and rq_tailsz
276 */
277 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
278 rqst->rq_offset;
279 buflen += rqst->rq_tailsz;
280 }
a26054d1
JL
281 }
282
283 return buflen;
284}
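
/*
 * Worked example (illustrative only, not part of the original source;
 * the numbers are hypothetical): an SMB2 request with rq_nvec == 2 where
 * rq_iov[0] is the 4-byte RFC1002 length and rq_iov[1].iov_len == 120,
 * plus rq_npages == 3, rq_pagesz == 4096, rq_offset == 512 and
 * rq_tailsz == 1024 yields
 *
 *	120 + (4096 * 2 - 512) + 1024 = 8824 bytes
 *
 * i.e. the length that goes into the RFC1002 marker, excluding the
 * 4-byte marker itself.
 */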
285
6f49f46b 286static int
07cd952f
RS
287__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
288 struct smb_rqst *rqst)
6f49f46b 289{
07cd952f
RS
290 int rc = 0;
291 struct kvec *iov;
292 int n_vec;
293 unsigned int send_length = 0;
294 unsigned int i, j;
b30c74c7 295 sigset_t mask, oldmask;
3ab3f2a1 296 size_t total_len = 0, sent, size;
b8eed283 297 struct socket *ssocket = server->ssocket;
3ab3f2a1 298 struct msghdr smb_msg;
b8eed283 299 int val = 1;
c713c877
RS
300 __be32 rfc1002_marker;
301
9762c2d0 302 if (cifs_rdma_enabled(server) && server->smbd_conn) {
81f39f95 303 rc = smbd_send(server, rqst);
9762c2d0
LL
304 goto smbd_done;
305 }
afc18a6f 306
ea702b80 307 if (ssocket == NULL)
afc18a6f 308 return -EAGAIN;
ea702b80 309
b30c74c7
PS
310 if (signal_pending(current)) {
311 cifs_dbg(FYI, "signal is pending before sending any data\n");
312 return -EINTR;
313 }
314
b8eed283
JL
315 /* cork the socket */
316 kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
317 (char *)&val, sizeof(val));
318
07cd952f 319 for (j = 0; j < num_rqst; j++)
81f39f95 320 send_length += smb_rqst_len(server, &rqst[j]);
07cd952f
RS
321 rfc1002_marker = cpu_to_be32(send_length);
322
b30c74c7
PS
323 /*
324 * We should not allow signals to interrupt the network send because
325 * any partial send will cause session reconnects, thus increasing
326 * latency of system calls and overloading the server with unnecessary
327 * requests.
328 */
329
330 sigfillset(&mask);
331 sigprocmask(SIG_BLOCK, &mask, &oldmask);
332
c713c877
RS
333 /* Generate a rfc1002 marker for SMB2+ */
334 if (server->vals->header_preamble_size == 0) {
335 struct kvec hiov = {
336 .iov_base = &rfc1002_marker,
337 .iov_len = 4
338 };
aa563d7b 339 iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
c713c877
RS
340 rc = smb_send_kvec(server, &smb_msg, &sent);
341 if (rc < 0)
b30c74c7 342 goto unmask;
c713c877
RS
343
344 total_len += sent;
345 send_length += 4;
346 }
347
662bf5bc
PA
348 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
349
07cd952f
RS
350 for (j = 0; j < num_rqst; j++) {
351 iov = rqst[j].rq_iov;
352 n_vec = rqst[j].rq_nvec;
3ab3f2a1 353
07cd952f 354 size = 0;
662bf5bc
PA
355 for (i = 0; i < n_vec; i++) {
356 dump_smb(iov[i].iov_base, iov[i].iov_len);
07cd952f 357 size += iov[i].iov_len;
662bf5bc 358 }
97bc00b3 359
aa563d7b 360 iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
97bc00b3 361
3ab3f2a1 362 rc = smb_send_kvec(server, &smb_msg, &sent);
97bc00b3 363 if (rc < 0)
b30c74c7 364 goto unmask;
97bc00b3
JL
365
366 total_len += sent;
07cd952f
RS
367
368 /* now walk the page array and send each page in it */
369 for (i = 0; i < rqst[j].rq_npages; i++) {
370 struct bio_vec bvec;
371
372 bvec.bv_page = rqst[j].rq_pages[i];
373 rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
374 &bvec.bv_offset);
375
aa563d7b 376 iov_iter_bvec(&smb_msg.msg_iter, WRITE,
07cd952f
RS
377 &bvec, 1, bvec.bv_len);
378 rc = smb_send_kvec(server, &smb_msg, &sent);
379 if (rc < 0)
380 break;
381
382 total_len += sent;
383 }
97bc00b3 384 }
1da177e4 385
b30c74c7
PS
386unmask:
387 sigprocmask(SIG_SETMASK, &oldmask, NULL);
388
389 /*
390 * If a signal is pending but we have already sent the whole packet to
391 * the server, we need to return success status to allow a corresponding
392 * mid entry to be kept in the pending requests queue, thus allowing
393 * the client to handle responses from the server.
394 *
395 * If only part of the packet has been sent, there is no need to hide
396 * the interrupt because the session will be reconnected anyway, so there
397 * won't be any response from the server to handle.
398 */
399
400 if (signal_pending(current) && (total_len != send_length)) {
401 cifs_dbg(FYI, "signal is pending after attempt to send\n");
402 rc = -EINTR;
403 }
404
b8eed283
JL
405 /* uncork it */
406 val = 0;
407 kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
408 (char *)&val, sizeof(val));
409
c713c877 410 if ((total_len > 0) && (total_len != send_length)) {
f96637be 411 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
c713c877 412 send_length, total_len);
6f49f46b
JL
413 /*
414 * If we have only sent part of an SMB then the next SMB could
415 * be taken as the remainder of this one. We need to kill the
416 * socket so the server throws away the partial SMB
417 */
edf1ae40 418 server->tcpStatus = CifsNeedReconnect;
bf1fdeb7
SF
419 trace_smb3_partial_send_reconnect(server->CurrentMid,
420 server->hostname);
edf1ae40 421 }
9762c2d0 422smbd_done:
d804d41d 423 if (rc < 0 && rc != -EINTR)
f96637be
JP
424 cifs_dbg(VFS, "Error %d sending data on socket to server\n",
425 rc);
ee13919c 426 else if (rc > 0)
1da177e4 427 rc = 0;
1da177e4
LT
428
429 return rc;
430}
431
6f49f46b 432static int
1f3a8f5f
RS
433smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
434 struct smb_rqst *rqst, int flags)
6f49f46b 435{
b2c96de7
RS
436 struct kvec iov;
437 struct smb2_transform_hdr tr_hdr;
438 struct smb_rqst cur_rqst[MAX_COMPOUND];
7fb8986e
PS
439 int rc;
440
441 if (!(flags & CIFS_TRANSFORM_REQ))
1f3a8f5f
RS
442 return __smb_send_rqst(server, num_rqst, rqst);
443
444 if (num_rqst > MAX_COMPOUND - 1)
445 return -ENOMEM;
7fb8986e 446
b2c96de7
RS
447 memset(&cur_rqst[0], 0, sizeof(cur_rqst));
448 memset(&iov, 0, sizeof(iov));
449 memset(&tr_hdr, 0, sizeof(tr_hdr));
450
451 iov.iov_base = &tr_hdr;
452 iov.iov_len = sizeof(tr_hdr);
453 cur_rqst[0].rq_iov = &iov;
454 cur_rqst[0].rq_nvec = 1;
455
456 if (!server->ops->init_transform_rq) {
457 cifs_dbg(VFS, "Encryption requested but transform callback "
458 "is missing\n");
7fb8986e
PS
459 return -EIO;
460 }
6f49f46b 461
1f3a8f5f
RS
462 rc = server->ops->init_transform_rq(server, num_rqst + 1,
463 &cur_rqst[0], rqst);
7fb8986e
PS
464 if (rc)
465 return rc;
466
1f3a8f5f
RS
467 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
468 smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
7fb8986e 469 return rc;
6f49f46b
JL
470}
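
/*
 * Illustrative sketch of the layout built above for an encrypted send
 * (not part of the original source): with num_rqst == 2, cur_rqst ends
 * up as
 *
 *	cur_rqst[0]	one kvec pointing at the local smb2_transform_hdr
 *	cur_rqst[1..2]	transformed copies of rqst[0..1], filled in by the
 *			server->ops->init_transform_rq callback
 *
 * __smb_send_rqst() then transmits all three as a single compound, and
 * smb3_free_compound_rqst() releases only the transformed copies.
 */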
471
0496e02d
JL
472int
473smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
474 unsigned int smb_buf_length)
475{
738f9de5 476 struct kvec iov[2];
7fb8986e
PS
477 struct smb_rqst rqst = { .rq_iov = iov,
478 .rq_nvec = 2 };
0496e02d 479
738f9de5
PS
480 iov[0].iov_base = smb_buffer;
481 iov[0].iov_len = 4;
482 iov[1].iov_base = (char *)smb_buffer + 4;
483 iov[1].iov_len = smb_buf_length;
0496e02d 484
07cd952f 485 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
486}
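
/*
 * Note (illustrative, not part of the original source): smb_buffer is a
 * classic SMB1-style frame whose first 4 bytes already hold the RFC1001
 * length marker, so the two kvecs above simply split the same contiguous
 * buffer into "4-byte marker" + "smb_buf_length bytes of header and
 * payload", matching the convention used by SendReceive2() where
 * rq_iov[0] carries the length field.
 */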
487
fc40f9cf 488static int
b227d215
RS
489wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
490 const int flags, unsigned int *instance)
1da177e4 491{
5bc59498 492 int rc;
4230cff8
RS
493 int *credits;
494 int optype;
495
496 optype = flags & CIFS_OP_MASK;
5bc59498 497
34f4deb7
PS
498 *instance = 0;
499
4230cff8
RS
500 credits = server->ops->get_credits_field(server, optype);
501 /* Since an echo is already inflight, no need to wait to send another */
502 if (*credits <= 0 && optype == CIFS_ECHO_OP)
503 return -EAGAIN;
504
fc40f9cf 505 spin_lock(&server->req_lock);
4230cff8 506 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP) {
1da177e4 507 /* oplock breaks must not be held up */
fc40f9cf 508 server->in_flight++;
bc205ed1 509 *credits -= 1;
34f4deb7 510 *instance = server->reconnect_instance;
fc40f9cf 511 spin_unlock(&server->req_lock);
27a97a61
VL
512 return 0;
513 }
514
27a97a61 515 while (1) {
b227d215 516 if (*credits < num_credits) {
fc40f9cf 517 spin_unlock(&server->req_lock);
789e6661 518 cifs_num_waiters_inc(server);
5bc59498 519 rc = wait_event_killable(server->request_q,
b227d215 520 has_credits(server, credits, num_credits));
789e6661 521 cifs_num_waiters_dec(server);
5bc59498
PS
522 if (rc)
523 return rc;
fc40f9cf 524 spin_lock(&server->req_lock);
27a97a61 525 } else {
c5797a94 526 if (server->tcpStatus == CifsExiting) {
fc40f9cf 527 spin_unlock(&server->req_lock);
27a97a61 528 return -ENOENT;
1da177e4 529 }
27a97a61 530
16b34aa4
RS
531 /*
532 * For normal commands, reserve the last MAX_COMPOUND
533 * credits for compound requests.
534 * Otherwise these compounds could be permanently
535 * starved for credits by single-credit requests.
536 *
537 * To prevent spinning the CPU, block this thread until
538 * there are >MAX_COMPOUND credits available.
539 * But only do this if we already have a lot of
540 * credits in flight to avoid triggering this check
541 * for servers that are slow to hand out credits on
542 * new sessions.
543 */
544 if (!optype && num_credits == 1 &&
545 server->in_flight > 2 * MAX_COMPOUND &&
546 *credits <= MAX_COMPOUND) {
547 spin_unlock(&server->req_lock);
548 cifs_num_waiters_inc(server);
549 rc = wait_event_killable(server->request_q,
550 has_credits(server, credits,
551 MAX_COMPOUND + 1));
552 cifs_num_waiters_dec(server);
553 if (rc)
554 return rc;
555 spin_lock(&server->req_lock);
556 continue;
557 }
558
2d86dbc9
PS
559 /*
560 * Cannot count locking commands against the total
561 * as they are allowed to block on the server.
562 */
27a97a61
VL
563
564 /* update # of requests on the wire to server */
4230cff8 565 if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
b227d215
RS
566 *credits -= num_credits;
567 server->in_flight += num_credits;
34f4deb7 568 *instance = server->reconnect_instance;
2d86dbc9 569 }
fc40f9cf 570 spin_unlock(&server->req_lock);
27a97a61 571 break;
1da177e4
LT
572 }
573 }
7ee1af76
JA
574 return 0;
575}
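
/*
 * Illustrative example of the anti-starvation gate above (not part of
 * the original source; the numbers only serve the example):
 *
 *	in_flight = 2 * MAX_COMPOUND + 1	many requests on the wire
 *	*credits  = MAX_COMPOUND		pool nearly exhausted
 *
 * A regular single-credit request (optype == 0, num_credits == 1) then
 * sleeps in wait_event_killable() until has_credits() sees at least
 * MAX_COMPOUND + 1 credits, which keeps MAX_COMPOUND credits available
 * for a later compound chain. With few requests in flight
 * (in_flight <= 2 * MAX_COMPOUND) the gate is skipped, so servers that
 * are slow to grant credits on new sessions are not blocked by it.
 */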
1da177e4 576
bc205ed1 577static int
480b1cb9
RS
578wait_for_free_request(struct TCP_Server_Info *server, const int flags,
579 unsigned int *instance)
bc205ed1 580{
b227d215 581 return wait_for_free_credits(server, 1, flags, instance);
bc205ed1
PS
582}
583
cb7e9eab
PS
584int
585cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
335b7b62 586 unsigned int *num, struct cifs_credits *credits)
cb7e9eab
PS
587{
588 *num = size;
335b7b62
PS
589 credits->value = 0;
590 credits->instance = server->reconnect_instance;
cb7e9eab
PS
591 return 0;
592}
593
96daf2b0 594static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
7ee1af76
JA
595 struct mid_q_entry **ppmidQ)
596{
1da177e4 597 if (ses->server->tcpStatus == CifsExiting) {
7ee1af76 598 return -ENOENT;
8fbbd365
VL
599 }
600
601 if (ses->server->tcpStatus == CifsNeedReconnect) {
f96637be 602 cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
7ee1af76 603 return -EAGAIN;
8fbbd365
VL
604 }
605
7f48558e 606 if (ses->status == CifsNew) {
79a58d1f 607 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
ad7a2926 608 (in_buf->Command != SMB_COM_NEGOTIATE))
7ee1af76 609 return -EAGAIN;
ad7a2926 610 /* else ok - we are setting up session */
1da177e4 611 }
7f48558e
SP
612
613 if (ses->status == CifsExiting) {
614 /* check if SMB session is bad because we are setting it up */
615 if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
616 return -EAGAIN;
617 /* else ok - we are shutting down session */
618 }
619
24b9b06b 620 *ppmidQ = AllocMidQEntry(in_buf, ses->server);
26f57364 621 if (*ppmidQ == NULL)
7ee1af76 622 return -ENOMEM;
ddc8cf8f
JL
623 spin_lock(&GlobalMid_Lock);
624 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
625 spin_unlock(&GlobalMid_Lock);
7ee1af76
JA
626 return 0;
627}
628
0ade640e
JL
629static int
630wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
7ee1af76 631{
0ade640e 632 int error;
7ee1af76 633
5853cc2a 634 error = wait_event_freezekillable_unsafe(server->response_q,
7c9421e1 635 midQ->mid_state != MID_REQUEST_SUBMITTED);
0ade640e
JL
636 if (error < 0)
637 return -ERESTARTSYS;
7ee1af76 638
0ade640e 639 return 0;
7ee1af76
JA
640}
641
fec344e3
JL
642struct mid_q_entry *
643cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
792af7b0
PS
644{
645 int rc;
fec344e3 646 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
647 struct mid_q_entry *mid;
648
738f9de5
PS
649 if (rqst->rq_iov[0].iov_len != 4 ||
650 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
651 return ERR_PTR(-EIO);
652
792af7b0 653 /* enable signing if server requires it */
38d77c50 654 if (server->sign)
792af7b0
PS
655 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
656
657 mid = AllocMidQEntry(hdr, server);
658 if (mid == NULL)
fec344e3 659 return ERR_PTR(-ENOMEM);
792af7b0 660
fec344e3 661 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
ffc61ccb
SP
662 if (rc) {
663 DeleteMidQEntry(mid);
fec344e3 664 return ERR_PTR(rc);
ffc61ccb
SP
665 }
666
fec344e3 667 return mid;
792af7b0 668}
133672ef 669
a6827c18
JL
670/*
671 * Send a SMB request and set the callback function in the mid to handle
672 * the result. Caller is responsible for dealing with timeouts.
673 */
674int
fec344e3 675cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
9b7c18a2 676 mid_receive_t *receive, mid_callback_t *callback,
3349c3a7
PS
677 mid_handle_t *handle, void *cbdata, const int flags,
678 const struct cifs_credits *exist_credits)
a6827c18 679{
480b1cb9 680 int rc;
a6827c18 681 struct mid_q_entry *mid;
335b7b62 682 struct cifs_credits credits = { .value = 0, .instance = 0 };
34f4deb7 683 unsigned int instance;
480b1cb9 684 int optype;
a6827c18 685
a891f0f8
PS
686 optype = flags & CIFS_OP_MASK;
687
cb7e9eab 688 if ((flags & CIFS_HAS_CREDITS) == 0) {
480b1cb9 689 rc = wait_for_free_request(server, flags, &instance);
cb7e9eab
PS
690 if (rc)
691 return rc;
335b7b62 692 credits.value = 1;
34f4deb7 693 credits.instance = instance;
3349c3a7
PS
694 } else
695 instance = exist_credits->instance;
a6827c18
JL
696
697 mutex_lock(&server->srv_mutex);
3349c3a7
PS
698
699 /*
700 * We can't use credits obtained from the previous session to send this
701 * request. Check if there were reconnects after we obtained credits and
702 * return -EAGAIN in such cases to let callers handle it.
703 */
704 if (instance != server->reconnect_instance) {
705 mutex_unlock(&server->srv_mutex);
706 add_credits_and_wake_if(server, &credits, optype);
707 return -EAGAIN;
708 }
709
fec344e3
JL
710 mid = server->ops->setup_async_request(server, rqst);
711 if (IS_ERR(mid)) {
a6827c18 712 mutex_unlock(&server->srv_mutex);
335b7b62 713 add_credits_and_wake_if(server, &credits, optype);
fec344e3 714 return PTR_ERR(mid);
a6827c18
JL
715 }
716
44d22d84 717 mid->receive = receive;
a6827c18
JL
718 mid->callback = callback;
719 mid->callback_data = cbdata;
9b7c18a2 720 mid->handle = handle;
7c9421e1 721 mid->mid_state = MID_REQUEST_SUBMITTED;
789e6661 722
ffc61ccb
SP
723 /* put it on the pending_mid_q */
724 spin_lock(&GlobalMid_Lock);
725 list_add_tail(&mid->qhead, &server->pending_mid_q);
726 spin_unlock(&GlobalMid_Lock);
727
93d2cb6c
LL
728 /*
729 * Need to store the time in mid before calling I/O. For call_async,
730 * I/O response may come back and free the mid entry on another thread.
731 */
732 cifs_save_when_sent(mid);
789e6661 733 cifs_in_send_inc(server);
1f3a8f5f 734 rc = smb_send_rqst(server, 1, rqst, flags);
789e6661 735 cifs_in_send_dec(server);
ad313cb8 736
820962dc 737 if (rc < 0) {
c781af7e 738 revert_current_mid(server, mid->credits);
ad313cb8 739 server->sequence_number -= 2;
820962dc
RV
740 cifs_delete_mid(mid);
741 }
742
a6827c18 743 mutex_unlock(&server->srv_mutex);
789e6661 744
ffc61ccb
SP
745 if (rc == 0)
746 return 0;
a6827c18 747
335b7b62 748 add_credits_and_wake_if(server, &credits, optype);
a6827c18
JL
749 return rc;
750}
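
/*
 * Usage sketch (illustrative, not part of the original source): async
 * callers such as the echo and readv paths submit a fully built smb_rqst
 * here together with a completion callback. The demultiplex thread later
 * invokes mid->receive (if set) to pull the response off the socket and
 * then mid->callback once the response, or a reconnect, arrives; the
 * callback runs without the submitter waiting and is responsible for
 * consuming the response and releasing the mid.
 */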
751
133672ef
SF
752/*
753 *
754 * Send an SMB Request. No response info (other than return code)
755 * needs to be parsed.
756 *
757 * flags indicate the type of request buffer and how long to wait
758 * and whether to log NT STATUS code (error) before mapping it to POSIX error
759 *
760 */
761int
96daf2b0 762SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
792af7b0 763 char *in_buf, int flags)
133672ef
SF
764{
765 int rc;
766 struct kvec iov[1];
da502f7d 767 struct kvec rsp_iov;
133672ef
SF
768 int resp_buf_type;
769
792af7b0
PS
770 iov[0].iov_base = in_buf;
771 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
133672ef 772 flags |= CIFS_NO_RESP;
da502f7d 773 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
f96637be 774 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
90c81e0b 775
133672ef
SF
776 return rc;
777}
778
053d5034 779static int
3c1105df 780cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
053d5034
JL
781{
782 int rc = 0;
783
f96637be
JP
784 cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
785 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
053d5034 786
74dd92a8 787 spin_lock(&GlobalMid_Lock);
7c9421e1 788 switch (mid->mid_state) {
74dd92a8 789 case MID_RESPONSE_RECEIVED:
053d5034
JL
790 spin_unlock(&GlobalMid_Lock);
791 return rc;
74dd92a8
JL
792 case MID_RETRY_NEEDED:
793 rc = -EAGAIN;
794 break;
71823baf
JL
795 case MID_RESPONSE_MALFORMED:
796 rc = -EIO;
797 break;
3c1105df
JL
798 case MID_SHUTDOWN:
799 rc = -EHOSTDOWN;
800 break;
74dd92a8 801 default:
3c1105df 802 list_del_init(&mid->qhead);
f96637be
JP
803 cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
804 __func__, mid->mid, mid->mid_state);
74dd92a8 805 rc = -EIO;
053d5034
JL
806 }
807 spin_unlock(&GlobalMid_Lock);
808
2b84a36c 809 DeleteMidQEntry(mid);
053d5034
JL
810 return rc;
811}
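
/*
 * Summary of the mapping implemented above (restated for reference, not
 * part of the original source):
 *
 *	MID_RESPONSE_RECEIVED	-> 0, the mid is handed back to the caller
 *	MID_RETRY_NEEDED	-> -EAGAIN
 *	MID_RESPONSE_MALFORMED	-> -EIO
 *	MID_SHUTDOWN		-> -EHOSTDOWN
 *	any other state		-> -EIO, after unlinking the mid from
 *				   pending_mid_q in the default branch
 *
 * For every state except MID_RESPONSE_RECEIVED the mid is then freed via
 * DeleteMidQEntry().
 */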
812
121b046a 813static inline int
fb2036d8
PS
814send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
815 struct mid_q_entry *mid)
76dcc26f 816{
121b046a 817 return server->ops->send_cancel ?
fb2036d8 818 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
819}
820
2c8f981d
JL
821int
822cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
823 bool log_error)
824{
792af7b0 825 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
826a95e4
JL
826
827 dump_smb(mid->resp_buf, min_t(u32, 92, len));
2c8f981d
JL
828
829 /* convert the length into a more usable form */
38d77c50 830 if (server->sign) {
738f9de5 831 struct kvec iov[2];
985e4ff0 832 int rc = 0;
738f9de5
PS
833 struct smb_rqst rqst = { .rq_iov = iov,
834 .rq_nvec = 2 };
826a95e4 835
738f9de5
PS
836 iov[0].iov_base = mid->resp_buf;
837 iov[0].iov_len = 4;
838 iov[1].iov_base = (char *)mid->resp_buf + 4;
839 iov[1].iov_len = len - 4;
2c8f981d 840 /* FIXME: add code to kill session */
bf5ea0e2 841 rc = cifs_verify_signature(&rqst, server,
0124cc45 842 mid->sequence_number);
985e4ff0 843 if (rc)
f96637be
JP
844 cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
845 rc);
2c8f981d
JL
846 }
847
848 /* BB special case reconnect tid and uid here? */
849 return map_smb_to_linux_error(mid->resp_buf, log_error);
850}
851
fec344e3
JL
852struct mid_q_entry *
853cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
792af7b0
PS
854{
855 int rc;
fec344e3 856 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
857 struct mid_q_entry *mid;
858
738f9de5
PS
859 if (rqst->rq_iov[0].iov_len != 4 ||
860 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
861 return ERR_PTR(-EIO);
862
792af7b0
PS
863 rc = allocate_mid(ses, hdr, &mid);
864 if (rc)
fec344e3
JL
865 return ERR_PTR(rc);
866 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
867 if (rc) {
3c1bf7e4 868 cifs_delete_mid(mid);
fec344e3
JL
869 return ERR_PTR(rc);
870 }
871 return mid;
792af7b0
PS
872}
873
4e34feb5 874static void
ee258d79 875cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
876{
877 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
878 struct cifs_credits credits;
879
880 credits.value = server->ops->get_credits(mid);
881 credits.instance = server->reconnect_instance;
8a26f0f7 882
34f4deb7 883 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
884}
885
ee258d79
PS
886static void
887cifs_compound_last_callback(struct mid_q_entry *mid)
888{
889 cifs_compound_callback(mid);
890 cifs_wake_up_task(mid);
891}
892
893static void
894cifs_cancelled_callback(struct mid_q_entry *mid)
895{
896 cifs_compound_callback(mid);
897 DeleteMidQEntry(mid);
898}
899
b8f57ee8 900int
e0bba0b8
RS
901compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
902 const int flags, const int num_rqst, struct smb_rqst *rqst,
903 int *resp_buf_type, struct kvec *resp_iov)
7ee1af76 904{
480b1cb9 905 int i, j, optype, rc = 0;
e0bba0b8 906 struct mid_q_entry *midQ[MAX_COMPOUND];
8544f4aa 907 bool cancelled_mid[MAX_COMPOUND] = {false};
34f4deb7
PS
908 struct cifs_credits credits[MAX_COMPOUND] = {
909 { .value = 0, .instance = 0 }
910 };
911 unsigned int instance;
97ea4998 912 unsigned int first_instance = 0;
738f9de5 913 char *buf;
50c2f753 914
a891f0f8 915 optype = flags & CIFS_OP_MASK;
133672ef 916
e0bba0b8
RS
917 for (i = 0; i < num_rqst; i++)
918 resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */
7ee1af76
JA
919
920 if ((ses == NULL) || (ses->server == NULL)) {
f96637be 921 cifs_dbg(VFS, "Null session\n");
7ee1af76
JA
922 return -EIO;
923 }
924
da502f7d 925 if (ses->server->tcpStatus == CifsExiting)
7ee1af76 926 return -ENOENT;
7ee1af76 927
7091bcab
PS
928 spin_lock(&ses->server->req_lock);
929 if (ses->server->credits < num_rqst) {
930 /*
931 * Return immediately if there are not enough requests in flight to
932 * bring back the missing credits, since we would likely be stuck
933 */
934 if (ses->server->in_flight < num_rqst - ses->server->credits) {
935 spin_unlock(&ses->server->req_lock);
936 return -ENOTSUPP;
937 }
938 } else {
939 /* enough credits to send the whole compounded request */
940 ses->server->credits -= num_rqst;
941 ses->server->in_flight += num_rqst;
942 first_instance = ses->server->reconnect_instance;
943 }
944 spin_unlock(&ses->server->req_lock);
945
946 if (first_instance) {
947 cifs_dbg(FYI, "Acquired %d credits at once\n", num_rqst);
948 for (i = 0; i < num_rqst; i++) {
949 credits[i].value = 1;
950 credits[i].instance = first_instance;
951 }
952 goto setup_rqsts;
953 }
954
792af7b0 955 /*
7091bcab
PS
956 * There are not enough credits to send the whole compound request but
957 * there are requests in flight that may bring credits from the server.
958 * This approach still leaves open the possibility of being stuck
959 * waiting for credits if the server doesn't grant credits to the
960 * outstanding requests. This should be fixed by returning immediately
961 * and letting a caller fall back to sequential commands instead.
8544f4aa 962 * Ensure we obtain 1 credit per request in the compound chain.
792af7b0 963 */
8544f4aa 964 for (i = 0; i < num_rqst; i++) {
480b1cb9 965 rc = wait_for_free_request(ses->server, flags, &instance);
97ea4998
PS
966
967 if (rc == 0) {
968 credits[i].value = 1;
969 credits[i].instance = instance;
970 /*
971 * All parts of the compound chain must get credits from
972 * the same session, otherwise we may end up using more
973 * credits than the server granted. If there were
974 * reconnects in between, return -EAGAIN and let callers
975 * handle it.
976 */
977 if (i == 0)
978 first_instance = instance;
979 else if (first_instance != instance) {
980 i++;
981 rc = -EAGAIN;
982 }
983 }
984
8544f4aa
PS
985 if (rc) {
986 /*
987 * We haven't sent an SMB packet to the server yet but
988 * we already obtained credits for i requests in the
989 * compound chain - need to return those credits back
990 * for future use. Note that we need to call add_credits
991 * multiple times to match the way we obtained credits
992 * in the first place and to account for in flight
993 * requests correctly.
994 */
995 for (j = 0; j < i; j++)
34f4deb7 996 add_credits(ses->server, &credits[j], optype);
8544f4aa
PS
997 return rc;
998 }
8544f4aa 999 }
7ee1af76 1000
7091bcab 1001setup_rqsts:
792af7b0
PS
1002 /*
1003 * Make sure that we sign in the same order that we send on this socket
1004 * and avoid races inside tcp sendmsg code that could cause corruption
1005 * of smb data.
1006 */
7ee1af76 1007
72ca545b 1008 mutex_lock(&ses->server->srv_mutex);
7ee1af76 1009
97ea4998
PS
1010 /*
1011 * All the parts of the compound chain obtained credits from the
1012 * same session (see the appropriate checks above). At the same time
1013 * there might have been reconnects after those checks but before we
1014 * acquired the srv_mutex. We cannot use credits obtained from the previous
1015 * session to send this request. Check if there were reconnects after
1016 * we obtained credits and return -EAGAIN in such cases to let callers
1017 * handle it.
1018 */
1019 if (first_instance != ses->server->reconnect_instance) {
1020 mutex_unlock(&ses->server->srv_mutex);
1021 for (j = 0; j < num_rqst; j++)
1022 add_credits(ses->server, &credits[j], optype);
1023 return -EAGAIN;
1024 }
1025
e0bba0b8
RS
1026 for (i = 0; i < num_rqst; i++) {
1027 midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
1028 if (IS_ERR(midQ[i])) {
c781af7e 1029 revert_current_mid(ses->server, i);
e0bba0b8
RS
1030 for (j = 0; j < i; j++)
1031 cifs_delete_mid(midQ[j]);
1032 mutex_unlock(&ses->server->srv_mutex);
8544f4aa 1033
e0bba0b8 1034 /* Update # of requests on wire to server */
8544f4aa 1035 for (j = 0; j < num_rqst; j++)
34f4deb7 1036 add_credits(ses->server, &credits[j], optype);
e0bba0b8
RS
1037 return PTR_ERR(midQ[i]);
1038 }
1039
1040 midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
8a26f0f7 1041 midQ[i]->optype = optype;
4e34feb5 1042 /*
ee258d79
PS
1043 * Invoke callback for every part of the compound chain
1044 * to calculate credits properly. Wake up this thread only when
1045 * the last element is received.
4e34feb5
RS
1046 */
1047 if (i < num_rqst - 1)
ee258d79
PS
1048 midQ[i]->callback = cifs_compound_callback;
1049 else
1050 midQ[i]->callback = cifs_compound_last_callback;
1da177e4 1051 }
789e6661 1052 cifs_in_send_inc(ses->server);
e0bba0b8 1053 rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
789e6661 1054 cifs_in_send_dec(ses->server);
e0bba0b8
RS
1055
1056 for (i = 0; i < num_rqst; i++)
1057 cifs_save_when_sent(midQ[i]);
7ee1af76 1058
c781af7e
PS
1059 if (rc < 0) {
1060 revert_current_mid(ses->server, num_rqst);
ad313cb8 1061 ses->server->sequence_number -= 2;
c781af7e 1062 }
e0bba0b8 1063
72ca545b 1064 mutex_unlock(&ses->server->srv_mutex);
7ee1af76 1065
ee258d79
PS
1066 if (rc < 0) {
1067 /* Sending failed for some reason - return credits back */
1068 for (i = 0; i < num_rqst; i++)
34f4deb7 1069 add_credits(ses->server, &credits[i], optype);
cb5c2e63 1070 goto out;
ee258d79
PS
1071 }
1072
1073 /*
1074 * At this point the request is passed to the network stack - we assume
1075 * that any credits taken from the server structure on the client have
1076 * been spent and we can't return them back. Once we receive responses
1077 * we will collect credits granted by the server in the mid callbacks
1078 * and add those credits to the server structure.
1079 */
e0bba0b8 1080
cb5c2e63
RS
1081 /*
1082 * Compounding is never used during session establish.
1083 */
1084 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
1085 smb311_update_preauth_hash(ses, rqst[0].rq_iov,
1086 rqst[0].rq_nvec);
e0bba0b8 1087
480b1cb9 1088 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP)
cb5c2e63 1089 goto out;
e0bba0b8 1090
cb5c2e63 1091 for (i = 0; i < num_rqst; i++) {
e0bba0b8 1092 rc = wait_for_response(ses->server, midQ[i]);
8a26f0f7
PS
1093 if (rc != 0)
1094 break;
1095 }
1096 if (rc != 0) {
1097 for (; i < num_rqst; i++) {
43de1db3
SF
1098 cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
1099 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
e0bba0b8
RS
1100 send_cancel(ses->server, &rqst[i], midQ[i]);
1101 spin_lock(&GlobalMid_Lock);
1102 if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
1103 midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
8a26f0f7 1104 midQ[i]->callback = cifs_cancelled_callback;
8544f4aa 1105 cancelled_mid[i] = true;
34f4deb7 1106 credits[i].value = 0;
e0bba0b8 1107 }
1be912dd 1108 spin_unlock(&GlobalMid_Lock);
e0bba0b8 1109 }
cb5c2e63
RS
1110 }
1111
cb5c2e63
RS
1112 for (i = 0; i < num_rqst; i++) {
1113 if (rc < 0)
1114 goto out;
e0bba0b8
RS
1115
1116 rc = cifs_sync_mid_result(midQ[i], ses->server);
1117 if (rc != 0) {
8544f4aa
PS
1118 /* mark this mid as cancelled to not free it below */
1119 cancelled_mid[i] = true;
1120 goto out;
1be912dd 1121 }
2b2bdfba 1122
e0bba0b8
RS
1123 if (!midQ[i]->resp_buf ||
1124 midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
1125 rc = -EIO;
1126 cifs_dbg(FYI, "Bad MID state?\n");
1127 goto out;
1128 }
a891f0f8 1129
e0bba0b8
RS
1130 buf = (char *)midQ[i]->resp_buf;
1131 resp_iov[i].iov_base = buf;
1132 resp_iov[i].iov_len = midQ[i]->resp_buf_size +
1133 ses->server->vals->header_preamble_size;
1134
1135 if (midQ[i]->large_buf)
1136 resp_buf_type[i] = CIFS_LARGE_BUFFER;
1137 else
1138 resp_buf_type[i] = CIFS_SMALL_BUFFER;
1139
e0bba0b8
RS
1140 rc = ses->server->ops->check_receive(midQ[i], ses->server,
1141 flags & CIFS_LOG_ERROR);
1da177e4 1142
e0bba0b8
RS
1143 /* mark it so buf will not be freed by cifs_delete_mid */
1144 if ((flags & CIFS_NO_RESP) == 0)
1145 midQ[i]->resp_buf = NULL;
cb5c2e63 1146
e0bba0b8 1147 }
cb5c2e63
RS
1148
1149 /*
1150 * Compounding is never used during session establish.
1151 */
1152 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
1153 struct kvec iov = {
1154 .iov_base = resp_iov[0].iov_base,
1155 .iov_len = resp_iov[0].iov_len
1156 };
1157 smb311_update_preauth_hash(ses, &iov, 1);
1158 }
1159
7ee1af76 1160out:
4e34feb5
RS
1161 /*
1162 * This will dequeue all mids. After this it is important that the
1163 * demultiplex_thread will not process any of these mids any further.
1164 * This is prevented above by using a noop callback that will not
1165 * wake this thread except for the very last PDU.
1166 */
8544f4aa
PS
1167 for (i = 0; i < num_rqst; i++) {
1168 if (!cancelled_mid[i])
1169 cifs_delete_mid(midQ[i]);
8544f4aa 1170 }
1da177e4 1171
d6e04ae6
SF
1172 return rc;
1173}
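
/*
 * Usage sketch (illustrative, not part of the original source): SMB3
 * compound operations typically build a small array of smb_rqst entries,
 * e.g. open + query-info + close as rqst[0..2], and then call
 *
 *	rc = compound_send_recv(xid, ses, flags, 3, rqst,
 *				resp_buf_type, resp_iov);
 *
 * so that all three PDUs go out in one round trip. Each element of
 * resp_iov[] then points at the matching response buffer, which the
 * caller releases with free_rsp_buf() according to resp_buf_type[].
 */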
1da177e4 1174
e0bba0b8
RS
1175int
1176cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
1177 struct smb_rqst *rqst, int *resp_buf_type, const int flags,
1178 struct kvec *resp_iov)
1179{
1180 return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
1181 resp_iov);
1182}
1183
738f9de5
PS
1184int
1185SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1186 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1187 const int flags, struct kvec *resp_iov)
1188{
1189 struct smb_rqst rqst;
3cecf486 1190 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
738f9de5
PS
1191 int rc;
1192
3cecf486 1193 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
6da2ec56
KC
1194 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1195 GFP_KERNEL);
117e3b7f
SF
1196 if (!new_iov) {
1197 /* otherwise cifs_send_recv below sets resp_buf_type */
1198 *resp_buf_type = CIFS_NO_BUFFER;
3cecf486 1199 return -ENOMEM;
117e3b7f 1200 }
3cecf486
RS
1201 } else
1202 new_iov = s_iov;
738f9de5
PS
1203
1204 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1205 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1206
1207 new_iov[0].iov_base = new_iov[1].iov_base;
1208 new_iov[0].iov_len = 4;
1209 new_iov[1].iov_base += 4;
1210 new_iov[1].iov_len -= 4;
1211
1212 memset(&rqst, 0, sizeof(struct smb_rqst));
1213 rqst.rq_iov = new_iov;
1214 rqst.rq_nvec = n_vec + 1;
1215
1216 rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
3cecf486
RS
1217 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1218 kfree(new_iov);
738f9de5
PS
1219 return rc;
1220}
1221
1da177e4 1222int
96daf2b0 1223SendReceive(const unsigned int xid, struct cifs_ses *ses,
1da177e4 1224 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
480b1cb9 1225 int *pbytes_returned, const int flags)
1da177e4
LT
1226{
1227 int rc = 0;
1da177e4 1228 struct mid_q_entry *midQ;
fb2036d8
PS
1229 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1230 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1231 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
34f4deb7 1232 struct cifs_credits credits = { .value = 1, .instance = 0 };
1da177e4
LT
1233
1234 if (ses == NULL) {
f96637be 1235 cifs_dbg(VFS, "Null smb session\n");
1da177e4
LT
1236 return -EIO;
1237 }
79a58d1f 1238 if (ses->server == NULL) {
f96637be 1239 cifs_dbg(VFS, "Null tcp session\n");
1da177e4
LT
1240 return -EIO;
1241 }
1242
79a58d1f 1243 if (ses->server->tcpStatus == CifsExiting)
31ca3bc3
SF
1244 return -ENOENT;
1245
79a58d1f 1246 /* Ensure that we do not send more than 50 overlapping requests
1da177e4
LT
1247 to the same server. We may make this configurable later or
1248 use ses->maxReq */
1da177e4 1249
fb2036d8 1250 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
f96637be 1251 cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
fb2036d8 1252 len);
6d9c6d54
VL
1253 return -EIO;
1254 }
1255
480b1cb9 1256 rc = wait_for_free_request(ses->server, flags, &credits.instance);
7ee1af76
JA
1257 if (rc)
1258 return rc;
1259
79a58d1f 1260 /* make sure that we sign in the same order that we send on this socket
1da177e4
LT
1261 and avoid races inside tcp sendmsg code that could cause corruption
1262 of smb data */
1263
72ca545b 1264 mutex_lock(&ses->server->srv_mutex);
1da177e4 1265
7ee1af76
JA
1266 rc = allocate_mid(ses, in_buf, &midQ);
1267 if (rc) {
72ca545b 1268 mutex_unlock(&ses->server->srv_mutex);
7ee1af76 1269 /* Update # of requests on wire to server */
34f4deb7 1270 add_credits(ses->server, &credits, 0);
7ee1af76 1271 return rc;
1da177e4
LT
1272 }
1273
ad009ac9 1274 rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
829049cb
VL
1275 if (rc) {
1276 mutex_unlock(&ses->server->srv_mutex);
1277 goto out;
1278 }
1da177e4 1279
7c9421e1 1280 midQ->mid_state = MID_REQUEST_SUBMITTED;
789e6661
SF
1281
1282 cifs_in_send_inc(ses->server);
fb2036d8 1283 rc = smb_send(ses->server, in_buf, len);
789e6661
SF
1284 cifs_in_send_dec(ses->server);
1285 cifs_save_when_sent(midQ);
ad313cb8
JL
1286
1287 if (rc < 0)
1288 ses->server->sequence_number -= 2;
1289
72ca545b 1290 mutex_unlock(&ses->server->srv_mutex);
7ee1af76 1291
79a58d1f 1292 if (rc < 0)
7ee1af76
JA
1293 goto out;
1294
480b1cb9 1295 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP)
7ee1af76 1296 goto out;
1da177e4 1297
0ade640e 1298 rc = wait_for_response(ses->server, midQ);
1be912dd 1299 if (rc != 0) {
fb2036d8 1300 send_cancel(ses->server, &rqst, midQ);
1be912dd 1301 spin_lock(&GlobalMid_Lock);
7c9421e1 1302 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1be912dd
JL
1303 /* no longer considered to be "in-flight" */
1304 midQ->callback = DeleteMidQEntry;
1305 spin_unlock(&GlobalMid_Lock);
34f4deb7 1306 add_credits(ses->server, &credits, 0);
1be912dd
JL
1307 return rc;
1308 }
1309 spin_unlock(&GlobalMid_Lock);
1310 }
1da177e4 1311
3c1105df 1312 rc = cifs_sync_mid_result(midQ, ses->server);
053d5034 1313 if (rc != 0) {
34f4deb7 1314 add_credits(ses->server, &credits, 0);
1da177e4
LT
1315 return rc;
1316 }
50c2f753 1317
2c8f981d 1318 if (!midQ->resp_buf || !out_buf ||
7c9421e1 1319 midQ->mid_state != MID_RESPONSE_RECEIVED) {
2b2bdfba 1320 rc = -EIO;
f96637be 1321 cifs_dbg(VFS, "Bad MID state?\n");
2c8f981d 1322 goto out;
1da177e4 1323 }
7ee1af76 1324
d4e4854f 1325 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
2c8f981d
JL
1326 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1327 rc = cifs_check_receive(midQ, ses->server, 0);
7ee1af76 1328out:
3c1bf7e4 1329 cifs_delete_mid(midQ);
34f4deb7 1330 add_credits(ses->server, &credits, 0);
1da177e4 1331
7ee1af76
JA
1332 return rc;
1333}
1da177e4 1334
7ee1af76
JA
1335/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1336 blocking lock to return. */
1337
1338static int
96daf2b0 1339send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
7ee1af76
JA
1340 struct smb_hdr *in_buf,
1341 struct smb_hdr *out_buf)
1342{
1343 int bytes_returned;
96daf2b0 1344 struct cifs_ses *ses = tcon->ses;
7ee1af76
JA
1345 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1346
1347 /* We just modify the current in_buf to change
1348 the type of lock from LOCKING_ANDX_SHARED_LOCK
1349 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1350 LOCKING_ANDX_CANCEL_LOCK. */
1351
1352 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1353 pSMB->Timeout = 0;
88257360 1354 pSMB->hdr.Mid = get_next_mid(ses->server);
7ee1af76
JA
1355
1356 return SendReceive(xid, ses, in_buf, out_buf,
7749981e 1357 &bytes_returned, 0);
7ee1af76
JA
1358}
1359
1360int
96daf2b0 1361SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
7ee1af76
JA
1362 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1363 int *pbytes_returned)
1364{
1365 int rc = 0;
1366 int rstart = 0;
7ee1af76 1367 struct mid_q_entry *midQ;
96daf2b0 1368 struct cifs_ses *ses;
fb2036d8
PS
1369 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1370 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1371 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
34f4deb7 1372 unsigned int instance;
7ee1af76
JA
1373
1374 if (tcon == NULL || tcon->ses == NULL) {
f96637be 1375 cifs_dbg(VFS, "Null smb session\n");
7ee1af76
JA
1376 return -EIO;
1377 }
1378 ses = tcon->ses;
1379
79a58d1f 1380 if (ses->server == NULL) {
f96637be 1381 cifs_dbg(VFS, "Null tcp session\n");
7ee1af76
JA
1382 return -EIO;
1383 }
1384
79a58d1f 1385 if (ses->server->tcpStatus == CifsExiting)
7ee1af76
JA
1386 return -ENOENT;
1387
79a58d1f 1388 /* Ensure that we do not send more than 50 overlapping requests
7ee1af76
JA
1389 to the same server. We may make this configurable later or
1390 use ses->maxReq */
1391
fb2036d8 1392 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
f96637be 1393 cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
fb2036d8 1394 len);
6d9c6d54
VL
1395 return -EIO;
1396 }
1397
480b1cb9 1398 rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, &instance);
7ee1af76
JA
1399 if (rc)
1400 return rc;
1401
79a58d1f 1402 /* make sure that we sign in the same order that we send on this socket
7ee1af76
JA
1403 and avoid races inside tcp sendmsg code that could cause corruption
1404 of smb data */
1405
72ca545b 1406 mutex_lock(&ses->server->srv_mutex);
7ee1af76
JA
1407
1408 rc = allocate_mid(ses, in_buf, &midQ);
1409 if (rc) {
72ca545b 1410 mutex_unlock(&ses->server->srv_mutex);
7ee1af76
JA
1411 return rc;
1412 }
1413
7ee1af76 1414 rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
829049cb 1415 if (rc) {
3c1bf7e4 1416 cifs_delete_mid(midQ);
829049cb
VL
1417 mutex_unlock(&ses->server->srv_mutex);
1418 return rc;
1419 }
1da177e4 1420
7c9421e1 1421 midQ->mid_state = MID_REQUEST_SUBMITTED;
789e6661 1422 cifs_in_send_inc(ses->server);
fb2036d8 1423 rc = smb_send(ses->server, in_buf, len);
789e6661
SF
1424 cifs_in_send_dec(ses->server);
1425 cifs_save_when_sent(midQ);
ad313cb8
JL
1426
1427 if (rc < 0)
1428 ses->server->sequence_number -= 2;
1429
72ca545b 1430 mutex_unlock(&ses->server->srv_mutex);
7ee1af76 1431
79a58d1f 1432 if (rc < 0) {
3c1bf7e4 1433 cifs_delete_mid(midQ);
7ee1af76
JA
1434 return rc;
1435 }
1436
1437 /* Wait for a reply - allow signals to interrupt. */
1438 rc = wait_event_interruptible(ses->server->response_q,
7c9421e1 1439 (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
7ee1af76
JA
1440 ((ses->server->tcpStatus != CifsGood) &&
1441 (ses->server->tcpStatus != CifsNew)));
1442
1443 /* Were we interrupted by a signal ? */
1444 if ((rc == -ERESTARTSYS) &&
7c9421e1 1445 (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
7ee1af76
JA
1446 ((ses->server->tcpStatus == CifsGood) ||
1447 (ses->server->tcpStatus == CifsNew))) {
1448
1449 if (in_buf->Command == SMB_COM_TRANSACTION2) {
1450 /* POSIX lock. We send a NT_CANCEL SMB to cause the
1451 blocking lock to return. */
fb2036d8 1452 rc = send_cancel(ses->server, &rqst, midQ);
7ee1af76 1453 if (rc) {
3c1bf7e4 1454 cifs_delete_mid(midQ);
7ee1af76
JA
1455 return rc;
1456 }
1457 } else {
1458 /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1459 to cause the blocking lock to return. */
1460
1461 rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1462
1463 /* If we get -ENOLCK back the lock may have
1464 already been removed. Don't exit in this case. */
1465 if (rc && rc != -ENOLCK) {
3c1bf7e4 1466 cifs_delete_mid(midQ);
7ee1af76
JA
1467 return rc;
1468 }
1469 }
1470
1be912dd
JL
1471 rc = wait_for_response(ses->server, midQ);
1472 if (rc) {
fb2036d8 1473 send_cancel(ses->server, &rqst, midQ);
1be912dd 1474 spin_lock(&GlobalMid_Lock);
7c9421e1 1475 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1be912dd
JL
1476 /* no longer considered to be "in-flight" */
1477 midQ->callback = DeleteMidQEntry;
1478 spin_unlock(&GlobalMid_Lock);
1479 return rc;
1480 }
1481 spin_unlock(&GlobalMid_Lock);
7ee1af76 1482 }
1be912dd
JL
1483
1484 /* We got the response - restart system call. */
1485 rstart = 1;
7ee1af76
JA
1486 }
1487
3c1105df 1488 rc = cifs_sync_mid_result(midQ, ses->server);
053d5034 1489 if (rc != 0)
7ee1af76 1490 return rc;
50c2f753 1491
17c8bfed 1492 /* rcvd frame is ok */
7c9421e1 1493 if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
698e96a8 1494 rc = -EIO;
f96637be 1495 cifs_dbg(VFS, "Bad MID state?\n");
698e96a8
VL
1496 goto out;
1497 }
1da177e4 1498
d4e4854f 1499 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
2c8f981d
JL
1500 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1501 rc = cifs_check_receive(midQ, ses->server, 0);
17c8bfed 1502out:
3c1bf7e4 1503 cifs_delete_mid(midQ);
7ee1af76
JA
1504 if (rstart && rc == -EACCES)
1505 return -ERESTARTSYS;
1da177e4
LT
1506 return rc;
1507}