CIFS: Fix module dependency
[linux-block.git] / fs / cifs / transport.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/transport.c
3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
79a58d1f 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
1da177e4
LT
21 */
22
23#include <linux/fs.h>
24#include <linux/list.h>
5a0e3ad6 25#include <linux/gfp.h>
1da177e4
LT
26#include <linux/wait.h>
27#include <linux/net.h>
28#include <linux/delay.h>
f06ac72e 29#include <linux/freezer.h>
b8eed283 30#include <linux/tcp.h>
2f8b5444 31#include <linux/bvec.h>
97bc00b3 32#include <linux/highmem.h>
7c0f6ba6 33#include <linux/uaccess.h>
1da177e4
LT
34#include <asm/processor.h>
35#include <linux/mempool.h>
14e25977 36#include <linux/sched/signal.h>
1da177e4
LT
37#include "cifspdu.h"
38#include "cifsglob.h"
39#include "cifsproto.h"
40#include "cifs_debug.h"
8bd68c6e 41#include "smb2proto.h"
9762c2d0 42#include "smbdirect.h"
50c2f753 43
3cecf486
RS
44/* Max number of iovectors we can use off the stack when sending requests. */
45#define CIFS_MAX_IOV_SIZE 8
46
/*
 * Default mid callback: the request was issued synchronously, so simply
 * wake the task that is sleeping in wait_for_response().  callback_data
 * is set to `current` in AllocMidQEntry().
 */
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}
52
/*
 * Allocate and initialize a mid (multiplex id) queue entry for the request
 * contained in @smb_buffer, owned by @server.
 *
 * Returns the new refcounted entry (state MID_REQUEST_ALLOCATED), or NULL
 * if @server is NULL.  The caller is responsible for queueing it on
 * pending_mid_q and for eventually releasing it.
 */
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	/*
	 * NOTE(review): no NULL check after mempool_alloc — presumably relies
	 * on a sleeping gfp mask (GFP_NOFS) making mempool allocation
	 * effectively non-failing; confirm against mempool semantics.
	 */
	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
86
/*
 * kref release function: final reference dropped, return the mid to the
 * mempool.  Only ever invoked via cifs_mid_q_entry_release().
 */
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
					       refcount);

	mempool_free(mid, cifs_mid_poolp);
}
94
95void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
96{
97 spin_lock(&GlobalMid_Lock);
98 kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
99 spin_unlock(&GlobalMid_Lock);
100}
101
/*
 * Tear down a mid entry: mark it free, release its response buffer,
 * record per-command latency statistics (CONFIG_CIFS_STATS2 only), and
 * drop the caller's reference.  The caller must already have removed the
 * entry from pending_mid_q (see cifs_delete_mid()).
 */
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	/* lock_cmd is excluded from slow-response reporting below, since
	   blocking lock requests legitimately take a long time */
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
	struct TCP_Server_Info *server = midEntry->server;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	/* response buffer came from either the large or small buf pool */
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_dbg(VFS, "invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	/* track fastest/slowest/total time per SMB2 command code */
	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			/* first sample seeds both extremes */
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
				    midEntry->when_sent,
				    midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	cifs_mid_q_entry_release(midEntry);
}
171
/*
 * Unlink @mid from its server's pending queue (marking it MID_DELETED so
 * other threads know it is off the list) and then tear it down via
 * DeleteMidQEntry().
 */
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del_init(&mid->qhead);
	mid->mid_flags |= MID_DELETED;
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}
182
6f49f46b
JL
183/*
184 * smb_send_kvec - send an array of kvecs to the server
185 * @server: Server to send the data to
3ab3f2a1 186 * @smb_msg: Message to send
6f49f46b
JL
187 * @sent: amount of data sent on socket is stored here
188 *
189 * Our basic "send data to server" function. Should be called with srv_mutex
190 * held. The caller is responsible for handling the results.
191 */
d6e04ae6 192static int
3ab3f2a1
AV
193smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
194 size_t *sent)
1da177e4
LT
195{
196 int rc = 0;
3ab3f2a1 197 int retries = 0;
edf1ae40 198 struct socket *ssocket = server->ssocket;
50c2f753 199
6f49f46b
JL
200 *sent = 0;
201
3ab3f2a1
AV
202 smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
203 smb_msg->msg_namelen = sizeof(struct sockaddr);
204 smb_msg->msg_control = NULL;
205 smb_msg->msg_controllen = 0;
0496e02d 206 if (server->noblocksnd)
3ab3f2a1 207 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
edf1ae40 208 else
3ab3f2a1 209 smb_msg->msg_flags = MSG_NOSIGNAL;
1da177e4 210
3ab3f2a1 211 while (msg_data_left(smb_msg)) {
6f49f46b
JL
212 /*
213 * If blocking send, we try 3 times, since each can block
214 * for 5 seconds. For nonblocking we have to try more
215 * but wait increasing amounts of time allowing time for
216 * socket to clear. The overall time we wait in either
217 * case to send on the socket is about 15 seconds.
218 * Similarly we wait for 15 seconds for a response from
219 * the server in SendReceive[2] for the server to send
220 * a response back for most types of requests (except
221 * SMB Write past end of file which can be slow, and
222 * blocking lock operations). NFS waits slightly longer
223 * than CIFS, but this can make it take longer for
224 * nonresponsive servers to be detected and 15 seconds
225 * is more than enough time for modern networks to
226 * send a packet. In most cases if we fail to send
227 * after the retries we will kill the socket and
228 * reconnect which may clear the network problem.
229 */
3ab3f2a1 230 rc = sock_sendmsg(ssocket, smb_msg);
ce6c44e4 231 if (rc == -EAGAIN) {
3ab3f2a1
AV
232 retries++;
233 if (retries >= 14 ||
234 (!server->noblocksnd && (retries > 2))) {
f96637be
JP
235 cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
236 ssocket);
3ab3f2a1 237 return -EAGAIN;
1da177e4 238 }
3ab3f2a1 239 msleep(1 << retries);
1da177e4
LT
240 continue;
241 }
6f49f46b 242
79a58d1f 243 if (rc < 0)
3ab3f2a1 244 return rc;
6f49f46b 245
79a58d1f 246 if (rc == 0) {
3e84469d
SF
247 /* should never happen, letting socket clear before
248 retrying is our only obvious option here */
f96637be 249 cifs_dbg(VFS, "tcp sent no data\n");
3e84469d
SF
250 msleep(500);
251 continue;
d6e04ae6 252 }
6f49f46b 253
3ab3f2a1
AV
254 /* send was at least partially successful */
255 *sent += rc;
256 retries = 0; /* in case we get ENOSPC on the next send */
1da177e4 257 }
3ab3f2a1 258 return 0;
97bc00b3
JL
259}
260
35e2cc1b 261unsigned long
81f39f95 262smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
a26054d1
JL
263{
264 unsigned int i;
35e2cc1b
PA
265 struct kvec *iov;
266 int nvec;
a26054d1
JL
267 unsigned long buflen = 0;
268
81f39f95
RS
269 if (server->vals->header_preamble_size == 0 &&
270 rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
35e2cc1b
PA
271 iov = &rqst->rq_iov[1];
272 nvec = rqst->rq_nvec - 1;
273 } else {
274 iov = rqst->rq_iov;
275 nvec = rqst->rq_nvec;
276 }
277
a26054d1 278 /* total up iov array first */
35e2cc1b 279 for (i = 0; i < nvec; i++)
a26054d1
JL
280 buflen += iov[i].iov_len;
281
c06a0f2d
LL
282 /*
283 * Add in the page array if there is one. The caller needs to make
284 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
285 * multiple pages ends at page boundary, rq_tailsz needs to be set to
286 * PAGE_SIZE.
287 */
a26054d1 288 if (rqst->rq_npages) {
c06a0f2d
LL
289 if (rqst->rq_npages == 1)
290 buflen += rqst->rq_tailsz;
291 else {
292 /*
293 * If there is more than one page, calculate the
294 * buffer length based on rq_offset and rq_tailsz
295 */
296 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
297 rqst->rq_offset;
298 buflen += rqst->rq_tailsz;
299 }
a26054d1
JL
300 }
301
302 return buflen;
303}
304
/*
 * Send @num_rqst plain (non-encrypted) requests to @server over RDMA or
 * the TCP socket.  Must be called with srv_mutex held.
 *
 * Signals are blocked for the duration of the socket sends so that a
 * partial send (which would force a session reconnect) cannot be caused
 * by an interrupted syscall; see the comment before sigfillset() below.
 * Returns 0 on success, -EINTR if a signal arrived before/after a
 * complete send, or a negative socket error.
 */
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	/* SMB Direct (RDMA) bypasses the TCP socket path entirely */
	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (signal_pending(current)) {
		cifs_dbg(FYI, "signal is pending before sending any data\n");
		return -EINTR;
	}

	/* cork the socket: batch all pieces into as few segments as possible */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		/* marker is part of what must reach the wire */
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -EINTR;
	}

	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
450
/*
 * Send @num_rqst requests, encrypting them first when CIFS_TRANSFORM_REQ
 * is set in @flags.  For the transform path, slot 0 of a local rqst array
 * is reserved for the smb2_transform_hdr and the payload is re-built by
 * the dialect's init_transform_rq callback before being handed to
 * __smb_send_rqst().
 */
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	/* need one spare slot for the transform header */
	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback "
			 "is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	/* free only the transformed copies, not the caller's rqst array */
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}
490
0496e02d
JL
491int
492smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
493 unsigned int smb_buf_length)
494{
738f9de5 495 struct kvec iov[2];
7fb8986e
PS
496 struct smb_rqst rqst = { .rq_iov = iov,
497 .rq_nvec = 2 };
0496e02d 498
738f9de5
PS
499 iov[0].iov_base = smb_buffer;
500 iov[0].iov_len = 4;
501 iov[1].iov_base = (char *)smb_buffer + 4;
502 iov[1].iov_len = smb_buf_length;
0496e02d 503
07cd952f 504 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
505}
506
/*
 * Block until @num_credits credits of the type selected by @flags are
 * available on @server, then consume them (updating in_flight and
 * returning the reconnect instance via @instance).
 *
 * @timeout is in milliseconds; negative means wait forever.  Returns 0 on
 * success, -EAGAIN for an echo with no credits, -ENOTSUPP on timeout,
 * -ERESTARTSYS if killed, or -ENOENT if the tcp session is exiting.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	int rc;
	int *credits;
	int optype;
	long int t;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		*instance = server->reconnect_instance;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			/* not enough credits yet: sleep until woken or timeout */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				trace_smb3_credit_timeout(server->CurrentMid,
					server->hostname, num_credits);
				cifs_dbg(VFS, "wait timed out after %d ms\n",
					 timeout);
				return -ENOTSUPP;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this is we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);
				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					trace_smb3_credit_timeout(
						server->CurrentMid,
						server->hostname, num_credits);
					cifs_dbg(VFS, "wait timed out after %d ms\n",
						 timeout);
					return -ENOTSUPP;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				/* re-evaluate from the top with lock held */
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				*instance = server->reconnect_instance;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
1da177e4 619
/*
 * Convenience wrapper: wait (forever, timeout = -1) for a single credit
 * of the type selected by @flags.
 */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}
627
/*
 * Wait (up to 60 seconds) for @num credits for a compound request.
 * Fails fast with -ENOTSUPP when there are so few requests in flight
 * that no credit replenishment can be expected — waiting would likely
 * deadlock on an idle connection.
 */
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	if (*credits < num) {
		/*
		 * Return immediately if not too many requests in flight since
		 * we will likely be stuck on waiting for credits.
		 */
		if (server->in_flight < num - *credits) {
			spin_unlock(&server->req_lock);
			return -ENOTSUPP;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}
652
/*
 * SMB1 implementation of the wait_mtu_credits op: the legacy protocol has
 * no credit-based flow control, so grant the full requested @size and
 * report zero consumed credits (with the current reconnect instance).
 */
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}
662
/*
 * Validate session/tcp state for the command in @in_buf, then allocate a
 * mid for it and queue the mid on the server's pending_mid_q.
 *
 * Returns 0 and sets *@ppmidQ on success; -ENOENT if the tcp session or
 * SMB session is going away, -EAGAIN if the caller should retry after
 * reconnect/session setup, -ENOMEM on allocation failure.
 */
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		/* only negotiate/session-setup may go out on a new session */
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}
697
/*
 * Sleep (freezable, killable) until the demultiplex thread moves @midQ
 * out of MID_REQUEST_SUBMITTED.  Returns 0 when a state change occurred,
 * or -ERESTARTSYS if the wait was interrupted by a fatal signal.
 */
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}
710
/*
 * SMB1 setup_async_request op: validate the two-iov layout (4-byte
 * RFC1002 field followed contiguously by the SMB header), request signing
 * if the server mandates it, allocate a mid, and sign the request.
 *
 * Returns the new mid or an ERR_PTR (-EIO for bad layout, -ENOMEM, or a
 * signing error).
 */
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	/* iov[0] must be exactly the length field, contiguous with iov[1] */
	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
133672ef 738
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 *
 * Obtains one credit (unless CIFS_HAS_CREDITS says the caller already
 * holds some), serializes on srv_mutex, queues the mid, and sends.  On
 * send failure the mid is deleted and the sequence number rolled back;
 * on any failure after credits were obtained they are returned to the
 * server pool.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		/* undo the mid allocation's effect on the sequence space */
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}
819}
820
/*
 *
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 *
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	/* in_buf starts with the 4-byte RFC1002 length field */
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	/* response buffer is freed by SendReceive2 for us */
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}
847
/*
 * Map the final state of a synchronously-waited mid to an errno and
 * release the mid.  A successfully-received mid stays on pending_mid_q
 * (its caller removes it later); any other state is torn down here.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		/* success: leave the mid alive for the caller to consume */
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		/* unexpected state: unlink defensively before freeing */
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}
881
121b046a 882static inline int
fb2036d8
PS
883send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
884 struct mid_q_entry *mid)
76dcc26f 885{
121b046a 886 return server->ops->send_cancel ?
fb2036d8 887 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
888}
889
/*
 * Post-receive validation for a legacy SMB response: dump a prefix of the
 * buffer for debugging, verify the signature when signing is active, and
 * translate the SMB status to a POSIX errno.
 *
 * NOTE(review): a signature verification failure is only logged, not
 * returned — the FIXME below about killing the session still stands.
 */
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}
920
/*
 * SMB1 setup_request op (synchronous path): validate the two-iov layout,
 * allocate+queue a mid via allocate_mid(), and sign the request.
 * Returns the mid or an ERR_PTR on failure.
 */
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	/* iov[0] must be exactly the length field, contiguous with iov[1] */
	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		/* mid was queued by allocate_mid, so unlink and free it */
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}
942
/*
 * Per-mid completion for compound requests: return the credits granted in
 * this mid's response to the server's pool (tagged with the current
 * reconnect instance so stale credits are discarded).
 */
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits;

	credits.value = server->ops->get_credits(mid);
	credits.instance = server->reconnect_instance;

	add_credits(server, &credits, mid->optype);
}
954
ee258d79
PS
/*
 * Callback for the final mid of a compound chain: collect the granted
 * credits like every other part, then wake the thread sleeping in
 * compound_send_recv(), which only waits on the last PDU of the chain.
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
961
/*
 * Callback installed when the issuing thread stopped waiting for a
 * response (see the cancel path in compound_send_recv()): collect the
 * granted credits and free the mid here, because the issuing thread
 * has already returned and will not delete it.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}
968
/*
 * Send a compound chain of num_rqst requests (up to MAX_COMPOUND) on
 * ses->server and wait for all the responses.
 *
 * On success, resp_iov[i] points at response i and resp_buf_type[i]
 * records its allocation class (CIFS_SMALL_BUFFER/CIFS_LARGE_BUFFER).
 * Unless CIFS_NO_RSP_BUF was passed in flags, the mids' resp_buf
 * pointers are cleared below so cifs_delete_mid() does not free the
 * buffers — the caller owns and must free them.
 *
 * Returns 0 or a negative error; -EAGAIN if a reconnect raced with
 * credit acquisition and the caller should retry.
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(ses->server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	/* one credit was consumed per request, all from the same instance */
	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session instance. We can not use credits obtained from a
	 * previous session to send this request. Check if there were
	 * reconnects after we obtained credits and return -EAGAIN in such
	 * cases to let callers handle it.
	 */
	if (instance != ses->server->reconnect_instance) {
		mutex_unlock(&ses->server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(ses->server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* undo the current_mid bump for the i mids created */
			revert_current_mid(ses->server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&ses->server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(ses->server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
	cifs_in_send_dec(ses->server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* send failed: roll back mid counter and signing sequence */
		revert_current_mid(ses->server, num_rqst);
		ses->server->sequence_number -= 2;
	}

	mutex_unlock(&ses->server->srv_mutex);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(ses->server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(ses->server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* interrupted: cancel this mid and every later one */
		for (; i < num_rqst; i++) {
			cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(ses->server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				/* demux thread will free the mid instead */
				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], ses->server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			ses->server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = ses->server->ops->check_receive(midQ[i], ses->server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}
1da177e4 1184
e0bba0b8
RS
/*
 * Send a single request and wait for its response: a compound chain of
 * length one through compound_send_recv().
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	const int num_rqst = 1;

	return compound_send_recv(xid, ses, flags, num_rqst, rqst,
				  resp_buf_type, resp_iov);
}
1193
738f9de5
PS
1194int
1195SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1196 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1197 const int flags, struct kvec *resp_iov)
1198{
1199 struct smb_rqst rqst;
3cecf486 1200 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
738f9de5
PS
1201 int rc;
1202
3cecf486 1203 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
6da2ec56
KC
1204 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1205 GFP_KERNEL);
117e3b7f
SF
1206 if (!new_iov) {
1207 /* otherwise cifs_send_recv below sets resp_buf_type */
1208 *resp_buf_type = CIFS_NO_BUFFER;
3cecf486 1209 return -ENOMEM;
117e3b7f 1210 }
3cecf486
RS
1211 } else
1212 new_iov = s_iov;
738f9de5
PS
1213
1214 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1215 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1216
1217 new_iov[0].iov_base = new_iov[1].iov_base;
1218 new_iov[0].iov_len = 4;
1219 new_iov[1].iov_base += 4;
1220 new_iov[1].iov_len -= 4;
1221
1222 memset(&rqst, 0, sizeof(struct smb_rqst));
1223 rqst.rq_iov = new_iov;
1224 rqst.rq_nvec = n_vec + 1;
1225
1226 rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
3cecf486
RS
1227 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1228 kfree(new_iov);
738f9de5
PS
1229 return rc;
1230}
1231
/*
 * Send a single legacy SMB request (in_buf) and wait synchronously for
 * the response, copying it into out_buf. *pbytes_returned is set to
 * the RFC1002 length of the response. Pre-SMB2 counterpart of
 * cifs_send_recv() working on raw smb_hdr buffers.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	/* takes one credit; remember the instance for the release below */
	rc = wait_for_free_request(ses->server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	/* send failed: roll back the signing sequence number bump */
	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		/* interrupted: cancel and hand the mid to the demux thread */
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, &credits, 0);
			return rc;
		}
		/* response arrived while cancelling; fall through */
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy the whole frame (RFC1002 length + payload) to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, &credits, 0);

	return rc;
}
1da177e4 1341
7ee1af76
JA
1342/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1343 blocking lock to return. */
1344
1345static int
96daf2b0 1346send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
7ee1af76
JA
1347 struct smb_hdr *in_buf,
1348 struct smb_hdr *out_buf)
1349{
1350 int bytes_returned;
96daf2b0 1351 struct cifs_ses *ses = tcon->ses;
7ee1af76
JA
1352 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1353
1354 /* We just modify the current in_buf to change
1355 the type of lock from LOCKING_ANDX_SHARED_LOCK
1356 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1357 LOCKING_ANDX_CANCEL_LOCK. */
1358
1359 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1360 pSMB->Timeout = 0;
88257360 1361 pSMB->hdr.Mid = get_next_mid(ses->server);
7ee1af76
JA
1362
1363 return SendReceive(xid, ses, in_buf, out_buf,
7749981e 1364 &bytes_returned, 0);
7ee1af76
JA
1365}
1366
/*
 * Like SendReceive(), but for byte-range lock requests that may block
 * indefinitely on the server. The wait is signal-interruptible; if a
 * signal arrives while the request is still outstanding, a cancel is
 * sent (NT_CANCEL for POSIX/Transaction2 locks, LOCKINGX_CANCEL_LOCK
 * otherwise) and we wait again for the response. When the call was
 * restarted this way and the lock came back -EACCES, -ERESTARTSYS is
 * returned so the system call is restarted.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	/* send failed: roll back the signing sequence number bump */
	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		/* wait (uninterruptibly this time) for the response */
		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy the whole frame (RFC1002 length + payload) to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}