cifs: Fix in error types returned for out-of-credit situations.
[linux-block.git] / fs / cifs / transport.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/transport.c
3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
79a58d1f 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
1da177e4
LT
21 */
22
23#include <linux/fs.h>
24#include <linux/list.h>
5a0e3ad6 25#include <linux/gfp.h>
1da177e4
LT
26#include <linux/wait.h>
27#include <linux/net.h>
28#include <linux/delay.h>
f06ac72e 29#include <linux/freezer.h>
b8eed283 30#include <linux/tcp.h>
2f8b5444 31#include <linux/bvec.h>
97bc00b3 32#include <linux/highmem.h>
7c0f6ba6 33#include <linux/uaccess.h>
1da177e4
LT
34#include <asm/processor.h>
35#include <linux/mempool.h>
14e25977 36#include <linux/sched/signal.h>
1da177e4
LT
37#include "cifspdu.h"
38#include "cifsglob.h"
39#include "cifsproto.h"
40#include "cifs_debug.h"
8bd68c6e 41#include "smb2proto.h"
9762c2d0 42#include "smbdirect.h"
50c2f753 43
3cecf486
RS
44/* Max number of iovectors we can use off the stack when sending requests. */
45#define CIFS_MAX_IOV_SIZE 8
46
2dc7e1c0
PS
47void
48cifs_wake_up_task(struct mid_q_entry *mid)
2b84a36c
JL
49{
50 wake_up_process(mid->callback_data);
51}
52
a6827c18 53struct mid_q_entry *
24b9b06b 54AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
1da177e4
LT
55{
56 struct mid_q_entry *temp;
57
24b9b06b 58 if (server == NULL) {
f96637be 59 cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
1da177e4
LT
60 return NULL;
61 }
50c2f753 62
232087cb 63 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
a6f74e80 64 memset(temp, 0, sizeof(struct mid_q_entry));
696e420b 65 kref_init(&temp->refcount);
a6f74e80
N
66 temp->mid = get_mid(smb_buffer);
67 temp->pid = current->pid;
68 temp->command = cpu_to_le16(smb_buffer->Command);
69 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
1047abc1 70 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
a6f74e80
N
71 /* when mid allocated can be before when sent */
72 temp->when_alloc = jiffies;
73 temp->server = server;
2b84a36c 74
a6f74e80
N
75 /*
76 * The default is for the mid to be synchronous, so the
77 * default callback just wakes up the current task.
78 */
f1f27ad7
VW
79 get_task_struct(current);
80 temp->creator = current;
a6f74e80
N
81 temp->callback = cifs_wake_up_task;
82 temp->callback_data = current;
1da177e4 83
1da177e4 84 atomic_inc(&midCount);
7c9421e1 85 temp->mid_state = MID_REQUEST_ALLOCATED;
1da177e4
LT
86 return temp;
87}
88
696e420b
LP
/*
 * Final kref release for a mid_q_entry: hand cancelled-but-answered mids
 * to the protocol layer, free the response buffer, update per-command
 * latency statistics (CONFIG_CIFS_STATS2) and return the entry to the
 * mempool. Called with GlobalMid_Lock held (see cifs_mid_q_entry_release).
 */
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	/* a response arrived after the waiter gave up: let the protocol ops
	   clean up (e.g. close a handle opened by the cancelled request) */
	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry->resp_buf, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	/* track fastest/slowest/total round-trip per SMB2 command */
	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	/* drop the task reference taken in AllocMidQEntry() */
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}
167
/*
 * Drop one reference on @midEntry; the final put frees it via
 * _cifs_mid_q_entry_release(). GlobalMid_Lock is held across the put so
 * the release cannot race with mid lookups on the pending queue.
 */
void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}
174
/* Legacy-named wrapper: simply drops the caller's reference on the mid. */
void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}
179
3c1bf7e4
PS
/*
 * Unlink @mid from the pending queue (if not already unlinked by the
 * demultiplex thread — MID_DELETED guards against a double list_del)
 * and drop the caller's reference.
 */
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}
192
6f49f46b
JL
/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 *
 * Returns 0 once all of @smb_msg has been sent, -EAGAIN if the socket
 * stays congested past the retry budget, or the sock_sendmsg() error.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking  we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear.  The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet.  In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			/* exponential backoff: sleep 2^retries ms, give up
			   after ~15s total (14 nonblocking / 3 blocking) */
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}
270
/*
 * Compute the on-the-wire payload length of @rqst: the sum of its kvec
 * lengths plus any trailing page data. For SMB2+ (no RFC1002 preamble)
 * a leading 4-byte iov is the length field placeholder and is skipped.
 */
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		/* skip the 4-byte RFC1002 length placeholder */
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}
314
/*
 * Transmit @num_rqst compounded requests over the server transport.
 *
 * Dispatches to smbdirect for RDMA-mounted servers; otherwise streams the
 * RFC1002 marker (SMB2+), each request's kvecs and then its page array
 * over the TCP socket. The socket is corked for the duration and all
 * signals are blocked during the send so a partial transmission (which
 * forces a session reconnect) cannot be caused by an interrupt.
 *
 * Must be called with srv_mutex held. Returns 0 on full send, -EAGAIN
 * when there is no usable transport, -ERESTARTSYS on fatal signal, or a
 * socket error.
 */
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		return -ERESTARTSYS;
	}

	/* cork the socket */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
459
/*
 * Send @num_rqst requests, optionally encrypting them first.
 *
 * Without CIFS_TRANSFORM_REQ this is a straight passthrough to
 * __smb_send_rqst(). With it, a transform header is prepended as an
 * extra request slot (hence the MAX_COMPOUND - 1 limit) and the
 * protocol's init_transform_rq op builds the encrypted compound before
 * sending. The transform header is heap-allocated and freed on all paths
 * via the out label.
 */
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	/* slot 0 is reserved for the transform header below */
	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(tr_hdr, 0, sizeof(*tr_hdr));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}
504
0496e02d
JL
505int
506smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
507 unsigned int smb_buf_length)
508{
738f9de5 509 struct kvec iov[2];
7fb8986e
PS
510 struct smb_rqst rqst = { .rq_iov = iov,
511 .rq_nvec = 2 };
0496e02d 512
738f9de5
PS
513 iov[0].iov_base = smb_buffer;
514 iov[0].iov_len = 4;
515 iov[1].iov_base = (char *)smb_buffer + 4;
516 iov[1].iov_len = smb_buf_length;
0496e02d 517
07cd952f 518 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
519}
520
/*
 * Wait until @num_credits credits are available on @server, then claim
 * them and record the reconnect instance they belong to in @instance.
 *
 * @timeout is in milliseconds; negative means wait forever. Non-blocking
 * requests (CIFS_NON_BLOCKING, e.g. oplock break responses) bypass the
 * wait entirely. Returns 0 on success, -EAGAIN for a credit-less echo,
 * -EBUSY on timeout, -ERESTARTSYS if killed, -ENOENT if the connection
 * is being torn down.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits = server->credits;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			/* not enough credits yet: sleep until woken by the
			   demultiplex thread granting more, or timeout */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				trace_smb3_credit_timeout(server->CurrentMid,
					server->hostname, num_credits, 0);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
					 timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this is we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);
				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					trace_smb3_credit_timeout(
						server->CurrentMid,
						server->hostname, num_credits,
						0);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						 timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				scredits = *credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			spin_unlock(&server->req_lock);

			trace_smb3_add_credits(server->CurrentMid,
					server->hostname, scredits, -(num_credits));
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
					__func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}
1da177e4 645
/*
 * Claim a single credit, waiting indefinitely (timeout = -1) if none are
 * available. @instance receives the reconnect instance the credit came from.
 */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags, instance);
}
653
257b7809
RS
/*
 * Reserve @num credits for a compound request, waiting up to 60 seconds.
 * Fails fast with -EDEADLK when no requests are in flight and credits are
 * insufficient, since in that case no response could ever replenish them.
 */
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, sin_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	sin_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * If the server is tight on resources or just gives us less
		 * credits for other reasons (e.g. requests are coming out of
		 * order and the server delays granting more credits until it
		 * processes a missing mid) and we exhausted most available
		 * credits there may be situations when we try to send
		 * a compound request but we don't have enough credits. At this
		 * point the client needs to decide if it should wait for
		 * additional credits or fail the request. If at least one
		 * request is in flight there is a high probability that the
		 * server will return enough credits to satisfy this compound
		 * request.
		 *
		 * Return immediately if no requests in flight since we will be
		 * stuck on waiting for credits.
		 */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
					server->hostname, scredits, sin_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
					__func__, sin_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}
698
cb7e9eab
PS
699int
700cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
335b7b62 701 unsigned int *num, struct cifs_credits *credits)
cb7e9eab
PS
702{
703 *num = size;
335b7b62
PS
704 credits->value = 0;
705 credits->instance = server->reconnect_instance;
cb7e9eab
PS
706 return 0;
707}
708
/*
 * Allocate a mid for @in_buf and queue it on the server's pending list.
 * Rejects requests on dead/reconnecting transports and on sessions that
 * are still being set up or torn down (except the setup/teardown commands
 * themselves). Returns 0 with *@ppmidQ set, or -ENOENT/-EAGAIN/-ENOMEM.
 */
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}
743
0ade640e
JL
/*
 * Sleep (freezable, killable) until the demultiplex thread moves @midQ
 * out of MID_REQUEST_SUBMITTED. Returns 0 on completion, -ERESTARTSYS
 * if interrupted by a fatal signal.
 */
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}
756
fec344e3
JL
/*
 * Build a mid for an async SMB1 request: validate the 2-iov layout
 * (4-byte length field contiguous with the body), set the signing flag
 * if the server requires it, allocate the mid and sign the request.
 * Returns the mid or an ERR_PTR.
 */
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	/* iov[0] must be exactly the 4-byte RFC1002 field, with the body
	   immediately following it in memory */
	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
133672ef 784
a6827c18
JL
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 *
 * Obtains a credit unless the caller supplied one (CIFS_HAS_CREDITS),
 * refuses to spend credits from a previous connection instance, queues
 * the mid and transmits under srv_mutex. On send failure the mid is
 * unwound; on any failure the credit is returned and waiters are woken.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		/* undo the mid/sequence bookkeeping for the failed send */
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}
866
133672ef
SF
867/*
868 *
869 * Send an SMB Request. No response info (other than return code)
870 * needs to be parsed.
871 *
872 * flags indicate the type of request buffer and how long to wait
873 * and whether to log NT STATUS code (error) before mapping it to POSIX error
874 *
875 */
876int
96daf2b0 877SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
792af7b0 878 char *in_buf, int flags)
133672ef
SF
879{
880 int rc;
881 struct kvec iov[1];
da502f7d 882 struct kvec rsp_iov;
133672ef
SF
883 int resp_buf_type;
884
792af7b0
PS
885 iov[0].iov_base = in_buf;
886 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
392e1c5d 887 flags |= CIFS_NO_RSP_BUF;
da502f7d 888 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
f96637be 889 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
90c81e0b 890
133672ef
SF
891 return rc;
892}
893
/*
 * Collect the outcome of a synchronous mid and release it.
 * Maps the mid state to an errno: 0 on a received response, -EAGAIN on
 * retry, -EIO on malformed response or invalid state, -EHOSTDOWN on
 * shutdown. For invalid states the mid is also unlinked from the
 * pending queue (guarded by MID_DELETED) before release.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		/* success: keep rc == 0 and skip DeleteMidQEntry — the
		   caller still owns the response buffer */
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}
930
121b046a 931static inline int
fb2036d8
PS
932send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
933 struct mid_q_entry *mid)
76dcc26f 934{
121b046a 935 return server->ops->send_cancel ?
fb2036d8 936 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
937}
938
/*
 * Validate a received SMB response: verify its signature (when signing
 * is active on the connection) and map the SMB status code to a POSIX
 * error.
 *
 * @mid:       mid entry whose resp_buf holds the complete response
 * @server:    TCP connection the response arrived on
 * @log_error: when true, log the NT STATUS before mapping it
 */
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	/* total frame length = 4-byte RFC1002 header + SMB payload */
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		/* iov[0] = RFC1002 length, iov[1] = the SMB packet proper */
		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		/* NOTE(review): a bad signature is only logged here, not
		   treated as fatal — see the FIXME above */
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}
969
fec344e3 970struct mid_q_entry *
f780bd3f
AA
971cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
972 struct smb_rqst *rqst)
792af7b0
PS
973{
974 int rc;
fec344e3 975 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
976 struct mid_q_entry *mid;
977
738f9de5
PS
978 if (rqst->rq_iov[0].iov_len != 4 ||
979 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
980 return ERR_PTR(-EIO);
981
792af7b0
PS
982 rc = allocate_mid(ses, hdr, &mid);
983 if (rc)
fec344e3
JL
984 return ERR_PTR(rc);
985 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
986 if (rc) {
3c1bf7e4 987 cifs_delete_mid(mid);
fec344e3
JL
988 return ERR_PTR(rc);
989 }
990 return mid;
792af7b0
PS
991}
992
4e34feb5 993static void
ee258d79 994cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
995{
996 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
997 struct cifs_credits credits;
998
999 credits.value = server->ops->get_credits(mid);
1000 credits.instance = server->reconnect_instance;
8a26f0f7 1001
34f4deb7 1002 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
1003}
1004
/* Callback for the final PDU of a compound chain: collect the granted
   credits, then wake the thread sleeping in compound_send_recv(). */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
1011
/* Callback installed when the requester gave up waiting for a mid:
   still collect the credits from the (late) response, then free the
   mid instead of waking the long-gone requesting thread. */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}
1018
5f68ea4a
AA
1019/*
1020 * Return a channel (master if none) of @ses that can be used to send
1021 * regular requests.
1022 *
1023 * If we are currently binding a new channel (negprot/sess.setup),
1024 * return the new incomplete channel.
1025 */
1026struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1027{
1028 uint index = 0;
1029
1030 if (!ses)
1031 return NULL;
1032
1033 if (!ses->binding) {
1034 /* round robin */
1035 if (ses->chan_count > 1) {
1036 index = (uint)atomic_inc_return(&ses->chan_seq);
1037 index %= ses->chan_count;
1038 }
1039 return ses->chans[index].server;
1040 } else {
1041 return cifs_ses_server(ses);
1042 }
1043}
1044
/*
 * Send @num_rqst requests as one compound chain on @server and wait for
 * all the responses.
 *
 * On success resp_iov[i]/resp_buf_type[i] describe each response buffer
 * (caller frees via the buffer type). Credits are reserved up front for
 * every PDU and either consumed by the responses or returned to the
 * server structure on the error paths below.
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	/* one credit reserved per PDU, all from the same reconnect instance */
	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&server->srv_mutex);

	/*
	 * All parts of the compound chain must use credits obtained from the
	 * same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* undo the mid numbers consumed so far and free the
			   mids already set up for this chain */
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* send failed: roll back mid and signing sequence numbers */
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	mutex_unlock(&server->srv_mutex);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* cancel every PDU we have not received a response for; any
		   still-submitted mid is handed to cifs_cancelled_callback so
		   a late response frees it instead of waking us */
		for (; i < num_rqst; i++) {
			cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}
1da177e4 1261
/*
 * Send a single (non-compounded) request on @server and wait for its
 * response. Thin wrapper around compound_send_recv() with num_rqst == 1.
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}
1271
/*
 * Build an smb_rqst around the caller's iovecs and send it on the
 * session's primary server.
 *
 * The caller's iov[0] must begin with the 4-byte RFC1001 length field;
 * this helper splits that prefix into its own kvec (new_iov[0]) because
 * the transport expects the length and the SMB packet in separate
 * vectors. On return *resp_buf_type tells the caller which buffer pool
 * (if any) resp_iov->iov_base came from, so it can be released properly.
 */
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	/* one extra slot is needed for the split-off length vector;
	   fall back to the heap when that does not fit on the stack */
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, ses->server,
			    &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}
1310
/*
 * Send a single legacy (SMB1) request that is already fully marshalled
 * in @in_buf and copy the response into @out_buf.
 *
 * @pbytes_returned: out parameter, set to the RFC1002 length of the
 *                   received frame.
 * @flags:           wait/credit options for wait_for_free_request().
 *
 * Returns 0 on success or a negative errno. The single credit reserved
 * for the request is always returned to the server structure.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* send failed: roll back the signing sequence number */
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		/* response raced in after all — fall through and use it */
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		/* cifs_sync_mid_result already freed the mid on error */
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy the full frame (RFC1002 header included) to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
1da177e4 1422
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	/* the cancel is a new request on the wire, so it needs its own mid */
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}
1447
/*
 * Send a blocking SMB1 lock request and wait (interruptibly) for the
 * response. If the wait is interrupted by a signal, a cancel is sent
 * to the server (NT_CANCEL for POSIX/Trans2 locks, LOCKINGX_CANCEL_LOCK
 * for Windows locks); once the server has responded the syscall is
 * restarted by converting -EACCES into -ERESTARTSYS.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;	/* set once the cancelled wait finally completed */
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* send failed: roll back the signing sequence number */
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		/* the cancel guarantees the server will now answer;
		   wait (uninterruptibly this time) for that answer */
		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy the full frame (RFC1002 header included) to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	/* a cancelled-then-completed blocking lock restarts the syscall */
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}