cifs: reconnect only the connection and not smb session where possible
[linux-block.git] / fs / cifs / transport.c
CommitLineData
929be906 1// SPDX-License-Identifier: LGPL-2.1
1da177e4 2/*
1da177e4 3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 */
9
10#include <linux/fs.h>
11#include <linux/list.h>
5a0e3ad6 12#include <linux/gfp.h>
1da177e4
LT
13#include <linux/wait.h>
14#include <linux/net.h>
15#include <linux/delay.h>
f06ac72e 16#include <linux/freezer.h>
b8eed283 17#include <linux/tcp.h>
2f8b5444 18#include <linux/bvec.h>
97bc00b3 19#include <linux/highmem.h>
7c0f6ba6 20#include <linux/uaccess.h>
1da177e4
LT
21#include <asm/processor.h>
22#include <linux/mempool.h>
14e25977 23#include <linux/sched/signal.h>
1da177e4
LT
24#include "cifspdu.h"
25#include "cifsglob.h"
26#include "cifsproto.h"
27#include "cifs_debug.h"
8bd68c6e 28#include "smb2proto.h"
9762c2d0 29#include "smbdirect.h"
50c2f753 30
3cecf486
RS
31/* Max number of iovectors we can use off the stack when sending requests. */
32#define CIFS_MAX_IOV_SIZE 8
33
2dc7e1c0
PS
34void
35cifs_wake_up_task(struct mid_q_entry *mid)
2b84a36c
JL
36{
37 wake_up_process(mid->callback_data);
38}
39
/*
 * Allocate and initialise a mid (multiplex id) queue entry for @smb_buffer
 * destined for @server.  The entry is NOT yet placed on any queue; the
 * caller links it onto server->pending_mid_q.  Returns NULL if @server is
 * NULL (mempool_alloc with GFP_NOFS sleeps rather than failing).
 * Caller owns the initial kref reference.
 */
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	/* pin the task so the callback target stays valid until release */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
75
696e420b
LP
/*
 * Final kref release for a mid: runs any dialect-specific cancelled-mid
 * handler, frees the response buffer, updates per-command latency stats
 * (CONFIG_CIFS_STATS2), drops the creator task reference and returns the
 * entry to the mempool.  Called with GlobalMid_Lock held (see
 * cifs_mid_q_entry_release).
 */
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	/* lock_cmd is exempted from slow-response accounting below */
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	/* a response arrived after the waiter gave up - let the dialect clean up */
	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	/* track fastest/slowest/total round-trip per SMB2 command code */
	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	/* balance the get_task_struct() done in AllocMidQEntry() */
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}
154
/*
 * Drop one reference to @midEntry under GlobalMid_Lock; when the count
 * reaches zero _cifs_mid_q_entry_release() frees the entry.
 */
void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}
161
/* Historical name; simply drops the caller's reference to @midEntry. */
void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}
166
3c1bf7e4
PS
/*
 * Unlink @mid from its server's pending queue (if not already unlinked by
 * the demultiplex thread - MID_DELETED guards against double list_del) and
 * drop the caller's reference.
 */
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}
179
6f49f46b
JL
/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 *
 * Returns 0 once all data in @smb_msg has been sent, or a negative errno.
 * On -EAGAIN/partial progress it retries with exponential backoff; *sent is
 * updated even when an error is ultimately returned.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking  we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear.  The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet.  In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			/* blocking sends give up after 3 tries, nonblocking after 14 */
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			/* exponential backoff: 2ms, 4ms, 8ms, ... */
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}
257
/*
 * Compute the total on-the-wire payload length of @rqst: the sum of all
 * kvec lengths plus any trailing page-array data.  For SMB2+ (no RFC1002
 * preamble) the first 4-byte iov is the length marker itself and is
 * excluded from the count.
 */
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	/* skip the 4-byte RFC1002 marker iov for SMB2+ framing */
	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}
301
/*
 * Low-level transmit of @num_rqst compounded requests.  Hands off to
 * smbdirect when RDMA is in use; otherwise corks the TCP socket, emits the
 * RFC1002 length marker (SMB2+), then each request's kvecs and pages, with
 * all signals blocked for the duration of the send.  A partial send marks
 * the connection CifsNeedReconnect so the server discards the fragment.
 * Must be called with srv_mutex held (see smb_send_kvec).
 */
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		return -ERESTARTSYS;
	}

	/* cork the socket so the request goes out as few segments as possible */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		spin_lock(&GlobalMid_Lock);
		server->tcpStatus = CifsNeedReconnect;
		spin_unlock(&GlobalMid_Lock);
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
448
/*
 * Send @num_rqst requests, transparently encrypting them first when
 * CIFS_TRANSFORM_REQ is set in @flags: a transform header iov is prepended
 * as cur_rqst[0] and the dialect's init_transform_rq builds the encrypted
 * copies in cur_rqst[1..num_rqst], which are freed after sending.
 */
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	/* slot 0 is reserved for the transform header below */
	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(tr_hdr, 0, sizeof(*tr_hdr));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}
493
0496e02d
JL
494int
495smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
496 unsigned int smb_buf_length)
497{
738f9de5 498 struct kvec iov[2];
7fb8986e
PS
499 struct smb_rqst rqst = { .rq_iov = iov,
500 .rq_nvec = 2 };
0496e02d 501
738f9de5
PS
502 iov[0].iov_base = smb_buffer;
503 iov[0].iov_len = 4;
504 iov[1].iov_base = (char *)smb_buffer + 4;
505 iov[1].iov_len = smb_buf_length;
0496e02d 506
07cd952f 507 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
508}
509
/*
 * Reserve @num_credits send credits from @server, blocking (killable, up to
 * @timeout ms; negative means wait forever) until enough are available.
 * On success *instance is set to the reconnect instance the credits belong
 * to and in_flight accounting is updated.  Non-blocking ops (oplock break
 * acks) take a credit immediately; normal single-credit requests leave the
 * last MAX_COMPOUND credits reserved for compound requests.
 * Returns 0, -EAGAIN (echo with no credits), -EBUSY (timeout), -ENOENT
 * (connection exiting) or -ERESTARTSYS.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		/* snapshot counters inside the lock for tracing below */
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_add_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				__func__, 1, scredits);

		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				/* timed out waiting for credits */
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->CurrentMid,
						server->conn_id, server->hostname, scredits,
						num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this is we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					/* timed out waiting for headroom */
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
							server->CurrentMid,
							server->conn_id, server->hostname,
							scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_add_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					-(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
					__func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}
1da177e4 658
/*
 * Reserve a single send credit, waiting indefinitely (negative timeout
 * means no deadline - see wait_for_free_credits).
 */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags, instance);
}
666
257b7809
RS
/*
 * Reserve @num credits for a compound request.  If the server currently
 * holds fewer than @num credits AND nothing is in flight, fail fast with
 * -EDEADLK (waiting would never be satisfied); otherwise wait up to 60s
 * via wait_for_free_credits().
 */
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	/* snapshot for the trace/debug output below */
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * If the server is tight on resources or just gives us less
		 * credits for other reasons (e.g. requests are coming out of
		 * order and the server delays granting more credits until it
		 * processes a missing mid) and we exhausted most available
		 * credits there may be situations when we try to send
		 * a compound request but we don't have enough credits. At this
		 * point the client needs to decide if it should wait for
		 * additional credits or fail the request. If at least one
		 * request is in flight there is a high probability that the
		 * server will return enough credits to satisfy this compound
		 * request.
		 *
		 * Return immediately if no requests in flight since we will be
		 * stuck on waiting for credits.
		 */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					num, in_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
					__func__, in_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
			instance);
}
712
cb7e9eab
PS
713int
714cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
335b7b62 715 unsigned int *num, struct cifs_credits *credits)
cb7e9eab
PS
716{
717 *num = size;
335b7b62
PS
718 credits->value = 0;
719 credits->instance = server->reconnect_instance;
cb7e9eab
PS
720 return 0;
721}
722
/*
 * Validate connection/session state, allocate a mid for @in_buf and link it
 * onto the server's pending_mid_q.  Session-setup and negotiate commands are
 * allowed through a CifsNew session, and logoff through a CifsExiting one.
 * Returns 0 with *ppmidQ set, or -ENOENT/-EAGAIN/-ENOMEM.
 */
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}
757
0ade640e
JL
758static int
759wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
7ee1af76 760{
0ade640e 761 int error;
7ee1af76 762
5853cc2a 763 error = wait_event_freezekillable_unsafe(server->response_q,
7c9421e1 764 midQ->mid_state != MID_REQUEST_SUBMITTED);
0ade640e
JL
765 if (error < 0)
766 return -ERESTARTSYS;
7ee1af76 767
0ade640e 768 return 0;
7ee1af76
JA
769}
770
fec344e3
JL
/*
 * Build and sign a mid for an async SMB1 request.  Expects rq_iov[0] to be
 * the 4-byte RFC1002 length field immediately followed in memory by
 * rq_iov[1] (the SMB header).  Returns the mid or an ERR_PTR.
 */
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	/* sanity-check the expected iov layout described above */
	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
133672ef 798
a6827c18
JL
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 *
 * Obtains a credit unless the caller supplied one (CIFS_HAS_CREDITS),
 * rejects credits from a previous reconnect instance with -EAGAIN, queues
 * the mid and transmits under srv_mutex.  On send failure the mid is
 * deleted and the sequence number rewound; on any error the credit is
 * returned and waiters are woken.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}
880
133672ef
SF
/*
 *
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 *
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	/* in_buf starts with the 4-byte RFC1002 length field */
	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	/* tell SendReceive2 not to hand back a response buffer */
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}
907
/*
 * Map the final state of a synchronous @mid to an errno and release it.
 * On MID_RESPONSE_RECEIVED the caller keeps the mid (and its resp_buf);
 * in every other state the mid is unlinked if necessary and freed here.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		/* success: mid stays alive for the caller to consume */
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		/* unexpected state: make sure the mid is off the pending queue */
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}
944
121b046a 945static inline int
fb2036d8
PS
946send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
947 struct mid_q_entry *mid)
76dcc26f 948{
121b046a 949 return server->ops->send_cancel ?
fb2036d8 950 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
951}
952
2c8f981d
JL
953int
954cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
955 bool log_error)
956{
792af7b0 957 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
826a95e4
JL
958
959 dump_smb(mid->resp_buf, min_t(u32, 92, len));
2c8f981d
JL
960
961 /* convert the length into a more usable form */
38d77c50 962 if (server->sign) {
738f9de5 963 struct kvec iov[2];
985e4ff0 964 int rc = 0;
738f9de5
PS
965 struct smb_rqst rqst = { .rq_iov = iov,
966 .rq_nvec = 2 };
826a95e4 967
738f9de5
PS
968 iov[0].iov_base = mid->resp_buf;
969 iov[0].iov_len = 4;
970 iov[1].iov_base = (char *)mid->resp_buf + 4;
971 iov[1].iov_len = len - 4;
2c8f981d 972 /* FIXME: add code to kill session */
bf5ea0e2 973 rc = cifs_verify_signature(&rqst, server,
0124cc45 974 mid->sequence_number);
985e4ff0 975 if (rc)
afe6f653 976 cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
f96637be 977 rc);
2c8f981d
JL
978 }
979
980 /* BB special case reconnect tid and uid here? */
a3713ec3 981 return map_and_check_smb_error(mid, log_error);
2c8f981d
JL
982}
983
/*
 * Prepare an SMB1 request for transmission: allocate a mid queue entry
 * and sign the assembled request.
 *
 * Contract on @rqst: rq_iov[0] must be exactly the 4-byte RFC1002
 * length header and rq_iov[1] must start immediately after it, i.e. the
 * two iovecs describe one contiguous buffer.
 *
 * @ignored exists only to match the ops->setup_request signature shared
 * with SMB2/3; SMB1 always signs against ses->server.
 *
 * Returns the new mid on success, or an ERR_PTR on failure.
 */
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		/* signing failed: release the mid we just queued */
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}
1006
4e34feb5 1007static void
ee258d79 1008cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
1009{
1010 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
1011 struct cifs_credits credits;
1012
1013 credits.value = server->ops->get_credits(mid);
1014 credits.instance = server->reconnect_instance;
8a26f0f7 1015
34f4deb7 1016 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
1017}
1018
/*
 * Callback for the final PDU of a compound chain: account the granted
 * credits, then wake the thread sleeping in compound_send_recv().
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
1025
/*
 * Callback installed once the waiting thread has abandoned a mid: still
 * account the server-granted credits, but free the mid here since no
 * one is left to collect the response.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}
1032
/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
	uint index = 0;

	if (!ses)
		return NULL;

	spin_lock(&ses->chan_lock);
	/* round robin */
pick_another:
	if (ses->chan_count > 1 &&
	    !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
		index = (uint)atomic_inc_return(&ses->chan_seq);
		index %= ses->chan_count;

		/*
		 * Skip channels awaiting reconnect.  The loop terminates
		 * because at least one channel is usable (guarded by the
		 * ALL_CHANS check above) and chan_seq keeps advancing.
		 */
		if (CIFS_CHAN_NEEDS_RECONNECT(ses, index))
			goto pick_another;
	}
	spin_unlock(&ses->chan_lock);

	/* index 0 (the master channel) when only one channel exists */
	return ses->chans[index].server;
}
1062
/*
 * Send @num_rqst (possibly compounded) requests on @server and wait for
 * all responses.
 *
 * One credit per request is reserved up front.  Every mid in the chain
 * gets a callback that returns server-granted credits as its response
 * arrives; only the last PDU's callback wakes this thread.  If the wait
 * is interrupted, affected mids are switched to a self-freeing cancelled
 * callback so the demultiplex thread can release them later.
 *
 * On success the response buffers are handed to the caller through
 * @resp_iov / @resp_buf_type (unless CIFS_NO_RSP_BUF was requested, in
 * which case the buffers are freed with the mids).
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&server->srv_mutex);

	/*
	 * All parts of the compound chain must use credits obtained from
	 * the same (current) session.  We can not use credits obtained from
	 * a previous session instance to send this request.  Check if there
	 * were reconnects after we obtained credits and return -EAGAIN in
	 * such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* undo the message ids consumed so far and clean up */
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* send failed: give back message ids and signing sequence */
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	mutex_unlock(&server->srv_mutex);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		mutex_lock(&server->srv_mutex);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		mutex_unlock(&server->srv_mutex);
	}

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* interrupted: cancel the remaining (unanswered) mids */
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		mutex_lock(&server->srv_mutex);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		mutex_unlock(&server->srv_mutex);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}
1da177e4 1283
/*
 * Convenience wrapper: send a single (non-compounded) request via the
 * compound transport path and wait for its response.
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	int rc;

	/* a compound chain of length one */
	rc = compound_send_recv(xid, ses, server, flags, 1,
				rqst, resp_buf_type, resp_iov);
	return rc;
}
1293
738f9de5
PS
1294int
1295SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1296 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1297 const int flags, struct kvec *resp_iov)
1298{
1299 struct smb_rqst rqst;
3cecf486 1300 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
738f9de5
PS
1301 int rc;
1302
3cecf486 1303 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
6da2ec56
KC
1304 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1305 GFP_KERNEL);
117e3b7f
SF
1306 if (!new_iov) {
1307 /* otherwise cifs_send_recv below sets resp_buf_type */
1308 *resp_buf_type = CIFS_NO_BUFFER;
3cecf486 1309 return -ENOMEM;
117e3b7f 1310 }
3cecf486
RS
1311 } else
1312 new_iov = s_iov;
738f9de5
PS
1313
1314 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1315 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1316
1317 new_iov[0].iov_base = new_iov[1].iov_base;
1318 new_iov[0].iov_len = 4;
1319 new_iov[1].iov_base += 4;
1320 new_iov[1].iov_len -= 4;
1321
1322 memset(&rqst, 0, sizeof(struct smb_rqst));
1323 rqst.rq_iov = new_iov;
1324 rqst.rq_nvec = n_vec + 1;
1325
352d96f3
AA
1326 rc = cifs_send_recv(xid, ses, ses->server,
1327 &rqst, resp_buf_type, flags, resp_iov);
3cecf486
RS
1328 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1329 kfree(new_iov);
738f9de5
PS
1330 return rc;
1331}
1332
/*
 * Send a single SMB1 request and wait (uninterruptibly) for the reply.
 *
 * On success the raw response is copied into @out_buf and
 * *@pbytes_returned holds its RFC1002 payload length.  The one credit
 * taken for the request is always returned to the server structure
 * before this function exits.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* send failed: roll back the signing sequence number */
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		/* mid already freed by cifs_sync_mid_result */
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
1da177e4 1444
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	/* the cancel is a new exchange, so it needs a fresh mid */
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}
1469
/*
 * Send a blocking-lock SMB and wait, interruptibly, for its response.
 *
 * Unlike SendReceive(), the wait can be interrupted by a signal.  In
 * that case we first try to cancel the lock on the server (an NT_CANCEL
 * for POSIX/Transact2 locks, a LOCKINGX_CANCEL_LOCK otherwise) and then
 * wait again for the response that may already be in flight.  When a
 * cancelled lock comes back -EACCES, -ERESTARTSYS is returned so the
 * system call is restarted.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* send failed: roll back the signing sequence number */
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		/* the cancel may race the original response; wait again */
		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}