// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org) 2006.
 *
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <linux/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	if (mid->mid_state == MID_RESPONSE_RECEIVED)
		mid->mid_state = MID_RESPONSE_READY;
	wake_up_process(mid->callback_data);
}

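/*
 * Allocate a mid_q_entry for an outgoing request and initialize its
 * bookkeeping (mid, pid, command, allocation time). The default callback
 * just wakes the allocating task, so a mid behaves synchronously unless
 * the caller overrides ->callback.
 */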
static struct mid_q_entry *
alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "%s: null TCP session\n", __func__);
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&mid_count);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

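/*
 * Final kref release for a mid: let the server ops handle a response that
 * arrived after the waiter cancelled, free the response buffer, update
 * per-command round-trip statistics and log slow responses
 * (CONFIG_CIFS_STATS2), then return the mid to the mempool.
 */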
void __release_mid(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    (midEntry->mid_state == MID_RESPONSE_RECEIVED ||
	     midEntry->mid_state == MID_RESPONSE_READY) &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&mid_count);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}

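/* Unlink a mid from the pending queue (if still queued) and drop a reference. */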
void
delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&mid->server->mid_lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&mid->server->mid_lock);

	release_mid(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking  we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear.  The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet.  In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

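/*
 * Total number of bytes a request will occupy on the wire. For SMB2+ the
 * leading 4-byte RFC1002 iov is skipped because __smb_send_rqst generates
 * the length marker itself.
 */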
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
	    rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	buflen += iov_iter_count(&rqst->rq_iter);
	return buflen;
}

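/*
 * Core sender: with signals blocked and the socket corked, write the
 * RFC1002 length marker (SMB2+), each request's kvec array, and any extra
 * payload carried in rq_iter. A partial send forces a reconnect, since the
 * server could otherwise treat the next SMB as the remainder of this one.
 */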
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg = {};
	__be32 rfc1002_marker;

	cifs_in_send_inc(server);
	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	rc = -EAGAIN;
	if (ssocket == NULL)
		goto out;

	rc = -ERESTARTSYS;
	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		goto out;
	}

	rc = 0;
	/* cork the socket */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (!is_smb1(server)) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		if (iov_iter_count(&rqst[j].rq_iter) > 0) {
			smb_msg.msg_iter = rqst[j].rq_iter;
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;
			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		cifs_signal_cifsd_for_reconnect(server, false);
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;
out:
	cifs_in_send_dec(server);
	return rc;
}

struct send_req_vars {
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst rqst[MAX_COMPOUND];
	struct kvec iov;
};

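/*
 * Send a compound chain, encrypting it first when CIFS_TRANSFORM_REQ is
 * set: a transform header iov is prepended and init_transform_rq builds
 * the encrypted copy that actually goes on the wire. The vars struct is
 * heap allocated to keep the stack footprint small.
 */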
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct send_req_vars *vars;
	struct smb_rqst *cur_rqst;
	struct kvec *iov;
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	vars = kzalloc(sizeof(*vars), GFP_NOFS);
	if (!vars)
		return -ENOMEM;
	cur_rqst = vars->rqst;
	iov = &vars->iov;

	iov->iov_base = &vars->tr_hdr;
	iov->iov_len = sizeof(vars->tr_hdr);
	cur_rqst[0].rq_iov = iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(vars);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

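/*
 * Throttle senders against the server's credit count. A negative timeout
 * means wait indefinitely. Non-blocking ops (e.g. oplock break acks) take
 * a credit without waiting, and the last MAX_COMPOUND credits are reserved
 * for compound requests so single-credit traffic cannot starve them.
 */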
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_nblk_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				__func__, 1, scredits);

		return 0;
	}

	while (1) {
		spin_unlock(&server->req_lock);

		spin_lock(&server->srv_lock);
		if (server->tcpStatus == CifsExiting) {
			spin_unlock(&server->srv_lock);
			return -ENOENT;
		}
		spin_unlock(&server->srv_lock);

		spin_lock(&server->req_lock);
		if (*credits < num_credits) {
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->CurrentMid,
						server->conn_id, server->hostname, scredits,
						num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
							server->CurrentMid,
							server->conn_id, server->hostname,
							scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_waitff_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					-(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
					__func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}

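/*
 * Like wait_for_free_request() but for a whole compound chain: fail fast
 * with -EDEADLK when there are not enough credits and nothing is in
 * flight, since no response could ever replenish the credit pool.
 */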
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * If the server is tight on resources or just gives us less
		 * credits for other reasons (e.g. requests are coming out of
		 * order and the server delays granting more credits until it
		 * processes a missing mid) and we exhausted most available
		 * credits there may be situations when we try to send
		 * a compound request but we don't have enough credits. At this
		 * point the client needs to decide if it should wait for
		 * additional credits or fail the request. If at least one
		 * request is in flight there is a high probability that the
		 * server will return enough credits to satisfy this compound
		 * request.
		 *
		 * Return immediately if no requests in flight since we will be
		 * stuck on waiting for credits.
		 */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					num, in_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
					__func__, in_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}

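/*
 * Allocate a mid for a synchronous request and queue it on the server's
 * pending_mid_q. Only session setup, negotiate and logoff commands are
 * allowed through while the session is being set up or torn down.
 */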
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	spin_lock(&ses->ses_lock);
	if (ses->ses_status == SES_NEW) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE)) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are setting up session */
	}

	if (ses->ses_status == SES_EXITING) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are shutting down session */
	}
	spin_unlock(&ses->ses_lock);

	*ppmidQ = alloc_mid(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&ses->server->mid_lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&ses->server->mid_lock);
	return 0;
}

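/*
 * Sleep (killable and freezable) until the demultiplex thread moves the
 * mid out of the submitted/received states.
 */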
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_state(server->response_q,
				 midQ->mid_state != MID_REQUEST_SUBMITTED &&
				 midQ->mid_state != MID_RESPONSE_RECEIVED,
				 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = alloc_mid(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		release_mid(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	cifs_server_lock(server);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&server->mid_lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&server->mid_lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	rc = smb_send_rqst(server, 1, rqst, flags);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		delete_mid(mid);
	}

	cifs_server_unlock(server);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}

/*
 *
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 *
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

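/*
 * Translate the final mid state into an errno, dequeueing the mid and
 * dropping the reference the caller held while waiting.
 */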
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&server->mid_lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_READY:
		spin_unlock(&server->mid_lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&server->mid_lock);

	release_mid(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

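/*
 * Validate a received response: verify the signature when signing is in
 * use, then map the SMB status to a POSIX error code.
 */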
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

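/*
 * Per-mid completion callbacks for compound chains: every part returns the
 * credits granted by its response; only the last part wakes the issuing
 * thread, and a cancelled part drops its own reference since nobody is
 * waiting for it.
 */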
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits;

	credits.value = server->ops->get_credits(mid);
	credits.instance = server->reconnect_instance;

	add_credits(server, &credits, mid->optype);

	if (mid->mid_state == MID_RESPONSE_RECEIVED)
		mid->mid_state = MID_RESPONSE_READY;
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	release_mid(mid);
}

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
	uint index = 0;
	unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
	struct TCP_Server_Info *server = NULL;
	int i;

	if (!ses)
		return NULL;

	spin_lock(&ses->chan_lock);
	for (i = 0; i < ses->chan_count; i++) {
		server = ses->chans[i].server;
		if (!server || server->terminate)
			continue;

		/*
		 * strictly speaking, we should pick up req_lock to read
		 * server->in_flight. But it shouldn't matter much here if we
		 * race while reading this data. The worst that can happen is
		 * that we could use a channel that's not least loaded. Avoiding
		 * taking the lock could help reduce wait time, which is
		 * important for this function
		 */
		if (server->in_flight < min_in_flight) {
			min_in_flight = server->in_flight;
			index = i;
		}
		if (server->in_flight > max_in_flight)
			max_in_flight = server->in_flight;
	}

	/* if all channels are equally loaded, fall back to round-robin */
	if (min_in_flight == max_in_flight) {
		index = (uint)atomic_inc_return(&ses->chan_seq);
		index %= ses->chan_count;
	}
	spin_unlock(&ses->chan_lock);

	return ses->chans[index].server;
}

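/*
 * Synchronous send/receive of a compound chain: obtain credits, set up and
 * sign the mids in send order under the server lock, transmit, then wait
 * for and validate every response, cancelling outstanding mids if the wait
 * is interrupted.
 */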
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	cifs_server_lock(server);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				delete_mid(midQ[j]);
			cifs_server_unlock(server);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	rc = smb_send_rqst(server, num_rqst, rqst, flags);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	cifs_server_unlock(server);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&ses->ses_lock);

		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		cifs_server_unlock(server);

		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&server->mid_lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
			    midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&server->mid_lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_READY) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			HEADER_PREAMBLE_SIZE(server);

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&ses->ses_lock);
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		cifs_server_unlock(server);
		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			delete_mid(midQ[i]);
	}

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}

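/*
 * Legacy iov-based entry point: prepend an iov for the RFC1001 length that
 * cifs_send_recv expects, copying the caller's array when it does not fit
 * on the stack.
 */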
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, ses->server,
			    &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

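/*
 * Synchronous send/receive of a single SMB1 buffer, with the response
 * copied into the caller-supplied out_buf.
 */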
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&server->mid_lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
		    midQ->mid_state == MID_RESPONSE_RECEIVED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = release_mid;
			spin_unlock(&server->mid_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&server->mid_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_READY) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

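/*
 * Like SendReceive() but for blocking SMB1 lock requests: the wait is
 * interruptible, and on a signal a cancel (NT_CANCEL or
 * LOCKINGX_CANCEL_LOCK) is sent so the server releases the blocked
 * request; once the response does arrive, the system call is restarted.
 */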
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
		   midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED ||
		 midQ->mid_state == MID_RESPONSE_RECEIVED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&server->mid_lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
			    midQ->mid_state == MID_RESPONSE_RECEIVED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = release_mid;
				spin_unlock(&server->mid_lock);
				return rc;
			}
			spin_unlock(&server->mid_lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}

/*
 * Discard any remaining data in the current SMB. To do this, we borrow the
 * current bigbuf.
 */
int
cifs_discard_remaining_data(struct TCP_Server_Info *server)
{
	unsigned int rfclen = server->pdu_size;
	size_t remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
		server->total_read;

	while (remaining > 0) {
		ssize_t length;

		length = cifs_discard_from_socket(server,
				min_t(size_t, remaining,
				      CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
		if (length < 0)
			return length;
		server->total_read += length;
		remaining -= length;
	}

	return 0;
}

static int
__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		     bool malformed)
{
	int length;

	length = cifs_discard_remaining_data(server);
	dequeue_mid(mid, malformed);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}

static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	struct cifs_readdata *rdata = mid->callback_data;

	return __cifs_readv_discard(server, mid, rdata->result);
}

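/*
 * Receive path for async reads, run from the demultiplex thread: read the
 * rest of the READ response header, validate offsets and lengths, then
 * copy the payload straight from the socket (or RDMA) into rdata->iter,
 * discarding anything malformed or left over.
 */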
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_readdata *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
	bool use_rdma_mr = false;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
		 __func__, mid->mid, rdata->offset, rdata->bytes);

	/*
	 * read the rest of READ_RSP header (sans Data array), or whatever we
	 * can if there's not enough data. At this point, we've read down to
	 * the Mid.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
							HEADER_SIZE(server) + 1;

	length = cifs_read_from_socket(server,
				       buf + HEADER_SIZE(server) - 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server)) {
		cifs_discard_remaining_data(server);
		return -1;
	}

	/* set up first two iov for signature check and to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_len =
		server->total_read - HEADER_PREAMBLE_SIZE(server);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	/* Was the SMB read successful? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		return __cifs_readv_discard(server, mid, false);
	}

	/* Is there enough to get to the rest of the READ_RSP header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	data_offset = server->ops->read_data_offset(buf) +
		HEADER_PREAMBLE_SIZE(server);
	if (data_offset < server->total_read) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	len = data_offset - server->total_read;
	if (len > 0) {
		/* read any junk before data into the rest of smallbuf */
		length = cifs_read_from_socket(server,
					       buf + server->total_read, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);
	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
		/* data_len is corrupt -- discard frame */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr)
		length = data_len; /* An RDMA read is already done. */
	else
#endif
		length = cifs_read_iter_from_socket(server, &rdata->iter,
						    data_len);
	if (length > 0)
		rdata->got_bytes += length;
	server->total_read += length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* discard anything left over */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

	dequeue_mid(mid, false);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}