Merge branches 'pm-cpuidle', 'pm-sleep' and 'pm-powercap'
[linux-block.git] / fs / smb / client / transport.c
CommitLineData
929be906 1// SPDX-License-Identifier: LGPL-2.1
1da177e4 2/*
1da177e4 3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 */
9
10#include <linux/fs.h>
11#include <linux/list.h>
5a0e3ad6 12#include <linux/gfp.h>
1da177e4
LT
13#include <linux/wait.h>
14#include <linux/net.h>
15#include <linux/delay.h>
f06ac72e 16#include <linux/freezer.h>
b8eed283 17#include <linux/tcp.h>
2f8b5444 18#include <linux/bvec.h>
97bc00b3 19#include <linux/highmem.h>
7c0f6ba6 20#include <linux/uaccess.h>
2c75426c 21#include <linux/processor.h>
1da177e4 22#include <linux/mempool.h>
14e25977 23#include <linux/sched/signal.h>
fb157ed2 24#include <linux/task_io_accounting_ops.h>
1da177e4
LT
25#include "cifspdu.h"
26#include "cifsglob.h"
27#include "cifsproto.h"
28#include "cifs_debug.h"
8bd68c6e 29#include "smb2proto.h"
9762c2d0 30#include "smbdirect.h"
50c2f753 31
3cecf486
RS
32/* Max number of iovectors we can use off the stack when sending requests. */
33#define CIFS_MAX_IOV_SIZE 8
34
2dc7e1c0
PS
35void
36cifs_wake_up_task(struct mid_q_entry *mid)
2b84a36c 37{
d527f513
ZX
38 if (mid->mid_state == MID_RESPONSE_RECEIVED)
39 mid->mid_state = MID_RESPONSE_READY;
2b84a36c
JL
40 wake_up_process(mid->callback_data);
41}
42
ea75a78c 43static struct mid_q_entry *
70f08f91 44alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
1da177e4
LT
45{
46 struct mid_q_entry *temp;
47
24b9b06b 48 if (server == NULL) {
70f08f91 49 cifs_dbg(VFS, "%s: null TCP session\n", __func__);
1da177e4
LT
50 return NULL;
51 }
50c2f753 52
232087cb 53 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
a6f74e80 54 memset(temp, 0, sizeof(struct mid_q_entry));
696e420b 55 kref_init(&temp->refcount);
a6f74e80
N
56 temp->mid = get_mid(smb_buffer);
57 temp->pid = current->pid;
58 temp->command = cpu_to_le16(smb_buffer->Command);
59 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
a5998a9e 60 /* easier to use jiffies */
a6f74e80
N
61 /* when mid allocated can be before when sent */
62 temp->when_alloc = jiffies;
63 temp->server = server;
2b84a36c 64
a6f74e80
N
65 /*
66 * The default is for the mid to be synchronous, so the
67 * default callback just wakes up the current task.
68 */
f1f27ad7
VW
69 get_task_struct(current);
70 temp->creator = current;
a6f74e80
N
71 temp->callback = cifs_wake_up_task;
72 temp->callback_data = current;
1da177e4 73
c2c17ddb 74 atomic_inc(&mid_count);
7c9421e1 75 temp->mid_state = MID_REQUEST_ALLOCATED;
1da177e4
LT
76 return temp;
77}
78
e6322fd1 79void __release_mid(struct kref *refcount)
696e420b 80{
abe57073
PS
81 struct mid_q_entry *midEntry =
82 container_of(refcount, struct mid_q_entry, refcount);
1047abc1 83#ifdef CONFIG_CIFS_STATS2
2dc7e1c0 84 __le16 command = midEntry->server->vals->lock_cmd;
433b8dd7 85 __u16 smb_cmd = le16_to_cpu(midEntry->command);
1047abc1 86 unsigned long now;
433b8dd7 87 unsigned long roundtrip_time;
1047abc1 88#endif
7b71843f
PS
89 struct TCP_Server_Info *server = midEntry->server;
90
91 if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
d527f513
ZX
92 (midEntry->mid_state == MID_RESPONSE_RECEIVED ||
93 midEntry->mid_state == MID_RESPONSE_READY) &&
7b71843f 94 server->ops->handle_cancelled_mid)
04ad69c3 95 server->ops->handle_cancelled_mid(midEntry, server);
7b71843f 96
7c9421e1 97 midEntry->mid_state = MID_FREE;
c2c17ddb 98 atomic_dec(&mid_count);
7c9421e1 99 if (midEntry->large_buf)
b8643e1b
SF
100 cifs_buf_release(midEntry->resp_buf);
101 else
102 cifs_small_buf_release(midEntry->resp_buf);
1047abc1
SF
103#ifdef CONFIG_CIFS_STATS2
104 now = jiffies;
433b8dd7 105 if (now < midEntry->when_alloc)
a0a3036b 106 cifs_server_dbg(VFS, "Invalid mid allocation time\n");
433b8dd7
SF
107 roundtrip_time = now - midEntry->when_alloc;
108
109 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
110 if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
111 server->slowest_cmd[smb_cmd] = roundtrip_time;
112 server->fastest_cmd[smb_cmd] = roundtrip_time;
113 } else {
114 if (server->slowest_cmd[smb_cmd] < roundtrip_time)
115 server->slowest_cmd[smb_cmd] = roundtrip_time;
116 else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
117 server->fastest_cmd[smb_cmd] = roundtrip_time;
118 }
119 cifs_stats_inc(&server->num_cmds[smb_cmd]);
120 server->time_per_cmd[smb_cmd] += roundtrip_time;
121 }
00778e22
SF
122 /*
123 * commands taking longer than one second (default) can be indications
124 * that something is wrong, unless it is quite a slow link or a very
125 * busy server. Note that this calc is unlikely or impossible to wrap
126 * as long as slow_rsp_threshold is not set way above recommended max
127 * value (32767 ie 9 hours) and is generally harmless even if wrong
128 * since only affects debug counters - so leaving the calc as simple
129 * comparison rather than doing multiple conversions and overflow
130 * checks
131 */
132 if ((slow_rsp_threshold != 0) &&
133 time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
020eec5f 134 (midEntry->command != command)) {
f5942db5
SF
135 /*
136 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
137 * NB: le16_to_cpu returns unsigned so can not be negative below
138 */
433b8dd7
SF
139 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
140 cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
468d6779 141
433b8dd7 142 trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
020eec5f
SF
143 midEntry->when_sent, midEntry->when_received);
144 if (cifsFYI & CIFS_TIMER) {
a0a3036b
JP
145 pr_debug("slow rsp: cmd %d mid %llu",
146 midEntry->command, midEntry->mid);
147 cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
148 now - midEntry->when_alloc,
149 now - midEntry->when_sent,
150 now - midEntry->when_received);
1047abc1
SF
151 }
152 }
153#endif
f1f27ad7 154 put_task_struct(midEntry->creator);
abe57073
PS
155
156 mempool_free(midEntry, cifs_mid_poolp);
157}
158
3c1bf7e4 159void
70f08f91 160delete_mid(struct mid_q_entry *mid)
ddc8cf8f 161{
d7d7a66a 162 spin_lock(&mid->server->mid_lock);
abe57073
PS
163 if (!(mid->mid_flags & MID_DELETED)) {
164 list_del_init(&mid->qhead);
165 mid->mid_flags |= MID_DELETED;
166 }
d7d7a66a 167 spin_unlock(&mid->server->mid_lock);
ddc8cf8f 168
70f08f91 169 release_mid(mid);
ddc8cf8f
JL
170}
171
6f49f46b
JL
172/*
173 * smb_send_kvec - send an array of kvecs to the server
174 * @server: Server to send the data to
3ab3f2a1 175 * @smb_msg: Message to send
6f49f46b
JL
176 * @sent: amount of data sent on socket is stored here
177 *
178 * Our basic "send data to server" function. Should be called with srv_mutex
179 * held. The caller is responsible for handling the results.
180 */
d6e04ae6 181static int
3ab3f2a1
AV
182smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
183 size_t *sent)
1da177e4
LT
184{
185 int rc = 0;
3ab3f2a1 186 int retries = 0;
edf1ae40 187 struct socket *ssocket = server->ssocket;
50c2f753 188
6f49f46b
JL
189 *sent = 0;
190
0496e02d 191 if (server->noblocksnd)
3ab3f2a1 192 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
edf1ae40 193 else
3ab3f2a1 194 smb_msg->msg_flags = MSG_NOSIGNAL;
1da177e4 195
3ab3f2a1 196 while (msg_data_left(smb_msg)) {
6f49f46b
JL
197 /*
198 * If blocking send, we try 3 times, since each can block
199 * for 5 seconds. For nonblocking we have to try more
200 * but wait increasing amounts of time allowing time for
201 * socket to clear. The overall time we wait in either
202 * case to send on the socket is about 15 seconds.
203 * Similarly we wait for 15 seconds for a response from
204 * the server in SendReceive[2] for the server to send
205 * a response back for most types of requests (except
206 * SMB Write past end of file which can be slow, and
207 * blocking lock operations). NFS waits slightly longer
208 * than CIFS, but this can make it take longer for
209 * nonresponsive servers to be detected and 15 seconds
210 * is more than enough time for modern networks to
211 * send a packet. In most cases if we fail to send
212 * after the retries we will kill the socket and
213 * reconnect which may clear the network problem.
214 */
3ab3f2a1 215 rc = sock_sendmsg(ssocket, smb_msg);
ce6c44e4 216 if (rc == -EAGAIN) {
3ab3f2a1
AV
217 retries++;
218 if (retries >= 14 ||
219 (!server->noblocksnd && (retries > 2))) {
afe6f653 220 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
f96637be 221 ssocket);
3ab3f2a1 222 return -EAGAIN;
1da177e4 223 }
3ab3f2a1 224 msleep(1 << retries);
1da177e4
LT
225 continue;
226 }
6f49f46b 227
79a58d1f 228 if (rc < 0)
3ab3f2a1 229 return rc;
6f49f46b 230
79a58d1f 231 if (rc == 0) {
3e84469d
SF
232 /* should never happen, letting socket clear before
233 retrying is our only obvious option here */
afe6f653 234 cifs_server_dbg(VFS, "tcp sent no data\n");
3e84469d
SF
235 msleep(500);
236 continue;
d6e04ae6 237 }
6f49f46b 238
3ab3f2a1
AV
239 /* send was at least partially successful */
240 *sent += rc;
241 retries = 0; /* in case we get ENOSPC on the next send */
1da177e4 242 }
3ab3f2a1 243 return 0;
97bc00b3
JL
244}
245
35e2cc1b 246unsigned long
81f39f95 247smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
a26054d1
JL
248{
249 unsigned int i;
35e2cc1b
PA
250 struct kvec *iov;
251 int nvec;
a26054d1
JL
252 unsigned long buflen = 0;
253
d291e703 254 if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
9789de8b 255 rqst->rq_iov[0].iov_len == 4) {
35e2cc1b
PA
256 iov = &rqst->rq_iov[1];
257 nvec = rqst->rq_nvec - 1;
258 } else {
259 iov = rqst->rq_iov;
260 nvec = rqst->rq_nvec;
261 }
262
a26054d1 263 /* total up iov array first */
35e2cc1b 264 for (i = 0; i < nvec; i++)
a26054d1
JL
265 buflen += iov[i].iov_len;
266
d08089f6 267 buflen += iov_iter_count(&rqst->rq_iter);
a26054d1
JL
268 return buflen;
269}
270
6f49f46b 271static int
07cd952f
RS
272__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
273 struct smb_rqst *rqst)
6f49f46b 274{
d0dc4111 275 int rc;
07cd952f
RS
276 struct kvec *iov;
277 int n_vec;
278 unsigned int send_length = 0;
279 unsigned int i, j;
b30c74c7 280 sigset_t mask, oldmask;
3ab3f2a1 281 size_t total_len = 0, sent, size;
b8eed283 282 struct socket *ssocket = server->ssocket;
bedc8f76 283 struct msghdr smb_msg = {};
c713c877
RS
284 __be32 rfc1002_marker;
285
d0dc4111 286 cifs_in_send_inc(server);
4357d45f
LL
287 if (cifs_rdma_enabled(server)) {
288 /* return -EAGAIN when connecting or reconnecting */
289 rc = -EAGAIN;
290 if (server->smbd_conn)
291 rc = smbd_send(server, num_rqst, rqst);
9762c2d0
LL
292 goto smbd_done;
293 }
afc18a6f 294
d0dc4111 295 rc = -EAGAIN;
ea702b80 296 if (ssocket == NULL)
d0dc4111 297 goto out;
ea702b80 298
d0dc4111 299 rc = -ERESTARTSYS;
214a5ea0 300 if (fatal_signal_pending(current)) {
6988a619 301 cifs_dbg(FYI, "signal pending before send request\n");
d0dc4111 302 goto out;
b30c74c7
PS
303 }
304
d0dc4111 305 rc = 0;
b8eed283 306 /* cork the socket */
db10538a 307 tcp_sock_set_cork(ssocket->sk, true);
b8eed283 308
07cd952f 309 for (j = 0; j < num_rqst; j++)
81f39f95 310 send_length += smb_rqst_len(server, &rqst[j]);
07cd952f
RS
311 rfc1002_marker = cpu_to_be32(send_length);
312
b30c74c7
PS
313 /*
314 * We should not allow signals to interrupt the network send because
315 * any partial send will cause session reconnects thus increasing
316 * latency of system calls and overload a server with unnecessary
317 * requests.
318 */
319
320 sigfillset(&mask);
321 sigprocmask(SIG_BLOCK, &mask, &oldmask);
322
c713c877 323 /* Generate a rfc1002 marker for SMB2+ */
d291e703 324 if (!is_smb1(server)) {
c713c877
RS
325 struct kvec hiov = {
326 .iov_base = &rfc1002_marker,
327 .iov_len = 4
328 };
de4eda9d 329 iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
c713c877
RS
330 rc = smb_send_kvec(server, &smb_msg, &sent);
331 if (rc < 0)
b30c74c7 332 goto unmask;
c713c877
RS
333
334 total_len += sent;
335 send_length += 4;
336 }
337
662bf5bc
PA
338 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
339
07cd952f
RS
340 for (j = 0; j < num_rqst; j++) {
341 iov = rqst[j].rq_iov;
342 n_vec = rqst[j].rq_nvec;
3ab3f2a1 343
07cd952f 344 size = 0;
662bf5bc
PA
345 for (i = 0; i < n_vec; i++) {
346 dump_smb(iov[i].iov_base, iov[i].iov_len);
07cd952f 347 size += iov[i].iov_len;
662bf5bc 348 }
97bc00b3 349
de4eda9d 350 iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);
97bc00b3 351
3ab3f2a1 352 rc = smb_send_kvec(server, &smb_msg, &sent);
97bc00b3 353 if (rc < 0)
b30c74c7 354 goto unmask;
97bc00b3
JL
355
356 total_len += sent;
07cd952f 357
d08089f6
DH
358 if (iov_iter_count(&rqst[j].rq_iter) > 0) {
359 smb_msg.msg_iter = rqst[j].rq_iter;
07cd952f
RS
360 rc = smb_send_kvec(server, &smb_msg, &sent);
361 if (rc < 0)
362 break;
07cd952f
RS
363 total_len += sent;
364 }
d08089f6
DH
365
366}
1da177e4 367
b30c74c7
PS
368unmask:
369 sigprocmask(SIG_SETMASK, &oldmask, NULL);
370
371 /*
372 * If signal is pending but we have already sent the whole packet to
373 * the server we need to return success status to allow a corresponding
374 * mid entry to be kept in the pending requests queue thus allowing
375 * to handle responses from the server by the client.
376 *
377 * If only part of the packet has been sent there is no need to hide
378 * interrupt because the session will be reconnected anyway, so there
379 * won't be any response from the server to handle.
380 */
381
382 if (signal_pending(current) && (total_len != send_length)) {
383 cifs_dbg(FYI, "signal is pending after attempt to send\n");
214a5ea0 384 rc = -ERESTARTSYS;
b30c74c7
PS
385 }
386
b8eed283 387 /* uncork it */
db10538a 388 tcp_sock_set_cork(ssocket->sk, false);
b8eed283 389
c713c877 390 if ((total_len > 0) && (total_len != send_length)) {
f96637be 391 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
c713c877 392 send_length, total_len);
6f49f46b
JL
393 /*
394 * If we have only sent part of an SMB then the next SMB could
395 * be taken as the remainder of this one. We need to kill the
396 * socket so the server throws away the partial SMB
397 */
dca65818 398 cifs_signal_cifsd_for_reconnect(server, false);
bf1fdeb7 399 trace_smb3_partial_send_reconnect(server->CurrentMid,
6d82c27a 400 server->conn_id, server->hostname);
edf1ae40 401 }
9762c2d0 402smbd_done:
a68106a6
SP
403 /*
404 * there's hardly any use for the layers above to know the
405 * actual error code here. All they should do at this point is
406 * to retry the connection and hope it goes away.
407 */
408 if (rc < 0 && rc != -EINTR && rc != -EAGAIN) {
afe6f653 409 cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
f96637be 410 rc);
a68106a6
SP
411 rc = -ECONNABORTED;
412 cifs_signal_cifsd_for_reconnect(server, false);
413 } else if (rc > 0)
1da177e4 414 rc = 0;
d0dc4111
ZX
415out:
416 cifs_in_send_dec(server);
1da177e4
LT
417 return rc;
418}
419
933148a4
PA
420struct send_req_vars {
421 struct smb2_transform_hdr tr_hdr;
422 struct smb_rqst rqst[MAX_COMPOUND];
423 struct kvec iov;
424};
425
6f49f46b 426static int
1f3a8f5f
RS
427smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
428 struct smb_rqst *rqst, int flags)
6f49f46b 429{
933148a4
PA
430 struct send_req_vars *vars;
431 struct smb_rqst *cur_rqst;
432 struct kvec *iov;
7fb8986e
PS
433 int rc;
434
435 if (!(flags & CIFS_TRANSFORM_REQ))
1f3a8f5f
RS
436 return __smb_send_rqst(server, num_rqst, rqst);
437
11d4d1db
PA
438 if (WARN_ON_ONCE(num_rqst > MAX_COMPOUND - 1))
439 return -EIO;
7fb8986e 440
b2c96de7 441 if (!server->ops->init_transform_rq) {
a0a3036b 442 cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
7fb8986e
PS
443 return -EIO;
444 }
6f49f46b 445
933148a4
PA
446 vars = kzalloc(sizeof(*vars), GFP_NOFS);
447 if (!vars)
3946d0d0 448 return -ENOMEM;
933148a4
PA
449 cur_rqst = vars->rqst;
450 iov = &vars->iov;
3946d0d0 451
933148a4
PA
452 iov->iov_base = &vars->tr_hdr;
453 iov->iov_len = sizeof(vars->tr_hdr);
454 cur_rqst[0].rq_iov = iov;
3946d0d0
LL
455 cur_rqst[0].rq_nvec = 1;
456
1f3a8f5f
RS
457 rc = server->ops->init_transform_rq(server, num_rqst + 1,
458 &cur_rqst[0], rqst);
7fb8986e 459 if (rc)
3946d0d0 460 goto out;
7fb8986e 461
1f3a8f5f
RS
462 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
463 smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
3946d0d0 464out:
933148a4 465 kfree(vars);
7fb8986e 466 return rc;
6f49f46b
JL
467}
468
0496e02d
JL
469int
470smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
471 unsigned int smb_buf_length)
472{
738f9de5 473 struct kvec iov[2];
7fb8986e
PS
474 struct smb_rqst rqst = { .rq_iov = iov,
475 .rq_nvec = 2 };
0496e02d 476
738f9de5
PS
477 iov[0].iov_base = smb_buffer;
478 iov[0].iov_len = 4;
479 iov[1].iov_base = (char *)smb_buffer + 4;
480 iov[1].iov_len = smb_buf_length;
0496e02d 481
07cd952f 482 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
483}
484
fc40f9cf 485static int
b227d215 486wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
2b53b929
RS
487 const int timeout, const int flags,
488 unsigned int *instance)
1da177e4 489{
19e88867 490 long rc;
4230cff8
RS
491 int *credits;
492 int optype;
2b53b929 493 long int t;
6d82c27a 494 int scredits, in_flight;
2b53b929
RS
495
496 if (timeout < 0)
497 t = MAX_JIFFY_OFFSET;
498 else
499 t = msecs_to_jiffies(timeout);
4230cff8
RS
500
501 optype = flags & CIFS_OP_MASK;
5bc59498 502
34f4deb7
PS
503 *instance = 0;
504
4230cff8
RS
505 credits = server->ops->get_credits_field(server, optype);
506 /* Since an echo is already inflight, no need to wait to send another */
507 if (*credits <= 0 && optype == CIFS_ECHO_OP)
508 return -EAGAIN;
509
fc40f9cf 510 spin_lock(&server->req_lock);
392e1c5d 511 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
1da177e4 512 /* oplock breaks must not be held up */
fc40f9cf 513 server->in_flight++;
1b63f184
SF
514 if (server->in_flight > server->max_in_flight)
515 server->max_in_flight = server->in_flight;
bc205ed1 516 *credits -= 1;
34f4deb7 517 *instance = server->reconnect_instance;
6d82c27a
SP
518 scredits = *credits;
519 in_flight = server->in_flight;
fc40f9cf 520 spin_unlock(&server->req_lock);
6d82c27a 521
1ddff774 522 trace_smb3_nblk_credits(server->CurrentMid,
6d82c27a
SP
523 server->conn_id, server->hostname, scredits, -1, in_flight);
524 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
525 __func__, 1, scredits);
526
27a97a61
VL
527 return 0;
528 }
529
27a97a61 530 while (1) {
326a8d04
SP
531 spin_unlock(&server->req_lock);
532
533 spin_lock(&server->srv_lock);
534 if (server->tcpStatus == CifsExiting) {
535 spin_unlock(&server->srv_lock);
536 return -ENOENT;
537 }
538 spin_unlock(&server->srv_lock);
539
540 spin_lock(&server->req_lock);
b227d215 541 if (*credits < num_credits) {
6d82c27a 542 scredits = *credits;
fc40f9cf 543 spin_unlock(&server->req_lock);
6d82c27a 544
789e6661 545 cifs_num_waiters_inc(server);
2b53b929
RS
546 rc = wait_event_killable_timeout(server->request_q,
547 has_credits(server, credits, num_credits), t);
789e6661 548 cifs_num_waiters_dec(server);
2b53b929 549 if (!rc) {
6d82c27a
SP
550 spin_lock(&server->req_lock);
551 scredits = *credits;
552 in_flight = server->in_flight;
553 spin_unlock(&server->req_lock);
554
7937ca96 555 trace_smb3_credit_timeout(server->CurrentMid,
6d82c27a
SP
556 server->conn_id, server->hostname, scredits,
557 num_credits, in_flight);
afe6f653 558 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
6d82c27a 559 timeout);
7de03948 560 return -EBUSY;
2b53b929
RS
561 }
562 if (rc == -ERESTARTSYS)
563 return -ERESTARTSYS;
fc40f9cf 564 spin_lock(&server->req_lock);
27a97a61 565 } else {
16b34aa4
RS
566 /*
567 * For normal commands, reserve the last MAX_COMPOUND
568 * credits to compound requests.
569 * Otherwise these compounds could be permanently
570 * starved for credits by single-credit requests.
571 *
572 * To prevent spinning CPU, block this thread until
573 * there are >MAX_COMPOUND credits available.
574 * But only do this is we already have a lot of
575 * credits in flight to avoid triggering this check
576 * for servers that are slow to hand out credits on
577 * new sessions.
578 */
579 if (!optype && num_credits == 1 &&
580 server->in_flight > 2 * MAX_COMPOUND &&
581 *credits <= MAX_COMPOUND) {
582 spin_unlock(&server->req_lock);
6d82c27a 583
16b34aa4 584 cifs_num_waiters_inc(server);
2b53b929
RS
585 rc = wait_event_killable_timeout(
586 server->request_q,
16b34aa4 587 has_credits(server, credits,
2b53b929
RS
588 MAX_COMPOUND + 1),
589 t);
16b34aa4 590 cifs_num_waiters_dec(server);
2b53b929 591 if (!rc) {
6d82c27a
SP
592 spin_lock(&server->req_lock);
593 scredits = *credits;
594 in_flight = server->in_flight;
595 spin_unlock(&server->req_lock);
596
7937ca96 597 trace_smb3_credit_timeout(
6d82c27a
SP
598 server->CurrentMid,
599 server->conn_id, server->hostname,
600 scredits, num_credits, in_flight);
afe6f653 601 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
6d82c27a 602 timeout);
7de03948 603 return -EBUSY;
2b53b929
RS
604 }
605 if (rc == -ERESTARTSYS)
606 return -ERESTARTSYS;
16b34aa4
RS
607 spin_lock(&server->req_lock);
608 continue;
609 }
610
2d86dbc9
PS
611 /*
612 * Can not count locking commands against total
613 * as they are allowed to block on server.
614 */
27a97a61
VL
615
616 /* update # of requests on the wire to server */
4230cff8 617 if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
b227d215
RS
618 *credits -= num_credits;
619 server->in_flight += num_credits;
1b63f184
SF
620 if (server->in_flight > server->max_in_flight)
621 server->max_in_flight = server->in_flight;
34f4deb7 622 *instance = server->reconnect_instance;
2d86dbc9 623 }
6d82c27a
SP
624 scredits = *credits;
625 in_flight = server->in_flight;
fc40f9cf 626 spin_unlock(&server->req_lock);
cd7b699b 627
1ddff774 628 trace_smb3_waitff_credits(server->CurrentMid,
6d82c27a
SP
629 server->conn_id, server->hostname, scredits,
630 -(num_credits), in_flight);
cd7b699b
SP
631 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
632 __func__, num_credits, scredits);
27a97a61 633 break;
1da177e4
LT
634 }
635 }
7ee1af76
JA
636 return 0;
637}
1da177e4 638
/* Convenience wrapper: wait indefinitely for a single credit. */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags, instance);
}
646
257b7809
RS
647static int
648wait_for_compound_request(struct TCP_Server_Info *server, int num,
649 const int flags, unsigned int *instance)
650{
651 int *credits;
6d82c27a 652 int scredits, in_flight;
257b7809
RS
653
654 credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
655
656 spin_lock(&server->req_lock);
cd7b699b 657 scredits = *credits;
6d82c27a 658 in_flight = server->in_flight;
cd7b699b 659
257b7809
RS
660 if (*credits < num) {
661 /*
91792bb8
PS
662 * If the server is tight on resources or just gives us less
663 * credits for other reasons (e.g. requests are coming out of
664 * order and the server delays granting more credits until it
665 * processes a missing mid) and we exhausted most available
666 * credits there may be situations when we try to send
667 * a compound request but we don't have enough credits. At this
668 * point the client needs to decide if it should wait for
669 * additional credits or fail the request. If at least one
670 * request is in flight there is a high probability that the
671 * server will return enough credits to satisfy this compound
672 * request.
673 *
674 * Return immediately if no requests in flight since we will be
675 * stuck on waiting for credits.
257b7809 676 */
91792bb8 677 if (server->in_flight == 0) {
257b7809 678 spin_unlock(&server->req_lock);
cd7b699b 679 trace_smb3_insufficient_credits(server->CurrentMid,
6d82c27a
SP
680 server->conn_id, server->hostname, scredits,
681 num, in_flight);
cd7b699b 682 cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
6d82c27a 683 __func__, in_flight, num, scredits);
7de03948 684 return -EDEADLK;
257b7809
RS
685 }
686 }
687 spin_unlock(&server->req_lock);
688
689 return wait_for_free_credits(server, num, 60000, flags,
690 instance);
691}
692
cb7e9eab
PS
693int
694cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
335b7b62 695 unsigned int *num, struct cifs_credits *credits)
cb7e9eab
PS
696{
697 *num = size;
335b7b62
PS
698 credits->value = 0;
699 credits->instance = server->reconnect_instance;
cb7e9eab
PS
700 return 0;
701}
702
96daf2b0 703static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
7ee1af76
JA
704 struct mid_q_entry **ppmidQ)
705{
d7d7a66a 706 spin_lock(&ses->ses_lock);
dd3cd870 707 if (ses->ses_status == SES_NEW) {
79a58d1f 708 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
080dc5e5 709 (in_buf->Command != SMB_COM_NEGOTIATE)) {
d7d7a66a 710 spin_unlock(&ses->ses_lock);
7ee1af76 711 return -EAGAIN;
080dc5e5 712 }
ad7a2926 713 /* else ok - we are setting up session */
1da177e4 714 }
7f48558e 715
dd3cd870 716 if (ses->ses_status == SES_EXITING) {
7f48558e 717 /* check if SMB session is bad because we are setting it up */
080dc5e5 718 if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
d7d7a66a 719 spin_unlock(&ses->ses_lock);
7f48558e 720 return -EAGAIN;
080dc5e5 721 }
7f48558e
SP
722 /* else ok - we are shutting down session */
723 }
d7d7a66a 724 spin_unlock(&ses->ses_lock);
7f48558e 725
70f08f91 726 *ppmidQ = alloc_mid(in_buf, ses->server);
26f57364 727 if (*ppmidQ == NULL)
7ee1af76 728 return -ENOMEM;
d7d7a66a 729 spin_lock(&ses->server->mid_lock);
ddc8cf8f 730 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
d7d7a66a 731 spin_unlock(&ses->server->mid_lock);
7ee1af76
JA
732 return 0;
733}
734
0ade640e
JL
735static int
736wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
7ee1af76 737{
0ade640e 738 int error;
7ee1af76 739
f5d39b02 740 error = wait_event_state(server->response_q,
d527f513
ZX
741 midQ->mid_state != MID_REQUEST_SUBMITTED &&
742 midQ->mid_state != MID_RESPONSE_RECEIVED,
f5d39b02 743 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
0ade640e
JL
744 if (error < 0)
745 return -ERESTARTSYS;
7ee1af76 746
0ade640e 747 return 0;
7ee1af76
JA
748}
749
fec344e3
JL
750struct mid_q_entry *
751cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
792af7b0
PS
752{
753 int rc;
fec344e3 754 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
755 struct mid_q_entry *mid;
756
738f9de5
PS
757 if (rqst->rq_iov[0].iov_len != 4 ||
758 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
759 return ERR_PTR(-EIO);
760
792af7b0 761 /* enable signing if server requires it */
38d77c50 762 if (server->sign)
792af7b0
PS
763 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
764
70f08f91 765 mid = alloc_mid(hdr, server);
792af7b0 766 if (mid == NULL)
fec344e3 767 return ERR_PTR(-ENOMEM);
792af7b0 768
fec344e3 769 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
ffc61ccb 770 if (rc) {
70f08f91 771 release_mid(mid);
fec344e3 772 return ERR_PTR(rc);
ffc61ccb
SP
773 }
774
fec344e3 775 return mid;
792af7b0 776}
133672ef 777
a6827c18
JL
778/*
779 * Send a SMB request and set the callback function in the mid to handle
780 * the result. Caller is responsible for dealing with timeouts.
781 */
782int
fec344e3 783cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
9b7c18a2 784 mid_receive_t *receive, mid_callback_t *callback,
3349c3a7
PS
785 mid_handle_t *handle, void *cbdata, const int flags,
786 const struct cifs_credits *exist_credits)
a6827c18 787{
480b1cb9 788 int rc;
a6827c18 789 struct mid_q_entry *mid;
335b7b62 790 struct cifs_credits credits = { .value = 0, .instance = 0 };
34f4deb7 791 unsigned int instance;
480b1cb9 792 int optype;
a6827c18 793
a891f0f8
PS
794 optype = flags & CIFS_OP_MASK;
795
cb7e9eab 796 if ((flags & CIFS_HAS_CREDITS) == 0) {
480b1cb9 797 rc = wait_for_free_request(server, flags, &instance);
cb7e9eab
PS
798 if (rc)
799 return rc;
335b7b62 800 credits.value = 1;
34f4deb7 801 credits.instance = instance;
3349c3a7
PS
802 } else
803 instance = exist_credits->instance;
a6827c18 804
cc391b69 805 cifs_server_lock(server);
3349c3a7
PS
806
807 /*
808 * We can't use credits obtained from the previous session to send this
809 * request. Check if there were reconnects after we obtained credits and
810 * return -EAGAIN in such cases to let callers handle it.
811 */
812 if (instance != server->reconnect_instance) {
cc391b69 813 cifs_server_unlock(server);
3349c3a7
PS
814 add_credits_and_wake_if(server, &credits, optype);
815 return -EAGAIN;
816 }
817
fec344e3
JL
818 mid = server->ops->setup_async_request(server, rqst);
819 if (IS_ERR(mid)) {
cc391b69 820 cifs_server_unlock(server);
335b7b62 821 add_credits_and_wake_if(server, &credits, optype);
fec344e3 822 return PTR_ERR(mid);
a6827c18
JL
823 }
824
44d22d84 825 mid->receive = receive;
a6827c18
JL
826 mid->callback = callback;
827 mid->callback_data = cbdata;
9b7c18a2 828 mid->handle = handle;
7c9421e1 829 mid->mid_state = MID_REQUEST_SUBMITTED;
789e6661 830
ffc61ccb 831 /* put it on the pending_mid_q */
d7d7a66a 832 spin_lock(&server->mid_lock);
ffc61ccb 833 list_add_tail(&mid->qhead, &server->pending_mid_q);
d7d7a66a 834 spin_unlock(&server->mid_lock);
ffc61ccb 835
93d2cb6c
LL
836 /*
837 * Need to store the time in mid before calling I/O. For call_async,
838 * I/O response may come back and free the mid entry on another thread.
839 */
840 cifs_save_when_sent(mid);
1f3a8f5f 841 rc = smb_send_rqst(server, 1, rqst, flags);
ad313cb8 842
820962dc 843 if (rc < 0) {
c781af7e 844 revert_current_mid(server, mid->credits);
ad313cb8 845 server->sequence_number -= 2;
70f08f91 846 delete_mid(mid);
820962dc
RV
847 }
848
cc391b69 849 cifs_server_unlock(server);
789e6661 850
ffc61ccb
SP
851 if (rc == 0)
852 return 0;
a6827c18 853
335b7b62 854 add_credits_and_wake_if(server, &credits, optype);
a6827c18
JL
855 return rc;
856}
857
133672ef
SF
858/*
859 *
860 * Send an SMB Request. No response info (other than return code)
861 * needs to be parsed.
862 *
863 * flags indicate the type of request buffer and how long to wait
864 * and whether to log NT STATUS code (error) before mapping it to POSIX error
865 *
866 */
867int
96daf2b0 868SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
792af7b0 869 char *in_buf, int flags)
133672ef
SF
870{
871 int rc;
872 struct kvec iov[1];
da502f7d 873 struct kvec rsp_iov;
133672ef
SF
874 int resp_buf_type;
875
792af7b0
PS
876 iov[0].iov_base = in_buf;
877 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
392e1c5d 878 flags |= CIFS_NO_RSP_BUF;
da502f7d 879 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
f96637be 880 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
90c81e0b 881
133672ef
SF
882 return rc;
883}
884
/*
 * Map the final state of a mid to an errno after its response wait has
 * completed, and drop the submitter's reference for any non-success state.
 *
 * On MID_RESPONSE_READY the mid is left alive and queued so the caller can
 * pull the response buffer from it; every other state releases the mid here
 * (unlinking it from the pending list first if the demultiplex thread has
 * not already marked it MID_DELETED).
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&server->mid_lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_READY:
		/* success: caller will consume resp_buf and delete the mid */
		spin_unlock(&server->mid_lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		/* unexpected state: unlink the mid ourselves if needed */
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		spin_unlock(&server->mid_lock);
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
				__func__, mid->mid, mid->mid_state);
		rc = -EIO;
		goto sync_mid_done;
	}
	spin_unlock(&server->mid_lock);

sync_mid_done:
	release_mid(mid);
	return rc;
}
924
121b046a 925static inline int
fb2036d8
PS
926send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
927 struct mid_q_entry *mid)
76dcc26f 928{
121b046a 929 return server->ops->send_cancel ?
fb2036d8 930 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
931}
932
/*
 * Post-receive validation of the response attached to @mid: dump the
 * beginning of the frame, verify the packet signature when signing is
 * active on this connection, and map the server's SMB status code to a
 * POSIX error.
 *
 * @log_error: when true, log the NT status before mapping it.
 */
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		/* iov[0] is the RFC1002 header, iov[1] the SMB proper */
		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
					rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}
963
/*
 * Allocate and sign a mid for an SMB1 request before it is sent.
 *
 * rq_iov[0] must be exactly the 4 byte RFC1002 length header and
 * rq_iov[1] must start immediately after it in the same buffer.
 * Returns the new mid on success, or an ERR_PTR on failure (the mid is
 * torn down here if signing fails).
 */
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}
986
/*
 * Completion callback used for every part of a compound chain: return
 * the credits granted by this response to the server and advance the
 * mid to its final "ready" state.  Deliberately does NOT wake the
 * sending thread — only the last part of the chain does that (see
 * cifs_compound_last_callback).
 */
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits;

	credits.value = server->ops->get_credits(mid);
	credits.instance = server->reconnect_instance;

	add_credits(server, &credits, mid->optype);

	/* finalize state only if a response was actually received */
	if (mid->mid_state == MID_RESPONSE_RECEIVED)
		mid->mid_state = MID_RESPONSE_READY;
}
1001
/*
 * Callback for the final mid of a compound chain: account credits like
 * every other part, then wake the thread waiting on the responses.
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
1008
/*
 * Callback installed when the sender gave up waiting for the response:
 * account the credits, then drop the reference so the mid is freed
 * instead of waking a thread that is no longer there.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	release_mid(mid);
}
1015
/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 *
 * Selection prefers the channel with the fewest requests in flight,
 * falling back to round-robin when all usable channels are equally
 * loaded.  Channels that are terminating or pending reconnect are
 * skipped.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
	uint index = 0;
	unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
	struct TCP_Server_Info *server = NULL;
	int i;

	if (!ses)
		return NULL;

	spin_lock(&ses->chan_lock);
	for (i = 0; i < ses->chan_count; i++) {
		server = ses->chans[i].server;
		if (!server || server->terminate)
			continue;

		if (CIFS_CHAN_NEEDS_RECONNECT(ses, i))
			continue;

		/*
		 * strictly speaking, we should pick up req_lock to read
		 * server->in_flight. But it shouldn't matter much here if we
		 * race while reading this data. The worst that can happen is
		 * that we could use a channel that's not least loaded. Avoiding
		 * taking the lock could help reduce wait time, which is
		 * important for this function
		 */
		if (server->in_flight < min_in_flight) {
			min_in_flight = server->in_flight;
			index = i;
		}
		if (server->in_flight > max_in_flight)
			max_in_flight = server->in_flight;
	}

	/* if all channels are equally loaded, fall back to round-robin */
	if (min_in_flight == max_in_flight) {
		index = (uint)atomic_inc_return(&ses->chan_seq);
		index %= ses->chan_count;
	}

	server = ses->chans[index].server;
	spin_unlock(&ses->chan_lock);

	return server;
}
1069
/*
 * Send @num_rqst requests (a compound chain for SMB2+, or a single
 * request) on @server and wait for all the responses.
 *
 * One credit per request is obtained up front; the requests are then
 * signed and sent under the server mutex so signing order matches wire
 * order.  On an interrupted wait the affected mids are handed the
 * cancelled callback so the demultiplex thread frees them (and returns
 * their credits) when the responses eventually arrive.
 *
 * On success resp_iov[i]/resp_buf_type[i] describe the i-th response;
 * the caller owns those buffers unless CIFS_NO_RSP_BUF was set.
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	cifs_server_lock(server);

	/*
	 * All the parts of the compound chain belong obtained credits from the
	 * same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* roll back mid ids consumed so far, free earlier mids */
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				delete_mid(midQ[j]);
			cifs_server_unlock(server);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	rc = smb_send_rqst(server, num_rqst, rqst, flags);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* send failed: undo mid id and signing sequence advances */
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	cifs_server_unlock(server);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&ses->ses_lock);

		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		cifs_server_unlock(server);

		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* wait interrupted: hand remaining mids to the demux thread */
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
					midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&server->mid_lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
			    midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&server->mid_lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_READY) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			HEADER_PREAMBLE_SIZE(server);

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&ses->ses_lock);
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		cifs_server_unlock(server);
		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			delete_mid(midQ[i]);
	}

	return rc;
}
1da177e4 1303
/*
 * Convenience wrapper around compound_send_recv() for a single,
 * non-compounded request.
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}
1313
/*
 * SMB1 send/receive entry point taking a caller-built iovec.  Builds a
 * new iovec with a separate iov for the 4 byte RFC1002 length header
 * (split off the front of the caller's iov[0]) and hands off to
 * cifs_send_recv().
 */
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	/* small vectors use the on-stack array; bigger ones allocate */
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, ses->server,
			    &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}
1352
/*
 * Synchronously send a single SMB1 request from @in_buf and copy the
 * complete response into @out_buf; *pbytes_returned is set to the
 * response length excluding the 4 byte RFC1002 header.
 *
 * Takes one credit and returns it on every exit path except an
 * interrupted wait, where the mid's callback (release_mid) disposes of
 * the mid once the response eventually arrives.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	/* send failed: undo the signing sequence number advance */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&server->mid_lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
		    midQ->mid_state == MID_RESPONSE_RECEIVED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = release_mid;
			spin_unlock(&server->mid_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&server->mid_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_READY) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
1da177e4 1467
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	/* the cancel is a new request, so it needs a fresh mid */
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}
1492
/*
 * Send a blocking SMB1 lock request and wait, interruptibly, for the
 * response.  If the wait is interrupted by a signal, a cancel is sent
 * (NT_CANCEL for POSIX/TRANSACTION2 locks, LOCKINGX_CANCEL_LOCK for
 * Windows locks) and we wait again for the server's answer; after a
 * successful cancel, -EACCES is converted to -ERESTARTSYS so the
 * system call restarts.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	/* send failed: undo the signing sequence number advance */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
		   midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED ||
		 midQ->mid_state == MID_RESPONSE_RECEIVED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&server->mid_lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
			    midQ->mid_state == MID_RESPONSE_RECEIVED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = release_mid;
				spin_unlock(&server->mid_lock);
				return rc;
			}
			spin_unlock(&server->mid_lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
fb157ed2
SF
1652
1653/*
1654 * Discard any remaining data in the current SMB. To do this, we borrow the
1655 * current bigbuf.
1656 */
1657int
1658cifs_discard_remaining_data(struct TCP_Server_Info *server)
1659{
1660 unsigned int rfclen = server->pdu_size;
d08089f6 1661 size_t remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
fb157ed2
SF
1662 server->total_read;
1663
1664 while (remaining > 0) {
d08089f6 1665 ssize_t length;
fb157ed2
SF
1666
1667 length = cifs_discard_from_socket(server,
1668 min_t(size_t, remaining,
1669 CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
1670 if (length < 0)
1671 return length;
1672 server->total_read += length;
1673 remaining -= length;
1674 }
1675
1676 return 0;
1677}
1678
1679static int
1680__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
1681 bool malformed)
1682{
1683 int length;
1684
1685 length = cifs_discard_remaining_data(server);
1686 dequeue_mid(mid, malformed);
1687 mid->resp_buf = server->smallbuf;
1688 server->smallbuf = NULL;
1689 return length;
1690}
1691
1692static int
1693cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1694{
1695 struct cifs_readdata *rdata = mid->callback_data;
1696
1697 return __cifs_readv_discard(server, mid, rdata->result);
1698}
1699
/*
 * Receive handler for the response to an async read.  Pulls the rest of
 * the READ_RSP header into the small buffer, validates it, then reads
 * the payload into rdata->iter (unless SMB-Direct already placed it via
 * RDMA).  Malformed or short frames are routed through the discard
 * helpers so the transport stream stays in sync.
 */
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_readdata *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
	bool use_rdma_mr = false;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
		 __func__, mid->mid, rdata->offset, rdata->bytes);

	/*
	 * read the rest of READ_RSP header (sans Data array), or whatever we
	 * can if there's not enough data. At this point, we've read down to
	 * the Mid.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
	      HEADER_SIZE(server) + 1;

	length = cifs_read_from_socket(server,
				       buf + HEADER_SIZE(server) - 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server)) {
		/* interim response: drop this frame, real one comes later */
		cifs_discard_remaining_data(server);
		return -1;
	}

	/* set up first two iov for signature check and to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_len =
		server->total_read - HEADER_PREAMBLE_SIZE(server);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	/* Was the SMB read successful? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		return __cifs_readv_discard(server, mid, false);
	}

	/* Is there enough to get to the rest of the READ_RSP header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	data_offset = server->ops->read_data_offset(buf) +
		HEADER_PREAMBLE_SIZE(server);
	if (data_offset < server->total_read) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	len = data_offset - server->total_read;
	if (len > 0) {
		/* read any junk before data into the rest of smallbuf */
		length = cifs_read_from_socket(server,
					       buf + server->total_read, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);
	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
		/* data_len is corrupt -- discard frame */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr)
		length = data_len; /* An RDMA read is already done. */
	else
#endif
		length = cifs_read_iter_from_socket(server, &rdata->iter,
						    data_len);
	if (length > 0)
		rdata->got_bytes += length;
	server->total_read += length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* discard anything left over */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

	dequeue_mid(mid, false);
	/* hand the small buffer to the mid as its response buffer */
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}