// SPDX-License-Identifier: LGPL-2.1
/*
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org) 2006.
 *
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

static struct mid_q_entry *
alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "%s: null TCP session\n", __func__);
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent); */ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&mid_count);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

static void __release_mid(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&mid_count);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * Commands taking longer than one second (default) can be an
	 * indication that something is wrong, unless it is quite a slow
	 * link or a very busy server. Note that this calc is unlikely to
	 * wrap as long as slow_rsp_threshold is not set way above the
	 * recommended maximum (32767, i.e. about 9 hours), and is generally
	 * harmless even if wrong since it only affects debug counters -
	 * so leave the calc as a simple comparison rather than doing
	 * multiple conversions and overflow checks.
	 */
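	/*
	 * Worked example (illustrative, assuming the default
	 * slow_rsp_threshold of 1 and HZ <= 1000): the check below fires
	 * for responses that took longer than 1 * HZ jiffies, i.e. about
	 * one second; even at the documented maximum of 32767 the product
	 * 32767 * HZ is only ~32.7 million, far below ULONG_MAX.
	 */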
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}

void release_mid(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;

	spin_lock(&server->mid_lock);
	kref_put(&mid->refcount, __release_mid);
	spin_unlock(&server->mid_lock);
}

void
delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&mid->server->mid_lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&mid->server->mid_lock);

	release_mid(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
						ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/*
			 * Should never happen; letting the socket clear
			 * before retrying is our only obvious option here.
			 */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}
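/*
 * Worked example of the retry logic above (illustrative): with
 * server->noblocksnd set, a send that keeps returning -EAGAIN sleeps
 * 2, 4, 8, ... 8192 ms (msleep(1 << retries) for retries 1..13) and
 * gives up on the 14th retry; those sleeps sum to roughly 16 seconds,
 * which is where the "stuck for 15 seconds" message comes from. In
 * blocking mode we give up after 3 attempts, each of which can itself
 * block in the TCP stack for several seconds.
 */
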
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
	    rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	buflen += iov_iter_count(&rqst->rq_iter);
	return buflen;
}

static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg = {};
	__be32 rfc1002_marker;

	cifs_in_send_inc(server);
	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	rc = -EAGAIN;
	if (ssocket == NULL)
		goto out;

	rc = -ERESTARTSYS;
	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		goto out;
	}

	rc = 0;
	/* cork the socket */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (!is_smb1(server)) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		if (iov_iter_count(&rqst[j].rq_iter) > 0) {
			smb_msg.msg_iter = rqst[j].rq_iter;
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;
			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);
	/*
	 * If a signal is pending but we have already sent the whole packet
	 * to the server, we need to return success status to allow the
	 * corresponding mid entry to be kept in the pending requests queue,
	 * thus allowing the client to handle responses from the server.
	 *
	 * If only part of the packet has been sent, there is no need to
	 * hide the interrupt because the session will be reconnected
	 * anyway, so there won't be any response from the server to handle.
	 */
	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB.
		 */
		cifs_signal_cifsd_for_reconnect(server, false);
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
				rc);
	else if (rc > 0)
		rc = 0;
out:
	cifs_in_send_dec(server);
	return rc;
}

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}
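/*
 * Layout handed to __smb_send_rqst() when CIFS_TRANSFORM_REQ is set
 * (illustrative sketch for num_rqst == 2):
 *
 *	cur_rqst[0]: { tr_hdr, sizeof(*tr_hdr) }   <- smb2_transform_hdr
 *	cur_rqst[1]: transformed copy of rqst[0]
 *	cur_rqst[2]: transformed copy of rqst[1]
 *
 * init_transform_rq() fills cur_rqst[1..num_rqst] with the encrypted
 * payload and smb3_free_compound_rqst() later releases those copies.
 */
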
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_nblk_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				__func__, 1, scredits);

		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->CurrentMid,
						server->conn_id, server->hostname, scredits,
						num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			spin_unlock(&server->req_lock);

			spin_lock(&server->srv_lock);
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->srv_lock);
				return -ENOENT;
			}
			spin_unlock(&server->srv_lock);

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits for compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			spin_lock(&server->req_lock);
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
						server->CurrentMid,
						server->conn_id, server->hostname,
						scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_waitff_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					-(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
					__func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}
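/*
 * Worked example of the MAX_COMPOUND reservation above (hypothetical
 * numbers): if MAX_COMPOUND were 5 and twelve single-credit requests
 * are in flight (> 2 * MAX_COMPOUND) while only four credits remain
 * (<= MAX_COMPOUND), a new single-credit request blocks until more
 * than MAX_COMPOUND credits are free, keeping the last few credits
 * available for compound (multi-request) callers.
 */
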
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
RS
654 if (*credits < num) {
655 /*
91792bb8
PS
656 * If the server is tight on resources or just gives us less
657 * credits for other reasons (e.g. requests are coming out of
658 * order and the server delays granting more credits until it
659 * processes a missing mid) and we exhausted most available
660 * credits there may be situations when we try to send
661 * a compound request but we don't have enough credits. At this
662 * point the client needs to decide if it should wait for
663 * additional credits or fail the request. If at least one
664 * request is in flight there is a high probability that the
665 * server will return enough credits to satisfy this compound
666 * request.
667 *
668 * Return immediately if no requests in flight since we will be
669 * stuck on waiting for credits.
257b7809 670 */
91792bb8 671 if (server->in_flight == 0) {
257b7809 672 spin_unlock(&server->req_lock);
cd7b699b 673 trace_smb3_insufficient_credits(server->CurrentMid,
6d82c27a
SP
674 server->conn_id, server->hostname, scredits,
675 num, in_flight);
cd7b699b 676 cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
6d82c27a 677 __func__, in_flight, num, scredits);
7de03948 678 return -EDEADLK;
257b7809
RS
679 }
680 }
681 spin_unlock(&server->req_lock);
682
683 return wait_for_free_credits(server, num, 60000, flags,
684 instance);
685}
686
cb7e9eab
PS
687int
688cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
335b7b62 689 unsigned int *num, struct cifs_credits *credits)
cb7e9eab
PS
690{
691 *num = size;
335b7b62
PS
692 credits->value = 0;
693 credits->instance = server->reconnect_instance;
cb7e9eab
PS
694 return 0;
695}
696
96daf2b0 697static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
7ee1af76
JA
698 struct mid_q_entry **ppmidQ)
699{
d7d7a66a 700 spin_lock(&ses->ses_lock);
dd3cd870 701 if (ses->ses_status == SES_NEW) {
79a58d1f 702 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
080dc5e5 703 (in_buf->Command != SMB_COM_NEGOTIATE)) {
d7d7a66a 704 spin_unlock(&ses->ses_lock);
7ee1af76 705 return -EAGAIN;
080dc5e5 706 }
ad7a2926 707 /* else ok - we are setting up session */
1da177e4 708 }
7f48558e 709
dd3cd870 710 if (ses->ses_status == SES_EXITING) {
7f48558e 711 /* check if SMB session is bad because we are setting it up */
080dc5e5 712 if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
d7d7a66a 713 spin_unlock(&ses->ses_lock);
7f48558e 714 return -EAGAIN;
080dc5e5 715 }
7f48558e
SP
716 /* else ok - we are shutting down session */
717 }
d7d7a66a 718 spin_unlock(&ses->ses_lock);
7f48558e 719
70f08f91 720 *ppmidQ = alloc_mid(in_buf, ses->server);
26f57364 721 if (*ppmidQ == NULL)
7ee1af76 722 return -ENOMEM;
d7d7a66a 723 spin_lock(&ses->server->mid_lock);
ddc8cf8f 724 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
d7d7a66a 725 spin_unlock(&ses->server->mid_lock);
7ee1af76
JA
726 return 0;
727}
728
0ade640e
JL
729static int
730wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
7ee1af76 731{
0ade640e 732 int error;
7ee1af76 733
f5d39b02
PZ
734 error = wait_event_state(server->response_q,
735 midQ->mid_state != MID_REQUEST_SUBMITTED,
736 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
0ade640e
JL
737 if (error < 0)
738 return -ERESTARTSYS;
7ee1af76 739
0ade640e 740 return 0;
7ee1af76
JA
741}
742
fec344e3
JL
743struct mid_q_entry *
744cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
792af7b0
PS
745{
746 int rc;
fec344e3 747 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
748 struct mid_q_entry *mid;
749
738f9de5
PS
750 if (rqst->rq_iov[0].iov_len != 4 ||
751 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
752 return ERR_PTR(-EIO);
753
792af7b0 754 /* enable signing if server requires it */
38d77c50 755 if (server->sign)
792af7b0
PS
756 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
757
70f08f91 758 mid = alloc_mid(hdr, server);
792af7b0 759 if (mid == NULL)
fec344e3 760 return ERR_PTR(-ENOMEM);
792af7b0 761
fec344e3 762 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
ffc61ccb 763 if (rc) {
70f08f91 764 release_mid(mid);
fec344e3 765 return ERR_PTR(rc);
ffc61ccb
SP
766 }
767
fec344e3 768 return mid;
792af7b0 769}
133672ef 770
a6827c18
JL
771/*
772 * Send a SMB request and set the callback function in the mid to handle
773 * the result. Caller is responsible for dealing with timeouts.
774 */
775int
fec344e3 776cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
9b7c18a2 777 mid_receive_t *receive, mid_callback_t *callback,
3349c3a7
PS
778 mid_handle_t *handle, void *cbdata, const int flags,
779 const struct cifs_credits *exist_credits)
a6827c18 780{
480b1cb9 781 int rc;
a6827c18 782 struct mid_q_entry *mid;
335b7b62 783 struct cifs_credits credits = { .value = 0, .instance = 0 };
34f4deb7 784 unsigned int instance;
480b1cb9 785 int optype;
a6827c18 786
a891f0f8
PS
787 optype = flags & CIFS_OP_MASK;
788
cb7e9eab 789 if ((flags & CIFS_HAS_CREDITS) == 0) {
480b1cb9 790 rc = wait_for_free_request(server, flags, &instance);
cb7e9eab
PS
791 if (rc)
792 return rc;
335b7b62 793 credits.value = 1;
34f4deb7 794 credits.instance = instance;
3349c3a7
PS
795 } else
796 instance = exist_credits->instance;
a6827c18 797
cc391b69 798 cifs_server_lock(server);
3349c3a7
PS
799
800 /*
801 * We can't use credits obtained from the previous session to send this
802 * request. Check if there were reconnects after we obtained credits and
803 * return -EAGAIN in such cases to let callers handle it.
804 */
805 if (instance != server->reconnect_instance) {
cc391b69 806 cifs_server_unlock(server);
3349c3a7
PS
807 add_credits_and_wake_if(server, &credits, optype);
808 return -EAGAIN;
809 }
810
fec344e3
JL
811 mid = server->ops->setup_async_request(server, rqst);
812 if (IS_ERR(mid)) {
cc391b69 813 cifs_server_unlock(server);
335b7b62 814 add_credits_and_wake_if(server, &credits, optype);
fec344e3 815 return PTR_ERR(mid);
a6827c18
JL
816 }
817
44d22d84 818 mid->receive = receive;
a6827c18
JL
819 mid->callback = callback;
820 mid->callback_data = cbdata;
9b7c18a2 821 mid->handle = handle;
7c9421e1 822 mid->mid_state = MID_REQUEST_SUBMITTED;
789e6661 823
ffc61ccb 824 /* put it on the pending_mid_q */
d7d7a66a 825 spin_lock(&server->mid_lock);
ffc61ccb 826 list_add_tail(&mid->qhead, &server->pending_mid_q);
d7d7a66a 827 spin_unlock(&server->mid_lock);
ffc61ccb 828
93d2cb6c
LL
829 /*
830 * Need to store the time in mid before calling I/O. For call_async,
831 * I/O response may come back and free the mid entry on another thread.
832 */
833 cifs_save_when_sent(mid);
1f3a8f5f 834 rc = smb_send_rqst(server, 1, rqst, flags);
ad313cb8 835
820962dc 836 if (rc < 0) {
c781af7e 837 revert_current_mid(server, mid->credits);
ad313cb8 838 server->sequence_number -= 2;
70f08f91 839 delete_mid(mid);
820962dc
RV
840 }
841
cc391b69 842 cifs_server_unlock(server);
789e6661 843
ffc61ccb
SP
844 if (rc == 0)
845 return 0;
a6827c18 846
335b7b62 847 add_credits_and_wake_if(server, &credits, optype);
a6827c18
JL
848 return rc;
849}
850
133672ef
SF
851/*
852 *
853 * Send an SMB Request. No response info (other than return code)
854 * needs to be parsed.
855 *
856 * flags indicate the type of request buffer and how long to wait
857 * and whether to log NT STATUS code (error) before mapping it to POSIX error
858 *
859 */
860int
96daf2b0 861SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
792af7b0 862 char *in_buf, int flags)
133672ef
SF
863{
864 int rc;
865 struct kvec iov[1];
da502f7d 866 struct kvec rsp_iov;
133672ef
SF
867 int resp_buf_type;
868
792af7b0
PS
869 iov[0].iov_base = in_buf;
870 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
392e1c5d 871 flags |= CIFS_NO_RSP_BUF;
da502f7d 872 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
f96637be 873 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
90c81e0b 874
133672ef
SF
875 return rc;
876}
877
053d5034 878static int
3c1105df 879cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
053d5034
JL
880{
881 int rc = 0;
882
f96637be
JP
883 cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
884 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
053d5034 885
d7d7a66a 886 spin_lock(&server->mid_lock);
7c9421e1 887 switch (mid->mid_state) {
74dd92a8 888 case MID_RESPONSE_RECEIVED:
d7d7a66a 889 spin_unlock(&server->mid_lock);
053d5034 890 return rc;
74dd92a8
JL
891 case MID_RETRY_NEEDED:
892 rc = -EAGAIN;
893 break;
71823baf
JL
894 case MID_RESPONSE_MALFORMED:
895 rc = -EIO;
896 break;
3c1105df
JL
897 case MID_SHUTDOWN:
898 rc = -EHOSTDOWN;
899 break;
74dd92a8 900 default:
abe57073
PS
901 if (!(mid->mid_flags & MID_DELETED)) {
902 list_del_init(&mid->qhead);
903 mid->mid_flags |= MID_DELETED;
904 }
afe6f653 905 cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
f96637be 906 __func__, mid->mid, mid->mid_state);
74dd92a8 907 rc = -EIO;
053d5034 908 }
d7d7a66a 909 spin_unlock(&server->mid_lock);
053d5034 910
70f08f91 911 release_mid(mid);
053d5034
JL
912 return rc;
913}
914
121b046a 915static inline int
fb2036d8
PS
916send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
917 struct mid_q_entry *mid)
76dcc26f 918{
121b046a 919 return server->ops->send_cancel ?
fb2036d8 920 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
921}
922
2c8f981d
JL
923int
924cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
925 bool log_error)
926{
792af7b0 927 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
826a95e4
JL
928
929 dump_smb(mid->resp_buf, min_t(u32, 92, len));
2c8f981d
JL
930
931 /* convert the length into a more usable form */
38d77c50 932 if (server->sign) {
738f9de5 933 struct kvec iov[2];
985e4ff0 934 int rc = 0;
738f9de5
PS
935 struct smb_rqst rqst = { .rq_iov = iov,
936 .rq_nvec = 2 };
826a95e4 937
738f9de5
PS
938 iov[0].iov_base = mid->resp_buf;
939 iov[0].iov_len = 4;
940 iov[1].iov_base = (char *)mid->resp_buf + 4;
941 iov[1].iov_len = len - 4;
2c8f981d 942 /* FIXME: add code to kill session */
bf5ea0e2 943 rc = cifs_verify_signature(&rqst, server,
0124cc45 944 mid->sequence_number);
985e4ff0 945 if (rc)
afe6f653 946 cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
f96637be 947 rc);
2c8f981d
JL
948 }
949
950 /* BB special case reconnect tid and uid here? */
a3713ec3 951 return map_and_check_smb_error(mid, log_error);
2c8f981d
JL
952}
953
fec344e3 954struct mid_q_entry *
f780bd3f
AA
955cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
956 struct smb_rqst *rqst)
792af7b0
PS
957{
958 int rc;
fec344e3 959 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
960 struct mid_q_entry *mid;
961
738f9de5
PS
962 if (rqst->rq_iov[0].iov_len != 4 ||
963 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
964 return ERR_PTR(-EIO);
965
792af7b0
PS
966 rc = allocate_mid(ses, hdr, &mid);
967 if (rc)
fec344e3
JL
968 return ERR_PTR(rc);
969 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
970 if (rc) {
70f08f91 971 delete_mid(mid);
fec344e3
JL
972 return ERR_PTR(rc);
973 }
974 return mid;
792af7b0
PS
975}
976
4e34feb5 977static void
ee258d79 978cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
979{
980 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
981 struct cifs_credits credits;
982
983 credits.value = server->ops->get_credits(mid);
984 credits.instance = server->reconnect_instance;
8a26f0f7 985
34f4deb7 986 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
987}
988
ee258d79
PS
989static void
990cifs_compound_last_callback(struct mid_q_entry *mid)
991{
992 cifs_compound_callback(mid);
993 cifs_wake_up_task(mid);
994}
995
996static void
997cifs_cancelled_callback(struct mid_q_entry *mid)
998{
999 cifs_compound_callback(mid);
70f08f91 1000 release_mid(mid);
ee258d79
PS
1001}
1002
5f68ea4a
AA
1003/*
1004 * Return a channel (master if none) of @ses that can be used to send
1005 * regular requests.
1006 *
1007 * If we are currently binding a new channel (negprot/sess.setup),
1008 * return the new incomplete channel.
1009 */
1010struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1011{
1012 uint index = 0;
ea90708d
SP
1013 unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
1014 struct TCP_Server_Info *server = NULL;
1015 int i;
5f68ea4a
AA
1016
1017 if (!ses)
1018 return NULL;
1019
88b024f5 1020 spin_lock(&ses->chan_lock);
ea90708d
SP
1021 for (i = 0; i < ses->chan_count; i++) {
1022 server = ses->chans[i].server;
1023 if (!server)
1024 continue;
1025
1026 /*
1027 * strictly speaking, we should pick up req_lock to read
1028 * server->in_flight. But it shouldn't matter much here if we
1029 * race while reading this data. The worst that can happen is
1030 * that we could use a channel that's not least loaded. Avoiding
1031 * taking the lock could help reduce wait time, which is
1032 * important for this function
1033 */
1034 if (server->in_flight < min_in_flight) {
1035 min_in_flight = server->in_flight;
1036 index = i;
1037 }
1038 if (server->in_flight > max_in_flight)
1039 max_in_flight = server->in_flight;
1040 }
1041
1042 /* if all channels are equally loaded, fall back to round-robin */
1043 if (min_in_flight == max_in_flight) {
1044 index = (uint)atomic_inc_return(&ses->chan_seq);
1045 index %= ses->chan_count;
1046 }
88b024f5 1047 spin_unlock(&ses->chan_lock);
f486ef8e
SP
1048
1049 return ses->chans[index].server;
5f68ea4a
AA
1050}
1051
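/*
 * Typical caller pattern (illustrative sketch; error handling elided):
 *
 *	server = cifs_pick_channel(ses);
 *	rc = compound_send_recv(xid, ses, server, flags,
 *				num_rqst, rqst, resp_buf_type, resp_iov);
 */
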
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	cifs_server_lock(server);

	/*
	 * All the parts of the compound chain must obtain credits from the
	 * same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				delete_mid(midQ[j]);
			cifs_server_unlock(server);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	rc = smb_send_rqst(server, num_rqst, rqst, flags);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	cifs_server_unlock(server);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&ses->ses_lock);

		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		cifs_server_unlock(server);

		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
					midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&server->mid_lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&server->mid_lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			HEADER_PREAMBLE_SIZE(server);

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&ses->ses_lock);
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		cifs_server_unlock(server);
		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			delete_mid(midQ[i]);
	}

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}
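/*
 * Minimal single-request caller (illustrative sketch): iov[0] must hold
 * the 4-byte RFC1002 length and iov[1] the packet body, as
 * cifs_setup_request() expects:
 *
 *	struct kvec iov[2], rsp_iov;
 *	struct smb_rqst rqst = { .rq_iov = iov, .rq_nvec = 2 };
 *	int buf_type = CIFS_NO_BUFFER;
 *
 *	rc = cifs_send_recv(xid, ses, ses->server, &rqst, &buf_type,
 *			    0, &rsp_iov);
 */
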
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, ses->server,
			    &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of smb data.
	 */
	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&server->mid_lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = release_mid;
			spin_unlock(&server->mid_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&server->mid_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/*
	 * We just modify the current in_buf to change
	 * the type of lock from LOCKING_ANDX_SHARED_LOCK
	 * or LOCKING_ANDX_EXCLUSIVE_LOCK to
	 * LOCKING_ANDX_CANCEL_LOCK.
	 */
	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of smb data.
	 */
	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((server->tcpStatus == CifsGood) ||
	     (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/*
			 * POSIX lock. We send a NT_CANCEL SMB to cause the
			 * blocking lock to return.
			 */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/*
			 * Windows lock. We send a LOCKINGX_CANCEL_LOCK
			 * to cause the blocking lock to return.
			 */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/*
			 * If we get -ENOLCK back the lock may have
			 * already been removed. Don't exit in this case.
			 */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&server->mid_lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = release_mid;
				spin_unlock(&server->mid_lock);
				return rc;
			}
			spin_unlock(&server->mid_lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}

/*
 * Discard any remaining data in the current SMB. To do this, we borrow the
 * current bigbuf.
 */
int
cifs_discard_remaining_data(struct TCP_Server_Info *server)
{
	unsigned int rfclen = server->pdu_size;
	size_t remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
		server->total_read;

	while (remaining > 0) {
		ssize_t length;

		length = cifs_discard_from_socket(server,
				min_t(size_t, remaining,
				      CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
		if (length < 0)
			return length;
		server->total_read += length;
		remaining -= length;
	}

	return 0;
}
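/*
 * Worked example (hypothetical numbers): if the server framed a
 * 70000-byte PDU on a connection with a 4-byte RFC1002 preamble and
 * 4096 bytes have been read so far, remaining starts at
 * 70000 + 4 - 4096 bytes and is consumed in socket reads of at most
 * CIFSMaxBufSize + MAX_HEADER_SIZE(server) bytes each.
 */
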
static int
__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		     bool malformed)
{
	int length;

	length = cifs_discard_remaining_data(server);
	dequeue_mid(mid, malformed);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}

static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	struct cifs_readdata *rdata = mid->callback_data;

	return __cifs_readv_discard(server, mid, rdata->result);
}

int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_readdata *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
	bool use_rdma_mr = false;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
		 __func__, mid->mid, rdata->offset, rdata->bytes);

	/*
	 * read the rest of READ_RSP header (sans Data array), or whatever we
	 * can if there's not enough data. At this point, we've read down to
	 * the Mid.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
							HEADER_SIZE(server) + 1;

	length = cifs_read_from_socket(server,
				       buf + HEADER_SIZE(server) - 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server)) {
		cifs_discard_remaining_data(server);
		return -1;
	}

	/* set up first two iov for signature check and to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_len =
		server->total_read - HEADER_PREAMBLE_SIZE(server);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	/* Was the SMB read successful? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		return __cifs_readv_discard(server, mid, false);
	}

	/* Is there enough to get to the rest of the READ_RSP header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	data_offset = server->ops->read_data_offset(buf) +
		HEADER_PREAMBLE_SIZE(server);
	if (data_offset < server->total_read) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	len = data_offset - server->total_read;
	if (len > 0) {
		/* read any junk before data into the rest of smallbuf */
		length = cifs_read_from_socket(server,
					       buf + server->total_read, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);
	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
		/* data_len is corrupt -- discard frame */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr)
		length = data_len; /* An RDMA read is already done. */
	else
#endif
		length = cifs_read_iter_from_socket(server, &rdata->iter,
						    data_len);
	if (length > 0)
		rdata->got_bytes += length;
	server->total_read += length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* discard anything left over */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

	dequeue_mid(mid, false);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}