/*
 * fs/cifs/transport.c (linux-block.git)
 * cifs: DIO to/from KVEC-type iterators should now work
 */
929be906 1// SPDX-License-Identifier: LGPL-2.1
1da177e4 2/*
1da177e4 3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 */
9
10#include <linux/fs.h>
11#include <linux/list.h>
5a0e3ad6 12#include <linux/gfp.h>
1da177e4
LT
13#include <linux/wait.h>
14#include <linux/net.h>
15#include <linux/delay.h>
f06ac72e 16#include <linux/freezer.h>
b8eed283 17#include <linux/tcp.h>
2f8b5444 18#include <linux/bvec.h>
97bc00b3 19#include <linux/highmem.h>
7c0f6ba6 20#include <linux/uaccess.h>
1da177e4
LT
21#include <asm/processor.h>
22#include <linux/mempool.h>
14e25977 23#include <linux/sched/signal.h>
fb157ed2 24#include <linux/task_io_accounting_ops.h>
1da177e4
LT
25#include "cifspdu.h"
26#include "cifsglob.h"
27#include "cifsproto.h"
28#include "cifs_debug.h"
8bd68c6e 29#include "smb2proto.h"
9762c2d0 30#include "smbdirect.h"
50c2f753 31
3cecf486
RS
32/* Max number of iovectors we can use off the stack when sending requests. */
33#define CIFS_MAX_IOV_SIZE 8
34
2dc7e1c0
PS
35void
36cifs_wake_up_task(struct mid_q_entry *mid)
2b84a36c
JL
37{
38 wake_up_process(mid->callback_data);
39}
40
ea75a78c 41static struct mid_q_entry *
70f08f91 42alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
1da177e4
LT
43{
44 struct mid_q_entry *temp;
45
24b9b06b 46 if (server == NULL) {
70f08f91 47 cifs_dbg(VFS, "%s: null TCP session\n", __func__);
1da177e4
LT
48 return NULL;
49 }
50c2f753 50
232087cb 51 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
a6f74e80 52 memset(temp, 0, sizeof(struct mid_q_entry));
696e420b 53 kref_init(&temp->refcount);
a6f74e80
N
54 temp->mid = get_mid(smb_buffer);
55 temp->pid = current->pid;
56 temp->command = cpu_to_le16(smb_buffer->Command);
57 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
1047abc1 58 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
a6f74e80
N
59 /* when mid allocated can be before when sent */
60 temp->when_alloc = jiffies;
61 temp->server = server;
2b84a36c 62
a6f74e80
N
63 /*
64 * The default is for the mid to be synchronous, so the
65 * default callback just wakes up the current task.
66 */
f1f27ad7
VW
67 get_task_struct(current);
68 temp->creator = current;
a6f74e80
N
69 temp->callback = cifs_wake_up_task;
70 temp->callback_data = current;
1da177e4 71
c2c17ddb 72 atomic_inc(&mid_count);
7c9421e1 73 temp->mid_state = MID_REQUEST_ALLOCATED;
1da177e4
LT
74 return temp;
75}
76
70f08f91 77static void __release_mid(struct kref *refcount)
696e420b 78{
abe57073
PS
79 struct mid_q_entry *midEntry =
80 container_of(refcount, struct mid_q_entry, refcount);
1047abc1 81#ifdef CONFIG_CIFS_STATS2
2dc7e1c0 82 __le16 command = midEntry->server->vals->lock_cmd;
433b8dd7 83 __u16 smb_cmd = le16_to_cpu(midEntry->command);
1047abc1 84 unsigned long now;
433b8dd7 85 unsigned long roundtrip_time;
1047abc1 86#endif
7b71843f
PS
87 struct TCP_Server_Info *server = midEntry->server;
88
89 if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
90 midEntry->mid_state == MID_RESPONSE_RECEIVED &&
91 server->ops->handle_cancelled_mid)
04ad69c3 92 server->ops->handle_cancelled_mid(midEntry, server);
7b71843f 93
7c9421e1 94 midEntry->mid_state = MID_FREE;
c2c17ddb 95 atomic_dec(&mid_count);
7c9421e1 96 if (midEntry->large_buf)
b8643e1b
SF
97 cifs_buf_release(midEntry->resp_buf);
98 else
99 cifs_small_buf_release(midEntry->resp_buf);
1047abc1
SF
100#ifdef CONFIG_CIFS_STATS2
101 now = jiffies;
433b8dd7 102 if (now < midEntry->when_alloc)
a0a3036b 103 cifs_server_dbg(VFS, "Invalid mid allocation time\n");
433b8dd7
SF
104 roundtrip_time = now - midEntry->when_alloc;
105
106 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
107 if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
108 server->slowest_cmd[smb_cmd] = roundtrip_time;
109 server->fastest_cmd[smb_cmd] = roundtrip_time;
110 } else {
111 if (server->slowest_cmd[smb_cmd] < roundtrip_time)
112 server->slowest_cmd[smb_cmd] = roundtrip_time;
113 else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
114 server->fastest_cmd[smb_cmd] = roundtrip_time;
115 }
116 cifs_stats_inc(&server->num_cmds[smb_cmd]);
117 server->time_per_cmd[smb_cmd] += roundtrip_time;
118 }
00778e22
SF
119 /*
120 * commands taking longer than one second (default) can be indications
121 * that something is wrong, unless it is quite a slow link or a very
122 * busy server. Note that this calc is unlikely or impossible to wrap
123 * as long as slow_rsp_threshold is not set way above recommended max
124 * value (32767 ie 9 hours) and is generally harmless even if wrong
125 * since only affects debug counters - so leaving the calc as simple
126 * comparison rather than doing multiple conversions and overflow
127 * checks
128 */
129 if ((slow_rsp_threshold != 0) &&
130 time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
020eec5f 131 (midEntry->command != command)) {
f5942db5
SF
132 /*
133 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
134 * NB: le16_to_cpu returns unsigned so can not be negative below
135 */
433b8dd7
SF
136 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
137 cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
468d6779 138
433b8dd7 139 trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
020eec5f
SF
140 midEntry->when_sent, midEntry->when_received);
141 if (cifsFYI & CIFS_TIMER) {
a0a3036b
JP
142 pr_debug("slow rsp: cmd %d mid %llu",
143 midEntry->command, midEntry->mid);
144 cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
145 now - midEntry->when_alloc,
146 now - midEntry->when_sent,
147 now - midEntry->when_received);
1047abc1
SF
148 }
149 }
150#endif
f1f27ad7 151 put_task_struct(midEntry->creator);
abe57073
PS
152
153 mempool_free(midEntry, cifs_mid_poolp);
154}
155
70f08f91 156void release_mid(struct mid_q_entry *mid)
abe57073 157{
70f08f91 158 struct TCP_Server_Info *server = mid->server;
d7d7a66a
SP
159
160 spin_lock(&server->mid_lock);
70f08f91 161 kref_put(&mid->refcount, __release_mid);
d7d7a66a 162 spin_unlock(&server->mid_lock);
abe57073
PS
163}
164
3c1bf7e4 165void
70f08f91 166delete_mid(struct mid_q_entry *mid)
ddc8cf8f 167{
d7d7a66a 168 spin_lock(&mid->server->mid_lock);
abe57073
PS
169 if (!(mid->mid_flags & MID_DELETED)) {
170 list_del_init(&mid->qhead);
171 mid->mid_flags |= MID_DELETED;
172 }
d7d7a66a 173 spin_unlock(&mid->server->mid_lock);
ddc8cf8f 174
70f08f91 175 release_mid(mid);
ddc8cf8f
JL
176}
177
6f49f46b
JL
178/*
179 * smb_send_kvec - send an array of kvecs to the server
180 * @server: Server to send the data to
3ab3f2a1 181 * @smb_msg: Message to send
6f49f46b
JL
182 * @sent: amount of data sent on socket is stored here
183 *
184 * Our basic "send data to server" function. Should be called with srv_mutex
185 * held. The caller is responsible for handling the results.
186 */
d6e04ae6 187static int
3ab3f2a1
AV
188smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
189 size_t *sent)
1da177e4
LT
190{
191 int rc = 0;
3ab3f2a1 192 int retries = 0;
edf1ae40 193 struct socket *ssocket = server->ssocket;
50c2f753 194
6f49f46b
JL
195 *sent = 0;
196
0496e02d 197 if (server->noblocksnd)
3ab3f2a1 198 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
edf1ae40 199 else
3ab3f2a1 200 smb_msg->msg_flags = MSG_NOSIGNAL;
1da177e4 201
3ab3f2a1 202 while (msg_data_left(smb_msg)) {
6f49f46b
JL
203 /*
204 * If blocking send, we try 3 times, since each can block
205 * for 5 seconds. For nonblocking we have to try more
206 * but wait increasing amounts of time allowing time for
207 * socket to clear. The overall time we wait in either
208 * case to send on the socket is about 15 seconds.
209 * Similarly we wait for 15 seconds for a response from
210 * the server in SendReceive[2] for the server to send
211 * a response back for most types of requests (except
212 * SMB Write past end of file which can be slow, and
213 * blocking lock operations). NFS waits slightly longer
214 * than CIFS, but this can make it take longer for
215 * nonresponsive servers to be detected and 15 seconds
216 * is more than enough time for modern networks to
217 * send a packet. In most cases if we fail to send
218 * after the retries we will kill the socket and
219 * reconnect which may clear the network problem.
220 */
3ab3f2a1 221 rc = sock_sendmsg(ssocket, smb_msg);
ce6c44e4 222 if (rc == -EAGAIN) {
3ab3f2a1
AV
223 retries++;
224 if (retries >= 14 ||
225 (!server->noblocksnd && (retries > 2))) {
afe6f653 226 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
f96637be 227 ssocket);
3ab3f2a1 228 return -EAGAIN;
1da177e4 229 }
3ab3f2a1 230 msleep(1 << retries);
1da177e4
LT
231 continue;
232 }
6f49f46b 233
79a58d1f 234 if (rc < 0)
3ab3f2a1 235 return rc;
6f49f46b 236
79a58d1f 237 if (rc == 0) {
3e84469d
SF
238 /* should never happen, letting socket clear before
239 retrying is our only obvious option here */
afe6f653 240 cifs_server_dbg(VFS, "tcp sent no data\n");
3e84469d
SF
241 msleep(500);
242 continue;
d6e04ae6 243 }
6f49f46b 244
3ab3f2a1
AV
245 /* send was at least partially successful */
246 *sent += rc;
247 retries = 0; /* in case we get ENOSPC on the next send */
1da177e4 248 }
3ab3f2a1 249 return 0;
97bc00b3
JL
250}
251
35e2cc1b 252unsigned long
81f39f95 253smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
a26054d1
JL
254{
255 unsigned int i;
35e2cc1b
PA
256 struct kvec *iov;
257 int nvec;
a26054d1
JL
258 unsigned long buflen = 0;
259
d291e703 260 if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
9789de8b 261 rqst->rq_iov[0].iov_len == 4) {
35e2cc1b
PA
262 iov = &rqst->rq_iov[1];
263 nvec = rqst->rq_nvec - 1;
264 } else {
265 iov = rqst->rq_iov;
266 nvec = rqst->rq_nvec;
267 }
268
a26054d1 269 /* total up iov array first */
35e2cc1b 270 for (i = 0; i < nvec; i++)
a26054d1
JL
271 buflen += iov[i].iov_len;
272
d08089f6 273 buflen += iov_iter_count(&rqst->rq_iter);
a26054d1
JL
274 return buflen;
275}
276
6f49f46b 277static int
07cd952f
RS
278__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
279 struct smb_rqst *rqst)
6f49f46b 280{
07cd952f
RS
281 int rc = 0;
282 struct kvec *iov;
283 int n_vec;
284 unsigned int send_length = 0;
285 unsigned int i, j;
b30c74c7 286 sigset_t mask, oldmask;
3ab3f2a1 287 size_t total_len = 0, sent, size;
b8eed283 288 struct socket *ssocket = server->ssocket;
bedc8f76 289 struct msghdr smb_msg = {};
c713c877
RS
290 __be32 rfc1002_marker;
291
4357d45f
LL
292 if (cifs_rdma_enabled(server)) {
293 /* return -EAGAIN when connecting or reconnecting */
294 rc = -EAGAIN;
295 if (server->smbd_conn)
296 rc = smbd_send(server, num_rqst, rqst);
9762c2d0
LL
297 goto smbd_done;
298 }
afc18a6f 299
ea702b80 300 if (ssocket == NULL)
afc18a6f 301 return -EAGAIN;
ea702b80 302
214a5ea0 303 if (fatal_signal_pending(current)) {
6988a619
PA
304 cifs_dbg(FYI, "signal pending before send request\n");
305 return -ERESTARTSYS;
b30c74c7
PS
306 }
307
b8eed283 308 /* cork the socket */
db10538a 309 tcp_sock_set_cork(ssocket->sk, true);
b8eed283 310
07cd952f 311 for (j = 0; j < num_rqst; j++)
81f39f95 312 send_length += smb_rqst_len(server, &rqst[j]);
07cd952f
RS
313 rfc1002_marker = cpu_to_be32(send_length);
314
b30c74c7
PS
315 /*
316 * We should not allow signals to interrupt the network send because
317 * any partial send will cause session reconnects thus increasing
318 * latency of system calls and overload a server with unnecessary
319 * requests.
320 */
321
322 sigfillset(&mask);
323 sigprocmask(SIG_BLOCK, &mask, &oldmask);
324
c713c877 325 /* Generate a rfc1002 marker for SMB2+ */
d291e703 326 if (!is_smb1(server)) {
c713c877
RS
327 struct kvec hiov = {
328 .iov_base = &rfc1002_marker,
329 .iov_len = 4
330 };
de4eda9d 331 iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
c713c877
RS
332 rc = smb_send_kvec(server, &smb_msg, &sent);
333 if (rc < 0)
b30c74c7 334 goto unmask;
c713c877
RS
335
336 total_len += sent;
337 send_length += 4;
338 }
339
662bf5bc
PA
340 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
341
07cd952f
RS
342 for (j = 0; j < num_rqst; j++) {
343 iov = rqst[j].rq_iov;
344 n_vec = rqst[j].rq_nvec;
3ab3f2a1 345
07cd952f 346 size = 0;
662bf5bc
PA
347 for (i = 0; i < n_vec; i++) {
348 dump_smb(iov[i].iov_base, iov[i].iov_len);
07cd952f 349 size += iov[i].iov_len;
662bf5bc 350 }
97bc00b3 351
de4eda9d 352 iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);
97bc00b3 353
3ab3f2a1 354 rc = smb_send_kvec(server, &smb_msg, &sent);
97bc00b3 355 if (rc < 0)
b30c74c7 356 goto unmask;
97bc00b3
JL
357
358 total_len += sent;
07cd952f 359
d08089f6
DH
360 if (iov_iter_count(&rqst[j].rq_iter) > 0) {
361 smb_msg.msg_iter = rqst[j].rq_iter;
07cd952f
RS
362 rc = smb_send_kvec(server, &smb_msg, &sent);
363 if (rc < 0)
364 break;
07cd952f
RS
365 total_len += sent;
366 }
d08089f6
DH
367
368}
1da177e4 369
b30c74c7
PS
370unmask:
371 sigprocmask(SIG_SETMASK, &oldmask, NULL);
372
373 /*
374 * If signal is pending but we have already sent the whole packet to
375 * the server we need to return success status to allow a corresponding
376 * mid entry to be kept in the pending requests queue thus allowing
377 * to handle responses from the server by the client.
378 *
379 * If only part of the packet has been sent there is no need to hide
380 * interrupt because the session will be reconnected anyway, so there
381 * won't be any response from the server to handle.
382 */
383
384 if (signal_pending(current) && (total_len != send_length)) {
385 cifs_dbg(FYI, "signal is pending after attempt to send\n");
214a5ea0 386 rc = -ERESTARTSYS;
b30c74c7
PS
387 }
388
b8eed283 389 /* uncork it */
db10538a 390 tcp_sock_set_cork(ssocket->sk, false);
b8eed283 391
c713c877 392 if ((total_len > 0) && (total_len != send_length)) {
f96637be 393 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
c713c877 394 send_length, total_len);
6f49f46b
JL
395 /*
396 * If we have only sent part of an SMB then the next SMB could
397 * be taken as the remainder of this one. We need to kill the
398 * socket so the server throws away the partial SMB
399 */
dca65818 400 cifs_signal_cifsd_for_reconnect(server, false);
bf1fdeb7 401 trace_smb3_partial_send_reconnect(server->CurrentMid,
6d82c27a 402 server->conn_id, server->hostname);
edf1ae40 403 }
9762c2d0 404smbd_done:
d804d41d 405 if (rc < 0 && rc != -EINTR)
afe6f653 406 cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
f96637be 407 rc);
ee13919c 408 else if (rc > 0)
1da177e4 409 rc = 0;
1da177e4
LT
410
411 return rc;
412}
413
6f49f46b 414static int
1f3a8f5f
RS
415smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
416 struct smb_rqst *rqst, int flags)
6f49f46b 417{
b2c96de7 418 struct kvec iov;
3946d0d0 419 struct smb2_transform_hdr *tr_hdr;
b2c96de7 420 struct smb_rqst cur_rqst[MAX_COMPOUND];
7fb8986e
PS
421 int rc;
422
423 if (!(flags & CIFS_TRANSFORM_REQ))
1f3a8f5f
RS
424 return __smb_send_rqst(server, num_rqst, rqst);
425
426 if (num_rqst > MAX_COMPOUND - 1)
427 return -ENOMEM;
7fb8986e 428
b2c96de7 429 if (!server->ops->init_transform_rq) {
a0a3036b 430 cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
7fb8986e
PS
431 return -EIO;
432 }
6f49f46b 433
9339faac 434 tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
3946d0d0
LL
435 if (!tr_hdr)
436 return -ENOMEM;
437
438 memset(&cur_rqst[0], 0, sizeof(cur_rqst));
439 memset(&iov, 0, sizeof(iov));
3946d0d0
LL
440
441 iov.iov_base = tr_hdr;
442 iov.iov_len = sizeof(*tr_hdr);
443 cur_rqst[0].rq_iov = &iov;
444 cur_rqst[0].rq_nvec = 1;
445
1f3a8f5f
RS
446 rc = server->ops->init_transform_rq(server, num_rqst + 1,
447 &cur_rqst[0], rqst);
7fb8986e 448 if (rc)
3946d0d0 449 goto out;
7fb8986e 450
1f3a8f5f
RS
451 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
452 smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
3946d0d0
LL
453out:
454 kfree(tr_hdr);
7fb8986e 455 return rc;
6f49f46b
JL
456}
457
0496e02d
JL
458int
459smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
460 unsigned int smb_buf_length)
461{
738f9de5 462 struct kvec iov[2];
7fb8986e
PS
463 struct smb_rqst rqst = { .rq_iov = iov,
464 .rq_nvec = 2 };
0496e02d 465
738f9de5
PS
466 iov[0].iov_base = smb_buffer;
467 iov[0].iov_len = 4;
468 iov[1].iov_base = (char *)smb_buffer + 4;
469 iov[1].iov_len = smb_buf_length;
0496e02d 470
07cd952f 471 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
472}
473
fc40f9cf 474static int
b227d215 475wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
2b53b929
RS
476 const int timeout, const int flags,
477 unsigned int *instance)
1da177e4 478{
19e88867 479 long rc;
4230cff8
RS
480 int *credits;
481 int optype;
2b53b929 482 long int t;
6d82c27a 483 int scredits, in_flight;
2b53b929
RS
484
485 if (timeout < 0)
486 t = MAX_JIFFY_OFFSET;
487 else
488 t = msecs_to_jiffies(timeout);
4230cff8
RS
489
490 optype = flags & CIFS_OP_MASK;
5bc59498 491
34f4deb7
PS
492 *instance = 0;
493
4230cff8
RS
494 credits = server->ops->get_credits_field(server, optype);
495 /* Since an echo is already inflight, no need to wait to send another */
496 if (*credits <= 0 && optype == CIFS_ECHO_OP)
497 return -EAGAIN;
498
fc40f9cf 499 spin_lock(&server->req_lock);
392e1c5d 500 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
1da177e4 501 /* oplock breaks must not be held up */
fc40f9cf 502 server->in_flight++;
1b63f184
SF
503 if (server->in_flight > server->max_in_flight)
504 server->max_in_flight = server->in_flight;
bc205ed1 505 *credits -= 1;
34f4deb7 506 *instance = server->reconnect_instance;
6d82c27a
SP
507 scredits = *credits;
508 in_flight = server->in_flight;
fc40f9cf 509 spin_unlock(&server->req_lock);
6d82c27a 510
1ddff774 511 trace_smb3_nblk_credits(server->CurrentMid,
6d82c27a
SP
512 server->conn_id, server->hostname, scredits, -1, in_flight);
513 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
514 __func__, 1, scredits);
515
27a97a61
VL
516 return 0;
517 }
518
27a97a61 519 while (1) {
b227d215 520 if (*credits < num_credits) {
6d82c27a 521 scredits = *credits;
fc40f9cf 522 spin_unlock(&server->req_lock);
6d82c27a 523
789e6661 524 cifs_num_waiters_inc(server);
2b53b929
RS
525 rc = wait_event_killable_timeout(server->request_q,
526 has_credits(server, credits, num_credits), t);
789e6661 527 cifs_num_waiters_dec(server);
2b53b929 528 if (!rc) {
6d82c27a
SP
529 spin_lock(&server->req_lock);
530 scredits = *credits;
531 in_flight = server->in_flight;
532 spin_unlock(&server->req_lock);
533
7937ca96 534 trace_smb3_credit_timeout(server->CurrentMid,
6d82c27a
SP
535 server->conn_id, server->hostname, scredits,
536 num_credits, in_flight);
afe6f653 537 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
6d82c27a 538 timeout);
7de03948 539 return -EBUSY;
2b53b929
RS
540 }
541 if (rc == -ERESTARTSYS)
542 return -ERESTARTSYS;
fc40f9cf 543 spin_lock(&server->req_lock);
27a97a61 544 } else {
080dc5e5
SP
545 spin_unlock(&server->req_lock);
546
d7d7a66a 547 spin_lock(&server->srv_lock);
c5797a94 548 if (server->tcpStatus == CifsExiting) {
d7d7a66a 549 spin_unlock(&server->srv_lock);
27a97a61 550 return -ENOENT;
1da177e4 551 }
d7d7a66a 552 spin_unlock(&server->srv_lock);
27a97a61 553
16b34aa4
RS
554 /*
555 * For normal commands, reserve the last MAX_COMPOUND
556 * credits to compound requests.
557 * Otherwise these compounds could be permanently
558 * starved for credits by single-credit requests.
559 *
560 * To prevent spinning CPU, block this thread until
561 * there are >MAX_COMPOUND credits available.
562 * But only do this is we already have a lot of
563 * credits in flight to avoid triggering this check
564 * for servers that are slow to hand out credits on
565 * new sessions.
566 */
080dc5e5 567 spin_lock(&server->req_lock);
16b34aa4
RS
568 if (!optype && num_credits == 1 &&
569 server->in_flight > 2 * MAX_COMPOUND &&
570 *credits <= MAX_COMPOUND) {
571 spin_unlock(&server->req_lock);
6d82c27a 572
16b34aa4 573 cifs_num_waiters_inc(server);
2b53b929
RS
574 rc = wait_event_killable_timeout(
575 server->request_q,
16b34aa4 576 has_credits(server, credits,
2b53b929
RS
577 MAX_COMPOUND + 1),
578 t);
16b34aa4 579 cifs_num_waiters_dec(server);
2b53b929 580 if (!rc) {
6d82c27a
SP
581 spin_lock(&server->req_lock);
582 scredits = *credits;
583 in_flight = server->in_flight;
584 spin_unlock(&server->req_lock);
585
7937ca96 586 trace_smb3_credit_timeout(
6d82c27a
SP
587 server->CurrentMid,
588 server->conn_id, server->hostname,
589 scredits, num_credits, in_flight);
afe6f653 590 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
6d82c27a 591 timeout);
7de03948 592 return -EBUSY;
2b53b929
RS
593 }
594 if (rc == -ERESTARTSYS)
595 return -ERESTARTSYS;
16b34aa4
RS
596 spin_lock(&server->req_lock);
597 continue;
598 }
599
2d86dbc9
PS
600 /*
601 * Can not count locking commands against total
602 * as they are allowed to block on server.
603 */
27a97a61
VL
604
605 /* update # of requests on the wire to server */
4230cff8 606 if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
b227d215
RS
607 *credits -= num_credits;
608 server->in_flight += num_credits;
1b63f184
SF
609 if (server->in_flight > server->max_in_flight)
610 server->max_in_flight = server->in_flight;
34f4deb7 611 *instance = server->reconnect_instance;
2d86dbc9 612 }
6d82c27a
SP
613 scredits = *credits;
614 in_flight = server->in_flight;
fc40f9cf 615 spin_unlock(&server->req_lock);
cd7b699b 616
1ddff774 617 trace_smb3_waitff_credits(server->CurrentMid,
6d82c27a
SP
618 server->conn_id, server->hostname, scredits,
619 -(num_credits), in_flight);
cd7b699b
SP
620 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
621 __func__, num_credits, scredits);
27a97a61 622 break;
1da177e4
LT
623 }
624 }
7ee1af76
JA
625 return 0;
626}
1da177e4 627
bc205ed1 628static int
480b1cb9
RS
629wait_for_free_request(struct TCP_Server_Info *server, const int flags,
630 unsigned int *instance)
bc205ed1 631{
2b53b929
RS
632 return wait_for_free_credits(server, 1, -1, flags,
633 instance);
bc205ed1
PS
634}
635
257b7809
RS
636static int
637wait_for_compound_request(struct TCP_Server_Info *server, int num,
638 const int flags, unsigned int *instance)
639{
640 int *credits;
6d82c27a 641 int scredits, in_flight;
257b7809
RS
642
643 credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
644
645 spin_lock(&server->req_lock);
cd7b699b 646 scredits = *credits;
6d82c27a 647 in_flight = server->in_flight;
cd7b699b 648
257b7809
RS
649 if (*credits < num) {
650 /*
91792bb8
PS
651 * If the server is tight on resources or just gives us less
652 * credits for other reasons (e.g. requests are coming out of
653 * order and the server delays granting more credits until it
654 * processes a missing mid) and we exhausted most available
655 * credits there may be situations when we try to send
656 * a compound request but we don't have enough credits. At this
657 * point the client needs to decide if it should wait for
658 * additional credits or fail the request. If at least one
659 * request is in flight there is a high probability that the
660 * server will return enough credits to satisfy this compound
661 * request.
662 *
663 * Return immediately if no requests in flight since we will be
664 * stuck on waiting for credits.
257b7809 665 */
91792bb8 666 if (server->in_flight == 0) {
257b7809 667 spin_unlock(&server->req_lock);
cd7b699b 668 trace_smb3_insufficient_credits(server->CurrentMid,
6d82c27a
SP
669 server->conn_id, server->hostname, scredits,
670 num, in_flight);
cd7b699b 671 cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
6d82c27a 672 __func__, in_flight, num, scredits);
7de03948 673 return -EDEADLK;
257b7809
RS
674 }
675 }
676 spin_unlock(&server->req_lock);
677
678 return wait_for_free_credits(server, num, 60000, flags,
679 instance);
680}
681
cb7e9eab
PS
682int
683cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
335b7b62 684 unsigned int *num, struct cifs_credits *credits)
cb7e9eab
PS
685{
686 *num = size;
335b7b62
PS
687 credits->value = 0;
688 credits->instance = server->reconnect_instance;
cb7e9eab
PS
689 return 0;
690}
691
96daf2b0 692static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
7ee1af76
JA
693 struct mid_q_entry **ppmidQ)
694{
d7d7a66a 695 spin_lock(&ses->ses_lock);
dd3cd870 696 if (ses->ses_status == SES_NEW) {
79a58d1f 697 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
080dc5e5 698 (in_buf->Command != SMB_COM_NEGOTIATE)) {
d7d7a66a 699 spin_unlock(&ses->ses_lock);
7ee1af76 700 return -EAGAIN;
080dc5e5 701 }
ad7a2926 702 /* else ok - we are setting up session */
1da177e4 703 }
7f48558e 704
dd3cd870 705 if (ses->ses_status == SES_EXITING) {
7f48558e 706 /* check if SMB session is bad because we are setting it up */
080dc5e5 707 if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
d7d7a66a 708 spin_unlock(&ses->ses_lock);
7f48558e 709 return -EAGAIN;
080dc5e5 710 }
7f48558e
SP
711 /* else ok - we are shutting down session */
712 }
d7d7a66a 713 spin_unlock(&ses->ses_lock);
7f48558e 714
70f08f91 715 *ppmidQ = alloc_mid(in_buf, ses->server);
26f57364 716 if (*ppmidQ == NULL)
7ee1af76 717 return -ENOMEM;
d7d7a66a 718 spin_lock(&ses->server->mid_lock);
ddc8cf8f 719 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
d7d7a66a 720 spin_unlock(&ses->server->mid_lock);
7ee1af76
JA
721 return 0;
722}
723
0ade640e
JL
724static int
725wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
7ee1af76 726{
0ade640e 727 int error;
7ee1af76 728
f5d39b02
PZ
729 error = wait_event_state(server->response_q,
730 midQ->mid_state != MID_REQUEST_SUBMITTED,
731 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
0ade640e
JL
732 if (error < 0)
733 return -ERESTARTSYS;
7ee1af76 734
0ade640e 735 return 0;
7ee1af76
JA
736}
737
fec344e3
JL
738struct mid_q_entry *
739cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
792af7b0
PS
740{
741 int rc;
fec344e3 742 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
743 struct mid_q_entry *mid;
744
738f9de5
PS
745 if (rqst->rq_iov[0].iov_len != 4 ||
746 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
747 return ERR_PTR(-EIO);
748
792af7b0 749 /* enable signing if server requires it */
38d77c50 750 if (server->sign)
792af7b0
PS
751 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
752
70f08f91 753 mid = alloc_mid(hdr, server);
792af7b0 754 if (mid == NULL)
fec344e3 755 return ERR_PTR(-ENOMEM);
792af7b0 756
fec344e3 757 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
ffc61ccb 758 if (rc) {
70f08f91 759 release_mid(mid);
fec344e3 760 return ERR_PTR(rc);
ffc61ccb
SP
761 }
762
fec344e3 763 return mid;
792af7b0 764}
133672ef 765
a6827c18
JL
766/*
767 * Send a SMB request and set the callback function in the mid to handle
768 * the result. Caller is responsible for dealing with timeouts.
769 */
770int
fec344e3 771cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
9b7c18a2 772 mid_receive_t *receive, mid_callback_t *callback,
3349c3a7
PS
773 mid_handle_t *handle, void *cbdata, const int flags,
774 const struct cifs_credits *exist_credits)
a6827c18 775{
480b1cb9 776 int rc;
a6827c18 777 struct mid_q_entry *mid;
335b7b62 778 struct cifs_credits credits = { .value = 0, .instance = 0 };
34f4deb7 779 unsigned int instance;
480b1cb9 780 int optype;
a6827c18 781
a891f0f8
PS
782 optype = flags & CIFS_OP_MASK;
783
cb7e9eab 784 if ((flags & CIFS_HAS_CREDITS) == 0) {
480b1cb9 785 rc = wait_for_free_request(server, flags, &instance);
cb7e9eab
PS
786 if (rc)
787 return rc;
335b7b62 788 credits.value = 1;
34f4deb7 789 credits.instance = instance;
3349c3a7
PS
790 } else
791 instance = exist_credits->instance;
a6827c18 792
cc391b69 793 cifs_server_lock(server);
3349c3a7
PS
794
795 /*
796 * We can't use credits obtained from the previous session to send this
797 * request. Check if there were reconnects after we obtained credits and
798 * return -EAGAIN in such cases to let callers handle it.
799 */
800 if (instance != server->reconnect_instance) {
cc391b69 801 cifs_server_unlock(server);
3349c3a7
PS
802 add_credits_and_wake_if(server, &credits, optype);
803 return -EAGAIN;
804 }
805
fec344e3
JL
806 mid = server->ops->setup_async_request(server, rqst);
807 if (IS_ERR(mid)) {
cc391b69 808 cifs_server_unlock(server);
335b7b62 809 add_credits_and_wake_if(server, &credits, optype);
fec344e3 810 return PTR_ERR(mid);
a6827c18
JL
811 }
812
44d22d84 813 mid->receive = receive;
a6827c18
JL
814 mid->callback = callback;
815 mid->callback_data = cbdata;
9b7c18a2 816 mid->handle = handle;
7c9421e1 817 mid->mid_state = MID_REQUEST_SUBMITTED;
789e6661 818
ffc61ccb 819 /* put it on the pending_mid_q */
d7d7a66a 820 spin_lock(&server->mid_lock);
ffc61ccb 821 list_add_tail(&mid->qhead, &server->pending_mid_q);
d7d7a66a 822 spin_unlock(&server->mid_lock);
ffc61ccb 823
93d2cb6c
LL
824 /*
825 * Need to store the time in mid before calling I/O. For call_async,
826 * I/O response may come back and free the mid entry on another thread.
827 */
828 cifs_save_when_sent(mid);
789e6661 829 cifs_in_send_inc(server);
1f3a8f5f 830 rc = smb_send_rqst(server, 1, rqst, flags);
789e6661 831 cifs_in_send_dec(server);
ad313cb8 832
820962dc 833 if (rc < 0) {
c781af7e 834 revert_current_mid(server, mid->credits);
ad313cb8 835 server->sequence_number -= 2;
70f08f91 836 delete_mid(mid);
820962dc
RV
837 }
838
cc391b69 839 cifs_server_unlock(server);
789e6661 840
ffc61ccb
SP
841 if (rc == 0)
842 return 0;
a6827c18 843
335b7b62 844 add_credits_and_wake_if(server, &credits, optype);
a6827c18
JL
845 return rc;
846}
847
133672ef
SF
848/*
849 *
850 * Send an SMB Request. No response info (other than return code)
851 * needs to be parsed.
852 *
853 * flags indicate the type of request buffer and how long to wait
854 * and whether to log NT STATUS code (error) before mapping it to POSIX error
855 *
856 */
857int
96daf2b0 858SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
792af7b0 859 char *in_buf, int flags)
133672ef
SF
860{
861 int rc;
862 struct kvec iov[1];
da502f7d 863 struct kvec rsp_iov;
133672ef
SF
864 int resp_buf_type;
865
792af7b0
PS
866 iov[0].iov_base = in_buf;
867 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
392e1c5d 868 flags |= CIFS_NO_RSP_BUF;
da502f7d 869 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
f96637be 870 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
90c81e0b 871
133672ef
SF
872 return rc;
873}
874
053d5034 875static int
3c1105df 876cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
053d5034
JL
877{
878 int rc = 0;
879
f96637be
JP
880 cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
881 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
053d5034 882
d7d7a66a 883 spin_lock(&server->mid_lock);
7c9421e1 884 switch (mid->mid_state) {
74dd92a8 885 case MID_RESPONSE_RECEIVED:
d7d7a66a 886 spin_unlock(&server->mid_lock);
053d5034 887 return rc;
74dd92a8
JL
888 case MID_RETRY_NEEDED:
889 rc = -EAGAIN;
890 break;
71823baf
JL
891 case MID_RESPONSE_MALFORMED:
892 rc = -EIO;
893 break;
3c1105df
JL
894 case MID_SHUTDOWN:
895 rc = -EHOSTDOWN;
896 break;
74dd92a8 897 default:
abe57073
PS
898 if (!(mid->mid_flags & MID_DELETED)) {
899 list_del_init(&mid->qhead);
900 mid->mid_flags |= MID_DELETED;
901 }
afe6f653 902 cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
f96637be 903 __func__, mid->mid, mid->mid_state);
74dd92a8 904 rc = -EIO;
053d5034 905 }
d7d7a66a 906 spin_unlock(&server->mid_lock);
053d5034 907
70f08f91 908 release_mid(mid);
053d5034
JL
909 return rc;
910}
911
121b046a 912static inline int
fb2036d8
PS
913send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
914 struct mid_q_entry *mid)
76dcc26f 915{
121b046a 916 return server->ops->send_cancel ?
fb2036d8 917 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
918}
919
2c8f981d
JL
920int
921cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
922 bool log_error)
923{
792af7b0 924 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
826a95e4
JL
925
926 dump_smb(mid->resp_buf, min_t(u32, 92, len));
2c8f981d
JL
927
928 /* convert the length into a more usable form */
38d77c50 929 if (server->sign) {
738f9de5 930 struct kvec iov[2];
985e4ff0 931 int rc = 0;
738f9de5
PS
932 struct smb_rqst rqst = { .rq_iov = iov,
933 .rq_nvec = 2 };
826a95e4 934
738f9de5
PS
935 iov[0].iov_base = mid->resp_buf;
936 iov[0].iov_len = 4;
937 iov[1].iov_base = (char *)mid->resp_buf + 4;
938 iov[1].iov_len = len - 4;
2c8f981d 939 /* FIXME: add code to kill session */
bf5ea0e2 940 rc = cifs_verify_signature(&rqst, server,
0124cc45 941 mid->sequence_number);
985e4ff0 942 if (rc)
afe6f653 943 cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
f96637be 944 rc);
2c8f981d
JL
945 }
946
947 /* BB special case reconnect tid and uid here? */
a3713ec3 948 return map_and_check_smb_error(mid, log_error);
2c8f981d
JL
949}
950
fec344e3 951struct mid_q_entry *
f780bd3f
AA
952cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
953 struct smb_rqst *rqst)
792af7b0
PS
954{
955 int rc;
fec344e3 956 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
957 struct mid_q_entry *mid;
958
738f9de5
PS
959 if (rqst->rq_iov[0].iov_len != 4 ||
960 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
961 return ERR_PTR(-EIO);
962
792af7b0
PS
963 rc = allocate_mid(ses, hdr, &mid);
964 if (rc)
fec344e3
JL
965 return ERR_PTR(rc);
966 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
967 if (rc) {
70f08f91 968 delete_mid(mid);
fec344e3
JL
969 return ERR_PTR(rc);
970 }
971 return mid;
792af7b0
PS
972}
973
4e34feb5 974static void
ee258d79 975cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
976{
977 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
978 struct cifs_credits credits;
979
980 credits.value = server->ops->get_credits(mid);
981 credits.instance = server->reconnect_instance;
8a26f0f7 982
34f4deb7 983 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
984}
985
/*
 * Completion for the final PDU of a compound chain: account credits
 * like every other part, then wake the thread waiting in
 * compound_send_recv().
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
992
/*
 * Completion for a mid whose waiter gave up: account credits and drop
 * the reference, since nobody will call delete_mid() for it.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	release_mid(mid);
}
999
5f68ea4a
AA
1000/*
1001 * Return a channel (master if none) of @ses that can be used to send
1002 * regular requests.
1003 *
1004 * If we are currently binding a new channel (negprot/sess.setup),
1005 * return the new incomplete channel.
1006 */
1007struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1008{
1009 uint index = 0;
1010
1011 if (!ses)
1012 return NULL;
1013
f486ef8e 1014 /* round robin */
bda487ac 1015 index = (uint)atomic_inc_return(&ses->chan_seq);
88b024f5
SP
1016
1017 spin_lock(&ses->chan_lock);
bda487ac 1018 index %= ses->chan_count;
88b024f5 1019 spin_unlock(&ses->chan_lock);
f486ef8e
SP
1020
1021 return ses->chans[index].server;
5f68ea4a
AA
1022}
1023
/*
 * Send a chain of up to MAX_COMPOUND requests and wait for all of
 * their responses.
 *
 * Credit accounting: one credit is reserved per request up front; each
 * response returns server-granted credits via the per-mid callbacks.
 * If sending fails, or a request will get no response, the reserved
 * credits are handed back here.  The send/sign section runs under
 * cifs_server_lock() so signing order matches wire order.
 *
 * Returns 0 on success or a negative errno; response buffers are
 * handed back through @resp_iov/@resp_buf_type.
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	cifs_server_lock(server);

	/*
	 * All parts of the compound chain must use credits obtained from the
	 * same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* undo the message ids consumed so far */
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				delete_mid(midQ[j]);
			cifs_server_unlock(server);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* send failed: give back the mids and sequence numbers */
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	cifs_server_unlock(server);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&ses->ses_lock);

		/* hash update must not race with other senders */
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		cifs_server_unlock(server);

		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* cancel every request we never saw a response for */
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
					midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&server->mid_lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				/* response may still arrive; let the callback free it */
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&server->mid_lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			HEADER_PREAMBLE_SIZE(server);

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&ses->ses_lock);
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		cifs_server_unlock(server);
		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			delete_mid(midQ[i]);
	}

	return rc;
}
1da177e4 1258
/*
 * Send a single request and wait for its response.  A lone request is
 * simply a compound chain of length one.
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}
1268
738f9de5
PS
1269int
1270SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1271 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1272 const int flags, struct kvec *resp_iov)
1273{
1274 struct smb_rqst rqst;
3cecf486 1275 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
738f9de5
PS
1276 int rc;
1277
3cecf486 1278 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
6da2ec56
KC
1279 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1280 GFP_KERNEL);
117e3b7f
SF
1281 if (!new_iov) {
1282 /* otherwise cifs_send_recv below sets resp_buf_type */
1283 *resp_buf_type = CIFS_NO_BUFFER;
3cecf486 1284 return -ENOMEM;
117e3b7f 1285 }
3cecf486
RS
1286 } else
1287 new_iov = s_iov;
738f9de5
PS
1288
1289 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1290 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1291
1292 new_iov[0].iov_base = new_iov[1].iov_base;
1293 new_iov[0].iov_len = 4;
1294 new_iov[1].iov_base += 4;
1295 new_iov[1].iov_len -= 4;
1296
1297 memset(&rqst, 0, sizeof(struct smb_rqst));
1298 rqst.rq_iov = new_iov;
1299 rqst.rq_nvec = n_vec + 1;
1300
352d96f3
AA
1301 rc = cifs_send_recv(xid, ses, ses->server,
1302 &rqst, resp_buf_type, flags, resp_iov);
3cecf486
RS
1303 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1304 kfree(new_iov);
738f9de5
PS
1305 return rc;
1306}
1307
/*
 * Send a single pre-built SMB1 frame and copy the response into
 * @out_buf.  Reserves one credit, signs and sends under
 * cifs_server_lock() (so signing order matches wire order), then waits
 * for the response.  On return *pbytes_returned holds the RFC1001
 * length of the response.  Returns 0 or a negative errno.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* send failed: give back the sequence numbers signing consumed */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&server->mid_lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = release_mid;
			spin_unlock(&server->mid_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&server->mid_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		/* mid already released by cifs_sync_mid_result() */
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy RFC1001 length + the whole response to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
1da177e4 1423
7ee1af76
JA
1424/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1425 blocking lock to return. */
1426
1427static int
96daf2b0 1428send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
7ee1af76
JA
1429 struct smb_hdr *in_buf,
1430 struct smb_hdr *out_buf)
1431{
1432 int bytes_returned;
96daf2b0 1433 struct cifs_ses *ses = tcon->ses;
7ee1af76
JA
1434 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1435
1436 /* We just modify the current in_buf to change
1437 the type of lock from LOCKING_ANDX_SHARED_LOCK
1438 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1439 LOCKING_ANDX_CANCEL_LOCK. */
1440
1441 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1442 pSMB->Timeout = 0;
88257360 1443 pSMB->hdr.Mid = get_next_mid(ses->server);
7ee1af76
JA
1444
1445 return SendReceive(xid, ses, in_buf, out_buf,
7749981e 1446 &bytes_returned, 0);
7ee1af76
JA
1447}
1448
/*
 * Send a blocking byte-range lock request and wait for it to complete.
 * Unlike SendReceive(), the wait is signal-interruptible: if a signal
 * arrives while the lock is still pending, a protocol-level cancel
 * (NT_CANCEL for POSIX/T2 locks, LOCKINGX_CANCEL_LOCK otherwise) is
 * sent so the server releases the blocked request, then the response
 * is collected.  If the response arrived only because we cancelled
 * after a signal, -ERESTARTSYS is returned so the syscall restarts.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;		/* set once we know the wait was restarted */
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* send failed: give back the sequence numbers signing consumed */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		/* uninterruptible wait for the (now cancelled) response */
		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&server->mid_lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = release_mid;
				spin_unlock(&server->mid_lock);
				return rc;
			}
			spin_unlock(&server->mid_lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy RFC1001 length + the whole response to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
fb157ed2
SF
1607
1608/*
1609 * Discard any remaining data in the current SMB. To do this, we borrow the
1610 * current bigbuf.
1611 */
1612int
1613cifs_discard_remaining_data(struct TCP_Server_Info *server)
1614{
1615 unsigned int rfclen = server->pdu_size;
d08089f6 1616 size_t remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
fb157ed2
SF
1617 server->total_read;
1618
1619 while (remaining > 0) {
d08089f6 1620 ssize_t length;
fb157ed2
SF
1621
1622 length = cifs_discard_from_socket(server,
1623 min_t(size_t, remaining,
1624 CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
1625 if (length < 0)
1626 return length;
1627 server->total_read += length;
1628 remaining -= length;
1629 }
1630
1631 return 0;
1632}
1633
1634static int
1635__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
1636 bool malformed)
1637{
1638 int length;
1639
1640 length = cifs_discard_remaining_data(server);
1641 dequeue_mid(mid, malformed);
1642 mid->resp_buf = server->smallbuf;
1643 server->smallbuf = NULL;
1644 return length;
1645}
1646
1647static int
1648cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1649{
1650 struct cifs_readdata *rdata = mid->callback_data;
1651
1652 return __cifs_readv_discard(server, mid, rdata->result);
1653}
1654
/*
 * Receive the body of a READ response directly from the socket into
 * the caller's iterator (rdata->iter), avoiding an extra copy.  The
 * headers land in server->smallbuf; only the payload streams into the
 * destination.  On RDMA (smbdirect) the payload was already placed by
 * the hardware and only accounting is done.  Any malformed or
 * error-bearing frame is drained via the discard helpers so the socket
 * stays aligned on PDU boundaries.
 */
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_readdata *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
	bool use_rdma_mr = false;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
		 __func__, mid->mid, rdata->offset, rdata->bytes);

	/*
	 * read the rest of READ_RSP header (sans Data array), or whatever we
	 * can if there's not enough data. At this point, we've read down to
	 * the Mid.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
		HEADER_SIZE(server) + 1;

	length = cifs_read_from_socket(server,
				       buf + HEADER_SIZE(server) - 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server)) {
		/* interim response; real one will follow - drop this frame */
		cifs_discard_remaining_data(server);
		return -1;
	}

	/* set up first two iov for signature check and to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_len =
		server->total_read - HEADER_PREAMBLE_SIZE(server);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	/* Was the SMB read successful? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		return __cifs_readv_discard(server, mid, false);
	}

	/* Is there enough to get to the rest of the READ_RSP header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	data_offset = server->ops->read_data_offset(buf) +
		HEADER_PREAMBLE_SIZE(server);
	if (data_offset < server->total_read) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	len = data_offset - server->total_read;
	if (len > 0) {
		/* read any junk before data into the rest of smallbuf */
		length = cifs_read_from_socket(server,
					       buf + server->total_read, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);
	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
		/* data_len is corrupt -- discard frame */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr)
		length = data_len; /* An RDMA read is already done. */
	else
#endif
		length = cifs_read_iter_from_socket(server, &rdata->iter,
						    data_len);
	if (length > 0)
		rdata->got_bytes += length;
	server->total_read += length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* discard anything left over */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

	dequeue_mid(mid, false);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}