[linux-block.git] / fs / cifs / transport.c — annotated (git blame) view
929be906 1// SPDX-License-Identifier: LGPL-2.1
1da177e4 2/*
1da177e4 3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 */
9
10#include <linux/fs.h>
11#include <linux/list.h>
5a0e3ad6 12#include <linux/gfp.h>
1da177e4
LT
13#include <linux/wait.h>
14#include <linux/net.h>
15#include <linux/delay.h>
f06ac72e 16#include <linux/freezer.h>
b8eed283 17#include <linux/tcp.h>
2f8b5444 18#include <linux/bvec.h>
97bc00b3 19#include <linux/highmem.h>
7c0f6ba6 20#include <linux/uaccess.h>
1da177e4
LT
21#include <asm/processor.h>
22#include <linux/mempool.h>
14e25977 23#include <linux/sched/signal.h>
fb157ed2 24#include <linux/task_io_accounting_ops.h>
1da177e4
LT
25#include "cifspdu.h"
26#include "cifsglob.h"
27#include "cifsproto.h"
28#include "cifs_debug.h"
8bd68c6e 29#include "smb2proto.h"
9762c2d0 30#include "smbdirect.h"
50c2f753 31
3cecf486
RS
32/* Max number of iovectors we can use off the stack when sending requests. */
33#define CIFS_MAX_IOV_SIZE 8
34
2dc7e1c0
PS
35void
36cifs_wake_up_task(struct mid_q_entry *mid)
2b84a36c
JL
37{
38 wake_up_process(mid->callback_data);
39}
40
ea75a78c 41static struct mid_q_entry *
70f08f91 42alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
1da177e4
LT
43{
44 struct mid_q_entry *temp;
45
24b9b06b 46 if (server == NULL) {
70f08f91 47 cifs_dbg(VFS, "%s: null TCP session\n", __func__);
1da177e4
LT
48 return NULL;
49 }
50c2f753 50
232087cb 51 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
a6f74e80 52 memset(temp, 0, sizeof(struct mid_q_entry));
696e420b 53 kref_init(&temp->refcount);
a6f74e80
N
54 temp->mid = get_mid(smb_buffer);
55 temp->pid = current->pid;
56 temp->command = cpu_to_le16(smb_buffer->Command);
57 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
1047abc1 58 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
a6f74e80
N
59 /* when mid allocated can be before when sent */
60 temp->when_alloc = jiffies;
61 temp->server = server;
2b84a36c 62
a6f74e80
N
63 /*
64 * The default is for the mid to be synchronous, so the
65 * default callback just wakes up the current task.
66 */
f1f27ad7
VW
67 get_task_struct(current);
68 temp->creator = current;
a6f74e80
N
69 temp->callback = cifs_wake_up_task;
70 temp->callback_data = current;
1da177e4 71
c2c17ddb 72 atomic_inc(&mid_count);
7c9421e1 73 temp->mid_state = MID_REQUEST_ALLOCATED;
1da177e4
LT
74 return temp;
75}
76
70f08f91 77static void __release_mid(struct kref *refcount)
696e420b 78{
abe57073
PS
79 struct mid_q_entry *midEntry =
80 container_of(refcount, struct mid_q_entry, refcount);
1047abc1 81#ifdef CONFIG_CIFS_STATS2
2dc7e1c0 82 __le16 command = midEntry->server->vals->lock_cmd;
433b8dd7 83 __u16 smb_cmd = le16_to_cpu(midEntry->command);
1047abc1 84 unsigned long now;
433b8dd7 85 unsigned long roundtrip_time;
1047abc1 86#endif
7b71843f
PS
87 struct TCP_Server_Info *server = midEntry->server;
88
89 if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
90 midEntry->mid_state == MID_RESPONSE_RECEIVED &&
91 server->ops->handle_cancelled_mid)
04ad69c3 92 server->ops->handle_cancelled_mid(midEntry, server);
7b71843f 93
7c9421e1 94 midEntry->mid_state = MID_FREE;
c2c17ddb 95 atomic_dec(&mid_count);
7c9421e1 96 if (midEntry->large_buf)
b8643e1b
SF
97 cifs_buf_release(midEntry->resp_buf);
98 else
99 cifs_small_buf_release(midEntry->resp_buf);
1047abc1
SF
100#ifdef CONFIG_CIFS_STATS2
101 now = jiffies;
433b8dd7 102 if (now < midEntry->when_alloc)
a0a3036b 103 cifs_server_dbg(VFS, "Invalid mid allocation time\n");
433b8dd7
SF
104 roundtrip_time = now - midEntry->when_alloc;
105
106 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
107 if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
108 server->slowest_cmd[smb_cmd] = roundtrip_time;
109 server->fastest_cmd[smb_cmd] = roundtrip_time;
110 } else {
111 if (server->slowest_cmd[smb_cmd] < roundtrip_time)
112 server->slowest_cmd[smb_cmd] = roundtrip_time;
113 else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
114 server->fastest_cmd[smb_cmd] = roundtrip_time;
115 }
116 cifs_stats_inc(&server->num_cmds[smb_cmd]);
117 server->time_per_cmd[smb_cmd] += roundtrip_time;
118 }
00778e22
SF
119 /*
120 * commands taking longer than one second (default) can be indications
121 * that something is wrong, unless it is quite a slow link or a very
122 * busy server. Note that this calc is unlikely or impossible to wrap
123 * as long as slow_rsp_threshold is not set way above recommended max
124 * value (32767 ie 9 hours) and is generally harmless even if wrong
125 * since only affects debug counters - so leaving the calc as simple
126 * comparison rather than doing multiple conversions and overflow
127 * checks
128 */
129 if ((slow_rsp_threshold != 0) &&
130 time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
020eec5f 131 (midEntry->command != command)) {
f5942db5
SF
132 /*
133 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
134 * NB: le16_to_cpu returns unsigned so can not be negative below
135 */
433b8dd7
SF
136 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
137 cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
468d6779 138
433b8dd7 139 trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
020eec5f
SF
140 midEntry->when_sent, midEntry->when_received);
141 if (cifsFYI & CIFS_TIMER) {
a0a3036b
JP
142 pr_debug("slow rsp: cmd %d mid %llu",
143 midEntry->command, midEntry->mid);
144 cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
145 now - midEntry->when_alloc,
146 now - midEntry->when_sent,
147 now - midEntry->when_received);
1047abc1
SF
148 }
149 }
150#endif
f1f27ad7 151 put_task_struct(midEntry->creator);
abe57073
PS
152
153 mempool_free(midEntry, cifs_mid_poolp);
154}
155
70f08f91 156void release_mid(struct mid_q_entry *mid)
abe57073 157{
70f08f91 158 struct TCP_Server_Info *server = mid->server;
d7d7a66a
SP
159
160 spin_lock(&server->mid_lock);
70f08f91 161 kref_put(&mid->refcount, __release_mid);
d7d7a66a 162 spin_unlock(&server->mid_lock);
abe57073
PS
163}
164
3c1bf7e4 165void
70f08f91 166delete_mid(struct mid_q_entry *mid)
ddc8cf8f 167{
d7d7a66a 168 spin_lock(&mid->server->mid_lock);
abe57073
PS
169 if (!(mid->mid_flags & MID_DELETED)) {
170 list_del_init(&mid->qhead);
171 mid->mid_flags |= MID_DELETED;
172 }
d7d7a66a 173 spin_unlock(&mid->server->mid_lock);
ddc8cf8f 174
70f08f91 175 release_mid(mid);
ddc8cf8f
JL
176}
177
6f49f46b
JL
178/*
179 * smb_send_kvec - send an array of kvecs to the server
180 * @server: Server to send the data to
3ab3f2a1 181 * @smb_msg: Message to send
6f49f46b
JL
182 * @sent: amount of data sent on socket is stored here
183 *
184 * Our basic "send data to server" function. Should be called with srv_mutex
185 * held. The caller is responsible for handling the results.
186 */
d6e04ae6 187static int
3ab3f2a1
AV
188smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
189 size_t *sent)
1da177e4
LT
190{
191 int rc = 0;
3ab3f2a1 192 int retries = 0;
edf1ae40 193 struct socket *ssocket = server->ssocket;
50c2f753 194
6f49f46b
JL
195 *sent = 0;
196
0496e02d 197 if (server->noblocksnd)
3ab3f2a1 198 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
edf1ae40 199 else
3ab3f2a1 200 smb_msg->msg_flags = MSG_NOSIGNAL;
1da177e4 201
3ab3f2a1 202 while (msg_data_left(smb_msg)) {
6f49f46b
JL
203 /*
204 * If blocking send, we try 3 times, since each can block
205 * for 5 seconds. For nonblocking we have to try more
206 * but wait increasing amounts of time allowing time for
207 * socket to clear. The overall time we wait in either
208 * case to send on the socket is about 15 seconds.
209 * Similarly we wait for 15 seconds for a response from
210 * the server in SendReceive[2] for the server to send
211 * a response back for most types of requests (except
212 * SMB Write past end of file which can be slow, and
213 * blocking lock operations). NFS waits slightly longer
214 * than CIFS, but this can make it take longer for
215 * nonresponsive servers to be detected and 15 seconds
216 * is more than enough time for modern networks to
217 * send a packet. In most cases if we fail to send
218 * after the retries we will kill the socket and
219 * reconnect which may clear the network problem.
220 */
3ab3f2a1 221 rc = sock_sendmsg(ssocket, smb_msg);
ce6c44e4 222 if (rc == -EAGAIN) {
3ab3f2a1
AV
223 retries++;
224 if (retries >= 14 ||
225 (!server->noblocksnd && (retries > 2))) {
afe6f653 226 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
f96637be 227 ssocket);
3ab3f2a1 228 return -EAGAIN;
1da177e4 229 }
3ab3f2a1 230 msleep(1 << retries);
1da177e4
LT
231 continue;
232 }
6f49f46b 233
79a58d1f 234 if (rc < 0)
3ab3f2a1 235 return rc;
6f49f46b 236
79a58d1f 237 if (rc == 0) {
3e84469d
SF
238 /* should never happen, letting socket clear before
239 retrying is our only obvious option here */
afe6f653 240 cifs_server_dbg(VFS, "tcp sent no data\n");
3e84469d
SF
241 msleep(500);
242 continue;
d6e04ae6 243 }
6f49f46b 244
3ab3f2a1
AV
245 /* send was at least partially successful */
246 *sent += rc;
247 retries = 0; /* in case we get ENOSPC on the next send */
1da177e4 248 }
3ab3f2a1 249 return 0;
97bc00b3
JL
250}
251
35e2cc1b 252unsigned long
81f39f95 253smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
a26054d1
JL
254{
255 unsigned int i;
35e2cc1b
PA
256 struct kvec *iov;
257 int nvec;
a26054d1
JL
258 unsigned long buflen = 0;
259
d291e703 260 if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
9789de8b 261 rqst->rq_iov[0].iov_len == 4) {
35e2cc1b
PA
262 iov = &rqst->rq_iov[1];
263 nvec = rqst->rq_nvec - 1;
264 } else {
265 iov = rqst->rq_iov;
266 nvec = rqst->rq_nvec;
267 }
268
a26054d1 269 /* total up iov array first */
35e2cc1b 270 for (i = 0; i < nvec; i++)
a26054d1
JL
271 buflen += iov[i].iov_len;
272
d08089f6 273 buflen += iov_iter_count(&rqst->rq_iter);
a26054d1
JL
274 return buflen;
275}
276
6f49f46b 277static int
07cd952f
RS
278__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
279 struct smb_rqst *rqst)
6f49f46b 280{
07cd952f
RS
281 int rc = 0;
282 struct kvec *iov;
283 int n_vec;
284 unsigned int send_length = 0;
285 unsigned int i, j;
b30c74c7 286 sigset_t mask, oldmask;
3ab3f2a1 287 size_t total_len = 0, sent, size;
b8eed283 288 struct socket *ssocket = server->ssocket;
bedc8f76 289 struct msghdr smb_msg = {};
c713c877
RS
290 __be32 rfc1002_marker;
291
4357d45f
LL
292 if (cifs_rdma_enabled(server)) {
293 /* return -EAGAIN when connecting or reconnecting */
294 rc = -EAGAIN;
295 if (server->smbd_conn)
296 rc = smbd_send(server, num_rqst, rqst);
9762c2d0
LL
297 goto smbd_done;
298 }
afc18a6f 299
ea702b80 300 if (ssocket == NULL)
afc18a6f 301 return -EAGAIN;
ea702b80 302
214a5ea0 303 if (fatal_signal_pending(current)) {
6988a619
PA
304 cifs_dbg(FYI, "signal pending before send request\n");
305 return -ERESTARTSYS;
b30c74c7
PS
306 }
307
b8eed283 308 /* cork the socket */
db10538a 309 tcp_sock_set_cork(ssocket->sk, true);
b8eed283 310
07cd952f 311 for (j = 0; j < num_rqst; j++)
81f39f95 312 send_length += smb_rqst_len(server, &rqst[j]);
07cd952f
RS
313 rfc1002_marker = cpu_to_be32(send_length);
314
b30c74c7
PS
315 /*
316 * We should not allow signals to interrupt the network send because
317 * any partial send will cause session reconnects thus increasing
318 * latency of system calls and overload a server with unnecessary
319 * requests.
320 */
321
322 sigfillset(&mask);
323 sigprocmask(SIG_BLOCK, &mask, &oldmask);
324
c713c877 325 /* Generate a rfc1002 marker for SMB2+ */
d291e703 326 if (!is_smb1(server)) {
c713c877
RS
327 struct kvec hiov = {
328 .iov_base = &rfc1002_marker,
329 .iov_len = 4
330 };
de4eda9d 331 iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
c713c877
RS
332 rc = smb_send_kvec(server, &smb_msg, &sent);
333 if (rc < 0)
b30c74c7 334 goto unmask;
c713c877
RS
335
336 total_len += sent;
337 send_length += 4;
338 }
339
662bf5bc
PA
340 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
341
07cd952f
RS
342 for (j = 0; j < num_rqst; j++) {
343 iov = rqst[j].rq_iov;
344 n_vec = rqst[j].rq_nvec;
3ab3f2a1 345
07cd952f 346 size = 0;
662bf5bc
PA
347 for (i = 0; i < n_vec; i++) {
348 dump_smb(iov[i].iov_base, iov[i].iov_len);
07cd952f 349 size += iov[i].iov_len;
662bf5bc 350 }
97bc00b3 351
de4eda9d 352 iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);
97bc00b3 353
3ab3f2a1 354 rc = smb_send_kvec(server, &smb_msg, &sent);
97bc00b3 355 if (rc < 0)
b30c74c7 356 goto unmask;
97bc00b3
JL
357
358 total_len += sent;
07cd952f 359
d08089f6
DH
360 if (iov_iter_count(&rqst[j].rq_iter) > 0) {
361 smb_msg.msg_iter = rqst[j].rq_iter;
07cd952f
RS
362 rc = smb_send_kvec(server, &smb_msg, &sent);
363 if (rc < 0)
364 break;
07cd952f
RS
365 total_len += sent;
366 }
d08089f6
DH
367
368}
1da177e4 369
b30c74c7
PS
370unmask:
371 sigprocmask(SIG_SETMASK, &oldmask, NULL);
372
373 /*
374 * If signal is pending but we have already sent the whole packet to
375 * the server we need to return success status to allow a corresponding
376 * mid entry to be kept in the pending requests queue thus allowing
377 * to handle responses from the server by the client.
378 *
379 * If only part of the packet has been sent there is no need to hide
380 * interrupt because the session will be reconnected anyway, so there
381 * won't be any response from the server to handle.
382 */
383
384 if (signal_pending(current) && (total_len != send_length)) {
385 cifs_dbg(FYI, "signal is pending after attempt to send\n");
214a5ea0 386 rc = -ERESTARTSYS;
b30c74c7
PS
387 }
388
b8eed283 389 /* uncork it */
db10538a 390 tcp_sock_set_cork(ssocket->sk, false);
b8eed283 391
c713c877 392 if ((total_len > 0) && (total_len != send_length)) {
f96637be 393 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
c713c877 394 send_length, total_len);
6f49f46b
JL
395 /*
396 * If we have only sent part of an SMB then the next SMB could
397 * be taken as the remainder of this one. We need to kill the
398 * socket so the server throws away the partial SMB
399 */
dca65818 400 cifs_signal_cifsd_for_reconnect(server, false);
bf1fdeb7 401 trace_smb3_partial_send_reconnect(server->CurrentMid,
6d82c27a 402 server->conn_id, server->hostname);
edf1ae40 403 }
9762c2d0 404smbd_done:
d804d41d 405 if (rc < 0 && rc != -EINTR)
afe6f653 406 cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
f96637be 407 rc);
ee13919c 408 else if (rc > 0)
1da177e4 409 rc = 0;
1da177e4
LT
410
411 return rc;
412}
413
6f49f46b 414static int
1f3a8f5f
RS
415smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
416 struct smb_rqst *rqst, int flags)
6f49f46b 417{
b2c96de7 418 struct kvec iov;
3946d0d0 419 struct smb2_transform_hdr *tr_hdr;
b2c96de7 420 struct smb_rqst cur_rqst[MAX_COMPOUND];
7fb8986e
PS
421 int rc;
422
423 if (!(flags & CIFS_TRANSFORM_REQ))
1f3a8f5f
RS
424 return __smb_send_rqst(server, num_rqst, rqst);
425
426 if (num_rqst > MAX_COMPOUND - 1)
427 return -ENOMEM;
7fb8986e 428
b2c96de7 429 if (!server->ops->init_transform_rq) {
a0a3036b 430 cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
7fb8986e
PS
431 return -EIO;
432 }
6f49f46b 433
9339faac 434 tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
3946d0d0
LL
435 if (!tr_hdr)
436 return -ENOMEM;
437
438 memset(&cur_rqst[0], 0, sizeof(cur_rqst));
439 memset(&iov, 0, sizeof(iov));
3946d0d0
LL
440
441 iov.iov_base = tr_hdr;
442 iov.iov_len = sizeof(*tr_hdr);
443 cur_rqst[0].rq_iov = &iov;
444 cur_rqst[0].rq_nvec = 1;
445
1f3a8f5f
RS
446 rc = server->ops->init_transform_rq(server, num_rqst + 1,
447 &cur_rqst[0], rqst);
7fb8986e 448 if (rc)
3946d0d0 449 goto out;
7fb8986e 450
1f3a8f5f
RS
451 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
452 smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
3946d0d0
LL
453out:
454 kfree(tr_hdr);
7fb8986e 455 return rc;
6f49f46b
JL
456}
457
0496e02d
JL
458int
459smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
460 unsigned int smb_buf_length)
461{
738f9de5 462 struct kvec iov[2];
7fb8986e
PS
463 struct smb_rqst rqst = { .rq_iov = iov,
464 .rq_nvec = 2 };
0496e02d 465
738f9de5
PS
466 iov[0].iov_base = smb_buffer;
467 iov[0].iov_len = 4;
468 iov[1].iov_base = (char *)smb_buffer + 4;
469 iov[1].iov_len = smb_buf_length;
0496e02d 470
07cd952f 471 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
472}
473
fc40f9cf 474static int
b227d215 475wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
2b53b929
RS
476 const int timeout, const int flags,
477 unsigned int *instance)
1da177e4 478{
19e88867 479 long rc;
4230cff8
RS
480 int *credits;
481 int optype;
2b53b929 482 long int t;
6d82c27a 483 int scredits, in_flight;
2b53b929
RS
484
485 if (timeout < 0)
486 t = MAX_JIFFY_OFFSET;
487 else
488 t = msecs_to_jiffies(timeout);
4230cff8
RS
489
490 optype = flags & CIFS_OP_MASK;
5bc59498 491
34f4deb7
PS
492 *instance = 0;
493
4230cff8
RS
494 credits = server->ops->get_credits_field(server, optype);
495 /* Since an echo is already inflight, no need to wait to send another */
496 if (*credits <= 0 && optype == CIFS_ECHO_OP)
497 return -EAGAIN;
498
fc40f9cf 499 spin_lock(&server->req_lock);
392e1c5d 500 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
1da177e4 501 /* oplock breaks must not be held up */
fc40f9cf 502 server->in_flight++;
1b63f184
SF
503 if (server->in_flight > server->max_in_flight)
504 server->max_in_flight = server->in_flight;
bc205ed1 505 *credits -= 1;
34f4deb7 506 *instance = server->reconnect_instance;
6d82c27a
SP
507 scredits = *credits;
508 in_flight = server->in_flight;
fc40f9cf 509 spin_unlock(&server->req_lock);
6d82c27a 510
1ddff774 511 trace_smb3_nblk_credits(server->CurrentMid,
6d82c27a
SP
512 server->conn_id, server->hostname, scredits, -1, in_flight);
513 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
514 __func__, 1, scredits);
515
27a97a61
VL
516 return 0;
517 }
518
27a97a61 519 while (1) {
b227d215 520 if (*credits < num_credits) {
6d82c27a 521 scredits = *credits;
fc40f9cf 522 spin_unlock(&server->req_lock);
6d82c27a 523
789e6661 524 cifs_num_waiters_inc(server);
2b53b929
RS
525 rc = wait_event_killable_timeout(server->request_q,
526 has_credits(server, credits, num_credits), t);
789e6661 527 cifs_num_waiters_dec(server);
2b53b929 528 if (!rc) {
6d82c27a
SP
529 spin_lock(&server->req_lock);
530 scredits = *credits;
531 in_flight = server->in_flight;
532 spin_unlock(&server->req_lock);
533
7937ca96 534 trace_smb3_credit_timeout(server->CurrentMid,
6d82c27a
SP
535 server->conn_id, server->hostname, scredits,
536 num_credits, in_flight);
afe6f653 537 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
6d82c27a 538 timeout);
7de03948 539 return -EBUSY;
2b53b929
RS
540 }
541 if (rc == -ERESTARTSYS)
542 return -ERESTARTSYS;
fc40f9cf 543 spin_lock(&server->req_lock);
27a97a61 544 } else {
080dc5e5
SP
545 spin_unlock(&server->req_lock);
546
d7d7a66a 547 spin_lock(&server->srv_lock);
c5797a94 548 if (server->tcpStatus == CifsExiting) {
d7d7a66a 549 spin_unlock(&server->srv_lock);
27a97a61 550 return -ENOENT;
1da177e4 551 }
d7d7a66a 552 spin_unlock(&server->srv_lock);
27a97a61 553
16b34aa4
RS
554 /*
555 * For normal commands, reserve the last MAX_COMPOUND
556 * credits to compound requests.
557 * Otherwise these compounds could be permanently
558 * starved for credits by single-credit requests.
559 *
560 * To prevent spinning CPU, block this thread until
561 * there are >MAX_COMPOUND credits available.
562 * But only do this is we already have a lot of
563 * credits in flight to avoid triggering this check
564 * for servers that are slow to hand out credits on
565 * new sessions.
566 */
080dc5e5 567 spin_lock(&server->req_lock);
16b34aa4
RS
568 if (!optype && num_credits == 1 &&
569 server->in_flight > 2 * MAX_COMPOUND &&
570 *credits <= MAX_COMPOUND) {
571 spin_unlock(&server->req_lock);
6d82c27a 572
16b34aa4 573 cifs_num_waiters_inc(server);
2b53b929
RS
574 rc = wait_event_killable_timeout(
575 server->request_q,
16b34aa4 576 has_credits(server, credits,
2b53b929
RS
577 MAX_COMPOUND + 1),
578 t);
16b34aa4 579 cifs_num_waiters_dec(server);
2b53b929 580 if (!rc) {
6d82c27a
SP
581 spin_lock(&server->req_lock);
582 scredits = *credits;
583 in_flight = server->in_flight;
584 spin_unlock(&server->req_lock);
585
7937ca96 586 trace_smb3_credit_timeout(
6d82c27a
SP
587 server->CurrentMid,
588 server->conn_id, server->hostname,
589 scredits, num_credits, in_flight);
afe6f653 590 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
6d82c27a 591 timeout);
7de03948 592 return -EBUSY;
2b53b929
RS
593 }
594 if (rc == -ERESTARTSYS)
595 return -ERESTARTSYS;
16b34aa4
RS
596 spin_lock(&server->req_lock);
597 continue;
598 }
599
2d86dbc9
PS
600 /*
601 * Can not count locking commands against total
602 * as they are allowed to block on server.
603 */
27a97a61
VL
604
605 /* update # of requests on the wire to server */
4230cff8 606 if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
b227d215
RS
607 *credits -= num_credits;
608 server->in_flight += num_credits;
1b63f184
SF
609 if (server->in_flight > server->max_in_flight)
610 server->max_in_flight = server->in_flight;
34f4deb7 611 *instance = server->reconnect_instance;
2d86dbc9 612 }
6d82c27a
SP
613 scredits = *credits;
614 in_flight = server->in_flight;
fc40f9cf 615 spin_unlock(&server->req_lock);
cd7b699b 616
1ddff774 617 trace_smb3_waitff_credits(server->CurrentMid,
6d82c27a
SP
618 server->conn_id, server->hostname, scredits,
619 -(num_credits), in_flight);
cd7b699b
SP
620 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
621 __func__, num_credits, scredits);
27a97a61 622 break;
1da177e4
LT
623 }
624 }
7ee1af76
JA
625 return 0;
626}
1da177e4 627
bc205ed1 628static int
480b1cb9
RS
629wait_for_free_request(struct TCP_Server_Info *server, const int flags,
630 unsigned int *instance)
bc205ed1 631{
2b53b929
RS
632 return wait_for_free_credits(server, 1, -1, flags,
633 instance);
bc205ed1
PS
634}
635
257b7809
RS
636static int
637wait_for_compound_request(struct TCP_Server_Info *server, int num,
638 const int flags, unsigned int *instance)
639{
640 int *credits;
6d82c27a 641 int scredits, in_flight;
257b7809
RS
642
643 credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
644
645 spin_lock(&server->req_lock);
cd7b699b 646 scredits = *credits;
6d82c27a 647 in_flight = server->in_flight;
cd7b699b 648
257b7809
RS
649 if (*credits < num) {
650 /*
91792bb8
PS
651 * If the server is tight on resources or just gives us less
652 * credits for other reasons (e.g. requests are coming out of
653 * order and the server delays granting more credits until it
654 * processes a missing mid) and we exhausted most available
655 * credits there may be situations when we try to send
656 * a compound request but we don't have enough credits. At this
657 * point the client needs to decide if it should wait for
658 * additional credits or fail the request. If at least one
659 * request is in flight there is a high probability that the
660 * server will return enough credits to satisfy this compound
661 * request.
662 *
663 * Return immediately if no requests in flight since we will be
664 * stuck on waiting for credits.
257b7809 665 */
91792bb8 666 if (server->in_flight == 0) {
257b7809 667 spin_unlock(&server->req_lock);
cd7b699b 668 trace_smb3_insufficient_credits(server->CurrentMid,
6d82c27a
SP
669 server->conn_id, server->hostname, scredits,
670 num, in_flight);
cd7b699b 671 cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
6d82c27a 672 __func__, in_flight, num, scredits);
7de03948 673 return -EDEADLK;
257b7809
RS
674 }
675 }
676 spin_unlock(&server->req_lock);
677
678 return wait_for_free_credits(server, num, 60000, flags,
679 instance);
680}
681
cb7e9eab
PS
682int
683cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
335b7b62 684 unsigned int *num, struct cifs_credits *credits)
cb7e9eab
PS
685{
686 *num = size;
335b7b62
PS
687 credits->value = 0;
688 credits->instance = server->reconnect_instance;
cb7e9eab
PS
689 return 0;
690}
691
96daf2b0 692static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
7ee1af76
JA
693 struct mid_q_entry **ppmidQ)
694{
d7d7a66a 695 spin_lock(&ses->ses_lock);
dd3cd870 696 if (ses->ses_status == SES_NEW) {
79a58d1f 697 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
080dc5e5 698 (in_buf->Command != SMB_COM_NEGOTIATE)) {
d7d7a66a 699 spin_unlock(&ses->ses_lock);
7ee1af76 700 return -EAGAIN;
080dc5e5 701 }
ad7a2926 702 /* else ok - we are setting up session */
1da177e4 703 }
7f48558e 704
dd3cd870 705 if (ses->ses_status == SES_EXITING) {
7f48558e 706 /* check if SMB session is bad because we are setting it up */
080dc5e5 707 if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
d7d7a66a 708 spin_unlock(&ses->ses_lock);
7f48558e 709 return -EAGAIN;
080dc5e5 710 }
7f48558e
SP
711 /* else ok - we are shutting down session */
712 }
d7d7a66a 713 spin_unlock(&ses->ses_lock);
7f48558e 714
70f08f91 715 *ppmidQ = alloc_mid(in_buf, ses->server);
26f57364 716 if (*ppmidQ == NULL)
7ee1af76 717 return -ENOMEM;
d7d7a66a 718 spin_lock(&ses->server->mid_lock);
ddc8cf8f 719 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
d7d7a66a 720 spin_unlock(&ses->server->mid_lock);
7ee1af76
JA
721 return 0;
722}
723
0ade640e
JL
724static int
725wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
7ee1af76 726{
0ade640e 727 int error;
7ee1af76 728
f5d39b02
PZ
729 error = wait_event_state(server->response_q,
730 midQ->mid_state != MID_REQUEST_SUBMITTED,
731 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
0ade640e
JL
732 if (error < 0)
733 return -ERESTARTSYS;
7ee1af76 734
0ade640e 735 return 0;
7ee1af76
JA
736}
737
fec344e3
JL
738struct mid_q_entry *
739cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
792af7b0
PS
740{
741 int rc;
fec344e3 742 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
743 struct mid_q_entry *mid;
744
738f9de5
PS
745 if (rqst->rq_iov[0].iov_len != 4 ||
746 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
747 return ERR_PTR(-EIO);
748
792af7b0 749 /* enable signing if server requires it */
38d77c50 750 if (server->sign)
792af7b0
PS
751 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
752
70f08f91 753 mid = alloc_mid(hdr, server);
792af7b0 754 if (mid == NULL)
fec344e3 755 return ERR_PTR(-ENOMEM);
792af7b0 756
fec344e3 757 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
ffc61ccb 758 if (rc) {
70f08f91 759 release_mid(mid);
fec344e3 760 return ERR_PTR(rc);
ffc61ccb
SP
761 }
762
fec344e3 763 return mid;
792af7b0 764}
133672ef 765
a6827c18
JL
766/*
767 * Send a SMB request and set the callback function in the mid to handle
768 * the result. Caller is responsible for dealing with timeouts.
769 */
770int
fec344e3 771cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
9b7c18a2 772 mid_receive_t *receive, mid_callback_t *callback,
3349c3a7
PS
773 mid_handle_t *handle, void *cbdata, const int flags,
774 const struct cifs_credits *exist_credits)
a6827c18 775{
480b1cb9 776 int rc;
a6827c18 777 struct mid_q_entry *mid;
335b7b62 778 struct cifs_credits credits = { .value = 0, .instance = 0 };
34f4deb7 779 unsigned int instance;
480b1cb9 780 int optype;
a6827c18 781
a891f0f8
PS
782 optype = flags & CIFS_OP_MASK;
783
cb7e9eab 784 if ((flags & CIFS_HAS_CREDITS) == 0) {
480b1cb9 785 rc = wait_for_free_request(server, flags, &instance);
cb7e9eab
PS
786 if (rc)
787 return rc;
335b7b62 788 credits.value = 1;
34f4deb7 789 credits.instance = instance;
3349c3a7
PS
790 } else
791 instance = exist_credits->instance;
a6827c18 792
cc391b69 793 cifs_server_lock(server);
3349c3a7
PS
794
795 /*
796 * We can't use credits obtained from the previous session to send this
797 * request. Check if there were reconnects after we obtained credits and
798 * return -EAGAIN in such cases to let callers handle it.
799 */
800 if (instance != server->reconnect_instance) {
cc391b69 801 cifs_server_unlock(server);
3349c3a7
PS
802 add_credits_and_wake_if(server, &credits, optype);
803 return -EAGAIN;
804 }
805
fec344e3
JL
806 mid = server->ops->setup_async_request(server, rqst);
807 if (IS_ERR(mid)) {
cc391b69 808 cifs_server_unlock(server);
335b7b62 809 add_credits_and_wake_if(server, &credits, optype);
fec344e3 810 return PTR_ERR(mid);
a6827c18
JL
811 }
812
44d22d84 813 mid->receive = receive;
a6827c18
JL
814 mid->callback = callback;
815 mid->callback_data = cbdata;
9b7c18a2 816 mid->handle = handle;
7c9421e1 817 mid->mid_state = MID_REQUEST_SUBMITTED;
789e6661 818
ffc61ccb 819 /* put it on the pending_mid_q */
d7d7a66a 820 spin_lock(&server->mid_lock);
ffc61ccb 821 list_add_tail(&mid->qhead, &server->pending_mid_q);
d7d7a66a 822 spin_unlock(&server->mid_lock);
ffc61ccb 823
93d2cb6c
LL
824 /*
825 * Need to store the time in mid before calling I/O. For call_async,
826 * I/O response may come back and free the mid entry on another thread.
827 */
828 cifs_save_when_sent(mid);
789e6661 829 cifs_in_send_inc(server);
1f3a8f5f 830 rc = smb_send_rqst(server, 1, rqst, flags);
789e6661 831 cifs_in_send_dec(server);
ad313cb8 832
820962dc 833 if (rc < 0) {
c781af7e 834 revert_current_mid(server, mid->credits);
ad313cb8 835 server->sequence_number -= 2;
70f08f91 836 delete_mid(mid);
820962dc
RV
837 }
838
cc391b69 839 cifs_server_unlock(server);
789e6661 840
ffc61ccb
SP
841 if (rc == 0)
842 return 0;
a6827c18 843
335b7b62 844 add_credits_and_wake_if(server, &credits, optype);
a6827c18
JL
845 return rc;
846}
847
133672ef
SF
848/*
849 *
850 * Send an SMB Request. No response info (other than return code)
851 * needs to be parsed.
852 *
853 * flags indicate the type of request buffer and how long to wait
854 * and whether to log NT STATUS code (error) before mapping it to POSIX error
855 *
856 */
857int
96daf2b0 858SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
792af7b0 859 char *in_buf, int flags)
133672ef
SF
860{
861 int rc;
862 struct kvec iov[1];
da502f7d 863 struct kvec rsp_iov;
133672ef
SF
864 int resp_buf_type;
865
792af7b0
PS
866 iov[0].iov_base = in_buf;
867 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
392e1c5d 868 flags |= CIFS_NO_RSP_BUF;
da502f7d 869 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
f96637be 870 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
90c81e0b 871
133672ef
SF
872 return rc;
873}
874
053d5034 875static int
3c1105df 876cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
053d5034
JL
877{
878 int rc = 0;
879
f96637be
JP
880 cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
881 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
053d5034 882
d7d7a66a 883 spin_lock(&server->mid_lock);
7c9421e1 884 switch (mid->mid_state) {
74dd92a8 885 case MID_RESPONSE_RECEIVED:
d7d7a66a 886 spin_unlock(&server->mid_lock);
053d5034 887 return rc;
74dd92a8
JL
888 case MID_RETRY_NEEDED:
889 rc = -EAGAIN;
890 break;
71823baf
JL
891 case MID_RESPONSE_MALFORMED:
892 rc = -EIO;
893 break;
3c1105df
JL
894 case MID_SHUTDOWN:
895 rc = -EHOSTDOWN;
896 break;
74dd92a8 897 default:
abe57073
PS
898 if (!(mid->mid_flags & MID_DELETED)) {
899 list_del_init(&mid->qhead);
900 mid->mid_flags |= MID_DELETED;
901 }
afe6f653 902 cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
f96637be 903 __func__, mid->mid, mid->mid_state);
74dd92a8 904 rc = -EIO;
053d5034 905 }
d7d7a66a 906 spin_unlock(&server->mid_lock);
053d5034 907
70f08f91 908 release_mid(mid);
053d5034
JL
909 return rc;
910}
911
121b046a 912static inline int
fb2036d8
PS
913send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
914 struct mid_q_entry *mid)
76dcc26f 915{
121b046a 916 return server->ops->send_cancel ?
fb2036d8 917 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
918}
919
/*
 * Validate a received SMB1 response for @mid: dump the start of the
 * frame, verify the packet signature when the session is signed, then
 * map the SMB status code to a POSIX error (returned to the caller).
 *
 * NOTE: a signature-verification failure is only logged here; the
 * response is still mapped and returned (see FIXME below).
 */
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	/* full frame length: RFC1001 length field + 4-byte preamble */
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		/* vec 0 = RFC1001 preamble, vec 1 = the SMB proper */
		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}
950
fec344e3 951struct mid_q_entry *
f780bd3f
AA
952cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
953 struct smb_rqst *rqst)
792af7b0
PS
954{
955 int rc;
fec344e3 956 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
957 struct mid_q_entry *mid;
958
738f9de5
PS
959 if (rqst->rq_iov[0].iov_len != 4 ||
960 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
961 return ERR_PTR(-EIO);
962
792af7b0
PS
963 rc = allocate_mid(ses, hdr, &mid);
964 if (rc)
fec344e3
JL
965 return ERR_PTR(rc);
966 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
967 if (rc) {
70f08f91 968 delete_mid(mid);
fec344e3
JL
969 return ERR_PTR(rc);
970 }
971 return mid;
792af7b0
PS
972}
973
4e34feb5 974static void
ee258d79 975cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
976{
977 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
978 struct cifs_credits credits;
979
980 credits.value = server->ops->get_credits(mid);
981 credits.instance = server->reconnect_instance;
8a26f0f7 982
34f4deb7 983 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
984}
985
/*
 * Callback for the last PDU of a compound chain: return credits like
 * every other part, then wake the thread waiting in compound_send_recv().
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
992
/*
 * Callback installed when the sender gave up waiting: still account the
 * credits, then drop the mid's reference since no thread will collect it.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	release_mid(mid);
}
999
/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
	uint index = 0;
	unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
	struct TCP_Server_Info *server = NULL;
	int i;

	if (!ses)
		return NULL;

	spin_lock(&ses->chan_lock);
	/* pick the channel with the fewest requests currently in flight */
	for (i = 0; i < ses->chan_count; i++) {
		server = ses->chans[i].server;
		if (!server)
			continue;

		/*
		 * strictly speaking, we should pick up req_lock to read
		 * server->in_flight. But it shouldn't matter much here if we
		 * race while reading this data. The worst that can happen is
		 * that we could use a channel that's not least loaded. Avoiding
		 * taking the lock could help reduce wait time, which is
		 * important for this function
		 */
		if (server->in_flight < min_in_flight) {
			min_in_flight = server->in_flight;
			index = i;
		}
		if (server->in_flight > max_in_flight)
			max_in_flight = server->in_flight;
	}

	/* if all channels are equally loaded, fall back to round-robin */
	if (min_in_flight == max_in_flight) {
		index = (uint)atomic_inc_return(&ses->chan_seq);
		index %= ses->chan_count;
	}
	spin_unlock(&ses->chan_lock);

	/*
	 * NOTE(review): chans[index].server is read after dropping
	 * chan_lock — presumably the channel array is stable for the
	 * session's lifetime; confirm against channel reconfiguration.
	 */
	return ses->chans[index].server;
}
1048
/*
 * Send @num_rqst requests (a compound chain, or a single request when
 * num_rqst == 1) on @server and wait for all responses.
 *
 * Credit and ordering rules enforced here:
 *  - credits for the whole chain are obtained up front and must belong
 *    to the current reconnect instance, otherwise -EAGAIN;
 *  - signing and sending happen under cifs_server_lock() so signatures
 *    match the on-wire order;
 *  - every PDU's callback returns its granted credits; only the last
 *    PDU's callback wakes this thread.
 *
 * On success, resp_iov[i]/resp_buf_type[i] describe each response;
 * callers own the response buffers unless CIFS_NO_RSP_BUF was set.
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	cifs_server_lock(server);

	/*
	 * All the parts of the compound chain must use credits obtained
	 * from the current session. We can not use credits obtained from a
	 * previous session to send this request. Check if there were
	 * reconnects after we obtained credits and return -EAGAIN in such
	 * cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* undo mids 0..i-1 and give all credits back */
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				delete_mid(midQ[j]);
			cifs_server_unlock(server);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* send failed: roll back mid allocation and signing state */
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	cifs_server_unlock(server);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&ses->ses_lock);

		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		cifs_server_unlock(server);

		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* cancel mid i and every later one we never waited on */
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&server->mid_lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&server->mid_lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			HEADER_PREAMBLE_SIZE(server);

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&ses->ses_lock);
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		cifs_server_unlock(server);
		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			delete_mid(midQ[i]);
	}

	return rc;
}
1da177e4 1283
/*
 * Convenience wrapper: send a single (non-compounded) request on
 * @server and wait for its response.  See compound_send_recv() for
 * the credit, signing, and wait semantics.
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}
1293
738f9de5
PS
1294int
1295SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1296 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1297 const int flags, struct kvec *resp_iov)
1298{
1299 struct smb_rqst rqst;
3cecf486 1300 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
738f9de5
PS
1301 int rc;
1302
3cecf486 1303 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
6da2ec56
KC
1304 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1305 GFP_KERNEL);
117e3b7f
SF
1306 if (!new_iov) {
1307 /* otherwise cifs_send_recv below sets resp_buf_type */
1308 *resp_buf_type = CIFS_NO_BUFFER;
3cecf486 1309 return -ENOMEM;
117e3b7f 1310 }
3cecf486
RS
1311 } else
1312 new_iov = s_iov;
738f9de5
PS
1313
1314 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1315 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1316
1317 new_iov[0].iov_base = new_iov[1].iov_base;
1318 new_iov[0].iov_len = 4;
1319 new_iov[1].iov_base += 4;
1320 new_iov[1].iov_len -= 4;
1321
1322 memset(&rqst, 0, sizeof(struct smb_rqst));
1323 rqst.rq_iov = new_iov;
1324 rqst.rq_nvec = n_vec + 1;
1325
352d96f3
AA
1326 rc = cifs_send_recv(xid, ses, ses->server,
1327 &rqst, resp_buf_type, flags, resp_iov);
3cecf486
RS
1328 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1329 kfree(new_iov);
738f9de5
PS
1330 return rc;
1331}
1332
/*
 * Classic SMB1 synchronous send/receive for a fully-built request
 * buffer.  Obtains one credit, signs and sends under the server lock,
 * waits for the response, and copies it into @out_buf (length returned
 * through @pbytes_returned).  The single credit is always returned to
 * the server before this function exits.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* roll back the signing sequence if nothing hit the wire */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&server->mid_lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = release_mid;
			spin_unlock(&server->mid_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&server->mid_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		/* mid reference already dropped by cifs_sync_mid_result */
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
1da177e4 1448
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	/* the cancel is a new request on the wire, so it needs a fresh mid */
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}
1473
/*
 * Send a blocking byte-range lock request and wait interruptibly for
 * the response.  If the wait is interrupted by a signal while the
 * request is still outstanding, an appropriate cancel is issued (NT
 * cancel for POSIX/trans2 locks, LOCKINGX_CANCEL_LOCK for Windows
 * locks) and the response to the original request is then collected;
 * in that case -EACCES from the server is translated to -ERESTARTSYS
 * so the system call is restarted.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* roll back the signing sequence if nothing hit the wire */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		/* now wait (uninterruptibly) for the original lock reply */
		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&server->mid_lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = release_mid;
				spin_unlock(&server->mid_lock);
				return rc;
			}
			spin_unlock(&server->mid_lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
fb157ed2
SF
1632
1633/*
1634 * Discard any remaining data in the current SMB. To do this, we borrow the
1635 * current bigbuf.
1636 */
1637int
1638cifs_discard_remaining_data(struct TCP_Server_Info *server)
1639{
1640 unsigned int rfclen = server->pdu_size;
d08089f6 1641 size_t remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
fb157ed2
SF
1642 server->total_read;
1643
1644 while (remaining > 0) {
d08089f6 1645 ssize_t length;
fb157ed2
SF
1646
1647 length = cifs_discard_from_socket(server,
1648 min_t(size_t, remaining,
1649 CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
1650 if (length < 0)
1651 return length;
1652 server->total_read += length;
1653 remaining -= length;
1654 }
1655
1656 return 0;
1657}
1658
1659static int
1660__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
1661 bool malformed)
1662{
1663 int length;
1664
1665 length = cifs_discard_remaining_data(server);
1666 dequeue_mid(mid, malformed);
1667 mid->resp_buf = server->smallbuf;
1668 server->smallbuf = NULL;
1669 return length;
1670}
1671
1672static int
1673cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1674{
1675 struct cifs_readdata *rdata = mid->callback_data;
1676
1677 return __cifs_readv_discard(server, mid, rdata->result);
1678}
1679
/*
 * Receive handler for an async read response: finish reading the
 * READ_RSP header from the socket, validate the offsets the server
 * reported, then pull the payload either via RDMA (already placed when
 * rdata->mr is set) or directly from the socket into rdata->iter.
 * Malformed or short frames are drained via cifs_readv_discard().
 */
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_readdata *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
	bool use_rdma_mr = false;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
		 __func__, mid->mid, rdata->offset, rdata->bytes);

	/*
	 * read the rest of READ_RSP header (sans Data array), or whatever we
	 * can if there's not enough data. At this point, we've read down to
	 * the Mid.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
		HEADER_SIZE(server) + 1;

	length = cifs_read_from_socket(server,
				       buf + HEADER_SIZE(server) - 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server)) {
		/* interim response: drop this frame, a real one follows */
		cifs_discard_remaining_data(server);
		return -1;
	}

	/* set up first two iov for signature check and to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_len =
		server->total_read - HEADER_PREAMBLE_SIZE(server);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	/* Was the SMB read successful? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		return __cifs_readv_discard(server, mid, false);
	}

	/* Is there enough to get to the rest of the READ_RSP header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	data_offset = server->ops->read_data_offset(buf) +
		HEADER_PREAMBLE_SIZE(server);
	if (data_offset < server->total_read) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	len = data_offset - server->total_read;
	if (len > 0) {
		/* read any junk before data into the rest of smallbuf */
		length = cifs_read_from_socket(server,
					       buf + server->total_read, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);
	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
		/* data_len is corrupt -- discard frame */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr)
		length = data_len; /* An RDMA read is already done. */
	else
#endif
		length = cifs_read_iter_from_socket(server, &rdata->iter,
						    data_len);
	if (length > 0)
		rdata->got_bytes += length;
	server->total_read += length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* discard anything left over */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

	dequeue_mid(mid, false);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}