Commit: "use less confusing names for iov_iter direction initializers"
File: fs/cifs/transport.c (from linux-block.git, annotated blame view)
929be906 1// SPDX-License-Identifier: LGPL-2.1
1da177e4 2/*
1da177e4 3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 */
9
10#include <linux/fs.h>
11#include <linux/list.h>
5a0e3ad6 12#include <linux/gfp.h>
1da177e4
LT
13#include <linux/wait.h>
14#include <linux/net.h>
15#include <linux/delay.h>
f06ac72e 16#include <linux/freezer.h>
b8eed283 17#include <linux/tcp.h>
2f8b5444 18#include <linux/bvec.h>
97bc00b3 19#include <linux/highmem.h>
7c0f6ba6 20#include <linux/uaccess.h>
1da177e4
LT
21#include <asm/processor.h>
22#include <linux/mempool.h>
14e25977 23#include <linux/sched/signal.h>
fb157ed2 24#include <linux/task_io_accounting_ops.h>
1da177e4
LT
25#include "cifspdu.h"
26#include "cifsglob.h"
27#include "cifsproto.h"
28#include "cifs_debug.h"
8bd68c6e 29#include "smb2proto.h"
9762c2d0 30#include "smbdirect.h"
50c2f753 31
3cecf486
RS
32/* Max number of iovectors we can use off the stack when sending requests. */
33#define CIFS_MAX_IOV_SIZE 8
34
2dc7e1c0
PS
35void
36cifs_wake_up_task(struct mid_q_entry *mid)
2b84a36c
JL
37{
38 wake_up_process(mid->callback_data);
39}
40
ea75a78c 41static struct mid_q_entry *
70f08f91 42alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
1da177e4
LT
43{
44 struct mid_q_entry *temp;
45
24b9b06b 46 if (server == NULL) {
70f08f91 47 cifs_dbg(VFS, "%s: null TCP session\n", __func__);
1da177e4
LT
48 return NULL;
49 }
50c2f753 50
232087cb 51 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
a6f74e80 52 memset(temp, 0, sizeof(struct mid_q_entry));
696e420b 53 kref_init(&temp->refcount);
a6f74e80
N
54 temp->mid = get_mid(smb_buffer);
55 temp->pid = current->pid;
56 temp->command = cpu_to_le16(smb_buffer->Command);
57 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
1047abc1 58 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
a6f74e80
N
59 /* when mid allocated can be before when sent */
60 temp->when_alloc = jiffies;
61 temp->server = server;
2b84a36c 62
a6f74e80
N
63 /*
64 * The default is for the mid to be synchronous, so the
65 * default callback just wakes up the current task.
66 */
f1f27ad7
VW
67 get_task_struct(current);
68 temp->creator = current;
a6f74e80
N
69 temp->callback = cifs_wake_up_task;
70 temp->callback_data = current;
1da177e4 71
c2c17ddb 72 atomic_inc(&mid_count);
7c9421e1 73 temp->mid_state = MID_REQUEST_ALLOCATED;
1da177e4
LT
74 return temp;
75}
76
70f08f91 77static void __release_mid(struct kref *refcount)
696e420b 78{
abe57073
PS
79 struct mid_q_entry *midEntry =
80 container_of(refcount, struct mid_q_entry, refcount);
1047abc1 81#ifdef CONFIG_CIFS_STATS2
2dc7e1c0 82 __le16 command = midEntry->server->vals->lock_cmd;
433b8dd7 83 __u16 smb_cmd = le16_to_cpu(midEntry->command);
1047abc1 84 unsigned long now;
433b8dd7 85 unsigned long roundtrip_time;
1047abc1 86#endif
7b71843f
PS
87 struct TCP_Server_Info *server = midEntry->server;
88
89 if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
90 midEntry->mid_state == MID_RESPONSE_RECEIVED &&
91 server->ops->handle_cancelled_mid)
04ad69c3 92 server->ops->handle_cancelled_mid(midEntry, server);
7b71843f 93
7c9421e1 94 midEntry->mid_state = MID_FREE;
c2c17ddb 95 atomic_dec(&mid_count);
7c9421e1 96 if (midEntry->large_buf)
b8643e1b
SF
97 cifs_buf_release(midEntry->resp_buf);
98 else
99 cifs_small_buf_release(midEntry->resp_buf);
1047abc1
SF
100#ifdef CONFIG_CIFS_STATS2
101 now = jiffies;
433b8dd7 102 if (now < midEntry->when_alloc)
a0a3036b 103 cifs_server_dbg(VFS, "Invalid mid allocation time\n");
433b8dd7
SF
104 roundtrip_time = now - midEntry->when_alloc;
105
106 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
107 if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
108 server->slowest_cmd[smb_cmd] = roundtrip_time;
109 server->fastest_cmd[smb_cmd] = roundtrip_time;
110 } else {
111 if (server->slowest_cmd[smb_cmd] < roundtrip_time)
112 server->slowest_cmd[smb_cmd] = roundtrip_time;
113 else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
114 server->fastest_cmd[smb_cmd] = roundtrip_time;
115 }
116 cifs_stats_inc(&server->num_cmds[smb_cmd]);
117 server->time_per_cmd[smb_cmd] += roundtrip_time;
118 }
00778e22
SF
119 /*
120 * commands taking longer than one second (default) can be indications
121 * that something is wrong, unless it is quite a slow link or a very
122 * busy server. Note that this calc is unlikely or impossible to wrap
123 * as long as slow_rsp_threshold is not set way above recommended max
124 * value (32767 ie 9 hours) and is generally harmless even if wrong
125 * since only affects debug counters - so leaving the calc as simple
126 * comparison rather than doing multiple conversions and overflow
127 * checks
128 */
129 if ((slow_rsp_threshold != 0) &&
130 time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
020eec5f 131 (midEntry->command != command)) {
f5942db5
SF
132 /*
133 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
134 * NB: le16_to_cpu returns unsigned so can not be negative below
135 */
433b8dd7
SF
136 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
137 cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
468d6779 138
433b8dd7 139 trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
020eec5f
SF
140 midEntry->when_sent, midEntry->when_received);
141 if (cifsFYI & CIFS_TIMER) {
a0a3036b
JP
142 pr_debug("slow rsp: cmd %d mid %llu",
143 midEntry->command, midEntry->mid);
144 cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
145 now - midEntry->when_alloc,
146 now - midEntry->when_sent,
147 now - midEntry->when_received);
1047abc1
SF
148 }
149 }
150#endif
f1f27ad7 151 put_task_struct(midEntry->creator);
abe57073
PS
152
153 mempool_free(midEntry, cifs_mid_poolp);
154}
155
70f08f91 156void release_mid(struct mid_q_entry *mid)
abe57073 157{
70f08f91 158 struct TCP_Server_Info *server = mid->server;
d7d7a66a
SP
159
160 spin_lock(&server->mid_lock);
70f08f91 161 kref_put(&mid->refcount, __release_mid);
d7d7a66a 162 spin_unlock(&server->mid_lock);
abe57073
PS
163}
164
3c1bf7e4 165void
70f08f91 166delete_mid(struct mid_q_entry *mid)
ddc8cf8f 167{
d7d7a66a 168 spin_lock(&mid->server->mid_lock);
abe57073
PS
169 if (!(mid->mid_flags & MID_DELETED)) {
170 list_del_init(&mid->qhead);
171 mid->mid_flags |= MID_DELETED;
172 }
d7d7a66a 173 spin_unlock(&mid->server->mid_lock);
ddc8cf8f 174
70f08f91 175 release_mid(mid);
ddc8cf8f
JL
176}
177
6f49f46b
JL
178/*
179 * smb_send_kvec - send an array of kvecs to the server
180 * @server: Server to send the data to
3ab3f2a1 181 * @smb_msg: Message to send
6f49f46b
JL
182 * @sent: amount of data sent on socket is stored here
183 *
184 * Our basic "send data to server" function. Should be called with srv_mutex
185 * held. The caller is responsible for handling the results.
186 */
d6e04ae6 187static int
3ab3f2a1
AV
188smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
189 size_t *sent)
1da177e4
LT
190{
191 int rc = 0;
3ab3f2a1 192 int retries = 0;
edf1ae40 193 struct socket *ssocket = server->ssocket;
50c2f753 194
6f49f46b
JL
195 *sent = 0;
196
0496e02d 197 if (server->noblocksnd)
3ab3f2a1 198 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
edf1ae40 199 else
3ab3f2a1 200 smb_msg->msg_flags = MSG_NOSIGNAL;
1da177e4 201
3ab3f2a1 202 while (msg_data_left(smb_msg)) {
6f49f46b
JL
203 /*
204 * If blocking send, we try 3 times, since each can block
205 * for 5 seconds. For nonblocking we have to try more
206 * but wait increasing amounts of time allowing time for
207 * socket to clear. The overall time we wait in either
208 * case to send on the socket is about 15 seconds.
209 * Similarly we wait for 15 seconds for a response from
210 * the server in SendReceive[2] for the server to send
211 * a response back for most types of requests (except
212 * SMB Write past end of file which can be slow, and
213 * blocking lock operations). NFS waits slightly longer
214 * than CIFS, but this can make it take longer for
215 * nonresponsive servers to be detected and 15 seconds
216 * is more than enough time for modern networks to
217 * send a packet. In most cases if we fail to send
218 * after the retries we will kill the socket and
219 * reconnect which may clear the network problem.
220 */
3ab3f2a1 221 rc = sock_sendmsg(ssocket, smb_msg);
ce6c44e4 222 if (rc == -EAGAIN) {
3ab3f2a1
AV
223 retries++;
224 if (retries >= 14 ||
225 (!server->noblocksnd && (retries > 2))) {
afe6f653 226 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
f96637be 227 ssocket);
3ab3f2a1 228 return -EAGAIN;
1da177e4 229 }
3ab3f2a1 230 msleep(1 << retries);
1da177e4
LT
231 continue;
232 }
6f49f46b 233
79a58d1f 234 if (rc < 0)
3ab3f2a1 235 return rc;
6f49f46b 236
79a58d1f 237 if (rc == 0) {
3e84469d
SF
238 /* should never happen, letting socket clear before
239 retrying is our only obvious option here */
afe6f653 240 cifs_server_dbg(VFS, "tcp sent no data\n");
3e84469d
SF
241 msleep(500);
242 continue;
d6e04ae6 243 }
6f49f46b 244
3ab3f2a1
AV
245 /* send was at least partially successful */
246 *sent += rc;
247 retries = 0; /* in case we get ENOSPC on the next send */
1da177e4 248 }
3ab3f2a1 249 return 0;
97bc00b3
JL
250}
251
35e2cc1b 252unsigned long
81f39f95 253smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
a26054d1
JL
254{
255 unsigned int i;
35e2cc1b
PA
256 struct kvec *iov;
257 int nvec;
a26054d1
JL
258 unsigned long buflen = 0;
259
d291e703 260 if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
9789de8b 261 rqst->rq_iov[0].iov_len == 4) {
35e2cc1b
PA
262 iov = &rqst->rq_iov[1];
263 nvec = rqst->rq_nvec - 1;
264 } else {
265 iov = rqst->rq_iov;
266 nvec = rqst->rq_nvec;
267 }
268
a26054d1 269 /* total up iov array first */
35e2cc1b 270 for (i = 0; i < nvec; i++)
a26054d1
JL
271 buflen += iov[i].iov_len;
272
c06a0f2d
LL
273 /*
274 * Add in the page array if there is one. The caller needs to make
275 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
276 * multiple pages ends at page boundary, rq_tailsz needs to be set to
277 * PAGE_SIZE.
278 */
a26054d1 279 if (rqst->rq_npages) {
c06a0f2d
LL
280 if (rqst->rq_npages == 1)
281 buflen += rqst->rq_tailsz;
282 else {
283 /*
284 * If there is more than one page, calculate the
285 * buffer length based on rq_offset and rq_tailsz
286 */
287 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
288 rqst->rq_offset;
289 buflen += rqst->rq_tailsz;
290 }
a26054d1
JL
291 }
292
293 return buflen;
294}
295
6f49f46b 296static int
07cd952f
RS
297__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
298 struct smb_rqst *rqst)
6f49f46b 299{
07cd952f
RS
300 int rc = 0;
301 struct kvec *iov;
302 int n_vec;
303 unsigned int send_length = 0;
304 unsigned int i, j;
b30c74c7 305 sigset_t mask, oldmask;
3ab3f2a1 306 size_t total_len = 0, sent, size;
b8eed283 307 struct socket *ssocket = server->ssocket;
bedc8f76 308 struct msghdr smb_msg = {};
c713c877
RS
309 __be32 rfc1002_marker;
310
4357d45f
LL
311 if (cifs_rdma_enabled(server)) {
312 /* return -EAGAIN when connecting or reconnecting */
313 rc = -EAGAIN;
314 if (server->smbd_conn)
315 rc = smbd_send(server, num_rqst, rqst);
9762c2d0
LL
316 goto smbd_done;
317 }
afc18a6f 318
ea702b80 319 if (ssocket == NULL)
afc18a6f 320 return -EAGAIN;
ea702b80 321
214a5ea0 322 if (fatal_signal_pending(current)) {
6988a619
PA
323 cifs_dbg(FYI, "signal pending before send request\n");
324 return -ERESTARTSYS;
b30c74c7
PS
325 }
326
b8eed283 327 /* cork the socket */
db10538a 328 tcp_sock_set_cork(ssocket->sk, true);
b8eed283 329
07cd952f 330 for (j = 0; j < num_rqst; j++)
81f39f95 331 send_length += smb_rqst_len(server, &rqst[j]);
07cd952f
RS
332 rfc1002_marker = cpu_to_be32(send_length);
333
b30c74c7
PS
334 /*
335 * We should not allow signals to interrupt the network send because
336 * any partial send will cause session reconnects thus increasing
337 * latency of system calls and overload a server with unnecessary
338 * requests.
339 */
340
341 sigfillset(&mask);
342 sigprocmask(SIG_BLOCK, &mask, &oldmask);
343
c713c877 344 /* Generate a rfc1002 marker for SMB2+ */
d291e703 345 if (!is_smb1(server)) {
c713c877
RS
346 struct kvec hiov = {
347 .iov_base = &rfc1002_marker,
348 .iov_len = 4
349 };
de4eda9d 350 iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
c713c877
RS
351 rc = smb_send_kvec(server, &smb_msg, &sent);
352 if (rc < 0)
b30c74c7 353 goto unmask;
c713c877
RS
354
355 total_len += sent;
356 send_length += 4;
357 }
358
662bf5bc
PA
359 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
360
07cd952f
RS
361 for (j = 0; j < num_rqst; j++) {
362 iov = rqst[j].rq_iov;
363 n_vec = rqst[j].rq_nvec;
3ab3f2a1 364
07cd952f 365 size = 0;
662bf5bc
PA
366 for (i = 0; i < n_vec; i++) {
367 dump_smb(iov[i].iov_base, iov[i].iov_len);
07cd952f 368 size += iov[i].iov_len;
662bf5bc 369 }
97bc00b3 370
de4eda9d 371 iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);
97bc00b3 372
3ab3f2a1 373 rc = smb_send_kvec(server, &smb_msg, &sent);
97bc00b3 374 if (rc < 0)
b30c74c7 375 goto unmask;
97bc00b3
JL
376
377 total_len += sent;
07cd952f
RS
378
379 /* now walk the page array and send each page in it */
380 for (i = 0; i < rqst[j].rq_npages; i++) {
381 struct bio_vec bvec;
382
383 bvec.bv_page = rqst[j].rq_pages[i];
384 rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
385 &bvec.bv_offset);
386
de4eda9d 387 iov_iter_bvec(&smb_msg.msg_iter, ITER_SOURCE,
07cd952f
RS
388 &bvec, 1, bvec.bv_len);
389 rc = smb_send_kvec(server, &smb_msg, &sent);
390 if (rc < 0)
391 break;
392
393 total_len += sent;
394 }
97bc00b3 395 }
1da177e4 396
b30c74c7
PS
397unmask:
398 sigprocmask(SIG_SETMASK, &oldmask, NULL);
399
400 /*
401 * If signal is pending but we have already sent the whole packet to
402 * the server we need to return success status to allow a corresponding
403 * mid entry to be kept in the pending requests queue thus allowing
404 * to handle responses from the server by the client.
405 *
406 * If only part of the packet has been sent there is no need to hide
407 * interrupt because the session will be reconnected anyway, so there
408 * won't be any response from the server to handle.
409 */
410
411 if (signal_pending(current) && (total_len != send_length)) {
412 cifs_dbg(FYI, "signal is pending after attempt to send\n");
214a5ea0 413 rc = -ERESTARTSYS;
b30c74c7
PS
414 }
415
b8eed283 416 /* uncork it */
db10538a 417 tcp_sock_set_cork(ssocket->sk, false);
b8eed283 418
c713c877 419 if ((total_len > 0) && (total_len != send_length)) {
f96637be 420 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
c713c877 421 send_length, total_len);
6f49f46b
JL
422 /*
423 * If we have only sent part of an SMB then the next SMB could
424 * be taken as the remainder of this one. We need to kill the
425 * socket so the server throws away the partial SMB
426 */
dca65818 427 cifs_signal_cifsd_for_reconnect(server, false);
bf1fdeb7 428 trace_smb3_partial_send_reconnect(server->CurrentMid,
6d82c27a 429 server->conn_id, server->hostname);
edf1ae40 430 }
9762c2d0 431smbd_done:
d804d41d 432 if (rc < 0 && rc != -EINTR)
afe6f653 433 cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
f96637be 434 rc);
ee13919c 435 else if (rc > 0)
1da177e4 436 rc = 0;
1da177e4
LT
437
438 return rc;
439}
440
6f49f46b 441static int
1f3a8f5f
RS
442smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
443 struct smb_rqst *rqst, int flags)
6f49f46b 444{
b2c96de7 445 struct kvec iov;
3946d0d0 446 struct smb2_transform_hdr *tr_hdr;
b2c96de7 447 struct smb_rqst cur_rqst[MAX_COMPOUND];
7fb8986e
PS
448 int rc;
449
450 if (!(flags & CIFS_TRANSFORM_REQ))
1f3a8f5f
RS
451 return __smb_send_rqst(server, num_rqst, rqst);
452
453 if (num_rqst > MAX_COMPOUND - 1)
454 return -ENOMEM;
7fb8986e 455
b2c96de7 456 if (!server->ops->init_transform_rq) {
a0a3036b 457 cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
7fb8986e
PS
458 return -EIO;
459 }
6f49f46b 460
9339faac 461 tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
3946d0d0
LL
462 if (!tr_hdr)
463 return -ENOMEM;
464
465 memset(&cur_rqst[0], 0, sizeof(cur_rqst));
466 memset(&iov, 0, sizeof(iov));
3946d0d0
LL
467
468 iov.iov_base = tr_hdr;
469 iov.iov_len = sizeof(*tr_hdr);
470 cur_rqst[0].rq_iov = &iov;
471 cur_rqst[0].rq_nvec = 1;
472
1f3a8f5f
RS
473 rc = server->ops->init_transform_rq(server, num_rqst + 1,
474 &cur_rqst[0], rqst);
7fb8986e 475 if (rc)
3946d0d0 476 goto out;
7fb8986e 477
1f3a8f5f
RS
478 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
479 smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
3946d0d0
LL
480out:
481 kfree(tr_hdr);
7fb8986e 482 return rc;
6f49f46b
JL
483}
484
0496e02d
JL
485int
486smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
487 unsigned int smb_buf_length)
488{
738f9de5 489 struct kvec iov[2];
7fb8986e
PS
490 struct smb_rqst rqst = { .rq_iov = iov,
491 .rq_nvec = 2 };
0496e02d 492
738f9de5
PS
493 iov[0].iov_base = smb_buffer;
494 iov[0].iov_len = 4;
495 iov[1].iov_base = (char *)smb_buffer + 4;
496 iov[1].iov_len = smb_buf_length;
0496e02d 497
07cd952f 498 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
499}
500
fc40f9cf 501static int
b227d215 502wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
2b53b929
RS
503 const int timeout, const int flags,
504 unsigned int *instance)
1da177e4 505{
19e88867 506 long rc;
4230cff8
RS
507 int *credits;
508 int optype;
2b53b929 509 long int t;
6d82c27a 510 int scredits, in_flight;
2b53b929
RS
511
512 if (timeout < 0)
513 t = MAX_JIFFY_OFFSET;
514 else
515 t = msecs_to_jiffies(timeout);
4230cff8
RS
516
517 optype = flags & CIFS_OP_MASK;
5bc59498 518
34f4deb7
PS
519 *instance = 0;
520
4230cff8
RS
521 credits = server->ops->get_credits_field(server, optype);
522 /* Since an echo is already inflight, no need to wait to send another */
523 if (*credits <= 0 && optype == CIFS_ECHO_OP)
524 return -EAGAIN;
525
fc40f9cf 526 spin_lock(&server->req_lock);
392e1c5d 527 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
1da177e4 528 /* oplock breaks must not be held up */
fc40f9cf 529 server->in_flight++;
1b63f184
SF
530 if (server->in_flight > server->max_in_flight)
531 server->max_in_flight = server->in_flight;
bc205ed1 532 *credits -= 1;
34f4deb7 533 *instance = server->reconnect_instance;
6d82c27a
SP
534 scredits = *credits;
535 in_flight = server->in_flight;
fc40f9cf 536 spin_unlock(&server->req_lock);
6d82c27a 537
1ddff774 538 trace_smb3_nblk_credits(server->CurrentMid,
6d82c27a
SP
539 server->conn_id, server->hostname, scredits, -1, in_flight);
540 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
541 __func__, 1, scredits);
542
27a97a61
VL
543 return 0;
544 }
545
27a97a61 546 while (1) {
b227d215 547 if (*credits < num_credits) {
6d82c27a 548 scredits = *credits;
fc40f9cf 549 spin_unlock(&server->req_lock);
6d82c27a 550
789e6661 551 cifs_num_waiters_inc(server);
2b53b929
RS
552 rc = wait_event_killable_timeout(server->request_q,
553 has_credits(server, credits, num_credits), t);
789e6661 554 cifs_num_waiters_dec(server);
2b53b929 555 if (!rc) {
6d82c27a
SP
556 spin_lock(&server->req_lock);
557 scredits = *credits;
558 in_flight = server->in_flight;
559 spin_unlock(&server->req_lock);
560
7937ca96 561 trace_smb3_credit_timeout(server->CurrentMid,
6d82c27a
SP
562 server->conn_id, server->hostname, scredits,
563 num_credits, in_flight);
afe6f653 564 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
6d82c27a 565 timeout);
7de03948 566 return -EBUSY;
2b53b929
RS
567 }
568 if (rc == -ERESTARTSYS)
569 return -ERESTARTSYS;
fc40f9cf 570 spin_lock(&server->req_lock);
27a97a61 571 } else {
080dc5e5
SP
572 spin_unlock(&server->req_lock);
573
d7d7a66a 574 spin_lock(&server->srv_lock);
c5797a94 575 if (server->tcpStatus == CifsExiting) {
d7d7a66a 576 spin_unlock(&server->srv_lock);
27a97a61 577 return -ENOENT;
1da177e4 578 }
d7d7a66a 579 spin_unlock(&server->srv_lock);
27a97a61 580
16b34aa4
RS
581 /*
582 * For normal commands, reserve the last MAX_COMPOUND
583 * credits to compound requests.
584 * Otherwise these compounds could be permanently
585 * starved for credits by single-credit requests.
586 *
587 * To prevent spinning CPU, block this thread until
588 * there are >MAX_COMPOUND credits available.
589 * But only do this is we already have a lot of
590 * credits in flight to avoid triggering this check
591 * for servers that are slow to hand out credits on
592 * new sessions.
593 */
080dc5e5 594 spin_lock(&server->req_lock);
16b34aa4
RS
595 if (!optype && num_credits == 1 &&
596 server->in_flight > 2 * MAX_COMPOUND &&
597 *credits <= MAX_COMPOUND) {
598 spin_unlock(&server->req_lock);
6d82c27a 599
16b34aa4 600 cifs_num_waiters_inc(server);
2b53b929
RS
601 rc = wait_event_killable_timeout(
602 server->request_q,
16b34aa4 603 has_credits(server, credits,
2b53b929
RS
604 MAX_COMPOUND + 1),
605 t);
16b34aa4 606 cifs_num_waiters_dec(server);
2b53b929 607 if (!rc) {
6d82c27a
SP
608 spin_lock(&server->req_lock);
609 scredits = *credits;
610 in_flight = server->in_flight;
611 spin_unlock(&server->req_lock);
612
7937ca96 613 trace_smb3_credit_timeout(
6d82c27a
SP
614 server->CurrentMid,
615 server->conn_id, server->hostname,
616 scredits, num_credits, in_flight);
afe6f653 617 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
6d82c27a 618 timeout);
7de03948 619 return -EBUSY;
2b53b929
RS
620 }
621 if (rc == -ERESTARTSYS)
622 return -ERESTARTSYS;
16b34aa4
RS
623 spin_lock(&server->req_lock);
624 continue;
625 }
626
2d86dbc9
PS
627 /*
628 * Can not count locking commands against total
629 * as they are allowed to block on server.
630 */
27a97a61
VL
631
632 /* update # of requests on the wire to server */
4230cff8 633 if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
b227d215
RS
634 *credits -= num_credits;
635 server->in_flight += num_credits;
1b63f184
SF
636 if (server->in_flight > server->max_in_flight)
637 server->max_in_flight = server->in_flight;
34f4deb7 638 *instance = server->reconnect_instance;
2d86dbc9 639 }
6d82c27a
SP
640 scredits = *credits;
641 in_flight = server->in_flight;
fc40f9cf 642 spin_unlock(&server->req_lock);
cd7b699b 643
1ddff774 644 trace_smb3_waitff_credits(server->CurrentMid,
6d82c27a
SP
645 server->conn_id, server->hostname, scredits,
646 -(num_credits), in_flight);
cd7b699b
SP
647 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
648 __func__, num_credits, scredits);
27a97a61 649 break;
1da177e4
LT
650 }
651 }
7ee1af76
JA
652 return 0;
653}
1da177e4 654
bc205ed1 655static int
480b1cb9
RS
656wait_for_free_request(struct TCP_Server_Info *server, const int flags,
657 unsigned int *instance)
bc205ed1 658{
2b53b929
RS
659 return wait_for_free_credits(server, 1, -1, flags,
660 instance);
bc205ed1
PS
661}
662
257b7809
RS
663static int
664wait_for_compound_request(struct TCP_Server_Info *server, int num,
665 const int flags, unsigned int *instance)
666{
667 int *credits;
6d82c27a 668 int scredits, in_flight;
257b7809
RS
669
670 credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
671
672 spin_lock(&server->req_lock);
cd7b699b 673 scredits = *credits;
6d82c27a 674 in_flight = server->in_flight;
cd7b699b 675
257b7809
RS
676 if (*credits < num) {
677 /*
91792bb8
PS
678 * If the server is tight on resources or just gives us less
679 * credits for other reasons (e.g. requests are coming out of
680 * order and the server delays granting more credits until it
681 * processes a missing mid) and we exhausted most available
682 * credits there may be situations when we try to send
683 * a compound request but we don't have enough credits. At this
684 * point the client needs to decide if it should wait for
685 * additional credits or fail the request. If at least one
686 * request is in flight there is a high probability that the
687 * server will return enough credits to satisfy this compound
688 * request.
689 *
690 * Return immediately if no requests in flight since we will be
691 * stuck on waiting for credits.
257b7809 692 */
91792bb8 693 if (server->in_flight == 0) {
257b7809 694 spin_unlock(&server->req_lock);
cd7b699b 695 trace_smb3_insufficient_credits(server->CurrentMid,
6d82c27a
SP
696 server->conn_id, server->hostname, scredits,
697 num, in_flight);
cd7b699b 698 cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
6d82c27a 699 __func__, in_flight, num, scredits);
7de03948 700 return -EDEADLK;
257b7809
RS
701 }
702 }
703 spin_unlock(&server->req_lock);
704
705 return wait_for_free_credits(server, num, 60000, flags,
706 instance);
707}
708
cb7e9eab
PS
709int
710cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
335b7b62 711 unsigned int *num, struct cifs_credits *credits)
cb7e9eab
PS
712{
713 *num = size;
335b7b62
PS
714 credits->value = 0;
715 credits->instance = server->reconnect_instance;
cb7e9eab
PS
716 return 0;
717}
718
96daf2b0 719static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
7ee1af76
JA
720 struct mid_q_entry **ppmidQ)
721{
d7d7a66a 722 spin_lock(&ses->ses_lock);
dd3cd870 723 if (ses->ses_status == SES_NEW) {
79a58d1f 724 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
080dc5e5 725 (in_buf->Command != SMB_COM_NEGOTIATE)) {
d7d7a66a 726 spin_unlock(&ses->ses_lock);
7ee1af76 727 return -EAGAIN;
080dc5e5 728 }
ad7a2926 729 /* else ok - we are setting up session */
1da177e4 730 }
7f48558e 731
dd3cd870 732 if (ses->ses_status == SES_EXITING) {
7f48558e 733 /* check if SMB session is bad because we are setting it up */
080dc5e5 734 if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
d7d7a66a 735 spin_unlock(&ses->ses_lock);
7f48558e 736 return -EAGAIN;
080dc5e5 737 }
7f48558e
SP
738 /* else ok - we are shutting down session */
739 }
d7d7a66a 740 spin_unlock(&ses->ses_lock);
7f48558e 741
70f08f91 742 *ppmidQ = alloc_mid(in_buf, ses->server);
26f57364 743 if (*ppmidQ == NULL)
7ee1af76 744 return -ENOMEM;
d7d7a66a 745 spin_lock(&ses->server->mid_lock);
ddc8cf8f 746 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
d7d7a66a 747 spin_unlock(&ses->server->mid_lock);
7ee1af76
JA
748 return 0;
749}
750
0ade640e
JL
751static int
752wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
7ee1af76 753{
0ade640e 754 int error;
7ee1af76 755
f5d39b02
PZ
756 error = wait_event_state(server->response_q,
757 midQ->mid_state != MID_REQUEST_SUBMITTED,
758 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
0ade640e
JL
759 if (error < 0)
760 return -ERESTARTSYS;
7ee1af76 761
0ade640e 762 return 0;
7ee1af76
JA
763}
764
fec344e3
JL
765struct mid_q_entry *
766cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
792af7b0
PS
767{
768 int rc;
fec344e3 769 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
770 struct mid_q_entry *mid;
771
738f9de5
PS
772 if (rqst->rq_iov[0].iov_len != 4 ||
773 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
774 return ERR_PTR(-EIO);
775
792af7b0 776 /* enable signing if server requires it */
38d77c50 777 if (server->sign)
792af7b0
PS
778 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
779
70f08f91 780 mid = alloc_mid(hdr, server);
792af7b0 781 if (mid == NULL)
fec344e3 782 return ERR_PTR(-ENOMEM);
792af7b0 783
fec344e3 784 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
ffc61ccb 785 if (rc) {
70f08f91 786 release_mid(mid);
fec344e3 787 return ERR_PTR(rc);
ffc61ccb
SP
788 }
789
fec344e3 790 return mid;
792af7b0 791}
133672ef 792
a6827c18
JL
793/*
794 * Send a SMB request and set the callback function in the mid to handle
795 * the result. Caller is responsible for dealing with timeouts.
796 */
797int
fec344e3 798cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
9b7c18a2 799 mid_receive_t *receive, mid_callback_t *callback,
3349c3a7
PS
800 mid_handle_t *handle, void *cbdata, const int flags,
801 const struct cifs_credits *exist_credits)
a6827c18 802{
480b1cb9 803 int rc;
a6827c18 804 struct mid_q_entry *mid;
335b7b62 805 struct cifs_credits credits = { .value = 0, .instance = 0 };
34f4deb7 806 unsigned int instance;
480b1cb9 807 int optype;
a6827c18 808
a891f0f8
PS
809 optype = flags & CIFS_OP_MASK;
810
cb7e9eab 811 if ((flags & CIFS_HAS_CREDITS) == 0) {
480b1cb9 812 rc = wait_for_free_request(server, flags, &instance);
cb7e9eab
PS
813 if (rc)
814 return rc;
335b7b62 815 credits.value = 1;
34f4deb7 816 credits.instance = instance;
3349c3a7
PS
817 } else
818 instance = exist_credits->instance;
a6827c18 819
cc391b69 820 cifs_server_lock(server);
3349c3a7
PS
821
822 /*
823 * We can't use credits obtained from the previous session to send this
824 * request. Check if there were reconnects after we obtained credits and
825 * return -EAGAIN in such cases to let callers handle it.
826 */
827 if (instance != server->reconnect_instance) {
cc391b69 828 cifs_server_unlock(server);
3349c3a7
PS
829 add_credits_and_wake_if(server, &credits, optype);
830 return -EAGAIN;
831 }
832
fec344e3
JL
833 mid = server->ops->setup_async_request(server, rqst);
834 if (IS_ERR(mid)) {
cc391b69 835 cifs_server_unlock(server);
335b7b62 836 add_credits_and_wake_if(server, &credits, optype);
fec344e3 837 return PTR_ERR(mid);
a6827c18
JL
838 }
839
44d22d84 840 mid->receive = receive;
a6827c18
JL
841 mid->callback = callback;
842 mid->callback_data = cbdata;
9b7c18a2 843 mid->handle = handle;
7c9421e1 844 mid->mid_state = MID_REQUEST_SUBMITTED;
789e6661 845
ffc61ccb 846 /* put it on the pending_mid_q */
d7d7a66a 847 spin_lock(&server->mid_lock);
ffc61ccb 848 list_add_tail(&mid->qhead, &server->pending_mid_q);
d7d7a66a 849 spin_unlock(&server->mid_lock);
ffc61ccb 850
93d2cb6c
LL
851 /*
852 * Need to store the time in mid before calling I/O. For call_async,
853 * I/O response may come back and free the mid entry on another thread.
854 */
855 cifs_save_when_sent(mid);
789e6661 856 cifs_in_send_inc(server);
1f3a8f5f 857 rc = smb_send_rqst(server, 1, rqst, flags);
789e6661 858 cifs_in_send_dec(server);
ad313cb8 859
820962dc 860 if (rc < 0) {
c781af7e 861 revert_current_mid(server, mid->credits);
ad313cb8 862 server->sequence_number -= 2;
70f08f91 863 delete_mid(mid);
820962dc
RV
864 }
865
cc391b69 866 cifs_server_unlock(server);
789e6661 867
ffc61ccb
SP
868 if (rc == 0)
869 return 0;
a6827c18 870
335b7b62 871 add_credits_and_wake_if(server, &credits, optype);
a6827c18
JL
872 return rc;
873}
874
133672ef
SF
875/*
876 *
877 * Send an SMB Request. No response info (other than return code)
878 * needs to be parsed.
879 *
880 * flags indicate the type of request buffer and how long to wait
881 * and whether to log NT STATUS code (error) before mapping it to POSIX error
882 *
883 */
884int
96daf2b0 885SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
792af7b0 886 char *in_buf, int flags)
133672ef
SF
887{
888 int rc;
889 struct kvec iov[1];
da502f7d 890 struct kvec rsp_iov;
133672ef
SF
891 int resp_buf_type;
892
792af7b0
PS
893 iov[0].iov_base = in_buf;
894 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
392e1c5d 895 flags |= CIFS_NO_RSP_BUF;
da502f7d 896 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
f96637be 897 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
90c81e0b 898
133672ef
SF
899 return rc;
900}
901
/*
 * Collect the final status of a mid after the caller has finished waiting
 * on it.  Translates the mid state into an errno, unhooks the mid from the
 * pending list if the demultiplex thread has not already done so, and drops
 * the caller's reference -- except in the success case, where the caller
 * still owns the mid (and its resp_buf) and must delete it later.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&server->mid_lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&server->mid_lock);
		/* success: caller keeps its reference to the mid */
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		/* unexpected state: make sure the mid is off the queue */
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&server->mid_lock);

	/* all error paths: drop the caller's reference */
	release_mid(mid);
	return rc;
}
938
121b046a 939static inline int
fb2036d8
PS
940send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
941 struct mid_q_entry *mid)
76dcc26f 942{
121b046a 943 return server->ops->send_cancel ?
fb2036d8 944 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
945}
946
2c8f981d
JL
947int
948cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
949 bool log_error)
950{
792af7b0 951 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
826a95e4
JL
952
953 dump_smb(mid->resp_buf, min_t(u32, 92, len));
2c8f981d
JL
954
955 /* convert the length into a more usable form */
38d77c50 956 if (server->sign) {
738f9de5 957 struct kvec iov[2];
985e4ff0 958 int rc = 0;
738f9de5
PS
959 struct smb_rqst rqst = { .rq_iov = iov,
960 .rq_nvec = 2 };
826a95e4 961
738f9de5
PS
962 iov[0].iov_base = mid->resp_buf;
963 iov[0].iov_len = 4;
964 iov[1].iov_base = (char *)mid->resp_buf + 4;
965 iov[1].iov_len = len - 4;
2c8f981d 966 /* FIXME: add code to kill session */
bf5ea0e2 967 rc = cifs_verify_signature(&rqst, server,
0124cc45 968 mid->sequence_number);
985e4ff0 969 if (rc)
afe6f653 970 cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
f96637be 971 rc);
2c8f981d
JL
972 }
973
974 /* BB special case reconnect tid and uid here? */
a3713ec3 975 return map_and_check_smb_error(mid, log_error);
2c8f981d
JL
976}
977
fec344e3 978struct mid_q_entry *
f780bd3f
AA
979cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
980 struct smb_rqst *rqst)
792af7b0
PS
981{
982 int rc;
fec344e3 983 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
984 struct mid_q_entry *mid;
985
738f9de5
PS
986 if (rqst->rq_iov[0].iov_len != 4 ||
987 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
988 return ERR_PTR(-EIO);
989
792af7b0
PS
990 rc = allocate_mid(ses, hdr, &mid);
991 if (rc)
fec344e3
JL
992 return ERR_PTR(rc);
993 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
994 if (rc) {
70f08f91 995 delete_mid(mid);
fec344e3
JL
996 return ERR_PTR(rc);
997 }
998 return mid;
792af7b0
PS
999}
1000
4e34feb5 1001static void
ee258d79 1002cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
1003{
1004 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
1005 struct cifs_credits credits;
1006
1007 credits.value = server->ops->get_credits(mid);
1008 credits.instance = server->reconnect_instance;
8a26f0f7 1009
34f4deb7 1010 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
1011}
1012
/*
 * Completion callback for the final PDU of a compound chain: account the
 * credits, then wake the thread waiting in compound_send_recv().
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
1019
/*
 * Completion callback installed on a mid whose caller stopped waiting:
 * account the credits from the (late) response, then drop the reference
 * the cancelled caller abandoned.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	release_mid(mid);
}
1026
5f68ea4a
AA
1027/*
1028 * Return a channel (master if none) of @ses that can be used to send
1029 * regular requests.
1030 *
1031 * If we are currently binding a new channel (negprot/sess.setup),
1032 * return the new incomplete channel.
1033 */
1034struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1035{
1036 uint index = 0;
1037
1038 if (!ses)
1039 return NULL;
1040
f486ef8e 1041 /* round robin */
bda487ac 1042 index = (uint)atomic_inc_return(&ses->chan_seq);
88b024f5
SP
1043
1044 spin_lock(&ses->chan_lock);
bda487ac 1045 index %= ses->chan_count;
88b024f5 1046 spin_unlock(&ses->chan_lock);
f486ef8e
SP
1047
1048 return ses->chans[index].server;
5f68ea4a
AA
1049}
1050
/*
 * Send a chain of one or more compounded requests and wait for the
 * responses.  Credits for every PDU are obtained up front; the PDUs are
 * signed and sent atomically under the server mutex; waits are cancelled
 * on signal or error.  On success, resp_iov/resp_buf_type describe each
 * response buffer, which the caller owns unless CIFS_NO_RSP_BUF was set.
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	cifs_server_lock(server);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* undo mid allocation and free any earlier mids */
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				delete_mid(midQ[j]);
			cifs_server_unlock(server);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* roll back mid counter and the signing sequence number */
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	cifs_server_unlock(server);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&ses->ses_lock);

		/* preauth hash update must be serialized with sends */
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		cifs_server_unlock(server);

		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* cancel every wait we did not complete */
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&server->mid_lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&server->mid_lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			HEADER_PREAMBLE_SIZE(server);

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&ses->ses_lock);
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		cifs_server_unlock(server);
		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			delete_mid(midQ[i]);
	}

	return rc;
}
1da177e4 1285
/*
 * Send a single smb_rqst and wait for its response: simply a compound
 * chain of length one.
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}
1295
738f9de5
PS
1296int
1297SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1298 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1299 const int flags, struct kvec *resp_iov)
1300{
1301 struct smb_rqst rqst;
3cecf486 1302 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
738f9de5
PS
1303 int rc;
1304
3cecf486 1305 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
6da2ec56
KC
1306 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1307 GFP_KERNEL);
117e3b7f
SF
1308 if (!new_iov) {
1309 /* otherwise cifs_send_recv below sets resp_buf_type */
1310 *resp_buf_type = CIFS_NO_BUFFER;
3cecf486 1311 return -ENOMEM;
117e3b7f 1312 }
3cecf486
RS
1313 } else
1314 new_iov = s_iov;
738f9de5
PS
1315
1316 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1317 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1318
1319 new_iov[0].iov_base = new_iov[1].iov_base;
1320 new_iov[0].iov_len = 4;
1321 new_iov[1].iov_base += 4;
1322 new_iov[1].iov_len -= 4;
1323
1324 memset(&rqst, 0, sizeof(struct smb_rqst));
1325 rqst.rq_iov = new_iov;
1326 rqst.rq_nvec = n_vec + 1;
1327
352d96f3
AA
1328 rc = cifs_send_recv(xid, ses, ses->server,
1329 &rqst, resp_buf_type, flags, resp_iov);
3cecf486
RS
1330 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1331 kfree(new_iov);
738f9de5
PS
1332 return rc;
1333}
1334
/*
 * Send a legacy SMB1 request held in a single buffer and wait for the
 * response, copying it into @out_buf.  Handles credit accounting, signing
 * (under the server mutex so signing order matches send order), and
 * cancellation of the wait on signal/error.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* failed send: roll back the signing sequence number */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&server->mid_lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = release_mid;
			spin_unlock(&server->mid_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&server->mid_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		/* mid reference already dropped by cifs_sync_mid_result */
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
1da177e4 1450
7ee1af76
JA
1451/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1452 blocking lock to return. */
1453
1454static int
96daf2b0 1455send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
7ee1af76
JA
1456 struct smb_hdr *in_buf,
1457 struct smb_hdr *out_buf)
1458{
1459 int bytes_returned;
96daf2b0 1460 struct cifs_ses *ses = tcon->ses;
7ee1af76
JA
1461 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1462
1463 /* We just modify the current in_buf to change
1464 the type of lock from LOCKING_ANDX_SHARED_LOCK
1465 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1466 LOCKING_ANDX_CANCEL_LOCK. */
1467
1468 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1469 pSMB->Timeout = 0;
88257360 1470 pSMB->hdr.Mid = get_next_mid(ses->server);
7ee1af76
JA
1471
1472 return SendReceive(xid, ses, in_buf, out_buf,
7749981e 1473 &bytes_returned, 0);
7ee1af76
JA
1474}
1475
/*
 * Send a blocking-lock SMB1 request and wait (interruptibly) for the
 * response.  If the caller is interrupted by a signal while the lock is
 * still pending, a cancel is sent (NT_CANCEL for POSIX/Trans2 locks,
 * LOCKINGX_CANCEL_LOCK for Windows locks) and, once the response finally
 * arrives, -ERESTARTSYS is returned for -EACCES so the syscall restarts.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* failed send: roll back the signing sequence number */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		/* wait again, uninterruptibly, for the cancelled request */
		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&server->mid_lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = release_mid;
				spin_unlock(&server->mid_lock);
				return rc;
			}
			spin_unlock(&server->mid_lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
fb157ed2
SF
1634
1635/*
1636 * Discard any remaining data in the current SMB. To do this, we borrow the
1637 * current bigbuf.
1638 */
1639int
1640cifs_discard_remaining_data(struct TCP_Server_Info *server)
1641{
1642 unsigned int rfclen = server->pdu_size;
9789de8b 1643 int remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
fb157ed2
SF
1644 server->total_read;
1645
1646 while (remaining > 0) {
1647 int length;
1648
1649 length = cifs_discard_from_socket(server,
1650 min_t(size_t, remaining,
1651 CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
1652 if (length < 0)
1653 return length;
1654 server->total_read += length;
1655 remaining -= length;
1656 }
1657
1658 return 0;
1659}
1660
1661static int
1662__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
1663 bool malformed)
1664{
1665 int length;
1666
1667 length = cifs_discard_remaining_data(server);
1668 dequeue_mid(mid, malformed);
1669 mid->resp_buf = server->smallbuf;
1670 server->smallbuf = NULL;
1671 return length;
1672}
1673
/*
 * Discard the remainder of a read response; rdata->result supplies the
 * "malformed" flag passed through to dequeue_mid().
 */
static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	struct cifs_readdata *rdata = mid->callback_data;

	return __cifs_readv_discard(server, mid, rdata->result);
}
1681
/*
 * Receive handler for async read responses.  Reads the remainder of the
 * READ_RSP header from the socket, validates it, skips any padding before
 * the data, then pulls the data payload into the pages attached to the
 * read request.  On any validation failure the rest of the frame is
 * discarded via cifs_readv_discard().
 */
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_readdata *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
	bool use_rdma_mr = false;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
		 __func__, mid->mid, rdata->offset, rdata->bytes);

	/*
	 * read the rest of READ_RSP header (sans Data array), or whatever we
	 * can if there's not enough data. At this point, we've read down to
	 * the Mid.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
							HEADER_SIZE(server) + 1;

	length = cifs_read_from_socket(server,
				       buf + HEADER_SIZE(server) - 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server)) {
		cifs_discard_remaining_data(server);
		return -1;
	}

	/* set up first two iov for signature check and to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_len =
		server->total_read - HEADER_PREAMBLE_SIZE(server);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	/* Was the SMB read successful? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		return __cifs_readv_discard(server, mid, false);
	}

	/* Is there enough to get to the rest of the READ_RSP header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	data_offset = server->ops->read_data_offset(buf) +
		HEADER_PREAMBLE_SIZE(server);
	if (data_offset < server->total_read) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	len = data_offset - server->total_read;
	if (len > 0) {
		/* read any junk before data into the rest of smallbuf */
		length = cifs_read_from_socket(server,
					       buf + server->total_read, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);
	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
		/* data_len is corrupt -- discard frame */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	length = rdata->read_into_pages(server, rdata, data_len);
	if (length < 0)
		return length;

	server->total_read += length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* discard anything left over */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

	/* success: retire the mid and hand it the small buffer */
	dequeue_mid(mid, false);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}