// SPDX-License-Identifier: LGPL-2.1
/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org) 2006.
 *
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

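/*
 * Default mid callback: the request was sent synchronously, so simply wake
 * the task sleeping in wait_for_response().
 */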
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

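/*
 * Allocate a mid_q_entry from the mempool and initialize it to track one
 * outstanding request against @server.
 */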
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

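/*
 * Final kref release for a mid: give a cancelled-but-answered mid to the
 * handle_cancelled_mid op, free the response buffer, record per-command
 * statistics under CONFIG_CIFS_STATS2 and return the entry to the mempool.
 */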
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}

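/*
 * Drop a reference to a mid; the final put frees it through
 * _cifs_mid_q_entry_release(). The put is serialized by GlobalMid_Lock.
 */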
void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}

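/*
 * Unlink a mid from pending_mid_q (unless the demultiplex thread already
 * did) and release it.
 */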
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
						ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

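/*
 * Number of bytes @rqst will occupy on the wire: all kvecs plus the page
 * array, excluding the 4-byte length vector on SMB2+ since __smb_send_rqst()
 * generates its own RFC1002 marker.
 */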
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

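/*
 * Transmit one or more requests on the socket (or via smbdirect when RDMA
 * is in use). Signals are blocked while sending so a packet is never
 * abandoned half-transmitted; if a partial send happens anyway, the
 * connection is marked for reconnect.
 */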
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		return -ERESTARTSYS;
	}

	/* cork the socket */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		cifs_mark_tcp_ses_conns_for_reconnect(server, false);
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
				rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

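/*
 * Front end for __smb_send_rqst(): when CIFS_TRANSFORM_REQ is set, prepend
 * a transform header and let the server's init_transform_rq op encrypt the
 * chain before it goes out on the wire.
 */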
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(tr_hdr, 0, sizeof(*tr_hdr));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

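/*
 * Reserve @num_credits credits on @server before sending, waiting up to
 * @timeout ms (forever if negative). CIFS_NON_BLOCKING callers (e.g. oplock
 * break responses) never wait; under heavy load the last MAX_COMPOUND
 * credits are kept in reserve for compound requests.
 */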
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_add_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				__func__, 1, scredits);

		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->CurrentMid,
						server->conn_id, server->hostname, scredits,
						num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			spin_unlock(&server->req_lock);

			spin_lock(&cifs_tcp_ses_lock);
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&cifs_tcp_ses_lock);
				return -ENOENT;
			}
			spin_unlock(&cifs_tcp_ses_lock);

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			spin_lock(&server->req_lock);
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
							server->CurrentMid,
							server->conn_id, server->hostname,
							scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_add_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					-(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
					__func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}

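/*
 * Credit reservation for a compound chain of @num requests: fail fast with
 * -EDEADLK if fewer than @num credits are available while nothing is in
 * flight, since no in-flight response could replenish the pool.
 */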
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * If the server is tight on resources or just gives us less
		 * credits for other reasons (e.g. requests are coming out of
		 * order and the server delays granting more credits until it
		 * processes a missing mid) and we exhausted most available
		 * credits there may be situations when we try to send
		 * a compound request but we don't have enough credits. At this
		 * point the client needs to decide if it should wait for
		 * additional credits or fail the request. If at least one
		 * request is in flight there is a high probability that the
		 * server will return enough credits to satisfy this compound
		 * request.
		 *
		 * Return immediately if no requests in flight since we will be
		 * stuck on waiting for credits.
		 */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					num, in_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
					__func__, in_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}

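/*
 * Trivial wait_mtu_credits implementation: grant the whole @size and take
 * nothing from the server's credit pool.
 */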
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}

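/*
 * Allocate a mid for @in_buf and queue it on the server's pending_mid_q.
 * Requests that make no sense for a session still being set up (CifsNew)
 * or torn down (CifsExiting) are refused with -EAGAIN.
 */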
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	spin_lock(&cifs_tcp_ses_lock);
	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE)) {
			spin_unlock(&cifs_tcp_ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
			spin_unlock(&cifs_tcp_ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are shutting down session */
	}
	spin_unlock(&cifs_tcp_ses_lock);

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

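/*
 * Sleep (killable, freezable) until the demultiplex thread moves the mid
 * out of MID_REQUEST_SUBMITTED state.
 */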
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				  midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}

/*
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

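/*
 * Collect the result of a mid the caller has finished waiting on: map the
 * final mid state to an error code, unlink the mid if the demultiplex
 * thread has not already done so, and release it.
 */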
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
				__func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

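/*
 * Sanity-check a received response: verify the signature when signing is
 * active, then map the SMB status to a POSIX error code.
 */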
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
					rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}

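/*
 * Build and queue a mid for a synchronous request: validate the iov layout
 * (4-byte length vector first), allocate the mid and sign the request.
 */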
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits;

	credits.value = server->ops->get_credits(mid);
	credits.instance = server->reconnect_instance;

	add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
	uint index = 0;

	if (!ses)
		return NULL;

	/* round robin */
	index = (uint)atomic_inc_return(&ses->chan_seq);

	spin_lock(&ses->chan_lock);
	index %= ses->chan_count;
	spin_unlock(&ses->chan_lock);

	return ses->chans[index].server;
}

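/*
 * Send a chain of @num_rqst requests and wait for the responses. Credits
 * for the whole chain are reserved up front; every mid hands its granted
 * credits back through its callback and only the last mid wakes this
 * thread.
 */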
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&cifs_tcp_ses_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&cifs_tcp_ses_lock);
		return -ENOENT;
	}
	spin_unlock(&cifs_tcp_ses_lock);

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&server->srv_mutex);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	mutex_unlock(&server->srv_mutex);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&cifs_tcp_ses_lock);
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&cifs_tcp_ses_lock);

		mutex_lock(&server->srv_mutex);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		mutex_unlock(&server->srv_mutex);

		spin_lock(&cifs_tcp_ses_lock);
	}
	spin_unlock(&cifs_tcp_ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
					midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&cifs_tcp_ses_lock);
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&cifs_tcp_ses_lock);
		mutex_lock(&server->srv_mutex);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		mutex_unlock(&server->srv_mutex);
		spin_lock(&cifs_tcp_ses_lock);
	}
	spin_unlock(&cifs_tcp_ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}

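/*
 * Legacy iov-based entry point: split the RFC1001 length prefix into its
 * own vector (allocating a bigger array when n_vec does not fit on the
 * stack) and forward to cifs_send_recv().
 */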
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, ses->server,
			    &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

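/*
 * Synchronous send/receive of a single SMB1 buffer: sign, send, wait for
 * the response and copy it into @out_buf.
 */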
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&cifs_tcp_ses_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&cifs_tcp_ses_lock);
		return -ENOENT;
	}
	spin_unlock(&cifs_tcp_ses_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

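/*
 * Variant of SendReceive() for byte-range locks that may block on the
 * server indefinitely: the wait is interruptible and, on a signal, a
 * cancel (NT_CANCEL or LOCKINGX_CANCEL_LOCK) is sent so the blocked lock
 * returns and the system call can be restarted.
 */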
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&cifs_tcp_ses_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&cifs_tcp_ses_lock);
		return -ENOENT;
	}
	spin_unlock(&cifs_tcp_ses_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&cifs_tcp_ses_lock);
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((server->tcpStatus == CifsGood) ||
	     (server->tcpStatus == CifsNew))) {
		spin_unlock(&cifs_tcp_ses_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&cifs_tcp_ses_lock);
	}
	spin_unlock(&cifs_tcp_ses_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
1639}