cifs: multichannel: always zero struct cifs_io_parms
[linux-block.git] / fs / cifs / transport.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/transport.c
3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
79a58d1f 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
1da177e4
LT
21 */
22
23#include <linux/fs.h>
24#include <linux/list.h>
5a0e3ad6 25#include <linux/gfp.h>
1da177e4
LT
26#include <linux/wait.h>
27#include <linux/net.h>
28#include <linux/delay.h>
f06ac72e 29#include <linux/freezer.h>
b8eed283 30#include <linux/tcp.h>
2f8b5444 31#include <linux/bvec.h>
97bc00b3 32#include <linux/highmem.h>
7c0f6ba6 33#include <linux/uaccess.h>
1da177e4
LT
34#include <asm/processor.h>
35#include <linux/mempool.h>
14e25977 36#include <linux/sched/signal.h>
1da177e4
LT
37#include "cifspdu.h"
38#include "cifsglob.h"
39#include "cifsproto.h"
40#include "cifs_debug.h"
8bd68c6e 41#include "smb2proto.h"
9762c2d0 42#include "smbdirect.h"
50c2f753 43
3cecf486
RS
44/* Max number of iovectors we can use off the stack when sending requests. */
45#define CIFS_MAX_IOV_SIZE 8
46
2dc7e1c0
PS
47void
48cifs_wake_up_task(struct mid_q_entry *mid)
2b84a36c
JL
49{
50 wake_up_process(mid->callback_data);
51}
52
/*
 * Allocate and initialize a mid (multiplex id) queue entry for an
 * outgoing request built from @smb_buffer, tied to @server.
 *
 * Returns the new entry (refcount 1, state MID_REQUEST_ALLOCATED), or
 * NULL if @server is NULL.  The entry defaults to synchronous-request
 * behavior: its callback wakes the current task (cifs_wake_up_task).
 *
 * Takes a reference on current (put in _cifs_mid_q_entry_release) so
 * the creator task can be identified for the lifetime of the mid.
 */
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	/*
	 * No NULL check needed: mempool_alloc with a blocking GFP mask
	 * (GFP_NOFS allows reclaim) waits for a free element rather than
	 * failing — see mempool_alloc semantics.
	 */
	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
88
696e420b
LP
/*
 * Final kref release for a mid queue entry: give a cancelled-but-answered
 * mid to the server ops for cleanup, free the response buffer, update
 * response-time statistics (CONFIG_CIFS_STATS2), drop the creator task
 * reference, and return the entry to the mempool.
 *
 * Called via kref_put() from cifs_mid_q_entry_release() with
 * GlobalMid_Lock held by that caller.
 */
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	/*
	 * A response arrived for a request the caller gave up waiting for:
	 * let the protocol ops release whatever the server-side response
	 * represents (e.g. an open handle).
	 */
	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry->resp_buf, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	/* track fastest/slowest/total round trip time per SMB2 command */
	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	/* drop the reference taken on the creator in AllocMidQEntry() */
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}
167
/*
 * Drop a reference on a mid queue entry; frees it via
 * _cifs_mid_q_entry_release() when the refcount hits zero.
 * The kref_put is done under GlobalMid_Lock so release cannot race with
 * other code that inspects mids on the pending queue under that lock.
 */
void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}
174
/*
 * Legacy-named wrapper that simply drops one reference on the mid.
 * Kept as a distinct entry point; callers that also need the mid removed
 * from pending_mid_q use cifs_delete_mid() instead.
 */
void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}
179
3c1bf7e4
PS
/*
 * Unlink a mid from the server's pending_mid_q (if not already removed,
 * tracked via MID_DELETED to make the unlink idempotent) and drop a
 * reference on it.
 */
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}
192
6f49f46b
JL
/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 *
 * Returns 0 once everything in smb_msg's iterator has been handed to the
 * socket, or a negative error (-EAGAIN after the retry budget is exhausted
 * on a congested socket, or whatever sock_sendmsg() reported).
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	/* destination address; NOTE(review): namelen uses sizeof(struct
	 * sockaddr) even though dstaddr may be larger (e.g. IPv6) — for a
	 * connected TCP socket the name is ignored, so this appears benign */
	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking  we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear.  The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet.  In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			/* exponential backoff: 2ms, 4ms, ... up to retry cap */
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}
270
35e2cc1b 271unsigned long
81f39f95 272smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
a26054d1
JL
273{
274 unsigned int i;
35e2cc1b
PA
275 struct kvec *iov;
276 int nvec;
a26054d1
JL
277 unsigned long buflen = 0;
278
81f39f95
RS
279 if (server->vals->header_preamble_size == 0 &&
280 rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
35e2cc1b
PA
281 iov = &rqst->rq_iov[1];
282 nvec = rqst->rq_nvec - 1;
283 } else {
284 iov = rqst->rq_iov;
285 nvec = rqst->rq_nvec;
286 }
287
a26054d1 288 /* total up iov array first */
35e2cc1b 289 for (i = 0; i < nvec; i++)
a26054d1
JL
290 buflen += iov[i].iov_len;
291
c06a0f2d
LL
292 /*
293 * Add in the page array if there is one. The caller needs to make
294 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
295 * multiple pages ends at page boundary, rq_tailsz needs to be set to
296 * PAGE_SIZE.
297 */
a26054d1 298 if (rqst->rq_npages) {
c06a0f2d
LL
299 if (rqst->rq_npages == 1)
300 buflen += rqst->rq_tailsz;
301 else {
302 /*
303 * If there is more than one page, calculate the
304 * buffer length based on rq_offset and rq_tailsz
305 */
306 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
307 rqst->rq_offset;
308 buflen += rqst->rq_tailsz;
309 }
a26054d1
JL
310 }
311
312 return buflen;
313}
314
/*
 * Transmit @num_rqst request structures to the server over the existing
 * TCP (or SMB-direct/RDMA) connection.  Must be called with srv_mutex
 * held (see smb_send_kvec's contract).
 *
 * Signals are blocked for the duration of the send so a partial packet is
 * never left on the wire due to an interrupt; if the whole packet went
 * out, a pending signal is deliberately NOT reported as -EINTR so the mid
 * stays queued and the response can still be matched.  A genuinely
 * partial send marks the session CifsNeedReconnect so the socket gets
 * torn down rather than confusing the server with a truncated SMB.
 */
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (signal_pending(current)) {
		cifs_dbg(FYI, "signal is pending before sending any data\n");
		return -EINTR;
	}

	/* cork the socket so the pieces below go out as one segment stream */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -EINTR;
	}

	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
463
/*
 * Send @num_rqst requests, optionally encrypting them first.
 *
 * Without CIFS_TRANSFORM_REQ this is a straight pass-through to
 * __smb_send_rqst().  With it, a transform header iov is prepended
 * (cur_rqst[0]) and the protocol ops' init_transform_rq builds the
 * encrypted compound, which is then sent and freed.  The transform
 * header is heap-allocated to keep this deep call path's stack small.
 */
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	/* slot 0 is reserved for the transform header below */
	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(tr_hdr, 0, sizeof(*tr_hdr));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	/* free only the transformed copies, not the caller's rqst array */
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}
508
0496e02d
JL
509int
510smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
511 unsigned int smb_buf_length)
512{
738f9de5 513 struct kvec iov[2];
7fb8986e
PS
514 struct smb_rqst rqst = { .rq_iov = iov,
515 .rq_nvec = 2 };
0496e02d 516
738f9de5
PS
517 iov[0].iov_base = smb_buffer;
518 iov[0].iov_len = 4;
519 iov[1].iov_base = (char *)smb_buffer + 4;
520 iov[1].iov_len = smb_buf_length;
0496e02d 521
07cd952f 522 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
523}
524
/*
 * Block until @num_credits credits are available on @server, then consume
 * them (updating in_flight under req_lock) and record the current
 * reconnect instance in *@instance.
 *
 * @timeout:  milliseconds to wait; negative means wait forever.
 * @flags:    CIFS_OP_MASK selects which credit pool; CIFS_TIMEOUT_MASK
 *            selects the blocking mode (CIFS_NON_BLOCKING bypasses the
 *            wait entirely — used for oplock breaks — and
 *            CIFS_BLOCKING_OP consumes no credits).
 *
 * Returns 0 on success, -EAGAIN for an echo with no credits, -ENOTSUPP on
 * timeout, -ERESTARTSYS if killed, -ENOENT if the connection is exiting.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	int rc;
	int *credits;
	int optype;
	long int t;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			/* not enough credits: sleep until some come back */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				trace_smb3_credit_timeout(server->CurrentMid,
					server->hostname, num_credits);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
					 timeout);
				return -ENOTSUPP;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this is we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);
				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					trace_smb3_credit_timeout(
						server->CurrentMid,
						server->hostname, num_credits);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						 timeout);
					return -ENOTSUPP;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
1da177e4 641
/*
 * Acquire a single credit for a regular request, waiting indefinitely
 * (timeout of -1 means "forever" in wait_for_free_credits()).
 */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}
649
257b7809
RS
/*
 * Acquire @num credits for a compound request, with a 60-second timeout.
 *
 * Fails fast with -ENOTSUPP when there are too few credits AND too few
 * requests in flight for their completion to plausibly return enough —
 * waiting would likely just stall for the full timeout.
 */
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	if (*credits < num) {
		/*
		 * Return immediately if not too many requests in flight since
		 * we will likely be stuck on waiting for credits.
		 */
		if (server->in_flight < num - *credits) {
			spin_unlock(&server->req_lock);
			return -ENOTSUPP;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}
674
cb7e9eab
PS
/*
 * SMB1 implementation of the wait_mtu_credits op: the original dialect
 * has no credit windowing, so grant the full requested @size immediately
 * and hand back zero-valued credits (nothing to return later).
 */
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}
684
/*
 * Validate connection/session state for @in_buf, then allocate a mid for
 * it and queue the mid on the server's pending_mid_q.
 *
 * While a session is being set up (CifsNew) only negprot/session-setup
 * commands may pass; while it is being torn down (ses->status
 * CifsExiting) only logoff may pass.  Returns 0 with *@ppmidQ set, or
 * -ENOENT / -EAGAIN / -ENOMEM.
 */
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}
719
0ade640e
JL
/*
 * Sleep (freezable, killable) until @midQ leaves MID_REQUEST_SUBMITTED —
 * i.e. a response arrived, the mid was retried, or the connection died.
 * Returns 0 when the state changed, -ERESTARTSYS if a fatal signal
 * interrupted the wait.
 */
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}
732
fec344e3
JL
/*
 * SMB1 setup_async_request op: validate the rqst layout (iov[0] must be
 * exactly the 4-byte RFC1002 header immediately preceding iov[1]), mark
 * the header for signing if the server requires it, allocate a mid and
 * sign the request into its sequence number.
 *
 * Returns the new mid or an ERR_PTR; on signing failure the mid is freed
 * before returning.
 */
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
133672ef 760
a6827c18
JL
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 *
 * Obtains a credit unless the caller supplied one (CIFS_HAS_CREDITS with
 * @exist_credits), refuses with -EAGAIN if a reconnect happened since the
 * credit was obtained, then builds/queues the mid and transmits under
 * srv_mutex.  On send failure the mid is deleted and the sequence number
 * rolled back; on any failure the locally-obtained credit is returned.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}
842
133672ef
SF
/*
 *
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 *
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	/* single iov covering the RFC1002 header plus the message body */
	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	/* response buffer is not needed by the caller; have it released */
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}
869
/*
 * Translate the final state of a synchronous mid into an errno and
 * release the mid (except in the success case, where the caller still
 * needs resp_buf and releases the mid itself later).
 *
 * In the unexpected-state default case the mid is also unlinked from the
 * pending queue here (idempotently, via MID_DELETED) since no response
 * handler will ever do it.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}
906
121b046a 907static inline int
fb2036d8
PS
908send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
909 struct mid_q_entry *mid)
76dcc26f 910{
121b046a 911 return server->ops->send_cancel ?
fb2036d8 912 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
913}
914
2c8f981d
JL
/*
 * Post-receive checks for a synchronous SMB1 response: dump the first
 * bytes for debugging, verify the packet signature when signing is on
 * (currently log-only on mismatch — see FIXME), and map the SMB status
 * to a POSIX error code.
 */
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		/* split back into header iov + body iov for verification */
		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}
945
fec344e3 946struct mid_q_entry *
f780bd3f
AA
947cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
948 struct smb_rqst *rqst)
792af7b0
PS
949{
950 int rc;
fec344e3 951 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
952 struct mid_q_entry *mid;
953
738f9de5
PS
954 if (rqst->rq_iov[0].iov_len != 4 ||
955 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
956 return ERR_PTR(-EIO);
957
792af7b0
PS
958 rc = allocate_mid(ses, hdr, &mid);
959 if (rc)
fec344e3
JL
960 return ERR_PTR(rc);
961 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
962 if (rc) {
3c1bf7e4 963 cifs_delete_mid(mid);
fec344e3
JL
964 return ERR_PTR(rc);
965 }
966 return mid;
792af7b0
PS
967}
968
4e34feb5 969static void
ee258d79 970cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
971{
972 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
973 struct cifs_credits credits;
974
975 credits.value = server->ops->get_credits(mid);
976 credits.instance = server->reconnect_instance;
8a26f0f7 977
34f4deb7 978 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
979}
980
/*
 * Receive callback for the final PDU of a compound chain: account the
 * credits like every other part, then wake the thread waiting in
 * compound_send_recv().
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
987
/*
 * Callback installed on a mid whose waiter gave up (e.g. was signalled):
 * account the credits for the late response and free the mid, since no
 * thread is left to consume it.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}
994
5f68ea4a
AA
995/*
996 * Return a channel (master if none) of @ses that can be used to send
997 * regular requests.
998 *
999 * If we are currently binding a new channel (negprot/sess.setup),
1000 * return the new incomplete channel.
1001 */
1002struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1003{
1004 uint index = 0;
1005
1006 if (!ses)
1007 return NULL;
1008
1009 if (!ses->binding) {
1010 /* round robin */
1011 if (ses->chan_count > 1) {
1012 index = (uint)atomic_inc_return(&ses->chan_seq);
1013 index %= ses->chan_count;
1014 }
1015 return ses->chans[index].server;
1016 } else {
1017 return cifs_ses_server(ses);
1018 }
1019}
1020
/*
 * Send up to MAX_COMPOUND requests as one compound chain and wait for
 * all the responses.
 *
 * @xid:		transaction id (debug/tracing only in this path)
 * @ses:		session used for signing and channel selection
 * @flags:		CIFS_*_OP type plus modifier bits (CIFS_NO_SRV_RSP,
 *			CIFS_NO_RSP_BUF, CIFS_LOG_ERROR, ...)
 * @num_rqst:		number of requests in @rqst (<= MAX_COMPOUND)
 * @rqst:		array of marshalled requests to send
 * @resp_buf_type:	out: per-request CIFS_{NO,SMALL,LARGE}_BUFFER
 * @resp_iov:		out: per-request response buffer/length
 *
 * Returns 0 on success or a negative errno.  On success the caller owns
 * the response buffers noted in @resp_buf_type/@resp_iov (unless
 * CIFS_NO_RSP_BUF was set).  Credits obtained for the chain are either
 * consumed by the responses or explicitly returned on every error path.
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;
	struct TCP_Server_Info *server;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	/* multichannel: requests may go out on any established channel */
	server = cifs_pick_channel(ses);

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	/* one credit per PDU; remember which reconnect generation they
	   were granted under so stale credits can be detected below */
	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&server->srv_mutex);

	/*
	 * All the parts of the compound chain belong obtained credits from the
	 * same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* undo the mids allocated so far and give back all
			   credits for the chain */
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* send failed: roll back the mid counter and the two
		   signing sequence numbers consumed per request */
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	mutex_unlock(&server->srv_mutex);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* cancel every request we did not get a response for;
		   their (possibly late) responses are handled by
		   cifs_cancelled_callback which frees the mid itself */
		for (; i < num_rqst; i++) {
			cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any futher.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}
1da177e4 1239
/*
 * Convenience wrapper around compound_send_recv() for the common case
 * of a single (non-compounded) request.
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}
1248
738f9de5
PS
1249int
1250SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1251 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1252 const int flags, struct kvec *resp_iov)
1253{
1254 struct smb_rqst rqst;
3cecf486 1255 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
738f9de5
PS
1256 int rc;
1257
3cecf486 1258 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
6da2ec56
KC
1259 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1260 GFP_KERNEL);
117e3b7f
SF
1261 if (!new_iov) {
1262 /* otherwise cifs_send_recv below sets resp_buf_type */
1263 *resp_buf_type = CIFS_NO_BUFFER;
3cecf486 1264 return -ENOMEM;
117e3b7f 1265 }
3cecf486
RS
1266 } else
1267 new_iov = s_iov;
738f9de5
PS
1268
1269 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1270 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1271
1272 new_iov[0].iov_base = new_iov[1].iov_base;
1273 new_iov[0].iov_len = 4;
1274 new_iov[1].iov_base += 4;
1275 new_iov[1].iov_len -= 4;
1276
1277 memset(&rqst, 0, sizeof(struct smb_rqst));
1278 rqst.rq_iov = new_iov;
1279 rqst.rq_nvec = n_vec + 1;
1280
1281 rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
3cecf486
RS
1282 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1283 kfree(new_iov);
738f9de5
PS
1284 return rc;
1285}
1286
/*
 * Synchronously send a single legacy SMB1 frame (@in_buf) and copy the
 * server's response into @out_buf.
 *
 * @pbytes_returned receives the RFC1002 payload length of the response.
 * @flags is passed to wait_for_free_request() to select the credit/slot
 * class.  Returns 0 on success or a negative errno; on all paths the
 * single credit taken is either returned via add_credits() or handed to
 * the mid's late-response teardown.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	/* reject frames that exceed the negotiated maximum buffer size */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* send failed: give back the two signing sequence numbers */
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		/* response raced in while cancelling - fall through and
		   process it normally */
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy length field (4 bytes) plus payload back to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
1da177e4 1398
7ee1af76
JA
1399/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1400 blocking lock to return. */
1401
1402static int
96daf2b0 1403send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
7ee1af76
JA
1404 struct smb_hdr *in_buf,
1405 struct smb_hdr *out_buf)
1406{
1407 int bytes_returned;
96daf2b0 1408 struct cifs_ses *ses = tcon->ses;
7ee1af76
JA
1409 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1410
1411 /* We just modify the current in_buf to change
1412 the type of lock from LOCKING_ANDX_SHARED_LOCK
1413 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1414 LOCKING_ANDX_CANCEL_LOCK. */
1415
1416 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1417 pSMB->Timeout = 0;
88257360 1418 pSMB->hdr.Mid = get_next_mid(ses->server);
7ee1af76
JA
1419
1420 return SendReceive(xid, ses, in_buf, out_buf,
7749981e 1421 &bytes_returned, 0);
7ee1af76
JA
1422}
1423
/*
 * Send a blocking SMB1 byte-range lock request and wait - interruptibly
 * - for the response.
 *
 * Unlike SendReceive(), the wait allows signals so a blocked lock can
 * be given up: on interruption a protocol-appropriate cancel is sent
 * (NT_CANCEL for POSIX/Trans2 locks, LOCKINGX_CANCEL_LOCK for Windows
 * locks) and, if the lock completed anyway, the syscall is restarted
 * by converting -EACCES into -ERESTARTSYS.
 *
 * @pbytes_returned receives the RFC1002 length of the response frame.
 * Returns 0 on success or a negative errno.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;	/* set once we know the lock actually completed */
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	/* CIFS_BLOCKING_OP: this request may block server-side for a long
	   time, so it uses the blocking-op slot accounting */
	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* send failed: give back the two signing sequence numbers */
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		/* wait (uninterruptibly this time) for the cancel - or the
		   original lock - to be answered */
		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}