/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 * Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

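/*
 * Allocate and initialize a mid (multiplex id) entry for a request about to
 * be sent on @server. By default the mid is synchronous: its callback simply
 * wakes up the task that allocated it.
 */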
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
					       refcount);

	mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

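/*
 * Release the response buffer attached to a mid and drop our reference to
 * the mid itself. With CONFIG_CIFS_STATS2, responses slower than
 * slow_rsp_threshold seconds (excluding blocking lock commands) are also
 * counted, traced and optionally logged here.
 */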
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);

		trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
				    midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	cifs_mid_q_entry_release(midEntry);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del_init(&mid->qhead);
	mid->mid_flags |= MID_DELETED;
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking  we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear.  The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet.  In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

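/*
 * Push one or more smb_rqst structures out on the socket: cork the socket,
 * send the RFC 1002 length marker (SMB2+ only, i.e. when there is no
 * preamble), then every request's iovec array and page array, and uncork.
 * A partial send leaves an unparseable frame on the wire, so the session is
 * marked CifsNeedReconnect in that case.
 */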
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, rqst);
		goto smbd_done;
	}
	if (ssocket == NULL)
		return -ENOTSOCK;

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

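/*
 * Send a compound request, encrypting it first when CIFS_TRANSFORM_REQ is
 * set: a transform header iovec is prepended and the dialect's
 * init_transform_rq callback builds the encrypted chain before it is handed
 * to __smb_send_rqst().
 */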
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback "
			 "is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

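/*
 * Flow control: wait until the chosen credit pool has at least one credit,
 * then (except for blocking lock operations) consume it, bump the in-flight
 * count and record the reconnect instance the credit was taken under.
 * Oplock breaks and other async ops are never held up here.
 */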
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits, unsigned int *instance)
{
	int rc;

	*instance = 0;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		*instance = server->reconnect_instance;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
				*instance = server->reconnect_instance;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

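/*
 * Pick the credit pool that matches the operation type and wait on it.
 * Echo requests never block: if no credits are left, an echo is already in
 * flight and there is no need to send another.
 */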
static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype, unsigned int *instance)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val, instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}

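/*
 * Allocate a mid for an SMB1 request. Refuse while the session is exiting
 * or needs to reconnect; while it is still being set up or torn down only
 * let session setup, negotiate and logoff through. Otherwise queue the new
 * mid on the server's pending_mid_q.
 */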
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

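/*
 * Build a mid for an asynchronous request: check that the first iovec is
 * the 4 byte RFC 1002 length immediately followed by the SMB header, set
 * the signing flag if the server requires it, allocate the mid and sign the
 * request under the mid's sequence number.
 */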
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}

/*
 *
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 *
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

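/*
 * Translate the final state of a synchronously awaited mid into a return
 * code. A mid whose response was received is left for the caller to consume
 * and delete; retry, malformed-response and shutdown states map to -EAGAIN,
 * -EIO and -EHOSTDOWN respectively and the mid is freed here.
 */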
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

784
fec344e3
JL
785struct mid_q_entry *
786cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
792af7b0
PS
787{
788 int rc;
fec344e3 789 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
790 struct mid_q_entry *mid;
791
738f9de5
PS
792 if (rqst->rq_iov[0].iov_len != 4 ||
793 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
794 return ERR_PTR(-EIO);
795
792af7b0
PS
796 rc = allocate_mid(ses, hdr, &mid);
797 if (rc)
fec344e3
JL
798 return ERR_PTR(rc);
799 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
800 if (rc) {
3c1bf7e4 801 cifs_delete_mid(mid);
fec344e3
JL
802 return ERR_PTR(rc);
803 }
804 return mid;
792af7b0
PS
805}
806
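/*
 * Per-mid callbacks used by compound_send_recv(): every response in the
 * chain returns its granted credits to the server, only the last one wakes
 * the waiting thread, and cancelled mids free themselves when their (late)
 * response finally arrives.
 */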
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits;

	credits.value = server->ops->get_credits(mid);
	credits.instance = server->reconnect_instance;

	add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}

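/*
 * Send a chain of up to MAX_COMPOUND requests and wait for the responses.
 * Credits are acquired up front (one per request, all from the same
 * reconnect instance), the whole chain is signed and sent under srv_mutex,
 * and the preauth hash is updated during session establishment.
 */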
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	unsigned int first_instance = 0;
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	spin_lock(&ses->server->req_lock);
	if (ses->server->credits < num_rqst) {
		/*
		 * Return immediately if not too many requests in flight since
		 * we will likely be stuck on waiting for credits.
		 */
		if (ses->server->in_flight < num_rqst - ses->server->credits) {
			spin_unlock(&ses->server->req_lock);
			return -ENOTSUPP;
		}
	} else {
		/* enough credits to send the whole compounded request */
		ses->server->credits -= num_rqst;
		ses->server->in_flight += num_rqst;
		first_instance = ses->server->reconnect_instance;
	}
	spin_unlock(&ses->server->req_lock);

	if (first_instance) {
		cifs_dbg(FYI, "Acquired %d credits at once\n", num_rqst);
		for (i = 0; i < num_rqst; i++) {
			credits[i].value = 1;
			credits[i].instance = first_instance;
		}
		goto setup_rqsts;
	}

	/*
	 * There are not enough credits to send the whole compound request but
	 * there are requests in flight that may bring credits from the server.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests. This should be fixed by returning immediately and letting
	 * a caller fallback to sequential commands instead of compounding.
	 * Ensure we obtain 1 credit per request in the compound chain.
	 */
	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_free_request(ses->server, timeout, optype,
					   &instance);

		if (rc == 0) {
			credits[i].value = 1;
			credits[i].instance = instance;
			/*
			 * All parts of the compound chain must get credits from
			 * the same session, otherwise we may end up using more
			 * credits than the server granted. If there were
			 * reconnects in between, return -EAGAIN and let callers
			 * handle it.
			 */
			if (i == 0)
				first_instance = instance;
			else if (first_instance != instance) {
				i++;
				rc = -EAGAIN;
			}
		}

		if (rc) {
			/*
			 * We haven't sent an SMB packet to the server yet but
			 * we already obtained credits for i requests in the
			 * compound chain - need to return those credits back
			 * for future use. Note that we need to call add_credits
			 * multiple times to match the way we obtained credits
			 * in the first place and to account for in flight
			 * requests correctly.
			 */
			for (j = 0; j < i; j++)
				add_credits(ses->server, &credits[j], optype);
			return rc;
		}
	}

setup_rqsts:
	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	/*
	 * All parts of the compound chain obtained their credits from the
	 * same session (see the checks above). At the same time there might
	 * have been reconnects after those checks but before we acquired the
	 * srv_mutex. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (first_instance != ses->server->reconnect_instance) {
		mutex_unlock(&ses->server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(ses->server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(ses->server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&ses->server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(ses->server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
	cifs_in_send_dec(ses->server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(ses->server, num_rqst);
		ses->server->sequence_number -= 2;
	}

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		/* Sending failed for some reason - return credits back */
		for (i = 0; i < num_rqst; i++)
			add_credits(ses->server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(ses->server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(ses->server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], ses->server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			ses->server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = ses->server->ops->check_receive(midQ[i], ses->server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RESP) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

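/*
 * Synchronous send/receive of a single SMB1 frame. The caller supplies the
 * request and a buffer for the response; on success *pbytes_returned holds
 * the RFC 1002 length of the reply.
 */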
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, &credits, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, &credits, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */
	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

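/*
 * Like SendReceive(), but for blocking lock requests: the wait for the
 * response may be interrupted by a signal, in which case a cancel
 * (NT_CANCEL or LOCKINGX_CANCEL_LOCK) is sent, the reply is awaited again,
 * and -ERESTARTSYS is returned if the lock came back -EACCES so that the
 * system call restarts.
 */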
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0,
				   &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}