cifs: make smb_send_rqst take an array of requests
/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 * Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

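/*
 * Lifecycle sketch of a mid_q_entry as it is handled in this file (the
 * response-side transitions happen in the demultiplex thread, outside this
 * file, so they are summarized here only for orientation):
 *
 *	AllocMidQEntry()          -> MID_REQUEST_ALLOCATED
 *	cifs_call_async() /
 *	cifs_send_recv()          -> MID_REQUEST_SUBMITTED
 *	demultiplex thread        -> MID_RESPONSE_RECEIVED (or MID_RETRY_NEEDED,
 *	                             MID_RESPONSE_MALFORMED, MID_SHUTDOWN)
 *	DeleteMidQEntry()         -> MID_FREE, response buffer released, then
 *	                             the final kref_put() frees the entry
 */
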
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
					       refcount);

	mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if (time_after(now, midEntry->when_alloc + HZ)) {
		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				now - midEntry->when_alloc,
				now - midEntry->when_sent,
				now - midEntry->when_received);
		}
	}
#endif
	cifs_mid_q_entry_release(midEntry);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

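/*
 * Illustrative caller sketch for smb_send_kvec() (the buffer, its length
 * and the surrounding locking are assumed rather than taken from a
 * particular caller): the iov_iter inside the msghdr is pointed at the
 * data before each call, exactly as __smb_send_rqst() does below.
 *
 *	struct msghdr smb_msg;
 *	struct kvec kv = { .iov_base = buf, .iov_len = buflen };
 *	size_t sent;
 *	int rc;
 *
 *	iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &kv, 1, buflen);
 *	rc = smb_send_kvec(server, &smb_msg, &sent);
 *
 * On success sent == buflen; -EAGAIN means the socket stayed busy through
 * all of the retries above.
 */
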
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

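/*
 * Worked length example (all sizes assumed): for an SMB2 request
 * (header_preamble_size == 0) whose rq_iov is a 4-byte RFC1002 slot plus a
 * 64-byte header/body kvec, with rq_npages = 3, rq_pagesz = 4096,
 * rq_offset = 0 and rq_tailsz = 100, smb_rqst_len() skips the 4-byte slot
 * and returns
 *
 *	64 + (4096 * (3 - 1) - 0) + 100 = 8356
 *
 * which is the payload length that ends up on the wire after the marker.
 */
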
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, rqst);
		goto smbd_done;
	}
	if (ssocket == NULL)
		return -ENOTSOCK;

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &hiov,
			      1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC,
			      iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}

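/*
 * Wire-layout sketch for a compound send (the "pair" array and sizes are
 * assumed): handing two already-marshalled requests to this helper
 *
 *	struct smb_rqst pair[2];
 *	...
 *	rc = __smb_send_rqst(server, 2, pair);
 *
 * produces, on an SMB2+ socket (header_preamble_size == 0), a single
 * corked stream of
 *
 *	[4-byte RFC1002 marker =
 *		smb_rqst_len(server, &pair[0]) + smb_rqst_len(server, &pair[1])]
 *	[pair[0].rq_iov[0..nvec-1]] [pair[0].rq_pages[0..npages-1]]
 *	[pair[1].rq_iov[0..nvec-1]] [pair[1].rq_pages[0..npages-1]]
 */
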
#define MAX_COMPOUND 5

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback "
			 "is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}

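/*
 * Array-layout sketch for the encrypted path (CIFS_TRANSFORM_REQ set),
 * assuming num_rqst == 2:
 *
 *	cur_rqst[0]	single kvec wrapping struct smb2_transform_hdr
 *	cur_rqst[1]	transformed copy of rqst[0]  (filled in by
 *	cur_rqst[2]	transformed copy of rqst[1]   ->init_transform_rq)
 *
 * so __smb_send_rqst() is handed num_rqst + 1 == 3 requests, and
 * smb3_free_compound_rqst() afterwards releases only cur_rqst[1..2],
 * because cur_rqst[0] points at this stack frame.
 */
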
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

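/*
 * Credit-accounting sketch (counts assumed): with *credits == 1, the first
 * request through here takes the slot,
 *
 *	*credits: 1 -> 0, server->in_flight++
 *
 * and a second concurrent request then sleeps in wait_event_killable() on
 * server->request_q until the response path returns the credit via
 * add_credits(), which wakes the queue. CIFS_ASYNC_OP callers (oplock
 * breaks) consume a credit without ever waiting, while CIFS_BLOCKING_OP
 * callers wait for a credit to exist but do not consume one.
 */
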
static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	*num = size;
	*credits = 0;
	return 0;
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, credits, optype);
	return rc;
}

645
133672ef
SF
646/*
647 *
648 * Send an SMB Request. No response info (other than return code)
649 * needs to be parsed.
650 *
651 * flags indicate the type of request buffer and how long to wait
652 * and whether to log NT STATUS code (error) before mapping it to POSIX error
653 *
654 */
655int
96daf2b0 656SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
792af7b0 657 char *in_buf, int flags)
133672ef
SF
658{
659 int rc;
660 struct kvec iov[1];
da502f7d 661 struct kvec rsp_iov;
133672ef
SF
662 int resp_buf_type;
663
792af7b0
PS
664 iov[0].iov_base = in_buf;
665 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
133672ef 666 flags |= CIFS_NO_RESP;
da502f7d 667 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
f96637be 668 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
90c81e0b 669
133672ef
SF
670 return rc;
671}
672
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	int rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ;
	unsigned int credits = 1;
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	midQ = ses->server->ops->setup_request(ses, rqst);
	if (IS_ERR(midQ)) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, optype);
		return PTR_ERR(midQ);
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, 1, rqst, flags);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst->rq_iov,
					   rqst->rq_nvec);

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
		send_cancel(ses->server, rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			midQ->mid_flags |= MID_WAIT_CANCELLED;
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, optype);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, optype);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(FYI, "Bad MID state?\n");
		goto out;
	}

	buf = (char *)midQ->resp_buf;
	resp_iov->iov_base = buf;
	resp_iov->iov_len = midQ->resp_buf_size +
		ses->server->vals->header_preamble_size;
	if (midQ->large_buf)
		*resp_buf_type = CIFS_LARGE_BUFFER;
	else
		*resp_buf_type = CIFS_SMALL_BUFFER;

	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov->iov_base,
			.iov_len = resp_iov->iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

	credits = ses->server->ops->get_credits(midQ);

	rc = ses->server->ops->check_receive(midQ, ses->server,
					     flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by cifs_delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, credits, optype);

	return rc;
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

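/*
 * iov-splitting sketch (buffer name and length assumed): when a caller
 * passes a single kvec covering a 100-byte SMB that starts with its 4-byte
 * RFC1001 length field,
 *
 *	iov[0] = { .iov_base = buf, .iov_len = 100 };
 *
 * the rqst handed down to cifs_send_recv() carries
 *
 *	new_iov[0] = { .iov_base = buf,     .iov_len = 4  };
 *	new_iov[1] = { .iov_base = buf + 4, .iov_len = 96 };
 *
 * matching the "4-byte length iov first" layout that cifs_setup_request()
 * checks for above.
 */
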
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}