/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
					       refcount);

	mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/* smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command */
		if ((le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS) &&
		    (le16_to_cpu(midEntry->command) >= 0))
			cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);

		trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
				    midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				now - midEntry->when_alloc,
				now - midEntry->when_sent,
				now - midEntry->when_received);
		}
	}
#endif
	cifs_mid_q_entry_release(midEntry);
}

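/*
 * Unlink the mid from the pending queue under GlobalMid_Lock, mark it
 * deleted and then free it (and its response buffer).
 */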
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del_init(&mid->qhead);
	mid->mid_flags |= MID_DELETED;
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

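/*
 * smb_rqst_len - total number of bytes a request will put on the wire
 * @server: server the request is destined for
 * @rqst: the request to measure
 *
 * Sums the kvec array and any attached page array. For SMB2+ (no header
 * preamble) a leading 4-byte RFC1002 length vector is not counted.
 */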
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

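/*
 * Send one or more requests over the socket: cork it, emit the RFC1002
 * length marker for SMB2+, then each request's kvecs followed by its
 * pages. A partial send marks the session for reconnect, since the server
 * could otherwise take the next SMB as the remainder of this one.
 */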
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, rqst);
		goto smbd_done;
	}
	if (ssocket == NULL)
		return -ENOTSOCK;

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &hiov,
			      1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC,
			      iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}

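/*
 * Wrapper around __smb_send_rqst. If CIFS_TRANSFORM_REQ is set, a
 * transform header is prepended in cur_rqst[0] and the payload is
 * encrypted via the ->init_transform_rq callback before sending.
 */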
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback "
			 "is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

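/*
 * Wait until a send credit is available on this server and account for
 * the request in flight. Async ops (e.g. oplock breaks) are never held
 * up, and blocking ops do not consume a credit.
 */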
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	*num = size;
	*credits = 0;
	return 0;
}

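/*
 * Allocate a mid for an SMB1 request and queue it on pending_mid_q.
 * Refuses with -EAGAIN/-ENOENT while the session or TCP connection is
 * still being set up or is going away, except for the commands needed
 * to complete setup or logoff.
 */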
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

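/*
 * Sleep (freezable, killable) until the response arrives, i.e. until the
 * mid leaves MID_REQUEST_SUBMITTED state.
 */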
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

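/*
 * Build and sign a mid for an asynchronous request. rq_iov[0] must be the
 * 4-byte RFC1002 length immediately followed by the packet in rq_iov[1].
 */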
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, credits, optype);
	return rc;
}

/*
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

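/*
 * Map the final mid state to an errno once the caller has finished
 * waiting. On success the mid is left for the caller to release;
 * otherwise it is freed here.
 */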
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

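/*
 * Basic sanity checks on a received SMB1 response: dump the header,
 * verify the signature if signing is in use, then map the server status
 * code to a POSIX error.
 */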
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

static void
cifs_noop_callback(struct mid_q_entry *mid)
{
}

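/*
 * Send one or more (compounded) requests and wait for the responses.
 * All but the last mid get a noop callback so that only the final PDU
 * wakes this thread; the responses are returned via resp_iov.
 */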
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	unsigned int credits = 0;
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&ses->server->srv_mutex);
			/* Update # of requests on wire to server */
			add_credits(ses->server, 1, optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		/*
		 * We don't invoke the callback for compounded requests
		 * unless it is the last request.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_noop_callback;
	}
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
	cifs_in_send_dec(ses->server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(ses->server, midQ[i]);
		if (rc != 0) {
			cifs_dbg(FYI, "Cancelling wait for mid %llu\n",
				 midQ[i]->mid);
			send_cancel(ses->server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
				midQ[i]->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				add_credits(ses->server, 1, optype);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++)
		if (midQ[i]->resp_buf)
			credits += ses->server->ops->get_credits(midQ[i]);
	if (!credits)
		credits = 1;

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], ses->server);
		if (rc != 0) {
			add_credits(ses->server, credits, optype);
			return rc;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			ses->server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = ses->server->ops->check_receive(midQ[i], ses->server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RESP) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++)
		cifs_delete_mid(midQ[i]);
	add_credits(ses->server, credits, optype);

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}

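/*
 * Legacy iov-based entry point: prepend a separate vector for the
 * RFC1001 length, then hand off to cifs_send_recv.
 */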
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

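/*
 * Like SendReceive, but for blocking lock requests: waits interruptibly,
 * and if a signal arrives it cancels the lock on the server (NT_CANCEL or
 * LOCKINGX_CANCEL_LOCK) and arranges for the system call to be restarted.
 */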
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}