/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

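/* The default mid callback: wake up the task waiting on this mid. */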
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}

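/*
 * Allocate a mid (multiplex id) queue entry for the request described by
 * @smb_buffer and associate it with @server. The entry starts out in
 * MID_REQUEST_ALLOCATED state with the synchronous wake-up callback.
 */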
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /* jiffies is sufficient resolution; the mid can be allocated before it is sent */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&midCount);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}

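/* kref release callback: return the mid to the mempool it was allocated from. */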
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
        struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
                                               refcount);

        mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
        spin_lock(&GlobalMid_Lock);
        kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
        spin_unlock(&GlobalMid_Lock);
}

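/*
 * Tear down a mid: release its response buffer, update the per-command
 * statistics (when CONFIG_CIFS_STATS2 is set) and drop our reference.
 */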
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
        unsigned long now;
        unsigned long roundtrip_time;
        struct TCP_Server_Info *server = midEntry->server;
#endif
        midEntry->mid_state = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        if (now < midEntry->when_alloc)
                cifs_server_dbg(VFS, "invalid mid allocation time\n");
        roundtrip_time = now - midEntry->when_alloc;

        if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
                if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
                        server->slowest_cmd[smb_cmd] = roundtrip_time;
                        server->fastest_cmd[smb_cmd] = roundtrip_time;
                } else {
                        if (server->slowest_cmd[smb_cmd] < roundtrip_time)
                                server->slowest_cmd[smb_cmd] = roundtrip_time;
                        else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
                                server->fastest_cmd[smb_cmd] = roundtrip_time;
                }
                cifs_stats_inc(&server->num_cmds[smb_cmd]);
                server->time_per_cmd[smb_cmd] += roundtrip_time;
        }
        /*
         * Commands taking longer than one second (the default threshold) can
         * indicate that something is wrong, unless the link is slow or the
         * server is very busy. The calculation below is unlikely to wrap as
         * long as slow_rsp_threshold stays at or below its recommended
         * maximum (32767, i.e. about 9 hours), and even a wrapped value is
         * harmless since it only affects debug counters, so a simple
         * comparison is used instead of multiple conversions and overflow
         * checks.
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so can not be negative below
                 */
                if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

                trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
                               midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug(" CIFS slow rsp: cmd %d mid %llu",
                               midEntry->command, midEntry->mid);
                        cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
                               now - midEntry->when_alloc,
                               now - midEntry->when_sent,
                               now - midEntry->when_received);
                }
        }
#endif
        cifs_mid_q_entry_release(midEntry);
}

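/* Remove the mid from the pending queue, mark it deleted and free it. */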
void
cifs_delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        list_del_init(&mid->qhead);
        mid->mid_flags |= MID_DELETED;
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:     Server to send the data to
 * @smb_msg:    Message to send
 * @sent:       amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
        smb_msg->msg_namelen = sizeof(struct sockaddr);
        smb_msg->msg_control = NULL;
        smb_msg->msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If blocking send, we try 3 times, since each can block
                 * for 5 seconds. For nonblocking send we have to try more,
                 * but wait increasing amounts of time to allow the socket
                 * to clear. Either way, the overall time we wait to send
                 * on the socket is about 15 seconds. Similarly we wait 15
                 * seconds in SendReceive[2] for the server to send back a
                 * response for most request types (SMB writes past the end
                 * of file and blocking lock operations can be slower). NFS
                 * waits slightly longer than CIFS, but this can make it
                 * take longer to detect nonresponsive servers, and 15
                 * seconds is more than enough time for modern networks to
                 * send a packet. In most cases, if we fail to send after
                 * the retries we will kill the socket and reconnect, which
                 * may clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /*
                         * Should never happen; letting the socket clear
                         * before retrying is our only obvious option here.
                         */
                        cifs_server_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}

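/*
 * Return the total number of bytes @rqst would occupy on the wire, skipping
 * the 4-byte RFC1002 length iovec for SMB2+ (no-preamble) requests since
 * that marker is generated separately at send time.
 */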
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (server->vals->header_preamble_size == 0 &&
            rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        /*
         * Add in the page array if there is one. The caller needs to make
         * sure rq_offset and rq_tailsz are set correctly. If a buffer of
         * multiple pages ends at page boundary, rq_tailsz needs to be set to
         * PAGE_SIZE.
         */
        if (rqst->rq_npages) {
                if (rqst->rq_npages == 1)
                        buflen += rqst->rq_tailsz;
                else {
                        /*
                         * If there is more than one page, calculate the
                         * buffer length based on rq_offset and rq_tailsz
                         */
                        buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                                        rqst->rq_offset;
                        buflen += rqst->rq_tailsz;
                }
        }

        return buflen;
}

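/*
 * Send one or more SMB requests on the socket (or via smbdirect for RDMA),
 * with signals blocked for the duration of the send so that a partial send
 * does not force an unnecessary session reconnect.
 */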
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc = 0;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        int val = 1;
        __be32 rfc1002_marker;

        if (cifs_rdma_enabled(server) && server->smbd_conn) {
                rc = smbd_send(server, num_rqst, rqst);
                goto smbd_done;
        }

        if (ssocket == NULL)
                return -EAGAIN;

        if (signal_pending(current)) {
                cifs_dbg(FYI, "signal is pending before sending any data\n");
                return -EINTR;
        }

        /* cork the socket */
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));

        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects, increasing the
         * latency of system calls and overloading the server with
         * unnecessary requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate a rfc1002 marker for SMB2+ */
        if (server->vals->header_preamble_size == 0) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len  = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                /* now walk the page array and send each page in it */
                for (i = 0; i < rqst[j].rq_npages; i++) {
                        struct bio_vec bvec;

                        bvec.bv_page = rqst[j].rq_pages[i];
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);

                        iov_iter_bvec(&smb_msg.msg_iter, WRITE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;

                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If a signal is pending but we have already sent the whole packet
         * to the server, we need to return success so that a corresponding
         * mid entry is kept in the pending requests queue, allowing the
         * client to handle responses from the server.
         *
         * If only part of the packet has been sent, there is no need to
         * hide the interrupt: the session will be reconnected anyway, so
         * there won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -EINTR;
        }

        /* uncork it */
        val = 0;
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB.
                 */
                server->tcpStatus = CifsNeedReconnect;
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->hostname);
        }
smbd_done:
        if (rc < 0 && rc != -EINTR)
                cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;

        return rc;
}

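/*
 * Send @num_rqst requests, first wrapping them in an encryption transform
 * when CIFS_TRANSFORM_REQ is set in @flags.
 */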
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct kvec iov;
        struct smb2_transform_hdr tr_hdr;
        struct smb_rqst cur_rqst[MAX_COMPOUND];
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (num_rqst > MAX_COMPOUND - 1)
                return -ENOMEM;

        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));
        memset(&tr_hdr, 0, sizeof(tr_hdr));

        iov.iov_base = &tr_hdr;
        iov.iov_len = sizeof(tr_hdr);
        cur_rqst[0].rq_iov = &iov;
        cur_rqst[0].rq_nvec = 1;

        if (!server->ops->init_transform_rq) {
                cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
                return -EIO;
        }

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                return rc;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
        return rc;
}

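/* Send a single SMB: iov[0] carries the 4-byte RFC1002 length field. */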
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}

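/*
 * Wait until @num_credits credits are available on @server (or until
 * @timeout ms elapse), then claim them and record the current reconnect
 * instance in @instance. A negative @timeout means wait indefinitely.
 */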
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
{
        int rc;
        int *credits;
        int optype;
        long int t;

        if (timeout < 0)
                t = MAX_JIFFY_OFFSET;
        else
                t = msecs_to_jiffies(timeout);

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                if (server->in_flight > server->max_in_flight)
                        server->max_in_flight = server->in_flight;
                *credits -= 1;
                *instance = server->reconnect_instance;
                spin_unlock(&server->req_lock);
                return 0;
        }

        while (1) {
                if (*credits < num_credits) {
                        spin_unlock(&server->req_lock);
                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable_timeout(server->request_q,
                                has_credits(server, credits, num_credits), t);
                        cifs_num_waiters_dec(server);
                        if (!rc) {
                                trace_smb3_credit_timeout(server->CurrentMid,
                                        server->hostname, num_credits);
                                cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                         timeout);
                                return -ENOTSUPP;
                        }
                        if (rc == -ERESTARTSYS)
                                return -ERESTARTSYS;
                        spin_lock(&server->req_lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&server->req_lock);
                                return -ENOENT;
                        }

                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits for compound requests.
                         * Otherwise these compounds could be permanently
                         * starved for credits by single-credit requests.
                         *
                         * To prevent spinning the CPU, block this thread
                         * until there are >MAX_COMPOUND credits available.
                         * But only do this if we already have a lot of
                         * credits in flight to avoid triggering this check
                         * for servers that are slow to hand out credits on
                         * new sessions.
                         */
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);
                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable_timeout(
                                        server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1),
                                        t);
                                cifs_num_waiters_dec(server);
                                if (!rc) {
                                        trace_smb3_credit_timeout(
                                                server->CurrentMid,
                                                server->hostname, num_credits);
                                        cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                 timeout);
                                        return -ENOTSUPP;
                                }
                                if (rc == -ERESTARTSYS)
                                        return -ERESTARTSYS;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Can not count locking commands against total
                         * as they are allowed to block on server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                server->in_flight += num_credits;
                                if (server->in_flight > server->max_in_flight)
                                        server->max_in_flight = server->in_flight;
                                *instance = server->reconnect_instance;
                        }
                        spin_unlock(&server->req_lock);
                        break;
                }
        }
        return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, -1, flags,
                                     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
                          const int flags, unsigned int *instance)
{
        int *credits;

        credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

        spin_lock(&server->req_lock);
        if (*credits < num) {
                /*
                 * If fewer requests are in flight than the credits we still
                 * need, completing them cannot free up enough credits, so
                 * return immediately rather than wait forever.
                 */
                if (server->in_flight < num - *credits) {
                        spin_unlock(&server->req_lock);
                        return -ENOTSUPP;
                }
        }
        spin_unlock(&server->req_lock);

        return wait_for_free_credits(server, num, 60000, flags,
                                     instance);
}

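/*
 * Trivial MTU-credits helper: grant the full requested @size and consume
 * no credits from the server's counters.
 */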
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}

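/*
 * Allocate a mid for @in_buf and queue it on the server's pending_mid_q,
 * refusing requests that are not valid for the current session state.
 */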
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        if (ses->server->tcpStatus == CifsNeedReconnect) {
                cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
                return -EAGAIN;
        }

        if (ses->status == CifsNew) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                        (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
                /* else ok - we are setting up session */
        }

        if (ses->status == CifsExiting) {
                /* only a logoff may be sent while the session is going away */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
                        return -EAGAIN;
                /* else ok - we are shutting down session */
        }

        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_freezekillable_unsafe(server->response_q,
                                    midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

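/*
 * Build and sign a mid for an async request. rq_iov[0] must be the 4-byte
 * RFC1002 length, immediately followed in memory by the SMB header.
 */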
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                DeleteMidQEntry(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        mutex_lock(&server->srv_mutex);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * I/O response may come back and free the mid entry on another thread.
         */
        cifs_save_when_sent(mid);
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, 1, rqst, flags);
        cifs_in_send_dec(server);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                cifs_delete_mid(mid);
        }

        mutex_unlock(&server->srv_mutex);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}

/*
 * Send an SMB request. No response info (other than the return code) needs
 * to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and whether
 * to log the NT STATUS code (error) before mapping it to a POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RSP_BUF;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

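/*
 * Translate the final state of a synchronously-awaited mid into an errno.
 * The mid is freed unless a response was successfully received.
 */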
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&GlobalMid_Lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                list_del_init(&mid->qhead);
                cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* verify the response signature when the session is signed */
        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_smb_to_linux_error(mid->resp_buf, log_error);
}

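/*
 * Synchronous counterpart of cifs_setup_async_request: allocate, queue and
 * sign a mid for @rqst.
 */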
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                cifs_delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

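/*
 * Collect the credits granted in the response for this part of a compound
 * chain and return them to the server's credit counters.
 */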
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        DeleteMidQEntry(mid);
}

int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
                   int *resp_buf_type, struct kvec *resp_iov)
{
        int i, j, optype, rc = 0;
        struct mid_q_entry *midQ[MAX_COMPOUND];
        bool cancelled_mid[MAX_COMPOUND] = {false};
        struct cifs_credits credits[MAX_COMPOUND] = {
                { .value = 0, .instance = 0 }
        };
        unsigned int instance;
        char *buf;
        struct TCP_Server_Info *server;

        optype = flags & CIFS_OP_MASK;

        for (i = 0; i < num_rqst; i++)
                resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

        if ((ses == NULL) || (ses->server == NULL)) {
                cifs_dbg(VFS, "Null session\n");
                return -EIO;
        }

        server = ses->server;
        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Wait for credits to become available for all the requests.
         * This approach still leaves the possibility of being stuck waiting
         * for credits if the server doesn't grant credits to the outstanding
         * requests and the client is completely idle, not generating any
         * other requests.
         * This can be handled by the eventual session reconnect.
         */
        rc = wait_for_compound_request(server, num_rqst, flags,
                                       &instance);
        if (rc)
                return rc;

        for (i = 0; i < num_rqst; i++) {
                credits[i].value = 1;
                credits[i].instance = instance;
        }

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        mutex_lock(&server->srv_mutex);

        /*
         * All parts of the compound chain must use credits obtained from the
         * same session. We can't use credits obtained from a previous
         * session to send this request. Check if there were reconnects after
         * we obtained credits and return -EAGAIN in such cases to let
         * callers handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                for (j = 0; j < num_rqst; j++)
                        add_credits(server, &credits[j], optype);
                return -EAGAIN;
        }

        for (i = 0; i < num_rqst; i++) {
                midQ[i] = server->ops->setup_request(ses, &rqst[i]);
                if (IS_ERR(midQ[i])) {
                        revert_current_mid(server, i);
                        for (j = 0; j < i; j++)
                                cifs_delete_mid(midQ[j]);
                        mutex_unlock(&server->srv_mutex);

                        /* Update # of requests on wire to server */
                        for (j = 0; j < num_rqst; j++)
                                add_credits(server, &credits[j], optype);
                        return PTR_ERR(midQ[i]);
                }

                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
                midQ[i]->optype = optype;
                /*
                 * Invoke callback for every part of the compound chain
                 * to calculate credits properly. Wake up this thread only when
                 * the last element is received.
                 */
                if (i < num_rqst - 1)
                        midQ[i]->callback = cifs_compound_callback;
                else
                        midQ[i]->callback = cifs_compound_last_callback;
        }
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, num_rqst, rqst, flags);
        cifs_in_send_dec(server);

        for (i = 0; i < num_rqst; i++)
                cifs_save_when_sent(midQ[i]);

        if (rc < 0) {
                revert_current_mid(server, num_rqst);
                server->sequence_number -= 2;
        }

        mutex_unlock(&server->srv_mutex);

        /*
         * If sending failed for some reason, or this is an oplock break that
         * we will not receive a response to, return the credits.
         */
        if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
                for (i = 0; i < num_rqst; i++)
                        add_credits(server, &credits[i], optype);
                goto out;
        }

        /*
         * At this point the request is passed to the network stack - we
         * assume that any credits taken from the server structure on the
         * client have been spent and we can't return them. Once we receive
         * responses we will collect credits granted by the server in the mid
         * callbacks and add those credits to the server structure.
         */

        /*
         * Compounding is never used during session establishment.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
                smb311_update_preauth_hash(ses, rqst[0].rq_iov,
                                           rqst[0].rq_nvec);

        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(server, midQ[i]);
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                for (; i < num_rqst; i++) {
                        cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
                                 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
                        send_cancel(server, &rqst[i], midQ[i]);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
                                midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
                                midQ[i]->callback = cifs_cancelled_callback;
                                cancelled_mid[i] = true;
                                credits[i].value = 0;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }
        }

        for (i = 0; i < num_rqst; i++) {
                if (rc < 0)
                        goto out;

                rc = cifs_sync_mid_result(midQ[i], server);
                if (rc != 0) {
                        /* mark this mid as cancelled to not free it below */
                        cancelled_mid[i] = true;
                        goto out;
                }

                if (!midQ[i]->resp_buf ||
                    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
                        rc = -EIO;
                        cifs_dbg(FYI, "Bad MID state?\n");
                        goto out;
                }

                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
                        server->vals->header_preamble_size;

                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
                else
                        resp_buf_type[i] = CIFS_SMALL_BUFFER;

                rc = server->ops->check_receive(midQ[i], server,
                                                flags & CIFS_LOG_ERROR);

                /* mark it so buf will not be freed by cifs_delete_mid */
                if ((flags & CIFS_NO_RSP_BUF) == 0)
                        midQ[i]->resp_buf = NULL;
        }

        /*
         * Compounding is never used during session establishment.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                struct kvec iov = {
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
                smb311_update_preauth_hash(ses, &iov, 1);
        }

out:
        /*
         * This will dequeue all mids. After this it is important that the
         * demultiplex_thread will not process any of these mids any further.
         * This is prevented above by using a noop callback that will not
         * wake this thread except for the very last PDU.
         */
        for (i = 0; i < num_rqst; i++) {
                if (!cancelled_mid[i])
                        cifs_delete_mid(midQ[i]);
        }

        return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
                                  resp_iov);
}

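/*
 * Wrapper around cifs_send_recv that splits the first 4 bytes (the RFC1001
 * length) of the caller's first iovec into its own vector, as the transport
 * expects.
 */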
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
             const int flags, struct kvec *resp_iov)
{
        struct smb_rqst rqst;
        struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
        int rc;

        if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
                new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
                                        GFP_KERNEL);
                if (!new_iov) {
                        /* otherwise cifs_send_recv below sets resp_buf_type */
                        *resp_buf_type = CIFS_NO_BUFFER;
                        return -ENOMEM;
                }
        } else
                new_iov = s_iov;

        /* 1st iov is a RFC1001 length followed by the rest of the packet */
        memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

        new_iov[0].iov_base = new_iov[1].iov_base;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base += 4;
        new_iov[1].iov_len -= 4;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        rqst.rq_iov = new_iov;
        rqst.rq_nvec = n_vec + 1;

        rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
        if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
                kfree(new_iov);
        return rc;
}

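/*
 * Legacy synchronous send/receive for a single SMB1 request: send @in_buf,
 * wait for the response and copy it into @out_buf.
 */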
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned, const int flags)
{
        int rc = 0;
        struct mid_q_entry *midQ;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        struct cifs_credits credits = { .value = 1, .instance = 0 };
        struct TCP_Server_Info *server;

        if (ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        server = ses->server;
        if (server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_server_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                         len);
                return -EIO;
        }

        rc = wait_for_free_request(server, flags, &credits.instance);
        if (rc)
                return rc;

        /*
         * Make sure that we sign in the same order that we send on this
         * socket and avoid races inside tcp sendmsg code that could cause
         * corruption of smb data.
         */
        mutex_lock(&server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                /* Update # of requests on wire to server */
                add_credits(server, &credits, 0);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                goto out;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;

        cifs_in_send_inc(server);
        rc = smb_send(server, in_buf, len);
        cifs_in_send_dec(server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                server->sequence_number -= 2;

        mutex_unlock(&server->srv_mutex);

        if (rc < 0)
                goto out;

        rc = wait_for_response(server, midQ);
        if (rc != 0) {
                send_cancel(server, &rqst, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        /* no longer considered to be "in-flight" */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        add_credits(server, &credits, 0);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0) {
                add_credits(server, &credits, 0);
                return rc;
        }

        if (!midQ->resp_buf || !out_buf ||
            midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_server_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        cifs_delete_mid(midQ);
        add_credits(server, &credits, 0);

        return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                        struct smb_hdr *in_buf,
                        struct smb_hdr *out_buf)
{
        int bytes_returned;
        struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

        /*
         * We just modify the current in_buf to change
         * the type of lock from LOCKING_ANDX_SHARED_LOCK
         * or LOCKING_ANDX_EXCLUSIVE_LOCK to
         * LOCKING_ANDX_CANCEL_LOCK.
         */
        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
        pSMB->hdr.Mid = get_next_mid(ses->server);

        return SendReceive(xid, ses, in_buf, out_buf,
                        &bytes_returned, 0);
}

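/*
 * Send a blocking lock request and wait for the reply, allowing signals.
 * If interrupted, attempt to cancel the in-flight lock on the server before
 * restarting the system call.
 */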
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned)
{
        int rc = 0;
        int rstart = 0;
        struct mid_q_entry *midQ;
        struct cifs_ses *ses;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        unsigned int instance;
        struct TCP_Server_Info *server;

        if (tcon == NULL || tcon->ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        ses = tcon->ses;
        server = ses->server;

        if (server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_tcon_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                         len);
                return -EIO;
        }

        rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
        if (rc)
                return rc;

        /*
         * Make sure that we sign in the same order that we send on this
         * socket and avoid races inside tcp sendmsg code that could cause
         * corruption of smb data.
         */
        mutex_lock(&server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
                cifs_delete_mid(midQ);
                mutex_unlock(&server->srv_mutex);
                return rc;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;
        cifs_in_send_inc(server);
        rc = smb_send(server, in_buf, len);
        cifs_in_send_dec(server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                server->sequence_number -= 2;

        mutex_unlock(&server->srv_mutex);

        if (rc < 0) {
                cifs_delete_mid(midQ);
                return rc;
        }

        /* Wait for a reply - allow signals to interrupt. */
        rc = wait_event_interruptible(server->response_q,
                (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
                ((server->tcpStatus != CifsGood) &&
                 (server->tcpStatus != CifsNew)));

        /* Were we interrupted by a signal? */
        if ((rc == -ERESTARTSYS) &&
                (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
                ((server->tcpStatus == CifsGood) ||
                 (server->tcpStatus == CifsNew))) {

                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /*
                         * POSIX lock. We send a NT_CANCEL SMB to cause the
                         * blocking lock to return.
                         */
                        rc = send_cancel(server, &rqst, midQ);
                        if (rc) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                } else {
                        /*
                         * Windows lock. We send a LOCKINGX_CANCEL_LOCK
                         * to cause the blocking lock to return.
                         */
                        rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

                        /*
                         * If we get -ENOLCK back the lock may have
                         * already been removed. Don't exit in this case.
                         */
                        if (rc && rc != -ENOLCK) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                }

                rc = wait_for_response(server, midQ);
                if (rc) {
                        send_cancel(server, &rqst, midQ);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                                /* no longer considered to be "in-flight" */
                                midQ->callback = DeleteMidQEntry;
                                spin_unlock(&GlobalMid_Lock);
                                return rc;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }

                /* We got the response - restart system call. */
                rstart = 1;
        }

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0)
                return rc;

        /* rcvd frame is ok */
        if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_tcon_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        cifs_delete_mid(midQ);
        if (rstart && rc == -EACCES)
                return -ERESTARTSYS;
        return rc;
}