/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"
/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8
static void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}
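/*
 * Mid ("multiplex id") entries track every request in flight on a
 * connection: they are allocated below, queued on the server's
 * pending_mid_q, matched against responses by the demultiplex thread,
 * and released once their callback has run.
 */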
static struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/*
	 * when mid allocated can be before when sent; jiffies are easier
	 * to use than do_gettimeofday
	 */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
					       refcount);

	mempool_free(mid, cifs_mid_poolp);
}
void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/*
	 * Commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calculation is unlikely to wrap as long
	 * as slow_rsp_threshold is not set far above the recommended maximum
	 * (32767, i.e. 9 hours), and is generally harmless even if wrong
	 * since it only affects debug counters - so we leave it as a simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks.
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command.
		 * NB: le16_to_cpu returns unsigned so the index below can
		 * not be negative.
		 */
		if (le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);

		trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
				    midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	cifs_mid_q_entry_release(midEntry);
}
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del_init(&mid->qhead);
	mid->mid_flags |= MID_DELETED;
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}
/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;
	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}
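/*
 * Compute the on-the-wire length of a request. For SMB2+ (no RFC1002
 * preamble in the iov array), a leading 4-byte iov merely reserves
 * space for the length field and is skipped here; the actual marker
 * is generated separately in __smb_send_rqst().
 */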
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}
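/*
 * Send one or more compounded requests as a single stream of bytes.
 * The socket is corked so the RFC1002 length marker, the iov arrays
 * and any page data go out together, and uncorked afterwards to
 * flush whatever remains buffered.
 */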
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, rqst);
		goto smbd_done;
	}
	if (ssocket == NULL)
		return -ENOTSOCK;

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;
		send_length += 4;
	}
	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}
uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB.
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
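/*
 * If CIFS_TRANSFORM_REQ is set, wrap the compound chain in an SMB3
 * transform (encryption) header via the ops->init_transform_rq
 * callback before handing it to __smb_send_rqst(); the transformed
 * copies in cur_rqst[1..] are freed once the send completes.
 */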
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}
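/*
 * SMB2+ flow control: every request consumes at least one credit and
 * the server grants fresh credits in its responses. The helpers below
 * block the caller until a credit is available, except for async
 * operations (e.g. oplock breaks) which must never be held up.
 */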
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}
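/*
 * SMB1 implementation of the wait_mtu_credits operation: the original
 * dialect has no large-MTU credit accounting, so the full requested
 * size is granted and no credits are charged.
 */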
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	*num = size;
	*credits = 0;
	return 0;
}
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, credits, optype);
	return rc;
}
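/*
 * Sketch of a typical caller (hypothetical names; the real users are
 * paths such as echo and async read/write):
 *
 *	rc = cifs_call_async(server, &rqst, NULL, my_done_callback,
 *			     NULL, my_ctx, CIFS_ASYNC_OP);
 *
 * where my_done_callback() runs in the demultiplex thread once the
 * response (or a reconnect) releases the mid.
 */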
/*
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}
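/*
 * Translate the final mid state into an errno once the caller has
 * finished waiting: a received response means success, a needed retry
 * or malformed response maps to an error code, and any other state is
 * treated as a protocol error and logged.
 */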
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}
static int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}
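/*
 * Compound chains use the callbacks below so that credits granted by
 * the server are collected for every PDU in the chain, while only the
 * last PDU wakes the waiting thread (or frees a cancelled mid).
 */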
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;

	add_credits(server, server->ops->get_credits(mid), mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}
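/*
 * Core synchronous send/receive path. Callers pass an array of
 * smb_rqst structures; num_rqst > 1 means an SMB2 compound chain that
 * is sent as one network write and answered by related responses.
 */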
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	unsigned int credits[MAX_COMPOUND] = {0};
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */
	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure we obtain 1 credit per request in the compound chain.
	 * It can be optimized further by waiting for all the credits
	 * at once but this can wait long enough if we don't have enough
	 * credits due to some heavy operations in progress or the server
	 * not granting us much, so a fallback to the current approach is
	 * better.
	 */
	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_free_request(ses->server, timeout, optype);
		if (rc) {
			/*
			 * We haven't sent an SMB packet to the server yet but
			 * we already obtained credits for i requests in the
			 * compound chain - need to return those credits back
			 * for future use. Note that we need to call add_credits
			 * multiple times to match the way we obtained credits
			 * in the first place and to account for in flight
			 * requests correctly.
			 */
			for (j = 0; j < i; j++)
				add_credits(ses->server, 1, optype);
			return rc;
		}
		credits[i] = 1;
	}
	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&ses->server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(ses->server, credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
	cifs_in_send_dec(ses->server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);
	if (rc < 0) {
		/* Sending failed for some reason - return credits back */
		for (i = 0; i < num_rqst; i++)
			add_credits(ses->server, credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	if (timeout == CIFS_ASYNC_OP)
		goto out;
	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(ses->server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(ses->server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i] = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}
	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], ses->server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			ses->server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = ses->server->ops->check_receive(midQ[i], ses->server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RESP) == 0)
			midQ[i]->resp_buf = NULL;
	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}
out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}
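/*
 * Legacy iov-based entry point. The first iov passed in starts with
 * the 4-byte RFC1001 length field, so it is split below into a
 * separate length iov plus the SMB header proper, as the transport
 * code expects.
 */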
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}
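/*
 * SMB1-era synchronous call: send a single complete SMB buffer and
 * copy the complete response into out_buf. The SMB2+ code paths go
 * through cifs_send_recv() instead.
 */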
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;
	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}
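/*
 * Blocking-lock variant of SendReceive(): the wait is interruptible,
 * and on a signal the lock request is cancelled (NT_CANCEL for POSIX
 * locks sent via SMB_COM_TRANSACTION2, LOCKINGX_CANCEL_LOCK for
 * Windows locks) so the server unblocks and the syscall can restart.
 */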
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}
	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}