1 // SPDX-License-Identifier: LGPL-2.1
4 * Copyright (C) International Business Machines Corp., 2002,2011
5 * Author(s): Steve French (sfrench@us.ibm.com)
10 #include <linux/string.h>
11 #include <linux/sched/mm.h>
12 #include <linux/sched/signal.h>
13 #include <linux/list.h>
14 #include <linux/wait.h>
15 #include <linux/slab.h>
16 #include <linux/pagemap.h>
17 #include <linux/ctype.h>
18 #include <linux/utsname.h>
19 #include <linux/mempool.h>
20 #include <linux/delay.h>
21 #include <linux/completion.h>
22 #include <linux/kthread.h>
23 #include <linux/pagevec.h>
24 #include <linux/freezer.h>
25 #include <linux/namei.h>
26 #include <linux/uuid.h>
27 #include <linux/uaccess.h>
28 #include <asm/processor.h>
29 #include <linux/inet.h>
30 #include <linux/module.h>
31 #include <keys/user-type.h>
33 #include <linux/parser.h>
34 #include <linux/bvec.h>
37 #include "cifsproto.h"
38 #include "cifs_unicode.h"
39 #include "cifs_debug.h"
40 #include "cifs_fs_sb.h"
43 #include "rfc1002pdu.h"
45 #include "smb2proto.h"
46 #include "smbdirect.h"
47 #include "dns_resolve.h"
48 #ifdef CONFIG_CIFS_DFS_UPCALL
49 #include "dfs_cache.h"
51 #include "fs_context.h"
/* Shared request-buffer mempool and module parameter, defined elsewhere. */
54 extern mempool_t *cifs_req_poolp;
55 extern bool disable_legacy_dialects;
57 /* FIXME: should these be tunable? */
58 #define TLINK_ERROR_EXPIRE (1 * HZ)
59 #define TLINK_IDLE_EXPIRE (600 * HZ)
61 /* Drop the connection to not overload the server */
62 #define NUM_STATUS_IO_TIMEOUT 5
/*
 * NOTE(review): the fields below look like members of a mount-context
 * structure whose opening "struct ... {" line is not visible in this
 * excerpt -- confirm their enclosing scope against the full file.
 */
65 struct cifs_sb_info *cifs_sb;
66 struct smb3_fs_context *fs_ctx;
68 struct TCP_Server_Info *server;
70 struct cifs_tcon *tcon;
71 #ifdef CONFIG_CIFS_DFS_UPCALL
72 struct cifs_ses *root_ses;
74 char *origin_fullpath, *leaf_fullpath;
/* Forward declarations for static helpers defined later in this file. */
78 static int ip_connect(struct TCP_Server_Info *server);
79 static int generic_ip_connect(struct TCP_Server_Info *server);
80 static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
81 static void cifs_prune_tlinks(struct work_struct *work);
84 * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
85 * get their ip addresses changed at some point.
87 * This should be called with server->srv_mutex held.
89 static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
93 char *unc, *ipaddr = NULL;
95 unsigned long ttl = SMB_DNS_RESOLVE_INTERVAL_DEFAULT;
/* Nothing to resolve without a hostname. */
97 if (!server->hostname)
/* +3 accounts for the leading "\\" prefix and the trailing NUL. */
100 len = strlen(server->hostname) + 3;
102 unc = kmalloc(len, GFP_KERNEL)
104 cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
/* Build "\\hostname" as expected by the DNS resolver helper. */
107 scnprintf(unc, len, "\\\\%s", server->hostname);
109 rc = dns_resolve_server_name_to_ip(unc, &ipaddr, &expiry);
113 cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
114 __func__, server->hostname, rc);
115 goto requeue_resolve;
/* Update dstaddr under the global ses lock so readers see a consistent address. */
118 spin_lock(&cifs_tcp_ses_lock);
119 rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
121 spin_unlock(&cifs_tcp_ses_lock);
124 /* rc == 1 means success here */
126 now = ktime_get_real_seconds();
127 if (expiry && expiry > now)
129 * To make sure we don't use the cached entry, retry 1s
/* Next resolution: DNS TTL (bounded below by the minimum interval), plus 1s. */
132 ttl = max_t(unsigned long, expiry - now, SMB_DNS_RESOLVE_INTERVAL_MIN) + 1;
137 cifs_dbg(FYI, "%s: next dns resolution scheduled for %lu seconds in the future\n",
/* Re-arm the delayed resolve worker for the computed TTL. */
139 mod_delayed_work(cifsiod_wq, &server->resolve, (ttl * HZ));
/*
 * Delayed-work handler: periodically re-resolve the server hostname so a
 * changed DNS record is picked up.  Takes srv_mutex as required by
 * reconn_set_ipaddr_from_hostname().
 */
145 static void cifs_resolve_server(struct work_struct *work)
148 struct TCP_Server_Info *server = container_of(work,
149 struct TCP_Server_Info, resolve.work);
151 mutex_lock(&server->srv_mutex);
154 * Resolve the hostname again to make sure that IP address is up-to-date.
156 rc = reconn_set_ipaddr_from_hostname(server);
158 cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
162 mutex_unlock(&server->srv_mutex);
166 * Mark all sessions and tcons for reconnect.
168 * @server needs to be previously set to CifsNeedReconnect.
172 cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
173 bool mark_smb_session)
175 struct TCP_Server_Info *pserver;
176 struct cifs_ses *ses;
177 struct cifs_tcon *tcon;
180 * before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
181 * are not used until reconnected.
183 cifs_dbg(FYI, "%s: marking necessary sessions and tcons for reconnect\n", __func__);
185 /* If server is a channel, select the primary channel */
186 pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
/* Walk all sessions on the primary server under the global ses lock. */
189 spin_lock(&cifs_tcp_ses_lock);
190 list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
191 spin_lock(&ses->chan_lock);
/* Channel already marked and we're not forcing a full-session mark: skip. */
192 if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server))
195 if (mark_smb_session)
196 CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses);
198 cifs_chan_set_need_reconnect(ses, server);
200 /* If all channels need reconnect, then tcon needs reconnect */
201 if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses))
204 ses->status = CifsNeedReconnect;
/* Invalidate every tree connection (and the IPC tcon) on this session. */
206 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
207 tcon->need_reconnect = true;
208 tcon->tidStatus = CifsNeedReconnect;
211 ses->tcon_ipc->need_reconnect = true;
214 spin_unlock(&ses->chan_lock);
216 spin_unlock(&cifs_tcp_ses_lock);
/*
 * Tear down the transport for @server: shut down and release the socket,
 * reset session state, and fail all in-flight mids so their owners retry.
 */
220 cifs_abort_connection(struct TCP_Server_Info *server)
222 struct mid_q_entry *mid, *nmid;
223 struct list_head retry_list;
226 server->max_read = 0;
228 /* do not want to be sending data on a socket we are freeing */
229 cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
230 mutex_lock(&server->srv_mutex);
231 if (server->ssocket) {
232 cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state,
233 server->ssocket->flags);
234 kernel_sock_shutdown(server->ssocket, SHUT_WR);
235 cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n", server->ssocket->state,
236 server->ssocket->flags);
237 sock_release(server->ssocket);
238 server->ssocket = NULL;
/* Reset per-connection session state; a fresh session setup will follow. */
240 server->sequence_number = 0;
241 server->session_estab = false;
242 kfree(server->session_key.response);
243 server->session_key.response = NULL;
244 server->session_key.len = 0;
245 server->lstrp = jiffies;
247 /* mark submitted MIDs for retry and issue callback */
248 INIT_LIST_HEAD(&retry_list);
249 cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
250 spin_lock(&GlobalMid_Lock);
251 list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
252 kref_get(&mid->refcount);
253 if (mid->mid_state == MID_REQUEST_SUBMITTED)
254 mid->mid_state = MID_RETRY_NEEDED;
255 list_move(&mid->qhead, &retry_list);
256 mid->mid_flags |= MID_DELETED;
258 spin_unlock(&GlobalMid_Lock);
259 mutex_unlock(&server->srv_mutex);
/* Callbacks are issued outside the locks to avoid lock-ordering issues. */
261 cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
262 list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
263 list_del_init(&mid->qhead);
265 cifs_mid_q_entry_release(mid);
/* For SMB Direct (RDMA) transports, destroy the smbd connection too. */
268 if (cifs_rdma_enabled(server)) {
269 mutex_lock(&server->srv_mutex);
270 smbd_destroy(server);
271 mutex_unlock(&server->srv_mutex);
/*
 * Decide whether @server should be reconnected.  Records @num_targets for
 * later back-off logic, and marks the server CifsNeedReconnect unless it is
 * already exiting (in which case waiters are woken instead).
 */
275 static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
277 spin_lock(&cifs_tcp_ses_lock);
278 server->nr_targets = num_targets;
279 if (server->tcpStatus == CifsExiting) {
280 /* the demux thread will exit normally next time through the loop */
281 spin_unlock(&cifs_tcp_ses_lock);
282 wake_up(&server->response_q);
286 cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
287 trace_smb3_reconnect(server->CurrentMid, server->conn_id,
289 server->tcpStatus = CifsNeedReconnect;
291 spin_unlock(&cifs_tcp_ses_lock);
296 * cifs tcp session reconnection
298 * mark tcp session as reconnecting so temporarily locked
299 * mark all smb sessions as reconnecting for tcp session
300 * reconnect tcp session
301 * wake up waiters on reconnection? - (not needed currently)
303 * if mark_smb_session is passed as true, unconditionally mark
304 * the smb session (and tcon) for reconnect as well. This value
305 * doesn't really matter for non-multichannel scenario.
308 static int __cifs_reconnect(struct TCP_Server_Info *server,
309 bool mark_smb_session)
/* Single-target reconnect: bail out early if no reconnect is needed. */
313 if (!cifs_tcp_ses_needs_reconnect(server, 1))
316 cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
318 cifs_abort_connection(server);
/* Retry loop: keep attempting until connected or the server is exiting. */
322 mutex_lock(&server->srv_mutex);
324 if (!cifs_swn_set_server_dstaddr(server)) {
325 /* resolve the hostname again to make sure that IP address is up-to-date */
326 rc = reconn_set_ipaddr_from_hostname(server);
327 cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
330 if (cifs_rdma_enabled(server))
331 rc = smbd_reconnect(server);
333 rc = generic_ip_connect(server);
335 mutex_unlock(&server->srv_mutex);
336 cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
/* Socket re-established: move state to CifsNeedNegotiate and reset credits. */
339 atomic_inc(&tcpSesReconnectCount);
340 set_credits(server, 1);
341 spin_lock(&cifs_tcp_ses_lock);
342 if (server->tcpStatus != CifsExiting)
343 server->tcpStatus = CifsNeedNegotiate;
344 spin_unlock(&cifs_tcp_ses_lock);
345 cifs_swn_reset_server_dstaddr(server);
346 mutex_unlock(&server->srv_mutex);
347 mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
349 } while (server->tcpStatus == CifsNeedReconnect);
/* Restart the echo worker now that the connection is being renegotiated. */
351 spin_lock(&cifs_tcp_ses_lock);
352 if (server->tcpStatus == CifsNeedNegotiate)
353 mod_delayed_work(cifsiod_wq, &server->echo, 0);
354 spin_unlock(&cifs_tcp_ses_lock);
356 wake_up(&server->response_q);
360 #ifdef CONFIG_CIFS_DFS_UPCALL
/*
 * Reconnect @server to a single DFS @target: swap in the target's hostname
 * (unless it is the current one), re-resolve it, then reconnect the socket.
 * Caller holds srv_mutex (hence "_unlocked" refers to no extra locking here).
 */
361 static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const char *target)
366 if (!cifs_swn_set_server_dstaddr(server)) {
367 if (server->hostname != target) {
368 hostname = extract_hostname(target);
369 if (!IS_ERR(hostname)) {
/* Adopt the DFS target's hostname; old hostname is freed. */
370 kfree(server->hostname);
371 server->hostname = hostname;
373 cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
374 __func__, PTR_ERR(hostname));
375 cifs_dbg(FYI, "%s: default to last target server: %s\n", __func__,
379 /* resolve the hostname again to make sure that IP address is up-to-date. */
380 rc = reconn_set_ipaddr_from_hostname(server);
381 cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
383 /* Reconnect the socket */
384 if (cifs_rdma_enabled(server))
385 rc = smbd_reconnect(server);
387 rc = generic_ip_connect(server);
/*
 * Try to reconnect to each target in the DFS target list @tl in turn,
 * remembering the one that worked in @target_hint.  Falls back to the
 * current hostname when the list is empty.
 */
392 static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_cache_tgt_list *tl,
393 struct dfs_cache_tgt_iterator **target_hint)
396 struct dfs_cache_tgt_iterator *tit;
400 /* If dfs target list is empty, then reconnect to last server */
401 tit = dfs_cache_get_tgt_iterator(tl);
403 return __reconnect_target_unlocked(server, server->hostname);
405 /* Otherwise, try every dfs target in @tl */
406 for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
407 rc = __reconnect_target_unlocked(server, dfs_cache_get_tgt_name(tit));
/*
 * DFS-aware variant of __cifs_reconnect(): looks up the cached referral for
 * the current full path and retries each DFS target until one connects.
 */
417 reconnect_dfs_server(struct TCP_Server_Info *server,
418 bool mark_smb_session)
/* Skip the leading separator of the stored full path. */
421 const char *refpath = server->current_fullpath + 1;
422 struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
423 struct dfs_cache_tgt_iterator *target_hint = NULL;
427 * Determine the number of dfs targets the referral path in @cifs_sb resolves to.
429 * smb2_reconnect() needs to know how long it should wait based upon the number of dfs
430 * targets (server->nr_targets). It's also possible that the cached referral was cleared
431 * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
432 * refreshing the referral, so, in this case, default it to 1.
434 if (!dfs_cache_noreq_find(refpath, NULL, &tl))
435 num_targets = dfs_cache_get_nr_tgts(&tl);
439 if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
442 cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
444 cifs_abort_connection(server);
/* Retry loop across DFS targets until connected or exiting. */
448 mutex_lock(&server->srv_mutex);
450 rc = reconnect_target_unlocked(server, &tl, &target_hint);
452 /* Failed to reconnect socket */
453 mutex_unlock(&server->srv_mutex);
454 cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
459 * Socket was created. Update tcp session status to CifsNeedNegotiate so that a
460 * process waiting for reconnect will know it needs to re-establish session and tcon
461 * through the reconnected target server.
463 atomic_inc(&tcpSesReconnectCount);
464 set_credits(server, 1);
465 spin_lock(&cifs_tcp_ses_lock);
466 if (server->tcpStatus != CifsExiting)
467 server->tcpStatus = CifsNeedNegotiate;
468 spin_unlock(&cifs_tcp_ses_lock);
469 cifs_swn_reset_server_dstaddr(server);
470 mutex_unlock(&server->srv_mutex);
471 mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
472 } while (server->tcpStatus == CifsNeedReconnect);
/* Record which target succeeded so future lookups prefer it. */
475 dfs_cache_noreq_update_tgthint(refpath, target_hint);
477 dfs_cache_free_tgts(&tl);
479 /* Need to set up echo worker again once connection has been established */
480 spin_lock(&cifs_tcp_ses_lock);
481 if (server->tcpStatus == CifsNeedNegotiate)
482 mod_delayed_work(cifsiod_wq, &server->echo, 0);
484 spin_unlock(&cifs_tcp_ses_lock);
486 wake_up(&server->response_q);
/*
 * Public reconnect entry point (DFS-enabled build): dispatch to the DFS-aware
 * path only when this connection has DFS state, otherwise plain reconnect.
 */
490 int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
492 /* If tcp session is not an dfs connection, then reconnect to last target server */
493 spin_lock(&cifs_tcp_ses_lock);
494 if (!server->is_dfs_conn || !server->origin_fullpath || !server->leaf_fullpath) {
495 spin_unlock(&cifs_tcp_ses_lock);
496 return __cifs_reconnect(server, mark_smb_session);
498 spin_unlock(&cifs_tcp_ses_lock);
500 return reconnect_dfs_server(server, mark_smb_session);
/*
 * NOTE(review): second definition of cifs_reconnect() -- presumably the
 * !CONFIG_CIFS_DFS_UPCALL branch; the #else/#endif lines are not visible in
 * this excerpt, confirm against the full file.
 */
503 int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
505 return __cifs_reconnect(server, mark_smb_session);
/*
 * Delayed-work handler that sends a periodic SMB echo to keep the session
 * alive, then re-queues itself at server->echo_interval.
 */
510 cifs_echo_request(struct work_struct *work)
513 struct TCP_Server_Info *server = container_of(work,
514 struct TCP_Server_Info, echo.work);
517 * We cannot send an echo if it is disabled.
518 * Also, no need to ping if we got a response recently.
521 if (server->tcpStatus == CifsNeedReconnect ||
522 server->tcpStatus == CifsExiting ||
523 server->tcpStatus == CifsNew ||
524 (server->ops->can_echo && !server->ops->can_echo(server)) ||
525 time_before(jiffies, server->lstrp + server->echo_interval - HZ))
/* Dialect may not implement echo; report -ENOSYS in that case. */
528 rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
530 cifs_dbg(FYI, "Unable to send echo request to server: %s\n",
533 /* Check witness registrations */
537 queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval);
/*
 * Ensure the demultiplex thread has both its large and small receive buffers,
 * allocating them on first use and clearing reused headers.
 */
541 allocate_buffers(struct TCP_Server_Info *server)
543 if (!server->bigbuf) {
544 server->bigbuf = (char *)cifs_buf_get();
545 if (!server->bigbuf) {
546 cifs_server_dbg(VFS, "No memory for large SMB response\n");
548 /* retry will check if exiting */
551 } else if (server->large_buf) {
552 /* we are reusing a dirty large buf, clear its start */
553 memset(server->bigbuf, 0, HEADER_SIZE(server));
556 if (!server->smallbuf) {
557 server->smallbuf = (char *)cifs_small_buf_get();
558 if (!server->smallbuf) {
559 cifs_server_dbg(VFS, "No memory for SMB response\n");
561 /* retry will check if exiting */
564 /* beginning of smb buffer is cleared in our buf_get */
566 /* if existing small buf clear beginning */
567 memset(server->smallbuf, 0, HEADER_SIZE(server));
/*
 * Return whether the server has gone quiet for too long (no response within
 * three echo intervals) and, if so, kick off a reconnect.
 */
574 server_unresponsive(struct TCP_Server_Info *server)
577 * We need to wait 3 echo intervals to make sure we handle such
579 * 1s client sends a normal SMB request
580 * 2s client gets a response
581 * 30s echo workqueue job pops, and decides we got a response recently
582 * and don't need to send another
584 * 65s kernel_recvmsg times out, and we see that we haven't gotten
585 * a response in >60s.
587 spin_lock(&cifs_tcp_ses_lock);
588 if ((server->tcpStatus == CifsGood ||
589 server->tcpStatus == CifsNeedNegotiate) &&
590 (!server->ops->can_echo || server->ops->can_echo(server)) &&
591 time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
592 spin_unlock(&cifs_tcp_ses_lock);
593 cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
594 (3 * server->echo_interval) / HZ);
595 cifs_reconnect(server, false);
598 spin_unlock(&cifs_tcp_ses_lock);
/*
 * Return whether the server has no credits of any kind left and nothing in
 * flight -- i.e. the connection is starved and cannot make progress.
 */
604 zero_credits(struct TCP_Server_Info *server)
608 spin_lock(&server->req_lock);
609 val = server->credits + server->echo_credits + server->oplock_credits;
610 if (server->in_flight == 0 && val == 0) {
611 spin_unlock(&server->req_lock);
614 spin_unlock(&server->req_lock);
/*
 * Core receive loop: read from the socket (or SMB Direct connection) until
 * @smb_msg is satisfied, handling reconnect triggers, transient errors and
 * server shutdown along the way.
 */
619 cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
624 smb_msg->msg_control = NULL;
625 smb_msg->msg_controllen = 0;
627 for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
630 /* reconnect if no credits and no requests in flight */
631 if (zero_credits(server)) {
632 cifs_reconnect(server, false);
633 return -ECONNABORTED;
636 if (server_unresponsive(server))
637 return -ECONNABORTED;
/* Prefer the RDMA receive path when SMB Direct is active. */
638 if (cifs_rdma_enabled(server) && server->smbd_conn)
639 length = smbd_recv(server->smbd_conn, smb_msg);
641 length = sock_recvmsg(server->ssocket, smb_msg, 0);
643 spin_lock(&cifs_tcp_ses_lock);
644 if (server->tcpStatus == CifsExiting) {
645 spin_unlock(&cifs_tcp_ses_lock);
649 if (server->tcpStatus == CifsNeedReconnect) {
650 spin_unlock(&cifs_tcp_ses_lock);
651 cifs_reconnect(server, false);
652 return -ECONNABORTED;
654 spin_unlock(&cifs_tcp_ses_lock);
656 if (length == -ERESTARTSYS ||
660 * Minimum sleep to prevent looping, allowing socket
661 * to clear and app threads to set tcpStatus
662 * CifsNeedReconnect if server hung.
664 usleep_range(1000, 2000);
/* Zero-length read or hard error: treat the connection as dead. */
670 cifs_dbg(FYI, "Received no data or error: %d\n", length);
671 cifs_reconnect(server, false);
672 return -ECONNABORTED;
/* Convenience wrapper: read @to_read bytes into the flat buffer @buf. */
679 cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
680 unsigned int to_read)
682 struct msghdr smb_msg;
683 struct kvec iov = {.iov_base = buf, .iov_len = to_read};
684 iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read);
686 return cifs_readv_from_socket(server, &smb_msg);
/* Read and throw away @to_read bytes from the socket (skip unwanted data). */
690 cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
692 struct msghdr smb_msg;
695 * iov_iter_discard already sets smb_msg.type and count and iov_offset
696 * and cifs_readv_from_socket sets msg_control and msg_controllen
697 * so little to initialize in struct msghdr
699 smb_msg.msg_name = NULL;
700 smb_msg.msg_namelen = 0;
701 iov_iter_discard(&smb_msg.msg_iter, READ, to_read);
703 return cifs_readv_from_socket(server, &smb_msg);
/* Read @to_read bytes from the socket directly into @page at @page_offset. */
707 cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
708 unsigned int page_offset, unsigned int to_read)
710 struct msghdr smb_msg;
711 struct bio_vec bv = {
712 .bv_page = page, .bv_len = to_read, .bv_offset = page_offset};
713 iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read);
714 return cifs_readv_from_socket(server, &smb_msg);
/*
 * Classify the RFC 1002 message @type byte of an incoming frame: returns for
 * regular SMB data, handles keep-alives and session responses, and forces a
 * reconnect (on port 445) for negative/unknown responses.
 */
718 is_smb_response(struct TCP_Server_Info *server, unsigned char type)
721 * The first byte big endian of the length field,
722 * is actually not part of the length but the type
723 * with the most common, zero, as regular data.
726 case RFC1002_SESSION_MESSAGE:
727 /* Regular SMB response */
729 case RFC1002_SESSION_KEEP_ALIVE:
730 cifs_dbg(FYI, "RFC 1002 session keep alive\n");
732 case RFC1002_POSITIVE_SESSION_RESPONSE:
733 cifs_dbg(FYI, "RFC 1002 positive session response\n");
735 case RFC1002_NEGATIVE_SESSION_RESPONSE:
737 * We get this from Windows 98 instead of an error on
738 * SMB negprot response.
740 cifs_dbg(FYI, "RFC 1002 negative session response\n");
741 /* give server a second to clean up */
744 * Always try 445 first on reconnect since we get NACK
745 * on some if we ever connected to port 139 (the NACK
746 * is since we do not begin with RFC1001 session
749 cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
750 cifs_reconnect(server, true);
753 cifs_server_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type);
754 cifs_reconnect(server, true);
/*
 * Mark @mid as received (or malformed) and unlink it from the pending queue
 * under GlobalMid_Lock.  Warns if the mid was already deleted, which would
 * indicate a double-dequeue bug.
 */
761 dequeue_mid(struct mid_q_entry *mid, bool malformed)
763 #ifdef CONFIG_CIFS_STATS2
764 mid->when_received = jiffies;
766 spin_lock(&GlobalMid_Lock);
768 mid->mid_state = MID_RESPONSE_RECEIVED;
770 mid->mid_state = MID_RESPONSE_MALFORMED;
772 * Trying to handle/dequeue a mid after the send_recv()
773 * function has finished processing it is a bug.
775 if (mid->mid_flags & MID_DELETED) {
776 spin_unlock(&GlobalMid_Lock);
777 pr_warn_once("trying to dequeue a deleted mid\n");
779 list_del_init(&mid->qhead);
780 mid->mid_flags |= MID_DELETED;
781 spin_unlock(&GlobalMid_Lock);
/*
 * Extract the CreditRequest field from an SMB2+ header in @buffer.
 * Returns early for SMB1 (detected via a non-zero header preamble),
 * which has no credit mechanism.
 */
786 smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
788 struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
791 * SMB1 does not use credits.
793 if (server->vals->header_preamble_size)
796 return le16_to_cpu(shdr->CreditRequest);
/*
 * Final per-mid processing for a received response: record granted credits,
 * hand buffer ownership to the waiting thread, and dequeue the mid.
 */
800 handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
801 char *buf, int malformed)
803 if (server->ops->check_trans2 &&
804 server->ops->check_trans2(mid, server, buf, malformed))
806 mid->credits_received = smb2_get_credits_from_hdr(buf, server);
808 mid->large_buf = server->large_buf;
809 /* Was previous buf put in mpx struct for multi-rsp? */
810 if (!mid->multiRsp) {
811 /* smb buffer will be freed by user thread */
812 if (server->large_buf)
813 server->bigbuf = NULL;
815 server->smallbuf = NULL;
817 dequeue_mid(mid, malformed);
/*
 * Tear down a TCP_Server_Info as its demultiplex thread exits: unlink it from
 * the global list, cancel its workers, fail/callback all pending mids, free
 * DFS paths, and shrink the shared request mempool.
 */
820 static void clean_demultiplex_info(struct TCP_Server_Info *server)
824 /* take it off the list, if it's not already */
825 spin_lock(&cifs_tcp_ses_lock);
826 list_del_init(&server->tcp_ses_list);
827 spin_unlock(&cifs_tcp_ses_lock);
829 cancel_delayed_work_sync(&server->echo);
830 cancel_delayed_work_sync(&server->resolve);
832 spin_lock(&cifs_tcp_ses_lock);
833 server->tcpStatus = CifsExiting;
834 spin_unlock(&cifs_tcp_ses_lock);
835 wake_up_all(&server->response_q);
837 /* check if we have blocked requests that need to free */
838 spin_lock(&server->req_lock);
839 if (server->credits <= 0)
841 spin_unlock(&server->req_lock);
843 * Although there should not be any requests blocked on this queue it
844 * can not hurt to be paranoid and try to wake up requests that may
845 * haven been blocked when more than 50 at time were on the wire to the
846 * same server - they now will see the session is in exit state and get
847 * out of SendReceive.
849 wake_up_all(&server->request_q);
850 /* give those requests time to exit */
852 if (cifs_rdma_enabled(server))
853 smbd_destroy(server);
854 if (server->ssocket) {
855 sock_release(server->ssocket);
856 server->ssocket = NULL;
/* Fail any mids still pending: mark shutdown, then call back off-lock. */
859 if (!list_empty(&server->pending_mid_q)) {
860 struct list_head dispose_list;
861 struct mid_q_entry *mid_entry;
862 struct list_head *tmp, *tmp2;
864 INIT_LIST_HEAD(&dispose_list);
865 spin_lock(&GlobalMid_Lock);
866 list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
867 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
868 cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
869 kref_get(&mid_entry->refcount);
870 mid_entry->mid_state = MID_SHUTDOWN;
871 list_move(&mid_entry->qhead, &dispose_list);
872 mid_entry->mid_flags |= MID_DELETED;
874 spin_unlock(&GlobalMid_Lock);
876 /* now walk dispose list and issue callbacks */
877 list_for_each_safe(tmp, tmp2, &dispose_list) {
878 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
879 cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid);
880 list_del_init(&mid_entry->qhead);
881 mid_entry->callback(mid_entry);
882 cifs_mid_q_entry_release(mid_entry);
884 /* 1/8th of sec is more than enough time for them to exit */
888 if (!list_empty(&server->pending_mid_q)) {
890 * mpx threads have not exited yet give them at least the smb
891 * send timeout time for long ops.
893 * Due to delays on oplock break requests, we need to wait at
894 * least 45 seconds before giving up on a request getting a
895 * response and going ahead and killing cifsd.
897 cifs_dbg(FYI, "Wait for exit from demultiplex thread\n");
900 * If threads still have not exited they are probably never
901 * coming home not much else we can do but free the memory.
905 #ifdef CONFIG_CIFS_DFS_UPCALL
906 kfree(server->origin_fullpath);
907 kfree(server->leaf_fullpath);
/* One fewer tcp session: shrink the request mempool accordingly. */
911 length = atomic_dec_return(&tcpSesAllocCount);
913 mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
/*
 * Standard receive path: validate the PDU length, switch to the large buffer
 * if needed, read the remainder of the frame, and dispatch to
 * cifs_handle_standard().
 */
917 standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
920 char *buf = server->smallbuf;
921 unsigned int pdu_length = server->pdu_size;
923 /* make sure this will fit in a large buffer */
924 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) -
925 server->vals->header_preamble_size) {
926 cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
927 cifs_reconnect(server, true);
928 return -ECONNABORTED;
931 /* switch to large buffer if too big for a small one */
932 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
933 server->large_buf = true;
934 memcpy(server->bigbuf, buf, server->total_read);
935 buf = server->bigbuf;
938 /* now read the rest */
939 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
940 pdu_length - HEADER_SIZE(server) + 1
941 + server->vals->header_preamble_size);
945 server->total_read += length;
947 dump_smb(buf, server->total_read);
949 return cifs_handle_standard(server, mid);
/*
 * Validate a fully-read response and pass it to handle_mid(): checks message
 * framing, expired sessions, and STATUS_PENDING interim responses.
 */
953 cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
955 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
959 * We know that we received enough to get to the MID as we
960 * checked the pdu_length earlier. Now check to see
961 * if the rest of the header is OK. We borrow the length
962 * var for the rest of the loop to avoid a new stack var.
964 * 48 bytes is enough to display the header and a little bit
965 * into the payload for debugging purposes.
967 length = server->ops->check_message(buf, server->total_read, server);
969 cifs_dump_mem("Bad SMB: ", buf,
970 min_t(unsigned int, server->total_read, 48));
972 if (server->ops->is_session_expired &&
973 server->ops->is_session_expired(buf)) {
974 cifs_reconnect(server, true);
978 if (server->ops->is_status_pending &&
979 server->ops->is_status_pending(buf, server))
985 handle_mid(mid, server, buf, length);
/*
 * Credit the server's credit pool from an SMB2+ header (used for frames with
 * no waiting mid, e.g. oplock breaks) and wake any request-throttled waiters.
 * SMB1 frames (non-zero preamble) are skipped.
 */
990 smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
992 struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
993 int scredits, in_flight;
996 * SMB1 does not use credits.
998 if (server->vals->header_preamble_size)
1001 if (shdr->CreditRequest) {
1002 spin_lock(&server->req_lock);
1003 server->credits += le16_to_cpu(shdr->CreditRequest);
1004 scredits = server->credits;
1005 in_flight = server->in_flight;
1006 spin_unlock(&server->req_lock);
1007 wake_up(&server->request_q);
1009 trace_smb3_add_credits(server->CurrentMid,
1010 server->conn_id, server->hostname, scredits,
1011 le16_to_cpu(shdr->CreditRequest), in_flight);
1012 cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
1013 __func__, le16_to_cpu(shdr->CreditRequest),
/*
 * Main receive thread for a TCP connection (one per server socket): loops
 * reading frames, matching them to pending mids, dispatching callbacks, and
 * handling oplock breaks and unknown frames, until tcpStatus == CifsExiting.
 */
1020 cifs_demultiplex_thread(void *p)
1022 int i, num_mids, length;
1023 struct TCP_Server_Info *server = p;
1024 unsigned int pdu_length;
1025 unsigned int next_offset;
1027 struct task_struct *task_to_wake = NULL;
1028 struct mid_q_entry *mids[MAX_COMPOUND];
1029 char *bufs[MAX_COMPOUND];
1030 unsigned int noreclaim_flag, num_io_timeout = 0;
/* Avoid reclaim recursion: this thread services filesystem I/O itself. */
1032 noreclaim_flag = memalloc_noreclaim_save();
1033 cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));
1035 length = atomic_inc_return(&tcpSesAllocCount);
1037 mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
1040 allow_kernel_signal(SIGKILL);
1041 while (server->tcpStatus != CifsExiting) {
1042 if (try_to_freeze())
1045 if (!allocate_buffers(server))
1048 server->large_buf = false;
1049 buf = server->smallbuf;
1050 pdu_length = 4; /* enough to get RFC1001 header */
1052 length = cifs_read_from_socket(server, buf, pdu_length);
1056 if (server->vals->header_preamble_size == 0)
1057 server->total_read = 0;
1059 server->total_read = length;
1062 * The right amount was read from socket - 4 bytes,
1063 * so we can now interpret the length field.
1065 pdu_length = get_rfc1002_length(buf);
1067 cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length);
1068 if (!is_smb_response(server, buf[0]))
1071 server->pdu_size = pdu_length;
1073 /* make sure we have enough to get to the MID */
1074 if (server->pdu_size < HEADER_SIZE(server) - 1 -
1075 server->vals->header_preamble_size) {
1076 cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n",
1078 cifs_reconnect(server, true);
1082 /* read down to the MID */
1083 length = cifs_read_from_socket(server,
1084 buf + server->vals->header_preamble_size,
1085 HEADER_SIZE(server) - 1
1086 - server->vals->header_preamble_size);
1089 server->total_read += length;
/* Compound responses: next_header yields the offset of the next PDU. */
1091 if (server->ops->next_header) {
1092 next_offset = server->ops->next_header(buf);
1094 server->pdu_size = next_offset;
1097 memset(mids, 0, sizeof(mids));
1098 memset(bufs, 0, sizeof(bufs));
/* Encrypted (transform) frames take the dedicated decrypt/receive path. */
1101 if (server->ops->is_transform_hdr &&
1102 server->ops->receive_transform &&
1103 server->ops->is_transform_hdr(buf)) {
1104 length = server->ops->receive_transform(server,
1109 mids[0] = server->ops->find_mid(server, buf);
1113 if (!mids[0] || !mids[0]->receive)
1114 length = standard_receive3(server, mids[0]);
1116 length = mids[0]->receive(server, mids[0]);
/* Receive failed: drop the references taken on the matched mids. */
1120 for (i = 0; i < num_mids; i++)
1122 cifs_mid_q_entry_release(mids[i]);
/* Too many STATUS_IO_TIMEOUT responses in a row: reconnect. */
1126 if (server->ops->is_status_io_timeout &&
1127 server->ops->is_status_io_timeout(buf)) {
1129 if (num_io_timeout > NUM_STATUS_IO_TIMEOUT) {
1130 cifs_reconnect(server, false);
/* Note the time of the last server response for keepalive logic. */
1136 server->lstrp = jiffies;
1138 for (i = 0; i < num_mids; i++) {
1139 if (mids[i] != NULL) {
1140 mids[i]->resp_buf_size = server->pdu_size;
1142 if (bufs[i] && server->ops->is_network_name_deleted)
1143 server->ops->is_network_name_deleted(bufs[i],
1146 if (!mids[i]->multiRsp || mids[i]->multiEnd)
1147 mids[i]->callback(mids[i]);
1149 cifs_mid_q_entry_release(mids[i]);
1150 } else if (server->ops->is_oplock_break &&
1151 server->ops->is_oplock_break(bufs[i],
1153 smb2_add_credits_from_hdr(bufs[i], server);
1154 cifs_dbg(FYI, "Received oplock break\n");
/* No matching mid and not an oplock break: log the mystery frame. */
1156 cifs_server_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
1157 atomic_read(&midCount));
1158 cifs_dump_mem("Received Data is: ", bufs[i],
1159 HEADER_SIZE(server));
1160 smb2_add_credits_from_hdr(bufs[i], server);
1161 #ifdef CONFIG_CIFS_DEBUG2
1162 if (server->ops->dump_detail)
1163 server->ops->dump_detail(bufs[i],
1165 cifs_dump_mids(server);
1166 #endif /* CIFS_DEBUG2 */
/* More compounded PDUs remain in this frame: loop to process the next. */
1170 if (pdu_length > server->pdu_size) {
1171 if (!allocate_buffers(server))
1173 pdu_length -= server->pdu_size;
1174 server->total_read = 0;
1175 server->large_buf = false;
1176 buf = server->smallbuf;
1179 } /* end while !EXITING */
1181 /* buffer usually freed in free_mid - need to free it here on exit */
1182 cifs_buf_release(server->bigbuf);
1183 if (server->smallbuf) /* no sense logging a debug message if NULL */
1184 cifs_small_buf_release(server->smallbuf);
1186 task_to_wake = xchg(&server->tsk, NULL);
1187 clean_demultiplex_info(server);
1189 /* if server->tsk was NULL then wait for a signal before exiting */
1190 if (!task_to_wake) {
1191 set_current_state(TASK_INTERRUPTIBLE);
1192 while (!signal_pending(current)) {
1194 set_current_state(TASK_INTERRUPTIBLE);
1196 set_current_state(TASK_RUNNING);
1199 memalloc_noreclaim_restore(noreclaim_flag);
1200 module_put_and_kthread_exit(0);
1204 * Returns true if srcaddr isn't specified and rhs isn't specified, or
1205 * if srcaddr is specified and matches the IP address of the rhs argument
1208 cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs)
1210 switch (srcaddr->sa_family) {
/* Unspecified source matches only an unspecified rhs. */
1212 return (rhs->sa_family == AF_UNSPEC);
1214 struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
1215 struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
1216 return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr);
1219 struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
1220 struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
1221 return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr);
1225 return false; /* don't expect to be here */
1230 * If no port is specified in addr structure, we try to match with 445 port
1231 * and if it fails - with 139 ports. It should be called only if address
1232 * families of server and addr are equal.
1235 match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
1237 __be16 port, *sport;
1239 /* SMBDirect manages its own ports, don't match it here */
1243 switch (addr->sa_family) {
1245 sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port;
1246 port = ((struct sockaddr_in *) addr)->sin_port;
1249 sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port;
1250 port = ((struct sockaddr_in6 *) addr)->sin6_port;
/* No explicit port requested: default to 445, then the NetBIOS port. */
1258 port = htons(CIFS_PORT);
1262 port = htons(RFC1001_PORT);
1265 return port == *sport;
/*
 * match_address - check that @addr equals the server's destination address
 * (including the IPv6 scope id) and that @srcaddr matches the server's
 * configured source address.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
1269 match_address(struct TCP_Server_Info *server, struct sockaddr *addr,
1270 struct sockaddr *srcaddr)
1272 switch (addr->sa_family) {
1274 struct sockaddr_in *addr4 = (struct sockaddr_in *)addr;
1275 struct sockaddr_in *srv_addr4 =
1276 (struct sockaddr_in *)&server->dstaddr;
1278 if (addr4->sin_addr.s_addr != srv_addr4->sin_addr.s_addr)
1283 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr;
1284 struct sockaddr_in6 *srv_addr6 =
1285 (struct sockaddr_in6 *)&server->dstaddr;
1287 if (!ipv6_addr_equal(&addr6->sin6_addr,
1288 &srv_addr6->sin6_addr))
/* link-local addresses must also agree on the interface scope */
1290 if (addr6->sin6_scope_id != srv_addr6->sin6_scope_id)
1296 return false; /* don't expect to be here */
/* finally check the requested source (bind) address as well */
1299 if (!cifs_match_ipaddr(srcaddr, (struct sockaddr *)&server->srcaddr))
/*
 * match_security - verify the requested security type and signing mode are
 * compatible with what this server connection already negotiated.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
1306 match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
1309 * The select_sectype function should either return the ctx->sectype
1310 * that was specified, or "Unspecified" if that sectype was not
1311 * compatible with the given NEGOTIATE request.
1313 if (server->ops->select_sectype(server, ctx->sectype)
1318 * Now check if signing mode is acceptable. No need to check
1319 * global_secflags at this point since if MUST_SIGN is set then
1320 * the server->sign had better be too.
/* a mount that requires signing cannot share an unsigned connection */
1322 if (ctx->sign && !server->sign)
/*
 * match_server - decide whether an existing TCP connection can be shared
 * with a mount described by @ctx.  Compares socket-sharing policy, dialect,
 * network namespace, hostname, addresses/ports, security, echo interval,
 * rdma, signature handling and offload threshold.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
1328 static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
1330 struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
/* either side may opt out of socket sharing entirely */
1332 if (ctx->nosharesock)
1335 /* this server does not share socket */
1336 if (server->nosharesock)
1339 /* If multidialect negotiation see if existing sessions match one */
1340 if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) {
1341 if (server->vals->protocol_id < SMB30_PROT_ID)
1343 } else if (strcmp(ctx->vals->version_string,
1344 SMBDEFAULT_VERSION_STRING) == 0) {
1345 if (server->vals->protocol_id < SMB21_PROT_ID)
/* single-dialect mount must match both dialect table and ops table */
1347 } else if ((server->vals != ctx->vals) || (server->ops != ctx->ops))
/* connection must belong to the caller's network namespace */
1350 if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
1353 if (strcasecmp(server->hostname, ctx->server_hostname))
1356 if (!match_address(server, addr,
1357 (struct sockaddr *)&ctx->srcaddr))
1360 if (!match_port(server, addr))
1363 if (!match_security(server, ctx))
1366 if (server->echo_interval != ctx->echo_interval * HZ)
1369 if (server->rdma != ctx->rdma)
1372 if (server->ignore_signature != ctx->ignore_signature)
1375 if (server->min_offload != ctx->min_offload)
/*
 * cifs_find_tcp_session - walk the global cifs_tcp_ses_list for an existing
 * server connection matching @ctx.  On a hit, bumps srv_count (reference)
 * under cifs_tcp_ses_lock and returns the server; DFS connections and
 * secondary channels are never shared here.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
1381 struct TCP_Server_Info *
1382 cifs_find_tcp_session(struct smb3_fs_context *ctx)
1384 struct TCP_Server_Info *server;
1386 spin_lock(&cifs_tcp_ses_lock);
1387 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
1388 #ifdef CONFIG_CIFS_DFS_UPCALL
1390 * DFS failover implementation in cifs_reconnect() requires unique tcp sessions for
1391 * DFS connections to do failover properly, so avoid sharing them with regular
1392 * shares or even links that may connect to same server but having completely
1393 * different failover targets.
1395 if (server->is_dfs_conn)
1399 * Skip ses channels since they're only handled in lower layers
1400 * (e.g. cifs_send_recv).
1402 if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx))
/* found a shareable connection: take a reference while still locked */
1405 ++server->srv_count;
1406 spin_unlock(&cifs_tcp_ses_lock);
1407 cifs_dbg(FYI, "Existing tcp session with server found\n");
1410 spin_unlock(&cifs_tcp_ses_lock);
/*
 * cifs_put_tcp_session - drop a reference on a server connection and, on
 * the last put, tear it down: unlink from the global list, release the
 * primary-channel reference, cancel delayed work, mark CifsExiting, free
 * crypto/session-key/hostname state and kick the demultiplex thread.
 * @from_reconnect: visible below only as a pass-through to the recursive
 * put of the primary server; its exact effect on the reconnect work is in
 * lines elided from this extract.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
1415 cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
1417 struct task_struct *task;
1419 spin_lock(&cifs_tcp_ses_lock);
/* not the last reference: nothing else to do */
1420 if (--server->srv_count > 0) {
1421 spin_unlock(&cifs_tcp_ses_lock);
1425 /* srv_count can never go negative */
1426 WARN_ON(server->srv_count < 0);
1428 put_net(cifs_net_ns(server));
1430 list_del_init(&server->tcp_ses_list);
1431 spin_unlock(&cifs_tcp_ses_lock);
1433 /* For secondary channels, we pick up ref-count on the primary server */
1434 if (CIFS_SERVER_IS_CHAN(server))
1435 cifs_put_tcp_session(server->primary_server, from_reconnect);
1437 cancel_delayed_work_sync(&server->echo);
1438 cancel_delayed_work_sync(&server->resolve);
1442 * Avoid deadlock here: reconnect work calls
1443 * cifs_put_tcp_session() at its end. Need to be sure
1444 * that reconnect work does nothing with server pointer after
1447 cancel_delayed_work(&server->reconnect);
1449 cancel_delayed_work_sync(&server->reconnect);
1451 spin_lock(&cifs_tcp_ses_lock);
1452 server->tcpStatus = CifsExiting;
1453 spin_unlock(&cifs_tcp_ses_lock);
1455 cifs_crypto_secmech_release(server);
1457 kfree(server->session_key.response);
1458 server->session_key.response = NULL;
1459 server->session_key.len = 0;
1460 kfree(server->hostname);
/* wake the demultiplex thread so it can exit and finish cleanup */
1462 task = xchg(&server->tsk, NULL);
1464 send_sig(SIGKILL, task, 1);
/*
 * cifs_get_tcp_session - find or create a TCP (or SMBDirect) connection
 * matching @ctx.  If @primary_server is set, the new connection becomes a
 * secondary channel and holds a reference on the primary.  On success the
 * returned server has srv_count elevated, the demultiplex kthread running,
 * and echo/dns-resolve delayed work queued.  On failure, allocated state
 * is unwound via the out_err_crypto_release path.
 * NOTE(review): extract is missing lines (error labels, returns, braces);
 * comments cover only the visible statements.
 */
1467 struct TCP_Server_Info *
1468 cifs_get_tcp_session(struct smb3_fs_context *ctx,
1469 struct TCP_Server_Info *primary_server)
1471 struct TCP_Server_Info *tcp_ses = NULL;
1474 cifs_dbg(FYI, "UNC: %s\n", ctx->UNC);
1476 /* see if we already have a matching tcp_ses */
1477 tcp_ses = cifs_find_tcp_session(ctx);
1481 tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL);
1487 tcp_ses->hostname = kstrdup(ctx->server_hostname, GFP_KERNEL);
1488 if (!tcp_ses->hostname) {
1493 if (ctx->nosharesock)
1494 tcp_ses->nosharesock = true;
1496 tcp_ses->ops = ctx->ops;
1497 tcp_ses->vals = ctx->vals;
/* pin the caller's network namespace for the connection's lifetime */
1498 cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
1500 tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
1501 tcp_ses->noblockcnt = ctx->rootfs;
1502 tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs;
1503 tcp_ses->noautotune = ctx->noautotune;
1504 tcp_ses->tcp_nodelay = ctx->sockopt_tcp_nodelay;
1505 tcp_ses->rdma = ctx->rdma;
1506 tcp_ses->in_flight = 0;
1507 tcp_ses->max_in_flight = 0;
1508 tcp_ses->credits = 1;
/* secondary channel: hold a reference on the primary connection */
1509 if (primary_server) {
1510 spin_lock(&cifs_tcp_ses_lock);
1511 ++primary_server->srv_count;
1512 tcp_ses->primary_server = primary_server;
1513 spin_unlock(&cifs_tcp_ses_lock);
1515 init_waitqueue_head(&tcp_ses->response_q);
1516 init_waitqueue_head(&tcp_ses->request_q);
1517 INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
1518 mutex_init(&tcp_ses->srv_mutex);
1519 memcpy(tcp_ses->workstation_RFC1001_name,
1520 ctx->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
1521 memcpy(tcp_ses->server_RFC1001_name,
1522 ctx->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
1523 tcp_ses->session_estab = false;
1524 tcp_ses->sequence_number = 0;
1525 tcp_ses->reconnect_instance = 1;
1526 tcp_ses->lstrp = jiffies;
1527 tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
1528 spin_lock_init(&tcp_ses->req_lock);
1529 INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
1530 INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
1531 INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
1532 INIT_DELAYED_WORK(&tcp_ses->resolve, cifs_resolve_server);
1533 INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
1534 mutex_init(&tcp_ses->reconnect_mutex);
1535 #ifdef CONFIG_CIFS_DFS_UPCALL
1536 mutex_init(&tcp_ses->refpath_lock);
1538 memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
1539 sizeof(tcp_ses->srcaddr));
1540 memcpy(&tcp_ses->dstaddr, &ctx->dstaddr,
1541 sizeof(tcp_ses->dstaddr));
/* use caller-supplied client guid when given, else a random one */
1542 if (ctx->use_client_guid)
1543 memcpy(tcp_ses->client_guid, ctx->client_guid,
1544 SMB2_CLIENT_GUID_SIZE);
1546 generate_random_uuid(tcp_ses->client_guid);
1548 * at this point we are the only ones with the pointer
1549 * to the struct since the kernel thread not created yet
1550 * no need to spinlock this init of tcpStatus or srv_count
1552 tcp_ses->tcpStatus = CifsNew;
1553 ++tcp_ses->srv_count;
/* clamp echo interval to the supported range, else use the default */
1555 if (ctx->echo_interval >= SMB_ECHO_INTERVAL_MIN &&
1556 ctx->echo_interval <= SMB_ECHO_INTERVAL_MAX)
1557 tcp_ses->echo_interval = ctx->echo_interval * HZ;
1559 tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ;
1560 if (tcp_ses->rdma) {
1561 #ifndef CONFIG_CIFS_SMB_DIRECT
1562 cifs_dbg(VFS, "CONFIG_CIFS_SMB_DIRECT is not enabled\n");
1564 goto out_err_crypto_release;
/* RDMA transport replaces the normal TCP socket connect */
1566 tcp_ses->smbd_conn = smbd_get_connection(
1567 tcp_ses, (struct sockaddr *)&ctx->dstaddr);
1568 if (tcp_ses->smbd_conn) {
1569 cifs_dbg(VFS, "RDMA transport established\n");
1571 goto smbd_connected;
1574 goto out_err_crypto_release;
1577 rc = ip_connect(tcp_ses);
1579 cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n");
1580 goto out_err_crypto_release;
1584 * since we're in a cifs function already, we know that
1585 * this will succeed. No need for try_module_get().
1587 __module_get(THIS_MODULE);
1588 tcp_ses->tsk = kthread_run(cifs_demultiplex_thread,
1590 if (IS_ERR(tcp_ses->tsk)) {
1591 rc = PTR_ERR(tcp_ses->tsk);
1592 cifs_dbg(VFS, "error %d create cifsd thread\n", rc);
1593 module_put(THIS_MODULE);
1594 goto out_err_crypto_release;
1596 tcp_ses->min_offload = ctx->min_offload;
1598 * at this point we are the only ones with the pointer
1599 * to the struct since the kernel thread not created yet
1600 * no need to spinlock this update of tcpStatus
1602 spin_lock(&cifs_tcp_ses_lock);
1603 tcp_ses->tcpStatus = CifsNeedNegotiate;
1604 spin_unlock(&cifs_tcp_ses_lock);
/* credits outside [20, 60000] fall back to the SMB2 default */
1606 if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
1607 tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
1609 tcp_ses->max_credits = ctx->max_credits;
1611 tcp_ses->nr_targets = 1;
1612 tcp_ses->ignore_signature = ctx->ignore_signature;
1613 /* thread spawned, put it on the list */
1614 spin_lock(&cifs_tcp_ses_lock);
1615 list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
1616 spin_unlock(&cifs_tcp_ses_lock);
1618 /* queue echo request delayed work */
1619 queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
1621 /* queue dns resolution delayed work */
1622 cifs_dbg(FYI, "%s: next dns resolution scheduled for %d seconds in the future\n",
1623 __func__, SMB_DNS_RESOLVE_INTERVAL_DEFAULT);
1625 queue_delayed_work(cifsiod_wq, &tcp_ses->resolve, (SMB_DNS_RESOLVE_INTERVAL_DEFAULT * HZ));
/* error unwind: release crypto, netns, primary-channel ref, memory */
1629 out_err_crypto_release:
1630 cifs_crypto_secmech_release(tcp_ses);
1632 put_net(cifs_net_ns(tcp_ses));
1636 if (CIFS_SERVER_IS_CHAN(tcp_ses))
1637 cifs_put_tcp_session(tcp_ses->primary_server, false);
1638 kfree(tcp_ses->hostname);
1639 if (tcp_ses->ssocket)
1640 sock_release(tcp_ses->ssocket);
/*
 * match_session - decide whether an existing SMB session can be reused for
 * a mount described by @ctx: security type, channel capacity, and (for
 * non-Kerberos types, per the visible default path) username/password.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
1646 static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
1648 if (ctx->sectype != Unspecified &&
1649 ctx->sectype != ses->sectype)
1653 * If an existing session is limited to less channels than
1654 * requested, it should not be reused
1656 spin_lock(&ses->chan_lock);
1657 if (ses->chan_max < ctx->max_channels) {
1658 spin_unlock(&ses->chan_lock);
1661 spin_unlock(&ses->chan_lock);
1663 switch (ses->sectype) {
/* presumably the Kerberos case: match on credential uid — confirm */
1665 if (!uid_eq(ctx->cred_uid, ses->cred_uid))
1669 /* NULL username means anonymous session */
1670 if (ses->user_name == NULL) {
1676 /* anything else takes username/password */
1677 if (strncmp(ses->user_name,
1678 ctx->username ? ctx->username : "",
1679 CIFS_MAX_USERNAME_LEN))
1681 if ((ctx->username && strlen(ctx->username) != 0) &&
1682 ses->password != NULL &&
1683 strncmp(ses->password,
1684 ctx->password ? ctx->password : "",
1685 CIFS_MAX_PASSWORD_LEN))
/*
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
1692  * cifs_setup_ipc - helper to setup the IPC tcon for the session
1693  * @ses: smb session to issue the request on
1694  * @ctx: the superblock configuration context to use for building the
1695  * new tree connection for the IPC (interprocess communication RPC)
1697  * A new IPC connection is made and stored in the session
1698  * tcon_ipc. The IPC tcon has the same lifetime as the session.
1701 cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
1704 struct cifs_tcon *tcon;
/* buffer sized for "\\<server>\IPC$" built below */
1705 char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
1707 struct TCP_Server_Info *server = ses->server;
1710 * If the mount request that resulted in the creation of the
1711 * session requires encryption, force IPC to be encrypted too.
1714 if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)
1717 cifs_server_dbg(VFS,
1718 "IPC: server doesn't support encryption\n");
1723 tcon = tconInfoAlloc();
1727 scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
1733 rc = server->ops->tree_connect(xid, ses, unc, tcon, ctx->local_nls);
1737 cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
1742 cifs_dbg(FYI, "IPC tcon rc = %d ipc tid = %d\n", rc, tcon->tid);
/* stash the IPC tcon on the session; freed via cifs_free_ipc() */
1744 ses->tcon_ipc = tcon;
/*
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
1750  * cifs_free_ipc - helper to release the session IPC tcon
1751  * @ses: smb session to unmount the IPC from
1753  * Needs to be called everytime a session is destroyed.
1755  * On session close, the IPC is closed and the server must release all tcons of the session.
1756  * No need to send a tree disconnect here.
1758  * Besides, it will make the server to not close durable and resilient files on session close, as
1759  * specified in MS-SMB2 3.3.5.6 Receiving an SMB2 LOGOFF Request.
1762 cifs_free_ipc(struct cifs_ses *ses)
1764 struct cifs_tcon *tcon = ses->tcon_ipc;
/* detach the IPC tcon from the session; teardown lines elided here */
1770 ses->tcon_ipc = NULL;
/*
 * cifs_find_smb_ses - search @server's smb_ses_list for a live session
 * matching @ctx, under cifs_tcp_ses_lock.  Sessions already exiting are
 * skipped.  Reference handling on a hit is in lines elided from this
 * extract.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
1774 static struct cifs_ses *
1775 cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
1777 struct cifs_ses *ses;
1779 spin_lock(&cifs_tcp_ses_lock);
1780 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
1781 if (ses->status == CifsExiting)
1783 if (!match_session(ses, ctx))
1786 spin_unlock(&cifs_tcp_ses_lock);
1789 spin_unlock(&cifs_tcp_ses_lock);
/*
 * cifs_put_smb_ses - drop a reference on an SMB session.  On the last put:
 * mark it CifsExiting, send a protocol logoff if supported, unlink from
 * the server's session list, release all extra channels' TCP connections,
 * then drop the session's reference on its primary server connection.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
1793 void cifs_put_smb_ses(struct cifs_ses *ses)
1795 unsigned int rc, xid;
1796 unsigned int chan_count;
1797 struct TCP_Server_Info *server = ses->server;
1798 cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
1800 spin_lock(&cifs_tcp_ses_lock);
/* session already being torn down elsewhere: nothing to do */
1801 if (ses->status == CifsExiting) {
1802 spin_unlock(&cifs_tcp_ses_lock);
1806 cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
1807 cifs_dbg(FYI, "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->treeName : "NONE");
1809 if (--ses->ses_count > 0) {
1810 spin_unlock(&cifs_tcp_ses_lock);
1814 /* ses_count can never go negative */
1815 WARN_ON(ses->ses_count < 0);
1817 if (ses->status == CifsGood)
1818 ses->status = CifsExiting;
1819 spin_unlock(&cifs_tcp_ses_lock);
/* best-effort protocol logoff; failure is only logged */
1823 if (ses->status == CifsExiting && server->ops->logoff) {
1825 rc = server->ops->logoff(xid, ses);
1827 cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
1832 spin_lock(&cifs_tcp_ses_lock);
1833 list_del_init(&ses->smb_ses_list);
1834 spin_unlock(&cifs_tcp_ses_lock);
1836 spin_lock(&ses->chan_lock);
1837 chan_count = ses->chan_count;
1839 /* close any extra channels */
1840 if (chan_count > 1) {
1843 for (i = 1; i < chan_count; i++) {
/* drop chan_lock around the put, which may sleep/tear down */
1844 spin_unlock(&ses->chan_lock);
1845 cifs_put_tcp_session(ses->chans[i].server, 0);
1846 spin_lock(&ses->chan_lock);
1847 ses->chans[i].server = NULL;
1850 spin_unlock(&ses->chan_lock);
/* finally release the session's reference on the primary connection */
1853 cifs_put_tcp_session(server, 0);
/*
 * cifs_set_cifscreds - populate ctx->username/password (and possibly
 * domainname/workstation_name) from the kernel keyring: first try an
 * address-based "cifs:a:" logon key, then a domain-based "cifs:d:" key.
 * The key payload format is "user:password".  Only built when CONFIG_KEYS
 * is enabled; the stub variant below does nothing.
 * NOTE(review): extract is missing lines (returns/labels/braces elided);
 * comments cover only the visible statements.
 */
1858 /* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
1859 #define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
1861 /* Populate username and pw fields from keyring if possible */
1863 cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
1867 const char *delim, *payload;
1871 struct TCP_Server_Info *server = ses->server;
1872 struct sockaddr_in *sa;
1873 struct sockaddr_in6 *sa6;
1874 const struct user_key_payload *upayload;
1876 desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL);
1880 /* try to find an address key first */
1881 switch (server->dstaddr.ss_family) {
1883 sa = (struct sockaddr_in *)&server->dstaddr;
1884 sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr);
1887 sa6 = (struct sockaddr_in6 *)&server->dstaddr;
1888 sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr);
1891 cifs_dbg(FYI, "Bad ss_family (%hu)\n",
1892 server->dstaddr.ss_family);
1897 cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
1898 key = request_key(&key_type_logon, desc, "");
/* no address key: fall back to a per-domain key if we have a domain */
1900 if (!ses->domainName) {
1901 cifs_dbg(FYI, "domainName is NULL\n");
1906 /* didn't work, try to find a domain key */
1907 sprintf(desc, "cifs:d:%s", ses->domainName);
1908 cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
1909 key = request_key(&key_type_logon, desc, "");
1917 down_read(&key->sem);
1918 upayload = user_key_payload_locked(key);
1919 if (IS_ERR_OR_NULL(upayload)) {
1920 rc = upayload ? PTR_ERR(upayload) : -EINVAL;
1924 /* find first : in payload */
1925 payload = upayload->data;
1926 delim = strnchr(payload, upayload->datalen, ':');
1927 cifs_dbg(FYI, "payload=%s\n", payload);
1929 cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n",
/* username is everything before the ':' delimiter */
1935 len = delim - payload;
1936 if (len > CIFS_MAX_USERNAME_LEN || len <= 0) {
1937 cifs_dbg(FYI, "Bad value from username search (len=%zd)\n",
1943 ctx->username = kstrndup(payload, len, GFP_KERNEL);
1944 if (!ctx->username) {
1945 cifs_dbg(FYI, "Unable to allocate %zd bytes for username\n",
1950 cifs_dbg(FYI, "%s: username=%s\n", __func__, ctx->username);
/* password is the remainder after the delimiter */
1952 len = key->datalen - (len + 1);
1953 if (len > CIFS_MAX_PASSWORD_LEN || len <= 0) {
1954 cifs_dbg(FYI, "Bad len for password search (len=%zd)\n", len);
1956 kfree(ctx->username);
1957 ctx->username = NULL;
1962 ctx->password = kstrndup(delim, len, GFP_KERNEL);
1963 if (!ctx->password) {
1964 cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n",
1967 kfree(ctx->username);
1968 ctx->username = NULL;
1973 * If we have a domain key then we must set the domainName in the
1976 if (is_domain && ses->domainName) {
1977 ctx->domainname = kstrdup(ses->domainName, GFP_KERNEL);
1978 if (!ctx->domainname) {
1979 cifs_dbg(FYI, "Unable to allocate %zd bytes for domain\n",
/* allocation failure: unwind everything populated so far */
1982 kfree(ctx->username);
1983 ctx->username = NULL;
1984 kfree_sensitive(ctx->password);
1985 ctx->password = NULL;
1990 ctx->workstation_name = kstrdup(ses->workstation_name, GFP_KERNEL);
1991 if (!ctx->workstation_name) {
1992 cifs_dbg(FYI, "Unable to allocate memory for workstation_name\n");
1994 kfree(ctx->username);
1995 ctx->username = NULL;
1996 kfree_sensitive(ctx->password);
1997 ctx->password = NULL;
1998 kfree(ctx->domainname);
1999 ctx->domainname = NULL;
2008 cifs_dbg(FYI, "%s: returning %d\n", __func__, rc);
2011 #else /* ! CONFIG_KEYS */
/* keyring support not built in: stub that takes no action */
2013 cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)),
2014 struct cifs_ses *ses __attribute__((unused)))
2018 #endif /* CONFIG_KEYS */
/*
 * NOTE(review): extract is missing lines (returns/labels/braces elided);
 * comments cover only the visible statements.
 */
2021  * cifs_get_smb_ses - get a session matching @ctx data from @server
2022  * @server: server to setup the session to
2023  * @ctx: superblock configuration context to use to setup the session
2025  * This function assumes it is being called from cifs_mount() where we
2026  * already got a server reference (server refcount +1). See
2027  * cifs_get_tcon() for refcount explanations.
2030 cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
2034 struct cifs_ses *ses;
2035 struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
2036 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
2040 ses = cifs_find_smb_ses(server, ctx);
2042 cifs_dbg(FYI, "Existing smb sess found (status=%d)\n",
/* existing session path: reconnect the channel if it needs it */
2045 spin_lock(&ses->chan_lock);
2046 if (cifs_chan_needs_reconnect(ses, server)) {
2047 spin_unlock(&ses->chan_lock);
2048 cifs_dbg(FYI, "Session needs reconnect\n");
2050 mutex_lock(&ses->session_mutex);
2051 rc = cifs_negotiate_protocol(xid, ses, server);
2053 mutex_unlock(&ses->session_mutex);
2054 /* problem -- put our ses reference */
2055 cifs_put_smb_ses(ses);
2060 rc = cifs_setup_session(xid, ses, server,
2063 mutex_unlock(&ses->session_mutex);
2064 /* problem -- put our reference */
2065 cifs_put_smb_ses(ses);
2069 mutex_unlock(&ses->session_mutex);
2071 spin_lock(&ses->chan_lock);
2073 spin_unlock(&ses->chan_lock);
2075 /* existing SMB ses has a server reference already */
2076 cifs_put_tcp_session(server, 0);
2081 cifs_dbg(FYI, "Existing smb sess not found\n");
2082 ses = sesInfoAlloc();
2086 /* new SMB session uses our server ref */
2087 ses->server = server;
/* record the server address as a printable string on the session */
2088 if (server->dstaddr.ss_family == AF_INET6)
2089 sprintf(ses->ip_addr, "%pI6", &addr6->sin6_addr);
2091 sprintf(ses->ip_addr, "%pI4", &addr->sin_addr);
2093 if (ctx->username) {
2094 ses->user_name = kstrdup(ctx->username, GFP_KERNEL);
2095 if (!ses->user_name)
2099 /* ctx->password freed at unmount */
2100 if (ctx->password) {
2101 ses->password = kstrdup(ctx->password, GFP_KERNEL);
2105 if (ctx->domainname) {
2106 ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL);
2107 if (!ses->domainName)
2110 if (ctx->workstation_name) {
2111 ses->workstation_name = kstrdup(ctx->workstation_name,
2113 if (!ses->workstation_name)
2116 if (ctx->domainauto)
2117 ses->domainAuto = ctx->domainauto;
2118 ses->cred_uid = ctx->cred_uid;
2119 ses->linux_uid = ctx->linux_uid;
2121 ses->sectype = ctx->sectype;
2122 ses->sign = ctx->sign;
2124 /* add server as first channel */
2125 spin_lock(&ses->chan_lock);
2126 ses->chans[0].server = server;
2127 ses->chan_count = 1;
2128 ses->chan_max = ctx->multichannel ? ctx->max_channels:1;
2129 ses->chans_need_reconnect = 1;
2130 spin_unlock(&ses->chan_lock);
2132 mutex_lock(&ses->session_mutex);
2133 rc = cifs_negotiate_protocol(xid, ses, server);
2135 rc = cifs_setup_session(xid, ses, server, ctx->local_nls);
2136 mutex_unlock(&ses->session_mutex);
2138 /* each channel uses a different signing key */
2139 spin_lock(&ses->chan_lock);
2140 memcpy(ses->chans[0].signkey, ses->smb3signingkey,
2141 sizeof(ses->smb3signingkey));
2142 spin_unlock(&ses->chan_lock);
2148 * success, put it on the list and add it as first channel
2149 * note: the session becomes active soon after this. So you'll
2150 * need to lock before changing something in the session.
2152 spin_lock(&cifs_tcp_ses_lock);
2153 list_add(&ses->smb_ses_list, &server->smb_ses_list);
2154 spin_unlock(&cifs_tcp_ses_lock);
/* establish the IPC$ tcon that shares the session's lifetime */
2158 cifs_setup_ipc(ses, ctx);
/*
 * match_tcon - decide whether an existing tree connection matches the
 * mount request: same UNC path plus identical seal/snapshot/handle-timeout
 * /no_lease/nodelete settings, and not already exiting.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
2168 static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
2170 if (tcon->tidStatus == CifsExiting)
2172 if (strncmp(tcon->treeName, ctx->UNC, MAX_TREE_SIZE))
2174 if (tcon->seal != ctx->seal)
2176 if (tcon->snapshot_time != ctx->snapshot_time)
2178 if (tcon->handle_timeout != ctx->handle_timeout)
2180 if (tcon->no_lease != ctx->no_lease)
2182 if (tcon->nodelete != ctx->nodelete)
/*
 * cifs_find_tcon - walk @ses->tcon_list under cifs_tcp_ses_lock looking
 * for a tcon matching @ctx.  Reference handling on a hit is in lines
 * elided from this extract.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
2187 static struct cifs_tcon *
2188 cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
2190 struct list_head *tmp;
2191 struct cifs_tcon *tcon;
2193 spin_lock(&cifs_tcp_ses_lock);
2194 list_for_each(tmp, &ses->tcon_list) {
2195 tcon = list_entry(tmp, struct cifs_tcon, tcon_list);
2197 if (!match_tcon(tcon, ctx))
2200 spin_unlock(&cifs_tcp_ses_lock);
2203 spin_unlock(&cifs_tcp_ses_lock);
/*
 * cifs_put_tcon - drop a reference on a tree connection.  On the last put:
 * unlink it, unregister witness notifications if used, send a protocol
 * tree disconnect, release the fscache cookie, and drop the tcon's
 * reference on its session.  IPC tcons are skipped (session-owned).
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
2208 cifs_put_tcon(struct cifs_tcon *tcon)
2211 struct cifs_ses *ses;
2214 * IPC tcon share the lifetime of their session and are
2215 * destroyed in the session put function
2217 if (tcon == NULL || tcon->ipc)
2221 cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
2222 spin_lock(&cifs_tcp_ses_lock);
2223 if (--tcon->tc_count > 0) {
2224 spin_unlock(&cifs_tcp_ses_lock);
2228 /* tc_count can never go negative */
2229 WARN_ON(tcon->tc_count < 0);
2231 list_del_init(&tcon->tcon_list);
2232 spin_unlock(&cifs_tcp_ses_lock);
/* best-effort witness unregistration; failure is only logged */
2234 if (tcon->use_witness) {
2237 rc = cifs_swn_unregister(tcon);
2239 cifs_dbg(VFS, "%s: Failed to unregister for witness notifications: %d\n",
2245 if (ses->server->ops->tree_disconnect)
2246 ses->server->ops->tree_disconnect(xid, tcon);
2249 cifs_fscache_release_super_cookie(tcon);
/* drop the session reference this tcon was holding */
2251 cifs_put_smb_ses(ses);
/*
 * NOTE(review): extract is missing lines (error labels, returns, braces);
 * comments cover only the visible statements.
 */
2255  * cifs_get_tcon - get a tcon matching @ctx data from @ses
2256  * @ses: smb session to issue the request on
2257  * @ctx: the superblock configuration context to use for building the
2259  * - tcon refcount is the number of mount points using the tcon.
2260  * - ses refcount is the number of tcon using the session.
2262  * 1. This function assumes it is being called from cifs_mount() where
2263  * we already got a session reference (ses refcount +1).
2265  * 2. Since we're in the context of adding a mount point, the end
2266  * result should be either:
2268  * a) a new tcon already allocated with refcount=1 (1 mount point) and
2269  * its session refcount incremented (1 new tcon). This +1 was
2270  * already done in (1).
2272  * b) an existing tcon with refcount+1 (add a mount point to it) and
2273  * identical ses refcount (no new tcon). Because of (1) we need to
2274  * decrement the ses refcount.
2276 static struct cifs_tcon *
2277 cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
2280 struct cifs_tcon *tcon;
2282 tcon = cifs_find_tcon(ses, ctx);
2285 * tcon has refcount already incremented but we need to
2286 * decrement extra ses reference gotten by caller (case b)
2288 cifs_dbg(FYI, "Found match on UNC path\n");
2289 cifs_put_smb_ses(ses);
/* dialects without tree_connect (shouldn't happen) cannot proceed */
2293 if (!ses->server->ops->tree_connect) {
2298 tcon = tconInfoAlloc();
/* snapshot mounts require an SMB2+ dialect */
2304 if (ctx->snapshot_time) {
2305 if (ses->server->vals->protocol_id == 0) {
2307 "Use SMB2 or later for snapshot mount option\n");
2311 tcon->snapshot_time = ctx->snapshot_time;
2314 if (ctx->handle_timeout) {
2315 if (ses->server->vals->protocol_id == 0) {
2317 "Use SMB2.1 or later for handle timeout option\n");
2321 tcon->handle_timeout = ctx->handle_timeout;
2325 if (ctx->password) {
2326 tcon->password = kstrdup(ctx->password, GFP_KERNEL);
2327 if (!tcon->password) {
/* seal (per-share encryption) requires SMB3+ and server support */
2334 if (ses->server->vals->protocol_id == 0) {
2336 "SMB3 or later required for encryption\n");
2339 } else if (tcon->ses->server->capabilities &
2340 SMB2_GLOBAL_CAP_ENCRYPTION)
2343 cifs_dbg(VFS, "Encryption is not supported on share\n");
2349 if (ctx->linux_ext) {
2350 if (ses->server->posix_ext_supported) {
2351 tcon->posix_extensions = true;
2352 pr_warn_once("SMB3.11 POSIX Extensions are experimental\n");
2353 } else if ((ses->server->vals->protocol_id == SMB311_PROT_ID) ||
2354 (strcmp(ses->server->vals->version_string,
2355 SMB3ANY_VERSION_STRING) == 0) ||
2356 (strcmp(ses->server->vals->version_string,
2357 SMBDEFAULT_VERSION_STRING) == 0)) {
2358 cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
2362 cifs_dbg(VFS, "Check vers= mount option. SMB3.11 "
2363 "disabled but required for POSIX extensions\n");
/* perform the actual protocol tree connect */
2370 rc = ses->server->ops->tree_connect(xid, ses, ctx->UNC, tcon,
2373 cifs_dbg(FYI, "Tcon rc = %d\n", rc);
2377 tcon->use_persistent = false;
2378 /* check if SMB2 or later, CIFS does not support persistent handles */
2379 if (ctx->persistent) {
2380 if (ses->server->vals->protocol_id == 0) {
2382 "SMB3 or later required for persistent handles\n");
2385 } else if (ses->server->capabilities &
2386 SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
2387 tcon->use_persistent = true;
2388 else /* persistent handles requested but not supported */ {
2390 "Persistent handles not supported on share\n");
/* auto-enable persistence on continuously-available shares */
2394 } else if ((tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
2395 && (ses->server->capabilities & SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
2396 && (ctx->nopersistent == false)) {
2397 cifs_dbg(FYI, "enabling persistent handles\n");
2398 tcon->use_persistent = true;
2399 } else if (ctx->resilient) {
2400 if (ses->server->vals->protocol_id == 0) {
2402 "SMB2.1 or later required for resilient handles\n");
2406 tcon->use_resilient = true;
2409 tcon->use_witness = false;
2410 if (IS_ENABLED(CONFIG_CIFS_SWN_UPCALL) && ctx->witness) {
2411 if (ses->server->vals->protocol_id >= SMB30_PROT_ID) {
2412 if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER) {
2414 * Set witness in use flag in first place
2415 * to retry registration in the echo task
2417 tcon->use_witness = true;
2418 /* And try to register immediately */
2419 rc = cifs_swn_register(tcon);
2421 cifs_dbg(VFS, "Failed to register for witness notifications: %d\n", rc);
2425 /* TODO: try to extend for non-cluster uses (eg multichannel) */
2426 cifs_dbg(VFS, "witness requested on mount but no CLUSTER capability on share\n");
2431 cifs_dbg(VFS, "SMB3 or later required for witness option\n");
2437 /* If the user really knows what they are doing they can override */
2438 if (tcon->share_flags & SMB2_SHAREFLAG_NO_CACHING) {
2440 cifs_dbg(VFS, "cache=ro requested on mount but NO_CACHING flag set on share\n");
2441 else if (ctx->cache_rw)
2442 cifs_dbg(VFS, "cache=singleclient requested on mount but NO_CACHING flag set on share\n");
2445 if (ctx->no_lease) {
2446 if (ses->server->vals->protocol_id == 0) {
2448 "SMB2 or later required for nolease option\n");
2452 tcon->no_lease = ctx->no_lease;
2456 * We can have only one retry value for a connection to a share so for
2457 * resources mounted more than once to the same server share the last
2458 * value passed in for the retry flag is used.
2460 tcon->retry = ctx->retry;
2461 tcon->nocase = ctx->nocase;
/* handle caching only makes sense with directory leasing support */
2462 if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
2463 tcon->nohandlecache = ctx->nohandlecache;
2465 tcon->nohandlecache = true;
2466 tcon->nodelete = ctx->nodelete;
2467 tcon->local_lease = ctx->local_lease;
2468 INIT_LIST_HEAD(&tcon->pending_opens);
/* publish the new tcon on the session's list */
2470 spin_lock(&cifs_tcp_ses_lock);
2471 list_add(&tcon->tcon_list, &ses->tcon_list);
2472 spin_unlock(&cifs_tcp_ses_lock);
/*
 * cifs_put_tlink - drop a reference on a tcon_link.  While still in the
 * tree (or other refs remain) just refresh tl_time for the pruning worker;
 * otherwise release the underlying tcon.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
2482 cifs_put_tlink(struct tcon_link *tlink)
/* NULL and error-pointer tlinks are silently ignored */
2484 if (!tlink || IS_ERR(tlink))
2487 if (!atomic_dec_and_test(&tlink->tl_count) ||
2488 test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) {
2489 tlink->tl_time = jiffies;
2493 if (!IS_ERR(tlink_tcon(tlink)))
2494 cifs_put_tcon(tlink_tcon(tlink));
/*
 * compare_mount_options - decide whether an existing superblock's mount
 * options are compatible with a new mount: MS flags, cifs mount flags,
 * r/wsize, uid/gid, file/dir modes, charset and attribute cache timeouts.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
2500 compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
2502 struct cifs_sb_info *old = CIFS_SB(sb);
2503 struct cifs_sb_info *new = mnt_data->cifs_sb;
2504 unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK;
2505 unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK;
2507 if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
/* if serverino was auto-disabled on old sb, don't let it block a match */
2510 if (old->mnt_cifs_serverino_autodisabled)
2511 newflags &= ~CIFS_MOUNT_SERVER_INUM;
2513 if (oldflags != newflags)
2517 * We want to share sb only if we don't specify an r/wsize or
2518 * specified r/wsize is greater than or equal to existing one.
2520 if (new->ctx->wsize && new->ctx->wsize < old->ctx->wsize)
2523 if (new->ctx->rsize && new->ctx->rsize < old->ctx->rsize)
2526 if (!uid_eq(old->ctx->linux_uid, new->ctx->linux_uid) ||
2527 !gid_eq(old->ctx->linux_gid, new->ctx->linux_gid))
2530 if (old->ctx->file_mode != new->ctx->file_mode ||
2531 old->ctx->dir_mode != new->ctx->dir_mode)
2534 if (strcmp(old->local_nls->charset, new->local_nls->charset))
2537 if (old->ctx->acregmax != new->ctx->acregmax)
2539 if (old->ctx->acdirmax != new->ctx->acdirmax)
/*
 * match_prepath - compare the mount prefix paths of an existing superblock
 * and a new mount: match when both set the same prepath, or neither sets
 * one.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
2546 match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
2548 struct cifs_sb_info *old = CIFS_SB(sb);
2549 struct cifs_sb_info *new = mnt_data->cifs_sb;
2550 bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
2552 bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
2555 if (old_set && new_set && !strcmp(new->prepath, old->prepath))
2557 else if (!old_set && !new_set)
/*
 * cifs_match_super - sget() comparison callback: decide whether an
 * existing cifs superblock can be reused for the mount described by
 * @data (a struct cifs_mnt_data).  Requires matching server, session,
 * tcon, prepath and mount options.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
2564 cifs_match_super(struct super_block *sb, void *data)
2566 struct cifs_mnt_data *mnt_data = (struct cifs_mnt_data *)data;
2567 struct smb3_fs_context *ctx;
2568 struct cifs_sb_info *cifs_sb;
2569 struct TCP_Server_Info *tcp_srv;
2570 struct cifs_ses *ses;
2571 struct cifs_tcon *tcon;
2572 struct tcon_link *tlink;
2575 spin_lock(&cifs_tcp_ses_lock);
2576 cifs_sb = CIFS_SB(sb);
2577 tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
2578 if (tlink == NULL) {
2579 /* can not match superblock if tlink were ever null */
2580 spin_unlock(&cifs_tcp_ses_lock);
2583 tcon = tlink_tcon(tlink);
2585 tcp_srv = ses->server;
2587 ctx = mnt_data->ctx;
/* all layers of the connection stack must match */
2589 if (!match_server(tcp_srv, ctx) ||
2590 !match_session(ses, ctx) ||
2591 !match_tcon(tcon, ctx) ||
2592 !match_prepath(sb, mnt_data)) {
2597 rc = compare_mount_options(sb, mnt_data);
2599 spin_unlock(&cifs_tcp_ses_lock);
/* drop the tlink reference taken above before returning */
2600 cifs_put_tlink(tlink);
/*
 * Lockdep reclassification of CIFS sockets: give the sock lock and slock of
 * kernel-created CIFS sockets their own lock classes ("slock-AF_INET-CIFS" /
 * "sk_lock-AF_INET-CIFS" and the IPv6 equivalents) so lockdep can tell them
 * apart from ordinary userspace sockets.  When CONFIG_DEBUG_LOCK_ALLOC is
 * not set the functions are no-op stubs (bodies elided in this extraction).
 */
2604 #ifdef CONFIG_DEBUG_LOCK_ALLOC
2605 static struct lock_class_key cifs_key[2];
2606 static struct lock_class_key cifs_slock_key[2];
2609 cifs_reclassify_socket4(struct socket *sock)
2611 struct sock *sk = sock->sk;
2612 BUG_ON(!sock_allow_reclassification(sk));
2613 sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
2614 &cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
2618 cifs_reclassify_socket6(struct socket *sock)
2620 struct sock *sk = sock->sk;
2621 BUG_ON(!sock_allow_reclassification(sk));
2622 sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
2623 &cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]);
2627 cifs_reclassify_socket4(struct socket *sock)
2632 cifs_reclassify_socket6(struct socket *sock)
2637 /* See RFC1001 section 14 on representation of Netbios names */
/*
 * rfc1002mangle - half-ASCII encode @length bytes of @source into @target:
 * each source byte becomes two bytes, 'A' + high nibble and 'A' + low
 * nibble, as required for NetBIOS names in RFC1001 session requests.
 * Caller must size @target for 2 * @length output bytes.
 * NOTE(review): the j increment and closing brace are elided in this
 * extraction.
 */
2638 static void rfc1002mangle(char *target, char *source, unsigned int length)
2642 for (i = 0, j = 0; i < (length); i++) {
2643 /* mask a nibble at a time and encode */
2644 target[j] = 'A' + (0x0F & (source[i] >> 4));
2645 target[j+1] = 'A' + (0x0F & source[i]);
/*
 * bind_socket - if a specific source address (server->srcaddr) was requested
 * for this TCP session, bind server->ssocket to it before connecting.
 * On bind failure, logs the source address (IPv4 or IPv6 form) along with
 * the error code.  A srcaddr family of AF_UNSPEC means "no binding needed".
 * NOTE(review): the rc initialisation, error-branch brace and return are
 * elided in this extraction.
 */
2652 bind_socket(struct TCP_Server_Info *server)
2655 if (server->srcaddr.ss_family != AF_UNSPEC) {
2656 /* Bind to the specified local IP address */
2657 struct socket *socket = server->ssocket;
2658 rc = socket->ops->bind(socket,
2659 (struct sockaddr *) &server->srcaddr,
2660 sizeof(server->srcaddr));
2662 struct sockaddr_in *saddr4;
2663 struct sockaddr_in6 *saddr6;
2664 saddr4 = (struct sockaddr_in *)&server->srcaddr;
2665 saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
2666 if (saddr6->sin6_family == AF_INET6)
2667 cifs_server_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n",
2668 &saddr6->sin6_addr, rc);
2670 cifs_server_dbg(VFS, "Failed to bind to: %pI4, error: %d\n",
2671 &saddr4->sin_addr.s_addr, rc);
/*
 * ip_rfc1001_connect - send an RFC1001 session request (required by some
 * servers, typically on port 139, before SMB negotiate can be sent).
 *
 * Builds an rfc1002_session_packet with the mangled called name (the
 * server's RFC1001 name, or DEFAULT_CIFS_CALLED_NAME when unset) and the
 * mangled calling name (this workstation's RFC1001 name), then transmits
 * it via smb_send() and sleeps briefly, since at least one server needs a
 * short pause before the negotiate that follows.
 */
2678 ip_rfc1001_connect(struct TCP_Server_Info *server)
2682 * some servers require RFC1001 sessinit before sending
2683 * negprot - BB check reconnection in case where second
2684 * sessinit is sent but no second negprot
2686 struct rfc1002_session_packet *ses_init_buf;
2687 struct smb_hdr *smb_buf;
2688 ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet),
2691 ses_init_buf->trailer.session_req.called_len = 32;
2693 if (server->server_RFC1001_name[0] != 0)
2694 rfc1002mangle(ses_init_buf->trailer.
2695 session_req.called_name,
2696 server->server_RFC1001_name,
2697 RFC1001_NAME_LEN_WITH_NULL);
2699 rfc1002mangle(ses_init_buf->trailer.
2700 session_req.called_name,
2701 DEFAULT_CIFS_CALLED_NAME,
2702 RFC1001_NAME_LEN_WITH_NULL);
2704 ses_init_buf->trailer.session_req.calling_len = 32;
2707 * calling name ends in null (byte 16) from old smb
2710 if (server->workstation_RFC1001_name[0] != 0)
2711 rfc1002mangle(ses_init_buf->trailer.
2712 session_req.calling_name,
2713 server->workstation_RFC1001_name,
2714 RFC1001_NAME_LEN_WITH_NULL);
2716 rfc1002mangle(ses_init_buf->trailer.
2717 session_req.calling_name,
2719 RFC1001_NAME_LEN_WITH_NULL);
2721 ses_init_buf->trailer.session_req.scope1 = 0;
2722 ses_init_buf->trailer.session_req.scope2 = 0;
2723 smb_buf = (struct smb_hdr *)ses_init_buf;
2725 /* sizeof RFC1002_SESSION_REQUEST with no scope */
2726 smb_buf->smb_buf_length = cpu_to_be32(0x81000044);
2727 rc = smb_send(server, smb_buf, 0x44);
2728 kfree(ses_init_buf);
2730 * RFC1001 layer in at least one server
2731 * requires very short break before negprot
2732 * presumably because not expecting negprot
2733 * to follow so fast. This is a simple
2734 * solution that works without
2735 * complicating the code and causes no
2736 * significant slowing down on mount
2739 usleep_range(1000, 2000);
2742 * else the negprot may still work without this
2743 * even though malloc failed
/*
 * generic_ip_connect - establish the TCP connection to server->dstaddr.
 *
 * Creates the kernel socket (in the server's network namespace) if one does
 * not already exist, applies lockdep reclassification, binds the optional
 * source address, tunes send/receive timeouts and (when autotuning is
 * disabled) buffer sizes, optionally sets TCP_NODELAY, then connects.
 * A non-blocking connect returning -EINPROGRESS is tolerated when
 * server->noblockcnt is set (SMB root filesystem mounts).  Finally, if the
 * destination port is the RFC1001 port (139), the RFC1001 session request
 * is issued via ip_rfc1001_connect().
 * NOTE(review): sfamily assignment and several braces/returns are elided in
 * this extraction.
 */
2750 generic_ip_connect(struct TCP_Server_Info *server)
2755 struct socket *socket = server->ssocket;
2756 struct sockaddr *saddr;
2758 saddr = (struct sockaddr *) &server->dstaddr;
2760 if (server->dstaddr.ss_family == AF_INET6) {
2761 struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&server->dstaddr;
2763 sport = ipv6->sin6_port;
2764 slen = sizeof(struct sockaddr_in6);
2766 cifs_dbg(FYI, "%s: connecting to [%pI6]:%d\n", __func__, &ipv6->sin6_addr,
2769 struct sockaddr_in *ipv4 = (struct sockaddr_in *)&server->dstaddr;
2771 sport = ipv4->sin_port;
2772 slen = sizeof(struct sockaddr_in);
2774 cifs_dbg(FYI, "%s: connecting to %pI4:%d\n", __func__, &ipv4->sin_addr,
2778 if (socket == NULL) {
2779 rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
2780 IPPROTO_TCP, &socket, 1);
2782 cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
2783 server->ssocket = NULL;
2787 /* BB other socket options to set KEEPALIVE, NODELAY? */
2788 cifs_dbg(FYI, "Socket created\n");
2789 server->ssocket = socket;
2790 socket->sk->sk_allocation = GFP_NOFS;
2791 if (sfamily == AF_INET6)
2792 cifs_reclassify_socket6(socket);
2794 cifs_reclassify_socket4(socket);
2797 rc = bind_socket(server);
2802 * Eventually check for other socket options to change from
2803 * the default. sock_setsockopt not used because it expects
2806 socket->sk->sk_rcvtimeo = 7 * HZ;
2807 socket->sk->sk_sndtimeo = 5 * HZ;
2809 /* make the bufsizes depend on wsize/rsize and max requests */
2810 if (server->noautotune) {
2811 if (socket->sk->sk_sndbuf < (200 * 1024))
2812 socket->sk->sk_sndbuf = 200 * 1024;
2813 if (socket->sk->sk_rcvbuf < (140 * 1024))
2814 socket->sk->sk_rcvbuf = 140 * 1024;
2817 if (server->tcp_nodelay)
2818 tcp_sock_set_nodelay(socket->sk);
2820 cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n",
2821 socket->sk->sk_sndbuf,
2822 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
2824 rc = socket->ops->connect(socket, saddr, slen,
2825 server->noblockcnt ? O_NONBLOCK : 0);
2827 * When mounting SMB root file systems, we do not want to block in
2828 * connect. Otherwise bail out and then let cifs_reconnect() perform
2829 * reconnect failover - if possible.
2831 if (server->noblockcnt && rc == -EINPROGRESS)
2834 cifs_dbg(FYI, "Error %d connecting to server\n", rc);
2835 trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc);
2836 sock_release(socket);
2837 server->ssocket = NULL;
2840 trace_smb3_connect_done(server->hostname, server->conn_id, &server->dstaddr);
2841 if (sport == htons(RFC1001_PORT))
2842 rc = ip_rfc1001_connect(server);
/*
 * ip_connect - connect to the server, choosing the destination port when
 * the caller did not fix one: first try the direct SMB port 445
 * (CIFS_PORT); if that attempt fails, retry on the NetBIOS session port
 * 139 (RFC1001_PORT).  @sport aliases the port field inside
 * server->dstaddr for either address family.
 * NOTE(review): the "port already set" path and success-return lines are
 * elided in this extraction.
 */
2848 ip_connect(struct TCP_Server_Info *server)
2851 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
2852 struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
2854 if (server->dstaddr.ss_family == AF_INET6)
2855 sport = &addr6->sin6_port;
2857 sport = &addr->sin_port;
2862 /* try with 445 port at first */
2863 *sport = htons(CIFS_PORT);
2865 rc = generic_ip_connect(server);
2869 /* if it failed, try with 139 port */
2870 *sport = htons(RFC1001_PORT);
2873 return generic_ip_connect(server);
/*
 * reset_cifs_unix_caps - (re-)negotiate the CIFS Unix Extensions
 * capabilities with the server for @tcon, honouring the per-mount options
 * in @ctx (nolinux, noacl, noposixpaths) and, on reconnect, the previously
 * negotiated capability set so mount behavior does not silently change.
 *
 * Queries the server's Unix capabilities via CIFSSMBQFSUnixInfo(), masks
 * them against mount options and the saved capability word, updates
 * cifs_sb->mnt_cifs_flags (POSIXACL / POSIX_PATHS), logs each negotiated
 * capability under CONFIG_CIFS_DEBUG2, and pushes the final set back to the
 * server with CIFSSMBSetFSUnixInfo().  On failure, tells the user to
 * consider mounting with nounix.
 */
2876 void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
2877 struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
2880 * If we are reconnecting then should we check to see if
2881 * any requested capabilities changed locally e.g. via
2882 * remount but we can not do much about it here
2883 * if they have (even if we could detect it by the following)
2884 * Perhaps we could add a backpointer to array of sb from tcon
2885 * or if we change to make all sb to same share the same
2886 * sb as NFS - then we only have one backpointer to sb.
2887 * What if we wanted to mount the server share twice once with
2888 * and once without posixacls or posix paths?
2890 __u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
2892 if (ctx && ctx->no_linux_ext) {
2893 tcon->fsUnixInfo.Capability = 0;
2894 tcon->unix_ext = 0; /* Unix Extensions disabled */
2895 cifs_dbg(FYI, "Linux protocol extensions disabled\n");
2898 tcon->unix_ext = 1; /* Unix Extensions supported */
2900 if (!tcon->unix_ext) {
2901 cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n");
2905 if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
2906 __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
2907 cifs_dbg(FYI, "unix caps which server supports %lld\n", cap);
2909 * check for reconnect case in which we do not
2910 * want to change the mount behavior if we can avoid it
2914 * turn off POSIX ACL and PATHNAMES if not set
2915 * originally at mount time
2917 if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0)
2918 cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
2919 if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
2920 if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
2921 cifs_dbg(VFS, "POSIXPATH support change\n");
2922 cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
2923 } else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
2924 cifs_dbg(VFS, "possible reconnect error\n");
2925 cifs_dbg(VFS, "server disabled POSIX path support\n");
2929 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
2930 cifs_dbg(VFS, "per-share encryption not supported yet\n");
2932 cap &= CIFS_UNIX_CAP_MASK;
2933 if (ctx && ctx->no_psx_acl)
2934 cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
2935 else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
2936 cifs_dbg(FYI, "negotiated posix acl support\n");
2938 cifs_sb->mnt_cifs_flags |=
2939 CIFS_MOUNT_POSIXACL;
2942 if (ctx && ctx->posix_paths == 0)
2943 cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
2944 else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
2945 cifs_dbg(FYI, "negotiate posix pathnames\n");
2947 cifs_sb->mnt_cifs_flags |=
2948 CIFS_MOUNT_POSIX_PATHS;
2951 cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap);
2952 #ifdef CONFIG_CIFS_DEBUG2
2953 if (cap & CIFS_UNIX_FCNTL_CAP)
2954 cifs_dbg(FYI, "FCNTL cap\n");
2955 if (cap & CIFS_UNIX_EXTATTR_CAP)
2956 cifs_dbg(FYI, "EXTATTR cap\n");
2957 if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
2958 cifs_dbg(FYI, "POSIX path cap\n");
2959 if (cap & CIFS_UNIX_XATTR_CAP)
2960 cifs_dbg(FYI, "XATTR cap\n");
2961 if (cap & CIFS_UNIX_POSIX_ACL_CAP)
2962 cifs_dbg(FYI, "POSIX ACL cap\n");
2963 if (cap & CIFS_UNIX_LARGE_READ_CAP)
2964 cifs_dbg(FYI, "very large read cap\n");
2965 if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
2966 cifs_dbg(FYI, "very large write cap\n");
2967 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP)
2968 cifs_dbg(FYI, "transport encryption cap\n");
2969 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
2970 cifs_dbg(FYI, "mandatory transport encryption cap\n");
2971 #endif /* CIFS_DEBUG2 */
2972 if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
2974 cifs_dbg(FYI, "resetting capabilities failed\n");
2976 cifs_dbg(VFS, "Negotiating Unix capabilities with the server failed. Consider mounting with the Unix Extensions disabled if problems are found by specifying the nounix mount option.\n");
/*
 * cifs_setup_cifs_sb - initialise a cifs_sb_info from its fs_context prior
 * to mount: tlink tree + prune work, NLS charset (default or the requested
 * iocharset, failing the mount if that charset is unavailable), mount-flag
 * translation via smb3_update_mnt_flags(), cache-mode flags (cache=ro /
 * cache=singleclient with their warnings), the dynperm-vs-cifsacl conflict
 * warning, and the optional prefix path copy.
 * Returns 0 on success; error paths (missing charset, prepath allocation
 * failure) are partially elided in this extraction.
 */
2982 int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
2984 struct smb3_fs_context *ctx = cifs_sb->ctx;
2986 INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
2988 spin_lock_init(&cifs_sb->tlink_tree_lock);
2989 cifs_sb->tlink_tree = RB_ROOT;
2991 cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n",
2992 ctx->file_mode, ctx->dir_mode);
2994 /* this is needed for ASCII cp to Unicode converts */
2995 if (ctx->iocharset == NULL) {
2996 /* load_nls_default cannot return null */
2997 cifs_sb->local_nls = load_nls_default();
2999 cifs_sb->local_nls = load_nls(ctx->iocharset);
3000 if (cifs_sb->local_nls == NULL) {
3001 cifs_dbg(VFS, "CIFS mount error: iocharset %s not found\n",
3006 ctx->local_nls = cifs_sb->local_nls;
3008 smb3_update_mnt_flags(cifs_sb);
3011 cifs_dbg(FYI, "mounting share using direct i/o\n");
3012 if (ctx->cache_ro) {
3013 cifs_dbg(VFS, "mounting share with read only caching. Ensure that the share will not be modified while in use.\n")
3014 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RO_CACHE;
3015 } else if (ctx->cache_rw) {
3016 cifs_dbg(VFS, "mounting share in single client RW caching mode. Ensure that no other systems will be accessing the share.\n");
3017 cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_RO_CACHE |
3018 CIFS_MOUNT_RW_CACHE);
3021 if ((ctx->cifs_acl) && (ctx->dynperm))
3022 cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");
3025 cifs_sb->prepath = kstrdup(ctx->prepath, GFP_KERNEL);
3026 if (cifs_sb->prepath == NULL)
3028 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
3034 /* Release all succeed connections */
/*
 * mount_put_conns - undo mount_get_conns(): drop whichever of tcon / ses /
 * server references were successfully acquired (each put releases its
 * lower layers, hence the else-if chain), clear the POSIX-paths mount flag,
 * and free the xid.  NOTE(review): the tcon test line and the NULL-ing of
 * the mnt_ctx fields are elided in this extraction.
 */
3035 static inline void mount_put_conns(struct mount_ctx *mnt_ctx)
3040 cifs_put_tcon(mnt_ctx->tcon);
3041 else if (mnt_ctx->ses)
3042 cifs_put_smb_ses(mnt_ctx->ses);
3043 else if (mnt_ctx->server)
3044 cifs_put_tcp_session(mnt_ctx->server, 0);
3045 mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
3046 free_xid(mnt_ctx->xid);
3049 /* Get connections for tcp, ses and tcon */
/*
 * mount_get_conns - acquire the three connection layers for a mount and
 * stash them in @mnt_ctx: TCP session (cifs_get_tcp_session), SMB session
 * (cifs_get_smb_ses), and tree connection (cifs_get_tcon).  Also:
 *  - rejects persistent-handle mounts on servers without the capability;
 *  - enables CIFS_MOUNT_POSIX_PATHS for SMB3.11 POSIX-extension tcons;
 *  - (re)negotiates Unix extension caps, bailing out if the server now
 *    demands mandatory transport encryption mid-reconnect;
 *  - queries share attributes (qfs_tcon) to warn about RO/RW cache
 *    mismatches;
 *  - clamps the mount's rsize/wsize to the server-negotiated limits;
 *  - sets up the fscache super cookie when requested.
 * On exit the acquired server/ses/tcon pointers are saved into mnt_ctx so
 * mount_put_conns() can release them.  Error-path lines are partially
 * elided in this extraction.
 */
3050 static int mount_get_conns(struct mount_ctx *mnt_ctx)
3053 struct TCP_Server_Info *server = NULL;
3054 struct cifs_ses *ses = NULL;
3055 struct cifs_tcon *tcon = NULL;
3056 struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3057 struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3062 /* get a reference to a tcp session */
3063 server = cifs_get_tcp_session(ctx, NULL);
3064 if (IS_ERR(server)) {
3065 rc = PTR_ERR(server);
3070 /* get a reference to a SMB session */
3071 ses = cifs_get_smb_ses(server, ctx);
3078 if ((ctx->persistent == true) && (!(ses->server->capabilities &
3079 SMB2_GLOBAL_CAP_PERSISTENT_HANDLES))) {
3080 cifs_server_dbg(VFS, "persistent handles not supported by server\n");
3085 /* search for existing tcon to this server share */
3086 tcon = cifs_get_tcon(ses, ctx);
3093 /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
3094 if (tcon->posix_extensions)
3095 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
3097 /* tell server which Unix caps we support */
3098 if (cap_unix(tcon->ses)) {
3100 * reset of caps checks mount to see if unix extensions disabled
3101 * for just this mount.
3103 reset_cifs_unix_caps(xid, tcon, cifs_sb, ctx);
3104 spin_lock(&cifs_tcp_ses_lock);
3105 if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
3106 (le64_to_cpu(tcon->fsUnixInfo.Capability) &
3107 CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
3108 spin_unlock(&cifs_tcp_ses_lock);
3112 spin_unlock(&cifs_tcp_ses_lock);
3114 tcon->unix_ext = 0; /* server does not support them */
3116 /* do not care if a following call succeed - informational */
3117 if (!tcon->pipe && server->ops->qfs_tcon) {
3118 server->ops->qfs_tcon(xid, tcon, cifs_sb);
3119 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
3120 if (tcon->fsDevInfo.DeviceCharacteristics &
3121 cpu_to_le32(FILE_READ_ONLY_DEVICE))
3122 cifs_dbg(VFS, "mounted to read only share\n");
3123 else if ((cifs_sb->mnt_cifs_flags &
3124 CIFS_MOUNT_RW_CACHE) == 0)
3125 cifs_dbg(VFS, "read only mount of RW share\n");
3126 /* no need to log a RW mount of a typical RW share */
3131 * Clamp the rsize/wsize mount arguments if they are too big for the server
3132 * and set the rsize/wsize to the negotiated values if not passed in by
3135 if ((cifs_sb->ctx->wsize == 0) ||
3136 (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx)))
3137 cifs_sb->ctx->wsize = server->ops->negotiate_wsize(tcon, ctx);
3138 if ((cifs_sb->ctx->rsize == 0) ||
3139 (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
3140 cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
3143 * The cookie is initialized from volume info returned above.
3144 * Inside cifs_fscache_get_super_cookie it checks
3145 * that we do not get super cookie twice.
3147 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
3148 cifs_fscache_get_super_cookie(tcon);
3151 mnt_ctx->server = server;
3153 mnt_ctx->tcon = tcon;
/*
 * mount_setup_tlink - create the master tcon_link for a freshly mounted
 * superblock: allocate it, mark it MASTER and IN_TREE, record the mounting
 * user's uid and the tcon, insert it into the superblock's tlink rbtree,
 * and schedule the periodic tlink pruning work.  Returns 0 on success,
 * -ENOMEM if the tlink allocation fails (failure branch elided in this
 * extraction).
 */
3159 static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
3160 struct cifs_tcon *tcon)
3162 struct tcon_link *tlink;
3164 /* hang the tcon off of the superblock */
3165 tlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
3169 tlink->tl_uid = ses->linux_uid;
3170 tlink->tl_tcon = tcon;
3171 tlink->tl_time = jiffies;
3172 set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
3173 set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
3175 cifs_sb->master_tlink = tlink;
3176 spin_lock(&cifs_sb->tlink_tree_lock);
3177 tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
3178 spin_unlock(&cifs_sb->tlink_tree_lock);
3180 queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
3185 #ifdef CONFIG_CIFS_DFS_UPCALL
3186 /* Get unique dfs connections */
/*
 * mount_get_dfs_conns - like mount_get_conns() but with nosharesock forced
 * on, so each DFS mount gets its own TCP session (required for proper DFS
 * failover).  The resulting server, if any, is flagged is_dfs_conn under
 * cifs_tcp_ses_lock.
 */
3187 static int mount_get_dfs_conns(struct mount_ctx *mnt_ctx)
3191 mnt_ctx->fs_ctx->nosharesock = true;
3192 rc = mount_get_conns(mnt_ctx);
3193 if (mnt_ctx->server) {
3194 cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__);
3195 spin_lock(&cifs_tcp_ses_lock);
3196 mnt_ctx->server->is_dfs_conn = true;
3197 spin_unlock(&cifs_tcp_ses_lock);
3203 * cifs_build_path_to_root returns full path to root when we do not have an
3204 * existing connection (tcon)
/*
 * build_unc_path_to_root - allocate and return "\\server\share[<sep>prepath]"
 * built from @ctx->UNC and, when @useppath is set and a prepath exists,
 * ctx->prepath joined by the superblock's directory separator.  Delimiters
 * are converted to CIFS_DIR_SEP(cifs_sb).  Returns ERR_PTR(-EINVAL) when
 * the UNC exceeds MAX_TREE_SIZE and ERR_PTR(-ENOMEM) on allocation failure;
 * caller frees the result.
 */
3207 build_unc_path_to_root(const struct smb3_fs_context *ctx,
3208 const struct cifs_sb_info *cifs_sb, bool useppath)
3210 char *full_path, *pos;
3211 unsigned int pplen = useppath && ctx->prepath ?
3212 strlen(ctx->prepath) + 1 : 0;
3213 unsigned int unc_len = strnlen(ctx->UNC, MAX_TREE_SIZE + 1);
3215 if (unc_len > MAX_TREE_SIZE)
3216 return ERR_PTR(-EINVAL);
3218 full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
3219 if (full_path == NULL)
3220 return ERR_PTR(-ENOMEM);
3222 memcpy(full_path, ctx->UNC, unc_len);
3223 pos = full_path + unc_len;
3226 *pos = CIFS_DIR_SEP(cifs_sb);
3227 memcpy(pos + 1, ctx->prepath, pplen);
3231 *pos = '\0'; /* add trailing null */
3232 convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
3233 cifs_dbg(FYI, "%s: full_path=%s\n", __func__, full_path);
3238 * expand_dfs_referral - Update cifs_sb from dfs referral path
3240 * cifs_sb->ctx->mount_options will be (re-)allocated to a string containing updated options for the
3241 * submount. Otherwise it will be left untouched.
/*
 * Composes new mount options from the DFS @referral target via
 * cifs_compose_mount_options(), clears the now-stale ctx->prepath, then
 * re-parses the volume info for the referral's device name.  On success the
 * old cifs_sb->ctx->mount_options string is freed and replaced with the new
 * one — callers compare the pointer to detect redirection.
 */
3243 static int expand_dfs_referral(struct mount_ctx *mnt_ctx, const char *full_path,
3244 struct dfs_info3_param *referral)
3247 struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3248 struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3249 char *fake_devname = NULL, *mdata = NULL;
3251 mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options, full_path + 1, referral,
3253 if (IS_ERR(mdata)) {
3254 rc = PTR_ERR(mdata);
3258 * We can not clear out the whole structure since we no longer have an explicit
3259 * function to parse a mount-string. Instead we need to clear out the individual
3260 * fields that are no longer valid.
3262 kfree(ctx->prepath);
3263 ctx->prepath = NULL;
3264 rc = cifs_setup_volume_info(ctx, mdata, fake_devname);
3266 kfree(fake_devname);
3267 kfree(cifs_sb->ctx->mount_options);
3268 cifs_sb->ctx->mount_options = mdata;
3274 /* TODO: all callers to this are broken. We are not parsing mount_options here
3275 * we should pass a clone of the original context?
/*
 * cifs_setup_volume_info - populate @ctx from a device name and (optionally)
 * an "ip=" override found in @mntopts: parses the UNC/devname via
 * smb3_parse_devname(), converts any explicit ip option into ctx->dstaddr
 * with cifs_convert_address(), and normalises authentication — nullauth
 * drops the username (anonymous login), a present username is kept, and a
 * missing one is reported (username may also be supplied later by the
 * userspace mount helper).
 */
3278 cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname)
3283 cifs_dbg(FYI, "%s: devname=%s\n", __func__, devname);
3284 rc = smb3_parse_devname(devname, ctx);
3286 cifs_dbg(VFS, "%s: failed to parse %s: %d\n", __func__, devname, rc);
3294 rc = smb3_parse_opt(mntopts, "ip", &ip);
3296 cifs_dbg(VFS, "%s: failed to parse ip options: %d\n", __func__, rc);
3300 rc = cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip, strlen(ip));
3303 cifs_dbg(VFS, "%s: failed to convert ip address\n", __func__);
3308 if (ctx->nullauth) {
3309 cifs_dbg(FYI, "Anonymous login\n");
3310 kfree(ctx->username);
3311 ctx->username = NULL;
3312 } else if (ctx->username) {
3313 /* BB fixme parse for domain name here */
3314 cifs_dbg(FYI, "Username: %s\n", ctx->username);
3316 cifs_dbg(VFS, "No username specified\n");
3317 /* In userspace mount helper we can get user name from alternate
3318 locations such as env variables and files on disk */
/*
 * cifs_are_all_path_components_accessible - probe every intermediate
 * directory of @full_path with server->ops->is_path_accessible(), starting
 * from the share root, by temporarily null-terminating the path at each
 * separator.  When @added_treename is set the first component (the tree
 * name itself) is skipped.  Used to decide whether the prefix path can be
 * resolved component-by-component.
 * NOTE(review): loop scaffolding, the terminator save/restore and returns
 * are elided in this extraction.
 */
3326 cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
3328 struct cifs_tcon *tcon,
3329 struct cifs_sb_info *cifs_sb,
3336 int skip = added_treename ? 1 : 0;
3338 sep = CIFS_DIR_SEP(cifs_sb);
3341 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
3343 /* skip separators */
3348 /* next separator */
3349 while (*s && *s != sep)
3352 * if the treename is added, we then have to skip the first
3353 * part within the separators
3360 * temporarily null-terminate the path at the end of
3361 * the current component
3365 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
3373 * Check if path is remote (e.g. a DFS share). Return -EREMOTE if it is,
/*
 * is_path_remote - build the full path to the mount root and ask the server
 * whether it is accessible through the current tcon.  -EREMOTE means the
 * path resolves to another server (DFS link).  With DFS upcall support, an
 * -ENOENT on a DFS tcon is re-checked via the non-ASCII quirk helper.  When
 * the root itself is reachable, additionally verifies every intermediate
 * path component; if that fails, CIFS_MOUNT_USE_PREFIX_PATH is forced on.
 */
3376 static int is_path_remote(struct mount_ctx *mnt_ctx)
3379 struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3380 struct TCP_Server_Info *server = mnt_ctx->server;
3381 unsigned int xid = mnt_ctx->xid;
3382 struct cifs_tcon *tcon = mnt_ctx->tcon;
3383 struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3386 if (!server->ops->is_path_accessible)
3390 * cifs_build_path_to_root works only when we have a valid tcon
3392 full_path = cifs_build_path_to_root(ctx, cifs_sb, tcon,
3393 tcon->Flags & SMB_SHARE_IS_IN_DFS);
3394 if (full_path == NULL)
3397 cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
3399 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
3401 #ifdef CONFIG_CIFS_DFS_UPCALL
3402 if (rc == -ENOENT && is_tcon_dfs(tcon))
3403 rc = cifs_dfs_query_info_nonascii_quirk(xid, tcon, cifs_sb,
3406 if (rc != 0 && rc != -EREMOTE) {
3411 if (rc != -EREMOTE) {
3412 rc = cifs_are_all_path_components_accessible(server, xid, tcon,
3413 cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS);
3415 cifs_server_dbg(VFS, "cannot query dirs between root and final path, enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
3416 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
3425 #ifdef CONFIG_CIFS_DFS_UPCALL
/*
 * set_root_ses - remember the current SMB session as the DFS referral
 * server session: bump its refcount under cifs_tcp_ses_lock, register it
 * with the DFS cache for this mount_id, and store it in mnt_ctx->root_ses
 * for later referral lookups.
 */
3426 static void set_root_ses(struct mount_ctx *mnt_ctx)
3429 spin_lock(&cifs_tcp_ses_lock);
3430 mnt_ctx->ses->ses_count++;
3431 spin_unlock(&cifs_tcp_ses_lock);
3432 dfs_cache_add_refsrv_session(&mnt_ctx->mount_id, mnt_ctx->ses);
3434 mnt_ctx->root_ses = mnt_ctx->ses;
/*
 * is_dfs_mount - perform the initial connection attempt and decide whether
 * this mount is a DFS mount.  After mount_get_conns(), a DFS referral
 * lookup (dfs_cache_find, skipped when mounted with 'nodfs') determines
 * *isdfs; when not DFS, the path is checked for full accessibility with
 * is_path_remote() before mounting directly.  @root_tl receives the DFS
 * root target list for the caller.
 * NOTE(review): the *isdfs assignments and several return lines are elided
 * in this extraction.
 */
3437 static int is_dfs_mount(struct mount_ctx *mnt_ctx, bool *isdfs, struct dfs_cache_tgt_list *root_tl)
3440 struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3441 struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3445 rc = mount_get_conns(mnt_ctx);
3447 * If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally
3448 * try to get an DFS referral (even cached) to determine whether it is an DFS mount.
3450 * Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem
3451 * to respond with PATH_NOT_COVERED to requests that include the prefix.
3453 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
3454 dfs_cache_find(mnt_ctx->xid, mnt_ctx->ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
3455 ctx->UNC + 1, NULL, root_tl)) {
3458 /* Check if it is fully accessible and then mount it */
3459 rc = is_path_remote(mnt_ctx);
3462 else if (rc != -EREMOTE)
/*
 * connect_dfs_target - try one DFS target (@tit) of referral @ref_path:
 * fetch the target's referral info, expand it into new mount options, and
 * — only if the mount options actually changed, i.e. we were redirected —
 * tear down the old connections and reconnect with unique DFS connections.
 * A referral-capable target also becomes the new root session, and the DFS
 * cache target hint is updated.  The referral info is freed before return.
 */
3468 static int connect_dfs_target(struct mount_ctx *mnt_ctx, const char *full_path,
3469 const char *ref_path, struct dfs_cache_tgt_iterator *tit)
3472 struct dfs_info3_param ref = {};
3473 struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3474 char *oldmnt = cifs_sb->ctx->mount_options;
3476 rc = dfs_cache_get_tgt_referral(ref_path, tit, &ref);
3480 rc = expand_dfs_referral(mnt_ctx, full_path, &ref);
3484 /* Connect to new target only if we were redirected (e.g. mount options changed) */
3485 if (oldmnt != cifs_sb->ctx->mount_options) {
3486 mount_put_conns(mnt_ctx);
3487 rc = mount_get_dfs_conns(mnt_ctx);
3490 if (cifs_is_referral_server(mnt_ctx->tcon, &ref))
3491 set_root_ses(mnt_ctx);
3492 rc = dfs_cache_update_tgthint(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
3493 cifs_remap(cifs_sb), ref_path, tit);
3497 free_dfs_info_param(&ref);
/*
 * connect_dfs_root - connect to the DFS root of the mount: replace the
 * possibly-shared initial connections with unique per-mount DFS
 * connections, record the canonical origin full path, then iterate over
 * all root targets in @root_tl until connect_dfs_target() succeeds; the
 * first success also seeds leaf_fullpath from origin_fullpath.
 * NOTE(review): the full_path free and final return lines are elided in
 * this extraction.
 */
3501 static int connect_dfs_root(struct mount_ctx *mnt_ctx, struct dfs_cache_tgt_list *root_tl)
3505 struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3506 struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3507 struct dfs_cache_tgt_iterator *tit;
3509 /* Put initial connections as they might be shared with other mounts. We need unique dfs
3510 * connections per mount to properly failover, so mount_get_dfs_conns() must be used from
3513 mount_put_conns(mnt_ctx);
3514 mount_get_dfs_conns(mnt_ctx);
3515 set_root_ses(mnt_ctx);
3517 full_path = build_unc_path_to_root(ctx, cifs_sb, true);
3518 if (IS_ERR(full_path))
3519 return PTR_ERR(full_path);
3521 mnt_ctx->origin_fullpath = dfs_cache_canonical_path(ctx->UNC, cifs_sb->local_nls,
3522 cifs_remap(cifs_sb));
3523 if (IS_ERR(mnt_ctx->origin_fullpath)) {
3524 rc = PTR_ERR(mnt_ctx->origin_fullpath);
3525 mnt_ctx->origin_fullpath = NULL;
3529 /* Try all dfs root targets */
3530 for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(root_tl);
3531 tit; tit = dfs_cache_get_next_tgt(root_tl, tit)) {
3532 rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->origin_fullpath + 1, tit);
3534 mnt_ctx->leaf_fullpath = kstrdup(mnt_ctx->origin_fullpath, GFP_KERNEL);
3535 if (!mnt_ctx->leaf_fullpath)
/*
 * __follow_dfs_link - resolve one level of DFS link: recompute the
 * canonical leaf full path for the current target, look up its referral
 * through the root session, and try each link target with
 * connect_dfs_target() followed by an accessibility re-check via
 * is_path_remote() (which may again return -EREMOTE for a nested link).
 * The target list and full_path are released on exit (free lines partially
 * elided in this extraction).
 */
3546 static int __follow_dfs_link(struct mount_ctx *mnt_ctx)
3549 struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3550 struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3552 struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
3553 struct dfs_cache_tgt_iterator *tit;
3555 full_path = build_unc_path_to_root(ctx, cifs_sb, true);
3556 if (IS_ERR(full_path))
3557 return PTR_ERR(full_path);
3559 kfree(mnt_ctx->leaf_fullpath);
3560 mnt_ctx->leaf_fullpath = dfs_cache_canonical_path(full_path, cifs_sb->local_nls,
3561 cifs_remap(cifs_sb));
3562 if (IS_ERR(mnt_ctx->leaf_fullpath)) {
3563 rc = PTR_ERR(mnt_ctx->leaf_fullpath);
3564 mnt_ctx->leaf_fullpath = NULL;
3568 /* Get referral from dfs link */
3569 rc = dfs_cache_find(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
3570 cifs_remap(cifs_sb), mnt_ctx->leaf_fullpath + 1, NULL, &tl);
3574 /* Try all dfs link targets */
3575 for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(&tl);
3576 tit; tit = dfs_cache_get_next_tgt(&tl, tit)) {
3577 rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->leaf_fullpath + 1, tit);
3579 rc = is_path_remote(mnt_ctx);
3586 dfs_cache_free_tgts(&tl);
/*
 * follow_dfs_link - chase nested DFS links: record the canonical origin
 * full path, then repeatedly call __follow_dfs_link() while it keeps
 * returning -EREMOTE, giving up with -ELOOP after MAX_NESTED_LINKS
 * iterations to guard against referral cycles.
 */
3590 static int follow_dfs_link(struct mount_ctx *mnt_ctx)
3593 struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3594 struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3598 full_path = build_unc_path_to_root(ctx, cifs_sb, true);
3599 if (IS_ERR(full_path))
3600 return PTR_ERR(full_path);
3602 kfree(mnt_ctx->origin_fullpath);
3603 mnt_ctx->origin_fullpath = dfs_cache_canonical_path(full_path, cifs_sb->local_nls,
3604 cifs_remap(cifs_sb));
3607 if (IS_ERR(mnt_ctx->origin_fullpath)) {
3608 rc = PTR_ERR(mnt_ctx->origin_fullpath);
3609 mnt_ctx->origin_fullpath = NULL;
3614 rc = __follow_dfs_link(mnt_ctx);
3615 if (!rc || rc != -EREMOTE)
3617 } while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS);
3622 /* Set up DFS referral paths for failover */
/*
 * setup_server_referral_paths - transfer ownership of the origin and leaf
 * DFS full paths from the mount context to the TCP server (current path =
 * leaf), then NULL the mount-context pointers so the error path does not
 * double-free them.
 */
3623 static void setup_server_referral_paths(struct mount_ctx *mnt_ctx)
3625 struct TCP_Server_Info *server = mnt_ctx->server;
3627 server->origin_fullpath = mnt_ctx->origin_fullpath;
3628 server->leaf_fullpath = mnt_ctx->leaf_fullpath;
3629 server->current_fullpath = mnt_ctx->leaf_fullpath;
3630 mnt_ctx->origin_fullpath = mnt_ctx->leaf_fullpath = NULL;
/*
 * cifs_mount - top-level mount entry (CONFIG_CIFS_DFS_UPCALL variant).
 * Determines via is_dfs_mount() whether this is a DFS mount; if so,
 * generates a mount_id, connects the DFS root, and follows any DFS links
 * reported by is_path_remote().  On success it hands the referral paths to
 * the server, disables serverino (unique ids won't survive failover),
 * forces the prefix path, records the mount_id on the superblock, tries to
 * add multichannel sessions, and hangs the master tlink off the
 * superblock.  The error path releases DFS sessions, referral paths and
 * connections.  NOTE(review): several branch/label lines are elided in
 * this extraction.
 */
3633 int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
3636 struct mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
3637 struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
3640 rc = is_dfs_mount(&mnt_ctx, &isdfs, &tl);
3646 uuid_gen(&mnt_ctx.mount_id);
3647 rc = connect_dfs_root(&mnt_ctx, &tl);
3648 dfs_cache_free_tgts(&tl);
3653 rc = is_path_remote(&mnt_ctx);
3655 rc = follow_dfs_link(&mnt_ctx);
3659 setup_server_referral_paths(&mnt_ctx);
3661 * After reconnecting to a different server, unique ids won't match anymore, so we disable
3662 * serverino. This prevents dentry revalidation to think the dentry are stale (ESTALE).
3664 cifs_autodisable_serverino(cifs_sb);
3666 * Force the use of prefix path to support failover on DFS paths that resolve to targets
3667 * that have different prefix paths.
3669 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
3670 kfree(cifs_sb->prepath);
3671 cifs_sb->prepath = ctx->prepath;
3672 ctx->prepath = NULL;
3673 uuid_copy(&cifs_sb->dfs_mount_id, &mnt_ctx.mount_id);
3676 free_xid(mnt_ctx.xid);
3677 cifs_try_adding_channels(cifs_sb, mnt_ctx.ses);
3678 return mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
3681 dfs_cache_put_refsrv_sessions(&mnt_ctx.mount_id);
3682 kfree(mnt_ctx.origin_fullpath);
3683 kfree(mnt_ctx.leaf_fullpath);
3684 mount_put_conns(&mnt_ctx);
/*
 * cifs_mount - top-level mount entry (non-DFS build, the #else branch of
 * CONFIG_CIFS_DFS_UPCALL): acquire connections, verify the path is not a
 * DFS referral (is_path_remote), then set up the master tlink.  Error
 * paths release the connections.  NOTE(review): the error-branch lines
 * between steps are elided in this extraction.
 */
3688 int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
3691 struct mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
3693 rc = mount_get_conns(&mnt_ctx);
3698 rc = is_path_remote(&mnt_ctx);
3705 free_xid(mnt_ctx.xid);
3706 return mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
3709 mount_put_conns(&mnt_ctx);
3715 * Issue a TREE_CONNECT request.
/*
 * CIFSTCon - legacy SMB1 TREE_CONNECT_ANDX: build the request in a cifs
 * buffer (null or real password depending on share/user security mode,
 * tree name in UTF-16 or ASCII depending on CAP_UNICODE, service "?????"),
 * send it with SendReceive(), then parse the response: tid, service string
 * (IPC vs disk share), treeName copy, the server's native filesystem name,
 * and the OptionalSupport flags.  The request buffer is released at the
 * end.  NOTE(review): rc/length assignments and some branch lines are
 * elided in this extraction.
 */
3718 CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
3719 const char *tree, struct cifs_tcon *tcon,
3720 const struct nls_table *nls_codepage)
3722 struct smb_hdr *smb_buffer;
3723 struct smb_hdr *smb_buffer_response;
3726 unsigned char *bcc_ptr;
3729 __u16 bytes_left, count;
3734 smb_buffer = cifs_buf_get();
3735 if (smb_buffer == NULL)
3738 smb_buffer_response = smb_buffer;
3740 header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
3741 NULL /*no tid */ , 4 /*wct */ );
3743 smb_buffer->Mid = get_next_mid(ses->server);
3744 smb_buffer->Uid = ses->Suid;
3745 pSMB = (TCONX_REQ *) smb_buffer;
3746 pSMBr = (TCONX_RSP *) smb_buffer_response;
3748 pSMB->AndXCommand = 0xFF;
3749 pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
3750 bcc_ptr = &pSMB->Password[0];
3751 if (tcon->pipe || (ses->server->sec_mode & SECMODE_USER)) {
3752 pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
3753 *bcc_ptr = 0; /* password is null byte */
3754 bcc_ptr++; /* skip password */
3755 /* already aligned so no need to do it below */
3758 if (ses->server->sign)
3759 smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
3761 if (ses->capabilities & CAP_STATUS32) {
3762 smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
3764 if (ses->capabilities & CAP_DFS) {
3765 smb_buffer->Flags2 |= SMBFLG2_DFS;
3767 if (ses->capabilities & CAP_UNICODE) {
3768 smb_buffer->Flags2 |= SMBFLG2_UNICODE;
3770 cifs_strtoUTF16((__le16 *) bcc_ptr, tree,
3771 6 /* max utf8 char length in bytes */ *
3772 (/* server len*/ + 256 /* share len */), nls_codepage);
3773 bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */
3774 bcc_ptr += 2; /* skip trailing null */
3775 } else { /* ASCII */
3776 strcpy(bcc_ptr, tree);
3777 bcc_ptr += strlen(tree) + 1;
3779 strcpy(bcc_ptr, "?????");
3780 bcc_ptr += strlen("?????");
3782 count = bcc_ptr - &pSMB->Password[0];
3783 be32_add_cpu(&pSMB->hdr.smb_buf_length, count);
3784 pSMB->ByteCount = cpu_to_le16(count);
3786 rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
3789 /* above now done in SendReceive */
3793 tcon->tid = smb_buffer_response->Tid;
3794 bcc_ptr = pByteArea(smb_buffer_response);
3795 bytes_left = get_bcc(smb_buffer_response);
3796 length = strnlen(bcc_ptr, bytes_left - 2);
3797 if (smb_buffer->Flags2 & SMBFLG2_UNICODE)
3803 /* skip service field (NB: this field is always ASCII) */
3805 if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') &&
3806 (bcc_ptr[2] == 'C')) {
3807 cifs_dbg(FYI, "IPC connection\n");
3811 } else if (length == 2) {
3812 if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) {
3813 /* the most common case */
3814 cifs_dbg(FYI, "disk share connection\n");
3817 bcc_ptr += length + 1;
3818 bytes_left -= (length + 1);
3819 strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
3821 /* mostly informational -- no need to fail on error here */
3822 kfree(tcon->nativeFileSystem);
3823 tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr,
3824 bytes_left, is_unicode,
3827 cifs_dbg(FYI, "nativeFileSystem=%s\n", tcon->nativeFileSystem);
3829 if ((smb_buffer_response->WordCount == 3) ||
3830 (smb_buffer_response->WordCount == 7))
3831 /* field is in same location */
3832 tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
3835 cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags);
3838 cifs_buf_release(smb_buffer);
/*
 * RCU callback used to tear down a cifs_sb_info once all RCU readers are
 * done with it.  Queued via call_rcu(&cifs_sb->rcu, delayed_free) from
 * cifs_umount(); releases the NLS table and the smb3 fs context.
 */
3842 static void delayed_free(struct rcu_head *p)
3844 	struct cifs_sb_info *cifs_sb = container_of(p, struct cifs_sb_info, rcu);
3846 	unload_nls(cifs_sb->local_nls);
3847 	smb3_cleanup_fs_context(cifs_sb->ctx);
/*
 * Tear down a CIFS superblock: cancel the tlink pruning worker, drop every
 * tcon_link from the tlink tree, release the prepath and (with DFS upcall
 * support) the referral-server sessions, then free the cifs_sb itself via
 * an RCU grace period (delayed_free).
 */
3852 cifs_umount(struct cifs_sb_info *cifs_sb)
3854 struct rb_root *root = &cifs_sb->tlink_tree;
3855 struct rb_node *node;
3856 struct tcon_link *tlink;
/* stop the periodic prune job before walking/destroying the tree */
3858 cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
3860 spin_lock(&cifs_sb->tlink_tree_lock);
3861 while ((node = rb_first(root))) {
3862 tlink = rb_entry(node, struct tcon_link, tl_rbnode);
/* take a reference so the tlink survives until our put below */
3863 cifs_get_tlink(tlink);
3864 clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
3865 rb_erase(node, root);
/*
 * cifs_put_tlink() may sleep (it can send network traffic to tear down
 * the tcon), so the spinlock is dropped around it and retaken for the
 * next tree iteration.
 */
3867 spin_unlock(&cifs_sb->tlink_tree_lock);
3868 cifs_put_tlink(tlink);
3869 spin_lock(&cifs_sb->tlink_tree_lock);
3871 spin_unlock(&cifs_sb->tlink_tree_lock);
3873 kfree(cifs_sb->prepath);
3874 #ifdef CONFIG_CIFS_DFS_UPCALL
3875 dfs_cache_put_refsrv_sessions(&cifs_sb->dfs_mount_id);
/* defer the final free until concurrent RCU readers are done */
3877 call_rcu(&cifs_sb->rcu, delayed_free);
/*
 * Run the SMB protocol negotiation for @server on behalf of @ses, at most
 * once per connection.  tcpStatus transitions (all under cifs_tcp_ses_lock):
 * CifsNeedNegotiate -> CifsInNegotiate, then on success -> CifsNeedSessSetup,
 * or back to CifsNeedNegotiate on failure so a later caller can retry.
 */
3881 cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
3882 struct TCP_Server_Info *server)
/* dialects without negotiation hooks have nothing to do here */
3886 if (!server->ops->need_neg || !server->ops->negotiate)
3889 /* only send once per connect */
3890 spin_lock(&cifs_tcp_ses_lock)
3891 if (!server->ops->need_neg(server) ||
3892 server->tcpStatus != CifsNeedNegotiate) {
3893 spin_unlock(&cifs_tcp_ses_lock);
/* claim the negotiation so concurrent callers bail out above */
3896 server->tcpStatus = CifsInNegotiate;
3897 spin_unlock(&cifs_tcp_ses_lock);
3899 rc = server->ops->negotiate(xid, ses, server);
/*
 * Only advance/roll back the state if nothing else (e.g. a reconnect)
 * changed tcpStatus while the lock was dropped for the network call.
 */
3901 spin_lock(&cifs_tcp_ses_lock);
3902 if (server->tcpStatus == CifsInNegotiate)
3903 server->tcpStatus = CifsNeedSessSetup;
3906 spin_unlock(&cifs_tcp_ses_lock);
/* failure path: allow negotiation to be retried later */
3908 spin_lock(&cifs_tcp_ses_lock);
3909 if (server->tcpStatus == CifsInNegotiate)
3910 server->tcpStatus = CifsNeedNegotiate;
3911 spin_unlock(&cifs_tcp_ses_lock);
/*
 * Perform SMB session setup (authentication) for @ses over @server, at most
 * once per connection.  On success the server moves CifsInSessSetup ->
 * CifsGood and the channel's need-reconnect bit is cleared; on failure the
 * state is rolled back to CifsNeedSessSetup so setup can be retried.
 */
3918 cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
3919 struct TCP_Server_Info *server,
3920 struct nls_table *nls_info)
3923 bool is_binding = false;
3925 /* only send once per connect */
3926 spin_lock(&cifs_tcp_ses_lock);
3927 if (server->tcpStatus != CifsNeedSessSetup) {
3928 spin_unlock(&cifs_tcp_ses_lock);
3931 server->tcpStatus = CifsInSessSetup;
3932 spin_unlock(&cifs_tcp_ses_lock);
/*
 * If not every channel needs reconnect, the session already exists and
 * this setup is binding an additional channel to it (multichannel).
 */
3934 spin_lock(&ses->chan_lock);
3935 is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
3936 spin_unlock(&ses->chan_lock);
3939 ses->capabilities = server->capabilities;
/* mask out UNIX extension capability when disabled by module param */
3940 if (!linuxExtEnabled)
3941 ses->capabilities &= (~server->vals->cap_unix);
/* discard any auth key left over from a previous (re)connect */
3943 if (ses->auth_key.response) {
3944 cifs_dbg(FYI, "Free previous auth_key.response = %p\n",
3945 ses->auth_key.response);
3946 kfree(ses->auth_key.response);
3947 ses->auth_key.response = NULL;
3948 ses->auth_key.len = 0;
3952 cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
3953 server->sec_mode, server->capabilities, server->timeAdj);
3955 if (server->ops->sess_setup)
3956 rc = server->ops->sess_setup(xid, ses, server, nls_info);
/* failure: report and make session setup retryable */
3959 cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
3960 spin_lock(&cifs_tcp_ses_lock);
3961 if (server->tcpStatus == CifsInSessSetup)
3962 server->tcpStatus = CifsNeedSessSetup;
3963 spin_unlock(&cifs_tcp_ses_lock);
3965 spin_lock(&cifs_tcp_ses_lock);
3966 if (server->tcpStatus == CifsInSessSetup)
3967 server->tcpStatus = CifsGood;
3968 /* Even if one channel is active, session is in good state */
3969 ses->status = CifsGood;
3970 spin_unlock(&cifs_tcp_ses_lock);
/* this channel is now established; clear its reconnect-needed flag */
3972 spin_lock(&ses->chan_lock);
3973 cifs_chan_clear_need_reconnect(ses, server);
3974 spin_unlock(&ses->chan_lock);
/*
 * Copy authentication parameters from an existing session into a new fs
 * context (used when constructing a per-user tcon for multiuser mounts).
 * Kerberos needs no stored credentials; everything else fetches them via
 * cifs_set_cifscreds().
 */
3981 cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
3983 ctx->sectype = ses->sectype;
3985 /* krb5 is special, since we don't need username or pw */
3986 if (ctx->sectype == Kerberos)
3989 return cifs_set_cifscreds(ctx, ses);
/*
 * Build a new tcon for @fsuid on a multiuser mount by cloning the master
 * tcon's mount options into a temporary fs context, reusing the master's
 * TCP session, and establishing a fresh SMB session/tree connection with
 * that user's credentials.  Returns the tcon or an ERR_PTR.
 */
3992 static struct cifs_tcon *
3993 cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
3996 struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
3997 struct cifs_ses *ses;
3998 struct cifs_tcon *tcon = NULL;
3999 struct smb3_fs_context *ctx;
4001 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
4003 return ERR_PTR(-ENOMEM);
/* mirror the master tcon's mount-time options into the new context */
4005 ctx->local_nls = cifs_sb->local_nls;
4006 ctx->linux_uid = fsuid;
4007 ctx->cred_uid = fsuid;
4008 ctx->UNC = master_tcon->treeName;
4009 ctx->retry = master_tcon->retry;
4010 ctx->nocase = master_tcon->nocase;
4011 ctx->nohandlecache = master_tcon->nohandlecache;
4012 ctx->local_lease = master_tcon->local_lease;
4013 ctx->no_lease = master_tcon->no_lease;
4014 ctx->resilient = master_tcon->use_resilient;
4015 ctx->persistent = master_tcon->use_persistent;
4016 ctx->handle_timeout = master_tcon->handle_timeout;
4017 ctx->no_linux_ext = !master_tcon->unix_ext;
4018 ctx->linux_ext = master_tcon->posix_extensions;
4019 ctx->sectype = master_tcon->ses->sectype;
4020 ctx->sign = master_tcon->ses->sign;
4021 ctx->seal = master_tcon->seal;
4022 ctx->witness = master_tcon->use_witness;
/* fetch the credentials of the requesting user into ctx */
4024 rc = cifs_set_vol_auth(ctx, master_tcon->ses);
4030 /* get a reference for the same TCP session */
4031 spin_lock(&cifs_tcp_ses_lock);
4032 ++master_tcon->ses->server->srv_count;
4033 spin_unlock(&cifs_tcp_ses_lock);
4035 ses = cifs_get_smb_ses(master_tcon->ses->server, ctx);
/* on ses failure, propagate the error and drop the extra srv ref */
4037 tcon = (struct cifs_tcon *)ses;
4038 cifs_put_tcp_session(master_tcon->ses->server, 0);
4042 tcon = cifs_get_tcon(ses, ctx);
4044 cifs_put_smb_ses(ses);
/* best-effort probe of UNIX capabilities for the new tcon */
4049 reset_cifs_unix_caps(0, tcon, NULL, ctx);
4052 kfree(ctx->username);
/* password is sensitive; scrub it before freeing */
4053 kfree_sensitive(ctx->password);
/* Return the tcon behind the superblock's master tlink (mount-time tcon). */
4060 cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
4062 return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
4065 /* find and return a tlink with given uid */
/*
 * Standard rbtree lookup keyed on tl_uid.  Caller must hold
 * tlink_tree_lock; returns the matching tcon_link or NULL.
 */
4066 static struct tcon_link *
4067 tlink_rb_search(struct rb_root *root, kuid_t uid)
4069 struct rb_node *node = root->rb_node;
4070 struct tcon_link *tlink;
4073 tlink = rb_entry(node, struct tcon_link, tl_rbnode);
/* descend left for smaller uids, right for larger */
4075 if (uid_gt(tlink->tl_uid, uid))
4076 node = node->rb_left;
4077 else if (uid_lt(tlink->tl_uid, uid))
4078 node = node->rb_right;
4085 /* insert a tcon_link into the tree */
/*
 * Standard rbtree insertion keyed on tl_uid.  Caller must hold
 * tlink_tree_lock and must have verified no entry with this uid exists.
 */
4087 tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
4089 struct rb_node **new = &(root->rb_node), *parent = NULL;
4090 struct tcon_link *tlink;
4093 tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
/* walk down to the leaf slot where the new uid belongs */
4096 if (uid_gt(tlink->tl_uid, new_tlink->tl_uid))
4097 new = &((*new)->rb_left);
4099 new = &((*new)->rb_right);
/* link in the new node and rebalance */
4102 rb_link_node(&new_tlink->tl_rbnode, parent, new);
4103 rb_insert_color(&new_tlink->tl_rbnode, root);
4107 * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
4110 * If the superblock doesn't refer to a multiuser mount, then just return
4111 * the master tcon for the mount.
4113 * First, search the rbtree for an existing tcon for this fsuid. If one
4114 * exists, then check to see if it's pending construction. If it is then wait
4115 * for construction to complete. Once it's no longer pending, check to see if
4116 * it failed and either return an error or retry construction, depending on
4119 * If one doesn't exist then insert a new tcon_link struct into the tree and
4120 * try to construct a new one.
4123 cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
4126 kuid_t fsuid = current_fsuid();
4127 struct tcon_link *tlink, *newtlink;
/* non-multiuser mounts always use the single master tlink */
4129 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
4130 return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
4132 spin_lock(&cifs_sb->tlink_tree_lock);
4133 tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
4135 cifs_get_tlink(tlink);
4136 spin_unlock(&cifs_sb->tlink_tree_lock);
4138 if (tlink == NULL) {
/* allocate and pre-mark a pending entry before retaking the lock */
4139 newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
4140 if (newtlink == NULL)
4141 return ERR_PTR(-ENOMEM);
4142 newtlink->tl_uid = fsuid;
4143 newtlink->tl_tcon = ERR_PTR(-EACCES);
4144 set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
4145 set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
4146 cifs_get_tlink(newtlink);
4148 spin_lock(&cifs_sb->tlink_tree_lock);
4149 /* was one inserted after previous search? */
4150 tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
4152 cifs_get_tlink(tlink);
4153 spin_unlock(&cifs_sb->tlink_tree_lock);
/* lost the race: use the other thread's entry (ours is freed on the
 * elided path here) and wait for it to finish construction */
4155 goto wait_for_construction;
4158 tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
4159 spin_unlock(&cifs_sb->tlink_tree_lock);
4161 wait_for_construction:
/* sleep until whoever owns PENDING clears it (interruptible) */
4162 ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
4163 TASK_INTERRUPTIBLE);
4165 cifs_put_tlink(tlink);
4166 return ERR_PTR(-ERESTARTSYS);
4169 /* if it's good, return it */
4170 if (!IS_ERR(tlink->tl_tcon))
4173 /* return error if we tried this already recently */
4174 if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
4175 cifs_put_tlink(tlink);
4176 return ERR_PTR(-EACCES);
/* someone else grabbed PENDING first; go back and wait for them */
4179 if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
4180 goto wait_for_construction;
/* we own PENDING: (re)construct the tcon, then wake any waiters */
4183 tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
4184 clear_bit(TCON_LINK_PENDING, &tlink->tl_flags);
4185 wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);
4187 if (IS_ERR(tlink->tl_tcon)) {
4188 cifs_put_tlink(tlink);
4189 return ERR_PTR(-EACCES);
4196 * periodic workqueue job that scans tcon_tree for a superblock and closes
/*
 * ...idle tcon_links.  A tlink is pruned when it is not the master link,
 * has no active references, and has been idle longer than
 * TLINK_IDLE_EXPIRE.  Requeues itself at the end of each pass.
 */
4200 cifs_prune_tlinks(struct work_struct *work)
4202 struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
4204 struct rb_root *root = &cifs_sb->tlink_tree;
4205 struct rb_node *node;
4206 struct rb_node *tmp;
4207 struct tcon_link *tlink;
4210 * Because we drop the spinlock in the loop in order to put the tlink
4211 * it's not guarded against removal of links from the tree. The only
4212 * places that remove entries from the tree are this function and
4213 * umounts. Because this function is non-reentrant and is canceled
4214 * before umount can proceed, this is safe.
4216 spin_lock(&cifs_sb->tlink_tree_lock);
4217 node = rb_first(root);
4218 while (node != NULL) {
/* advance before possibly erasing the current node */
4220 node = rb_next(tmp);
4221 tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
/* skip the master link, in-use links, and recently-used links */
4223 if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
4224 atomic_read(&tlink->tl_count) != 0 ||
4225 time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
4228 cifs_get_tlink(tlink);
4229 clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
4230 rb_erase(tmp, root);
/* drop the lock to put the tlink (may sleep), then resume scan */
4232 spin_unlock(&cifs_sb->tlink_tree_lock);
4233 cifs_put_tlink(tlink);
4234 spin_lock(&cifs_sb->tlink_tree_lock);
4236 spin_unlock(&cifs_sb->tlink_tree_lock);
/* reschedule the next pruning pass */
4238 queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
4242 #ifdef CONFIG_CIFS_DFS_UPCALL
4243 /* Update dfs referral path of superblock */
/*
 * Normalize @target into a canonical DFS referral path (ensuring a leading
 * "\\") and install it as the server's leaf_fullpath / current_fullpath
 * under refpath_lock.
 */
4244 static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb_info *cifs_sb,
4248 size_t len = strlen(target);
4249 char *refpath, *npath;
/* a valid target must start with '\' and have at least 2 chars */
4251 if (unlikely(len < 2 || *target != '\\'))
4254 if (target[1] == '\\') {
/* already "\\server\share" form; copy as-is
 * NOTE(review): a len adjustment for the NUL appears on an elided
 * line just above this kmalloc — confirm against the full source */
4256 refpath = kmalloc(len, GFP_KERNEL);
4260 scnprintf(refpath, len, "%s", target);
/* single leading '\': prepend one more to canonicalize */
4262 len += sizeof("\\");
4263 refpath = kmalloc(len, GFP_KERNEL);
4267 scnprintf(refpath, len, "\\%s", target);
4270 npath = dfs_cache_canonical_path(refpath, cifs_sb->local_nls, cifs_remap(cifs_sb));
4273 if (IS_ERR(npath)) {
4274 rc = PTR_ERR(npath);
/* swap in the new leaf path atomically w.r.t. other referral users */
4276 mutex_lock(&server->refpath_lock);
4277 kfree(server->leaf_fullpath);
4278 server->leaf_fullpath = npath;
4279 mutex_unlock(&server->refpath_lock);
4280 server->current_fullpath = server->leaf_fullpath;
/*
 * Decide whether a DFS target share refers to the server we are already
 * connected to.  First compares hostnames (case-insensitive); if they
 * differ, falls back to comparing resolved IP addresses via
 * match_target_ip().  Result is written to *target_match.
 */
4285 static int target_share_matches_server(struct TCP_Server_Info *server, const char *tcp_host,
4286 size_t tcp_host_len, char *share, bool *target_match)
4289 const char *dfs_host;
4290 size_t dfs_host_len;
/* optimistic default; only cleared if the IP comparison disagrees */
4292 *target_match = true;
4293 extract_unc_hostname(share, &dfs_host, &dfs_host_len);
4295 /* Check if hostnames or addresses match */
4296 if (dfs_host_len != tcp_host_len || strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) {
4297 cifs_dbg(FYI, "%s: %.*s doesn't match %.*s\n", __func__, (int)dfs_host_len,
4298 dfs_host, (int)tcp_host_len, tcp_host);
4299 rc = match_target_ip(server, dfs_host, dfs_host_len, target_match);
4301 cifs_dbg(VFS, "%s: failed to match target ip: %d\n", __func__, rc);
/*
 * Try to tree connect @tcon to each target in the DFS target list @tl.
 * For every target that resolves to the current server: reconnect the IPC
 * share if needed, issue the TREE_CONNECT, and then either finish (plain
 * share) or chase a nested DFS link by caching its referral and switching
 * the server's full path.  Used only under CONFIG_CIFS_DFS_UPCALL.
 */
4306 static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
4307 struct cifs_sb_info *cifs_sb, char *tree, bool islink,
4308 struct dfs_cache_tgt_list *tl)
4311 struct TCP_Server_Info *server = tcon->ses->server;
4312 const struct smb_version_operations *ops = server->ops;
4313 struct cifs_tcon *ipc = tcon->ses->tcon_ipc;
4314 char *share = NULL, *prefix = NULL;
4315 const char *tcp_host;
4316 size_t tcp_host_len;
4317 struct dfs_cache_tgt_iterator *tit;
4320 extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len);
4322 tit = dfs_cache_get_tgt_iterator(tl);
4328 /* Try to tree connect to all dfs targets */
4329 for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
4330 const char *target = dfs_cache_get_tgt_name(tit);
4331 struct dfs_cache_tgt_list ntl = DFS_CACHE_TGT_LIST_INIT(ntl);
/* reset per-iteration allocations (freed on an elided path below) */
4335 share = prefix = NULL;
4337 /* Check if share matches with tcp ses */
4338 rc = dfs_cache_get_tgt_share(server->current_fullpath + 1, tit, &share, &prefix);
4340 cifs_dbg(VFS, "%s: failed to parse target share: %d\n", __func__, rc);
4344 rc = target_share_matches_server(server, tcp_host, tcp_host_len, share,
/* target points at a different server; try the next one */
4348 if (!target_match) {
/* re-establish the IPC$ connection first if it dropped */
4353 if (ipc->need_reconnect) {
4354 scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
4355 rc = ops->tree_connect(xid, ipc->ses, tree, ipc, cifs_sb->local_nls);
4360 scnprintf(tree, MAX_TREE_SIZE, "\\%s", share);
/* non-link target (or elided !islink branch): plain tree connect */
4362 rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
4366 * If no dfs referrals were returned from link target, then just do a TREE_CONNECT
4367 * to it. Otherwise, cache the dfs referral and then mark current tcp ses for
4368 * reconnect so either the demultiplex thread or the echo worker will reconnect to
4369 * newly resolved target.
4371 if (dfs_cache_find(xid, tcon->ses, cifs_sb->local_nls, cifs_remap(cifs_sb), target,
4373 rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
/* remember which target worked and fix up the sb prefix path */
4376 rc = dfs_cache_noreq_update_tgthint(server->current_fullpath + 1, tit);
4378 rc = cifs_update_super_prepath(cifs_sb, prefix);
4380 /* Target is another dfs share */
4381 rc = update_server_fullpath(server, cifs_sb, target);
4382 dfs_cache_free_tgts(tl);
/* continue iteration over the link's own target list */
4386 list_replace_init(&ntl.tl_list, &tl->tl_list);
4388 dfs_cache_free_tgts(&ntl);
/*
 * Drive __tree_connect_dfs_target() across nested DFS links, following at
 * most MAX_NESTED_LINKS hops.  If every target from the last referral path
 * failed, fall back to the original referral path and force a full
 * reconnect of the TCP session.
 */
4400 static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
4401 struct cifs_sb_info *cifs_sb, char *tree, bool islink,
4402 struct dfs_cache_tgt_list *tl)
4406 struct TCP_Server_Info *server = tcon->ses->server;
4409 rc = __tree_connect_dfs_target(xid, tcon, cifs_sb, tree, islink, tl);
/* only -EREMOTE (nested link) keeps us looping; anything else is final */
4410 if (!rc || rc != -EREMOTE)
/*
 * Deliberate comma operator: if the nesting limit is hit, the loop
 * exits with rc preset to -ELOOP (too many chained DFS links).
 */
4412 } while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS);
4414 * If we couldn't tree connect to any targets from last referral path, then retry from
4415 * original referral path.
4417 if (rc && server->current_fullpath != server->origin_fullpath) {
4418 server->current_fullpath = server->origin_fullpath;
4419 cifs_reconnect(tcon->ses->server, true);
4422 dfs_cache_free_tgts(tl);
/*
 * (Re)establish the tree connection for @tcon, DFS-aware variant
 * (CONFIG_CIFS_DFS_UPCALL).  Guarded so only one caller per connection
 * performs it: tidStatus goes CifsNew/CifsNeedTcon -> CifsInTcon, then to
 * CifsGood on success or back to CifsNeedTcon on failure.  If a cached DFS
 * referral exists for the current full path, reconnect through the DFS
 * target machinery; otherwise reconnect directly to the original share.
 */
4426 int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
4429 struct TCP_Server_Info *server = tcon->ses->server;
4430 const struct smb_version_operations *ops = server->ops;
4431 struct super_block *sb = NULL;
4432 struct cifs_sb_info *cifs_sb;
4433 struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
4435 struct dfs_info3_param ref = {0};
4437 /* only send once per connect */
4438 spin_lock(&cifs_tcp_ses_lock);
4439 if (tcon->ses->status != CifsGood ||
4440 (tcon->tidStatus != CifsNew &&
4441 tcon->tidStatus != CifsNeedTcon)) {
4442 spin_unlock(&cifs_tcp_ses_lock);
4445 tcon->tidStatus = CifsInTcon;
4446 spin_unlock(&cifs_tcp_ses_lock);
/* scratch buffer for UNC strings built below */
4448 tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
/* IPC tcon path (guard elided): reconnect the IPC$ share */
4455 scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
4456 rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
4460 sb = cifs_get_tcp_super(server);
4463 cifs_dbg(VFS, "%s: could not find superblock: %d\n", __func__, rc);
4467 cifs_sb = CIFS_SB(sb);
4469 /* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
4470 if (!server->current_fullpath ||
4471 dfs_cache_noreq_find(server->current_fullpath + 1, &ref, &tl)) {
4472 rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, cifs_sb->local_nls);
/* cached referral found: walk its targets (islink per server_type) */
4476 rc = tree_connect_dfs_target(xid, tcon, cifs_sb, tree, ref.server_type == DFS_TYPE_LINK,
4478 free_dfs_info_param(&ref);
4482 cifs_put_tcp_super(sb);
/* failure: make the tree connect retryable */
4485 spin_lock(&cifs_tcp_ses_lock);
4486 if (tcon->tidStatus == CifsInTcon)
4487 tcon->tidStatus = CifsNeedTcon;
4488 spin_unlock(&cifs_tcp_ses_lock);
4490 spin_lock(&cifs_tcp_ses_lock);
4491 if (tcon->tidStatus == CifsInTcon)
4492 tcon->tidStatus = CifsGood;
4493 spin_unlock(&cifs_tcp_ses_lock);
4494 tcon->need_reconnect = false;
4500 int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
4503 const struct smb_version_operations *ops = tcon->ses->server->ops;
4505 /* only send once per connect */
4506 spin_lock(&cifs_tcp_ses_lock);
4507 if (tcon->ses->status != CifsGood ||
4508 (tcon->tidStatus != CifsNew &&
4509 tcon->tidStatus != CifsNeedTcon)) {
4510 spin_unlock(&cifs_tcp_ses_lock);
4513 tcon->tidStatus = CifsInTcon;
4514 spin_unlock(&cifs_tcp_ses_lock);
4516 rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc);
4518 spin_lock(&cifs_tcp_ses_lock);
4519 if (tcon->tidStatus == CifsInTcon)
4520 tcon->tidStatus = CifsNeedTcon;
4521 spin_unlock(&cifs_tcp_ses_lock);
4523 spin_lock(&cifs_tcp_ses_lock);
4524 if (tcon->tidStatus == CifsInTcon)
4525 tcon->tidStatus = CifsGood;
4526 spin_unlock(&cifs_tcp_ses_lock);
4527 tcon->need_reconnect = false;