4 * Copyright (C) International Business Machines Corp., 2009, 2013
6 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Pavel Shilovsky (pshilovsky@samba.org) 2012
9 * Contains the routines for constructing the SMB2 PDUs themselves
11 * This library is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU Lesser General Public License as published
13 * by the Free Software Foundation; either version 2.1 of the License, or
14 * (at your option) any later version.
16 * This library is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
19 * the GNU Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public License
22 * along with this library; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
27 /* Note that there are handle based routines which must be */
28 /* treated slightly differently for reconnection purposes since we never */
29 /* want to reuse a stale file handle and only the caller knows the file info */
32 #include <linux/kernel.h>
33 #include <linux/vfs.h>
34 #include <linux/task_io_accounting_ops.h>
35 #include <linux/uaccess.h>
36 #include <linux/pagemap.h>
37 #include <linux/xattr.h>
41 #include "cifsproto.h"
42 #include "smb2proto.h"
43 #include "cifs_unicode.h"
44 #include "cifs_debug.h"
46 #include "smb2status.h"
51 * The following table defines the expected "StructureSize" of SMB2 requests
52 * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
54 * Note that commands are defined in smb2pdu.h in le16 but the array below is
55 * indexed by command in host byte order.
/*
 * Per-command expected fixed-area size ("StructureSize") of SMB2 requests,
 * indexed by command code in host byte order (see comment above table).
 * NOTE(review): this extract skips original lines 60 and 63-71, so several
 * command entries are not visible here — verify against the complete file.
 */
57 static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
58 /* SMB2_NEGOTIATE */ 36,
59 /* SMB2_SESSION_SETUP */ 25,
61 /* SMB2_TREE_CONNECT */ 9,
62 /* SMB2_TREE_DISCONNECT */ 4,
72 /* SMB2_QUERY_DIRECTORY */ 33,
73 /* SMB2_CHANGE_NOTIFY */ 32,
74 /* SMB2_QUERY_INFO */ 41,
75 /* SMB2_SET_INFO */ 33,
76 /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
/*
 * smb2_hdr_assemble - initialize the fixed SMB2 header of an outgoing PDU
 * @hdr:      buffer to fill; also cast to smb2_pdu to set StructureSize2
 * @smb2_cmd: command code, already little-endian
 * @tcon:     tree connection supplying tid, session id and server caps
 *            (presumably may be NULL for session-less commands — the guard
 *            lines are not visible in this extract; TODO confirm)
 * NOTE(review): original line numbering skips here (e.g. 88-94, 107-114),
 * so some conditionals/braces are missing from this view.
 */
81 smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
82 const struct cifs_tcon *tcon)
84 struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
85 char *temp = (char *)hdr;
86 /* lookup word count ie StructureSize from table */
87 __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_cmd)];
90 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
91 * largest operations (Create)
95 /* Note this is only network field converted to big endian */
96 hdr->smb2_buf_length = cpu_to_be32(parmsize + sizeof(struct smb2_hdr)
97 - 4 /* RFC 1001 length field itself not counted */);
/* SMB2 protocol magic: 0xFE 'S' 'M' 'B' */
99 hdr->ProtocolId[0] = 0xFE;
100 hdr->ProtocolId[1] = 'S';
101 hdr->ProtocolId[2] = 'M';
102 hdr->ProtocolId[3] = 'B';
/* header fixed area is always 64 bytes per MS-SMB2 2.2.1 */
103 hdr->StructureSize = cpu_to_le16(64);
104 hdr->Command = smb2_cmd;
105 hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
106 hdr->ProcessId = cpu_to_le32((__u16)current->tgid);
111 /* BB FIXME when we do write > 64K add +1 for every 64K in req or rsp */
112 /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
113 /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
115 (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
116 hdr->CreditCharge = cpu_to_le16(1);
117 /* else CreditCharge MBZ */
119 hdr->TreeId = tcon->tid;
120 /* Uid is not converted */
122 hdr->SessionId = tcon->ses->Suid;
125 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
126 * to pass the path on the Open SMB prefixed by \\server\share.
127 * Not sure when we would need to do the augmented path (if ever) and
128 * setting this flag breaks the SMB2 open operation since it is
129 * illegal to send an empty path name (without \\server\share prefix)
130 * when the DFS flag is set in the SMB open header. We could
131 * consider setting the flag on all operations other than open
132 * but it is safer to net set it for now.
134 /* if (tcon->share_flags & SHI1005_FLAGS_DFS)
135 hdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */
137 if (tcon->ses && tcon->ses->server && tcon->ses->server->sign)
138 hdr->Flags |= SMB2_FLAGS_SIGNED;
/* command-specific fixed-area size looked up from table above */
140 pdu->StructureSize2 = cpu_to_le16(parmsize);
/*
 * smb2_reconnect - re-establish tcp/session/tree state before sending a PDU
 * @smb2_command: command about to be sent (little-endian)
 * @tcon:         tree connection the command targets
 *
 * Waits (bounded) for the demultiplex thread to reconnect the socket,
 * renegotiates and re-does session setup / tree connect as needed.
 * Returns 0 on success; error paths are mostly not visible in this
 * extract (original numbering skips) — verify against the full file.
 */
145 smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
148 struct nls_table *nls_codepage;
149 struct cifs_ses *ses;
150 struct TCP_Server_Info *server;
153 * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
154 * check for tcp and smb session status done differently
155 * for those three - in the calling routine.
/* tree connect itself never needs a prior tree connect */
160 if (smb2_command == SMB2_TREE_CONNECT)
163 if (tcon->tidStatus == CifsExiting) {
165 * only tree disconnect, open, and write,
166 * (and ulogoff which does not have tcon)
167 * are allowed as we start force umount.
169 if ((smb2_command != SMB2_WRITE) &&
170 (smb2_command != SMB2_CREATE) &&
171 (smb2_command != SMB2_TREE_DISCONNECT)) {
172 cifs_dbg(FYI, "can not send cmd %d while umounting\n",
177 if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
178 (!tcon->ses->server))
182 server = ses->server;
185 * Give demultiplex thread up to 10 seconds to reconnect, should be
186 * greater than cifs socket timeout which is 7 seconds
188 while (server->tcpStatus == CifsNeedReconnect) {
190 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
191 * here since they are implicitly done when session drops.
193 switch (smb2_command) {
195 * BB Should we keep oplock break and add flush to exceptions?
197 case SMB2_TREE_DISCONNECT:
200 case SMB2_OPLOCK_BREAK:
/* block until the demultiplex thread reconnects or 10s elapse */
204 wait_event_interruptible_timeout(server->response_q,
205 (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
207 /* are we still trying to reconnect? */
208 if (server->tcpStatus != CifsNeedReconnect)
212 * on "soft" mounts we wait once. Hard mounts keep
213 * retrying until process is killed or server comes
217 cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
/* tcp reconnected but session and tree still valid: nothing to redo */
222 if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
225 nls_codepage = load_nls_default();
228 * need to prevent multiple threads trying to simultaneously reconnect
229 * the same SMB session
231 mutex_lock(&tcon->ses->session_mutex);
232 rc = cifs_negotiate_protocol(0, tcon->ses);
233 if (!rc && tcon->ses->need_reconnect)
234 rc = cifs_setup_session(0, tcon->ses, nls_codepage);
236 if (rc || !tcon->need_reconnect) {
237 mutex_unlock(&tcon->ses->session_mutex);
/* stale handles: open files must be re-opened by their owners */
241 cifs_mark_open_files_invalid(tcon);
242 rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
243 mutex_unlock(&tcon->ses->session_mutex);
244 cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
247 atomic_inc(&tconInfoReconnectCount);
249 * BB FIXME add code to check if wsize needs update due to negotiated
250 * smb buffer size shrinking.
254 * Check if handle based operation so we know whether we can continue
255 * or not without returning to caller to reset file handle.
258 * BB Is flush done by server on drop of tcp session? Should we special
259 * case it and skip above?
261 switch (smb2_command) {
267 case SMB2_QUERY_DIRECTORY:
268 case SMB2_CHANGE_NOTIFY:
269 case SMB2_QUERY_INFO:
273 unload_nls(nls_codepage);
278 * Allocate and return pointer to an SMB request hdr, and set basic
279 * SMB information in the SMB header. If the return code is zero, this
280 * function must have filled in request_buf pointer.
/*
 * small_smb2_init - reconnect if needed, allocate a small buffer and
 * assemble the SMB2 header for @smb2_command into *@request_buf.
 * On success *request_buf points at the initialized request; callers
 * must release it. Return value handling is partly outside this extract.
 */
283 small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
288 rc = smb2_reconnect(smb2_command, tcon);
292 /* BB eventually switch this to SMB2 specific small buf size */
293 *request_buf = cifs_small_buf_get();
294 if (*request_buf == NULL) {
295 /* BB should we add a retry in here if not a writepage? */
299 smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);
302 #ifdef CONFIG_CIFS_STATS2
303 uint16_t com_code = le16_to_cpu(smb2_command);
304 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
306 cifs_stats_inc(&tcon->num_smbs_sent);
/*
 * free_rsp_buf - release a response buffer to the matching pool.
 * @resp_buftype selects small vs large pool; any other value
 * (e.g. CIFS_NO_BUFFER) is a no-op.
 */
313 free_rsp_buf(int resp_buftype, void *rsp)
315 if (resp_buftype == CIFS_SMALL_BUFFER)
316 cifs_small_buf_release(rsp);
317 else if (resp_buftype == CIFS_LARGE_BUFFER)
318 cifs_buf_release(rsp);
324 * SMB2 Worker functions follow:
326 * The general structure of the worker functions is:
327 * 1) Call smb2_init (assembles SMB2 header)
328 * 2) Initialize SMB2 command specific fields in fixed length area of SMB
329 * 3) Call smb_sendrcv2 (sends request on socket and waits for response)
330 * 4) Decode SMB2 command specific fields in the fixed length area
331 * 5) Decode variable length data area (if any for this SMB2 command type)
332 * 6) Call free smb buffer
/*
 * SMB2_negotiate - send SMB2 NEGOTIATE and record the server's reply
 * (dialect, buffer sizes, security mode, capabilities, security blob)
 * into @ses->server. Only one dialect is offered per request.
 * NOTE(review): numbering skips show error/exit paths are missing
 * from this extract — verify against the complete file.
 */
338 SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
340 struct smb2_negotiate_req *req;
341 struct smb2_negotiate_rsp *rsp;
345 struct TCP_Server_Info *server = ses->server;
346 int blob_offset, blob_length;
348 int flags = CIFS_NEG_OP;
350 cifs_dbg(FYI, "Negotiate protocol\n");
353 WARN(1, "%s: server is NULL!\n", __func__);
357 rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req);
/* no session yet, so SessionId must be zero */
361 req->hdr.SessionId = 0;
363 req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
365 req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */
/* account for the 2-byte dialect just appended */
366 inc_rfc1001_len(req, 2);
368 /* only one of SMB2 signing flags may be set in SMB2 request */
370 req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
371 else if (global_secflags & CIFSSEC_MAY_SIGN)
372 req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
374 req->SecurityMode = 0;
376 req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities);
378 memcpy(req->ClientGUID, cifs_client_guid, SMB2_CLIENT_GUID_SIZE);
380 iov[0].iov_base = (char *)req;
381 /* 4 for rfc1002 length field */
382 iov[0].iov_len = get_rfc1002_length(req) + 4;
384 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags);
386 rsp = (struct smb2_negotiate_rsp *)iov[0].iov_base;
388 * No tcon so can't do
389 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
394 cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
396 /* BB we may eventually want to match the negotiated vs. requested
397 dialect, even though we are only requesting one at a time */
398 if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
399 cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
400 else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
401 cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
402 else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
403 cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
404 else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
405 cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
407 cifs_dbg(VFS, "Illegal dialect returned by server %d\n",
408 le16_to_cpu(rsp->DialectRevision));
412 server->dialect = le16_to_cpu(rsp->DialectRevision);
414 /* SMB2 only has an extended negflavor */
415 server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
416 /* set it to the maximum buffer size value we can send with 1 credit */
417 server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
418 SMB2_MAX_BUFFER_SIZE);
419 server->max_read = le32_to_cpu(rsp->MaxReadSize);
420 server->max_write = le32_to_cpu(rsp->MaxWriteSize);
421 /* BB Do we need to validate the SecurityMode? */
422 server->sec_mode = le16_to_cpu(rsp->SecurityMode);
423 server->capabilities = le32_to_cpu(rsp->Capabilities);
/* internal-only capability bits added on top of what server reported */
425 server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
427 security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
430 * See MS-SMB2 section 2.2.4: if no blob, client picks default which
432 * ses->sectype = RawNTLMSSP;
433 * but for time being this is our only auth choice so doesn't matter.
434 * We just found a server which sets blob length to zero expecting raw.
436 if (blob_length == 0)
437 cifs_dbg(FYI, "missing security blob on negprot\n");
439 rc = cifs_enable_signing(server, ses->sign);
440 #ifdef CONFIG_SMB2_ASN1 /* BB REMOVEME when updated asn1.c ready */
444 rc = decode_neg_token_init(security_blob, blob_length,
455 free_rsp_buf(resp_buftype, rsp);
/*
 * smb3_validate_negotiate - FSCTL_VALIDATE_NEGOTIATE_INFO: re-send the
 * negotiate parameters over a signed IOCTL and verify the server's reply
 * matches what was negotiated earlier (detects downgrade attacks).
 * Returns 0 when validation passes or is skipped (unsigned connection).
 * Some exit paths are not visible in this extract.
 */
459 int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
462 struct validate_negotiate_info_req vneg_inbuf;
463 struct validate_negotiate_info_rsp *pneg_rsp;
466 cifs_dbg(FYI, "validate negotiate\n");
469 * validation ioctl must be signed, so no point sending this if we
470 * can not sign it. We could eventually change this to selectively
471 * sign just this, the first and only signed request on a connection.
472 * This is good enough for now since a user who wants better security
473 * would also enable signing on the mount. Having validation of
474 * negotiate info for signed connections helps reduce attack vectors
476 if (tcon->ses->server->sign == false)
477 return 0; /* validation requires signing */
479 vneg_inbuf.Capabilities =
480 cpu_to_le32(tcon->ses->server->vals->req_capabilities);
481 memcpy(vneg_inbuf.Guid, cifs_client_guid, SMB2_CLIENT_GUID_SIZE);
484 vneg_inbuf.SecurityMode =
485 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
486 else if (global_secflags & CIFSSEC_MAY_SIGN)
487 vneg_inbuf.SecurityMode =
488 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
490 vneg_inbuf.SecurityMode = 0;
/* must mirror the single dialect offered in SMB2_negotiate */
492 vneg_inbuf.DialectCount = cpu_to_le16(1);
493 vneg_inbuf.Dialects[0] =
494 cpu_to_le16(tcon->ses->server->vals->protocol_id);
496 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
497 FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
498 (char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req),
499 (char **)&pneg_rsp, &rsplen);
502 cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
506 if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
507 cifs_dbg(VFS, "invalid size of protocol negotiate response\n");
511 /* check validate negotiate info response matches what we got earlier */
512 if (pneg_rsp->Dialect !=
513 cpu_to_le16(tcon->ses->server->vals->protocol_id))
516 if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
519 /* do not validate server guid because not saved at negprot time yet */
/* OR in the two caps we added locally before comparing (see negotiate) */
521 if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
522 SMB2_LARGE_FILES) != tcon->ses->server->capabilities)
525 /* validate negotiate successful */
526 cifs_dbg(FYI, "validate negotiate info successful\n");
530 cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
/*
 * SMB2_sess_setup - perform the (multi-round) SMB2 SESSION_SETUP exchange
 * using raw NTLMSSP: send Negotiate, process the server's Challenge
 * (STATUS_MORE_PROCESSING_REQUIRED), then send Authenticate. On success
 * generates the signing key (if required) and marks the session good.
 * NOTE(review): numbering skips mean several branches/exits are missing
 * from this extract — verify control flow against the complete file.
 */
535 SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
536 const struct nls_table *nls_cp)
538 struct smb2_sess_setup_req *req;
539 struct smb2_sess_setup_rsp *rsp = NULL;
543 __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
544 struct TCP_Server_Info *server = ses->server;
547 char *ntlmssp_blob = NULL;
548 bool use_spnego = false; /* else use raw ntlmssp */
550 cifs_dbg(FYI, "Session Setup\n");
553 WARN(1, "%s: server is NULL!\n", __func__);
558 * If we are here due to reconnect, free per-smb session key
559 * in case signing was required.
561 kfree(ses->auth_key.response);
562 ses->auth_key.response = NULL;
565 * If memory allocation is successful, caller of this function
568 ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
571 ses->ntlmssp->sesskey_per_smbsess = true;
573 /* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */
574 ses->sectype = RawNTLMSSP;
/* loop target: re-entered once after the Challenge has been decoded */
576 ssetup_ntlmssp_authenticate:
577 if (phase == NtLmChallenge)
578 phase = NtLmAuthenticate; /* if ntlmssp, now final phase */
580 rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
584 req->hdr.SessionId = 0; /* First session, not a reauthenticate */
585 req->VcNumber = 0; /* MBZ */
586 /* to enable echos and oplocks */
587 req->hdr.CreditRequest = cpu_to_le16(3);
589 /* only one of SMB2 signing flags may be set in SMB2 request */
591 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
592 else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
593 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
595 req->SecurityMode = 0;
597 req->Capabilities = 0;
598 req->Channel = 0; /* MBZ */
600 iov[0].iov_base = (char *)req;
601 /* 4 for rfc1002 length field and 1 for pad */
602 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
603 if (phase == NtLmNegotiate) {
604 ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
606 if (ntlmssp_blob == NULL) {
610 build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
612 /* blob_length = build_spnego_ntlmssp_blob(
614 sizeof(struct _NEGOTIATE_MESSAGE),
616 /* BB eventually need to add this */
617 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
622 blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
623 /* with raw NTLMSSP we don't encapsulate in SPNEGO */
624 security_blob = ntlmssp_blob;
626 } else if (phase == NtLmAuthenticate) {
/* session id learned from the Challenge round is echoed back */
627 req->hdr.SessionId = ses->Suid;
628 ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
630 if (ntlmssp_blob == NULL) {
634 rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
637 cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
639 goto ssetup_exit; /* BB double check error handling */
642 /* blob_length = build_spnego_ntlmssp_blob(
646 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
651 security_blob = ntlmssp_blob;
654 cifs_dbg(VFS, "illegal ntlmssp phase\n");
659 /* Testing shows that buffer offset must be at location of Buffer[0] */
660 req->SecurityBufferOffset =
661 cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
662 1 /* pad */ - 4 /* rfc1001 len */);
663 req->SecurityBufferLength = cpu_to_le16(blob_length);
664 iov[1].iov_base = security_blob;
665 iov[1].iov_len = blob_length;
667 inc_rfc1001_len(req, blob_length - 1 /* pad */);
669 /* BB add code to build os and lm fields */
671 rc = SendReceive2(xid, ses, iov, 2, &resp_buftype,
672 CIFS_LOG_ERROR | CIFS_NEG_OP);
674 kfree(security_blob);
675 rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
676 if (resp_buftype != CIFS_NO_BUFFER &&
677 rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
678 if (phase != NtLmNegotiate) {
679 cifs_dbg(VFS, "Unexpected more processing error\n");
682 if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
683 le16_to_cpu(rsp->SecurityBufferOffset)) {
684 cifs_dbg(VFS, "Invalid security buffer offset %d\n",
685 le16_to_cpu(rsp->SecurityBufferOffset));
690 /* NTLMSSP Negotiate sent now processing challenge (response) */
691 phase = NtLmChallenge; /* process ntlmssp challenge */
692 rc = 0; /* MORE_PROCESSING is not an error here but expected */
693 ses->Suid = rsp->hdr.SessionId;
694 rc = decode_ntlmssp_challenge(rsp->Buffer,
695 le16_to_cpu(rsp->SecurityBufferLength), ses);
699 * BB eventually add code for SPNEGO decoding of NtlmChallenge blob,
700 * but at least the raw NTLMSSP case works.
703 * No tcon so can't do
704 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
709 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
710 if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
711 cifs_dbg(VFS, "SMB3 encryption not supported yet\n");
713 free_rsp_buf(resp_buftype, rsp);
715 /* if ntlmssp, and negotiate succeeded, proceed to authenticate phase */
716 if ((phase == NtLmChallenge) && (rc == 0))
717 goto ssetup_ntlmssp_authenticate;
/* srv_mutex serializes signing-key generation with other senders */
720 mutex_lock(&server->srv_mutex);
721 if (server->sign && server->ops->generate_signingkey) {
722 rc = server->ops->generate_signingkey(ses);
723 kfree(ses->auth_key.response);
724 ses->auth_key.response = NULL;
727 "SMB3 session key generation failed\n");
728 mutex_unlock(&server->srv_mutex);
732 if (!server->session_estab) {
733 server->sequence_number = 0x2;
734 server->session_estab = true;
736 mutex_unlock(&server->srv_mutex);
738 cifs_dbg(FYI, "SMB2/3 session established successfully\n");
739 spin_lock(&GlobalMid_Lock);
740 ses->status = CifsGood;
741 ses->need_reconnect = false;
742 spin_unlock(&GlobalMid_Lock);
747 kfree(ses->auth_key.response);
748 ses->auth_key.response = NULL;
/*
 * SMB2_logoff - send SMB2 LOGOFF for @ses (no response body expected).
 * Skipped entirely when the session already needs reconnect, since the
 * uid is implicitly invalid after a dropped tcp session.
 */
756 SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
758 struct smb2_logoff_req *req; /* response is also trivial struct */
760 struct TCP_Server_Info *server;
762 cifs_dbg(FYI, "disconnect session %p\n", ses);
764 if (ses && (ses->server))
765 server = ses->server;
769 /* no need to send SMB logoff if uid already closed due to reconnect */
770 if (ses->need_reconnect)
771 goto smb2_session_already_dead;
773 rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
777 /* since no tcon, smb2_init can not do this, so do here */
778 req->hdr.SessionId = ses->Suid;
780 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
782 rc = SendReceiveNoRsp(xid, ses, (char *) &req->hdr, 0);
784 * No tcon so can't do
785 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
788 smb2_session_already_dead:
/* Bump the per-tcon failure counter for SMB2 command @code. */
792 static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
794 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
797 #define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
799 /* These are similar values to what Windows uses */
/*
 * Set server-side copy (copychunk) limits on a fresh tcon:
 * 256 chunks of up to 1MiB per request, 16MiB total per copy.
 */
800 static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
802 tcon->max_chunks = 256;
803 tcon->max_bytes_chunk = 1048576;
804 tcon->max_bytes_copy = 16777216;
/*
 * SMB2_tcon - send SMB2 TREE_CONNECT for UNC path @tree and record the
 * resulting tid, share type/flags and capabilities into @tcon. Remembers
 * a bad share name so repeat attempts fail fast. Validates negotiate
 * info (SMB3) after a successful connect when the op is available.
 * NOTE(review): several error/exit paths fall in lines omitted from
 * this extract — verify against the complete file.
 */
808 SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
809 struct cifs_tcon *tcon, const struct nls_table *cp)
811 struct smb2_tree_connect_req *req;
812 struct smb2_tree_connect_rsp *rsp = NULL;
817 struct TCP_Server_Info *server;
818 __le16 *unc_path = NULL;
820 cifs_dbg(FYI, "TCON\n");
822 if ((ses->server) && tree)
823 server = ses->server;
/* don't retry a share name the server already rejected */
827 if (tcon && tcon->bad_network_name)
830 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
831 if (unc_path == NULL)
/* +1 converts returned UTF-16 char count to include the terminator */
834 unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
836 if (unc_path_len < 2) {
841 rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
848 /* since no tcon, smb2_init can not do this, so do here */
849 req->hdr.SessionId = ses->Suid;
850 /* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
851 req->hdr.Flags |= SMB2_FLAGS_SIGNED; */
854 iov[0].iov_base = (char *)req;
855 /* 4 for rfc1002 length field and 1 for pad */
856 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
858 /* Testing shows that buffer offset must be at location of Buffer[0] */
859 req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
860 - 1 /* pad */ - 4 /* do not count rfc1001 len field */);
861 req->PathLength = cpu_to_le16(unc_path_len - 2);
862 iov[1].iov_base = unc_path;
863 iov[1].iov_len = unc_path_len;
865 inc_rfc1001_len(req, unc_path_len - 1 /* pad */);
867 rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
868 rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base;
872 cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
873 tcon->need_reconnect = true;
875 goto tcon_error_exit;
879 ses->ipc_tid = rsp->hdr.TreeId;
883 if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
884 cifs_dbg(FYI, "connection to disk share\n");
885 else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
887 cifs_dbg(FYI, "connection to pipe share\n");
888 } else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
890 cifs_dbg(FYI, "connection to printer\n");
892 cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
894 goto tcon_error_exit;
897 tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
898 tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
899 tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
900 tcon->tidStatus = CifsGood;
901 tcon->need_reconnect = false;
902 tcon->tid = rsp->hdr.TreeId;
903 strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
905 if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
906 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
907 cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
908 init_copy_chunk_defaults(tcon);
909 if (tcon->ses->server->ops->validate_negotiate)
910 rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
912 free_rsp_buf(resp_buftype, rsp);
917 if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
918 cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
919 tcon->bad_network_name = true;
/*
 * SMB2_tdis - send SMB2 TREE_DISCONNECT for @tcon (no response body).
 * Skipped when either the tcon or its session already needs reconnect,
 * since the tree id is implicitly gone after a dropped session.
 */
925 SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
927 struct smb2_tree_disconnect_req *req; /* response is trivial */
929 struct TCP_Server_Info *server;
930 struct cifs_ses *ses = tcon->ses;
932 cifs_dbg(FYI, "Tree Disconnect\n");
934 if (ses && (ses->server))
935 server = ses->server;
939 if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
942 rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req);
946 rc = SendReceiveNoRsp(xid, ses, (char *)&req->hdr, 0);
948 cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
/*
 * create_durable_buf - allocate a durable-handle-request create context
 * ("DHnQ") for a CREATE. Data area is 16 zero bytes; the name/Name value
 * assignment is in lines omitted from this extract. Caller frees.
 * Returns NULL on allocation failure (check also outside extract).
 */
954 static struct create_durable *
955 create_durable_buf(void)
957 struct create_durable *buf;
959 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
963 buf->ccontext.DataOffset = cpu_to_le16(offsetof
964 (struct create_durable, Data));
965 buf->ccontext.DataLength = cpu_to_le32(16);
966 buf->ccontext.NameOffset = cpu_to_le16(offsetof
967 (struct create_durable, Name));
968 buf->ccontext.NameLength = cpu_to_le16(4);
/*
 * create_reconnect_durable_buf - allocate a durable-handle-reconnect
 * create context ("DHnC") carrying the fid being reclaimed after a
 * reconnect. Caller frees. Returns NULL on allocation failure.
 */
976 static struct create_durable *
977 create_reconnect_durable_buf(struct cifs_fid *fid)
979 struct create_durable *buf;
981 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
985 buf->ccontext.DataOffset = cpu_to_le16(offsetof
986 (struct create_durable, Data));
987 buf->ccontext.DataLength = cpu_to_le32(16);
988 buf->ccontext.NameOffset = cpu_to_le16(offsetof
989 (struct create_durable, Name));
990 buf->ccontext.NameLength = cpu_to_le16(4);
/* the fid pair identifies the previous open to the server */
991 buf->Data.Fid.PersistentFileId = fid->persistent_fid;
992 buf->Data.Fid.VolatileFileId = fid->volatile_fid;
/*
 * parse_lease_state - walk the create-context chain in a CREATE response
 * looking for the lease context (name "RqLs"); when found, hand it to the
 * dialect-specific parse_lease_buf op and return its result (oplock/lease
 * level). Chain ends when a context's Next offset is 0.
 */
1001 parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
1002 unsigned int *epoch)
1005 struct create_context *cc;
1006 unsigned int next = 0;
/* +4 skips the rfc1001 length preceding the SMB2 header */
1009 data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset);
1010 cc = (struct create_context *)data_offset;
1012 cc = (struct create_context *)((char *)cc + next);
1013 name = le16_to_cpu(cc->NameOffset) + (char *)cc;
1014 if (le16_to_cpu(cc->NameLength) != 4 ||
1015 strncmp(name, "RqLs", 4)) {
1016 next = le32_to_cpu(cc->Next);
1019 return server->ops->parse_lease_buf(cc, epoch);
1020 } while (next != 0);
/*
 * add_lease_context - append a lease create context to a CREATE request's
 * iovec array and account for it in the request lengths. Also stores the
 * generated lease key through oplock+1 (dialect-specific buffer builder).
 * Advances *num_iovec on success; failure path is outside this extract.
 */
1026 add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
1027 unsigned int *num_iovec, __u8 *oplock)
1029 struct smb2_create_req *req = iov[0].iov_base;
1030 unsigned int num = *num_iovec;
1032 iov[num].iov_base = server->ops->create_lease_buf(oplock+1, *oplock);
1033 if (iov[num].iov_base == NULL)
1035 iov[num].iov_len = server->vals->create_lease_size;
1036 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
/* first context added sets the offset past the fixed area + path iov */
1037 if (!req->CreateContextsOffset)
1038 req->CreateContextsOffset = cpu_to_le32(
1039 sizeof(struct smb2_create_req) - 4 +
1040 iov[num - 1].iov_len);
1041 le32_add_cpu(&req->CreateContextsLength,
1042 server->vals->create_lease_size);
1043 inc_rfc1001_len(&req->hdr, server->vals->create_lease_size);
1044 *num_iovec = num + 1;
/*
 * add_durable_context - append a durable-handle create context (request
 * or reconnect variant, chosen by oparms->reconnect) to a CREATE
 * request's iovec array and account for it in the request lengths.
 * Advances *num_iovec on success; failure path is outside this extract.
 */
1049 add_durable_context(struct kvec *iov, unsigned int *num_iovec,
1050 struct cifs_open_parms *oparms)
1052 struct smb2_create_req *req = iov[0].iov_base;
1053 unsigned int num = *num_iovec;
1055 if (oparms->reconnect) {
1056 iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
1057 /* indicate that we don't need to relock the file */
1058 oparms->reconnect = false;
1060 iov[num].iov_base = create_durable_buf();
1061 if (iov[num].iov_base == NULL)
1063 iov[num].iov_len = sizeof(struct create_durable);
/* first context added sets the offset; later ones only grow length */
1064 if (!req->CreateContextsOffset)
1065 req->CreateContextsOffset =
1066 cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
1068 le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
1069 inc_rfc1001_len(&req->hdr, sizeof(struct create_durable));
1070 *num_iovec = num + 1;
/*
 * SMB2_open - send SMB2 CREATE to open/create @path per @oparms; on
 * success fills oparms->fid, optionally copies file metadata into @buf,
 * and returns the granted oplock/lease level through @oplock. On server
 * error the raw error response may be duplicated into *err_buf.
 * NOTE(review): numbering skips mean padding logic around the 8-byte
 * path alignment and some error paths are missing from this extract.
 */
1075 SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
1076 __u8 *oplock, struct smb2_file_all_info *buf,
1077 struct smb2_err_rsp **err_buf)
1079 struct smb2_create_req *req;
1080 struct smb2_create_rsp *rsp;
1081 struct TCP_Server_Info *server;
1082 struct cifs_tcon *tcon = oparms->tcon;
1083 struct cifs_ses *ses = tcon->ses;
1087 __le16 *copy_path = NULL;
1090 unsigned int num_iovecs = 2;
1091 __u32 file_attributes = 0;
1093 cifs_dbg(FYI, "create/open\n");
1095 if (ses && (ses->server))
1096 server = ses->server;
1100 rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req);
1104 if (oparms->create_options & CREATE_OPTION_READONLY)
1105 file_attributes |= ATTR_READONLY;
1107 req->ImpersonationLevel = IL_IMPERSONATION;
1108 req->DesiredAccess = cpu_to_le32(oparms->desired_access);
1109 /* File attributes ignored on open (used in create though) */
1110 req->FileAttributes = cpu_to_le32(file_attributes);
1111 req->ShareAccess = FILE_SHARE_ALL_LE;
1112 req->CreateDisposition = cpu_to_le32(oparms->disposition);
1113 req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
/* UTF-16 byte length of the path including its null terminator */
1114 uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
1115 /* do not count rfc1001 len field */
1116 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req) - 4);
1118 iov[0].iov_base = (char *)req;
1119 /* 4 for rfc1002 length field */
1120 iov[0].iov_len = get_rfc1002_length(req) + 4;
1122 /* MUST set path len (NameLength) to 0 opening root of share */
1123 req->NameLength = cpu_to_le16(uni_path_len - 2);
1124 /* -1 since last byte is buf[0] which is sent below (path) */
/* path not 8-byte aligned: copy into a padded buffer (rest of the
   alignment logic falls in lines omitted from this extract) */
1126 if (uni_path_len % 8 != 0) {
1127 copy_size = uni_path_len / 8 * 8;
1128 if (copy_size < uni_path_len)
1131 copy_path = kzalloc(copy_size, GFP_KERNEL);
1134 memcpy((char *)copy_path, (const char *)path,
1136 uni_path_len = copy_size;
1140 iov[1].iov_len = uni_path_len;
1141 iov[1].iov_base = path;
1142 /* -1 since last byte is buf[0] which was counted in smb2_buf_len */
1143 inc_rfc1001_len(req, uni_path_len - 1);
1145 if (!server->oplocks)
1146 *oplock = SMB2_OPLOCK_LEVEL_NONE;
1148 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
1149 *oplock == SMB2_OPLOCK_LEVEL_NONE)
1150 req->RequestedOplockLevel = *oplock;
1152 rc = add_lease_context(server, iov, &num_iovecs, oplock);
1154 cifs_small_buf_release(req);
/* batch oplock implies a durable handle request */
1160 if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
1161 /* need to set Next field of lease context if we request it */
1162 if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) {
1163 struct create_context *ccontext =
1164 (struct create_context *)iov[num_iovecs-1].iov_base;
1166 cpu_to_le32(server->vals->create_lease_size);
1168 rc = add_durable_context(iov, &num_iovecs, oparms);
1170 cifs_small_buf_release(req);
1172 kfree(iov[num_iovecs-1].iov_base);
1177 rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
1178 rsp = (struct smb2_create_rsp *)iov[0].iov_base;
1181 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
1183 *err_buf = kmemdup(rsp, get_rfc1002_length(rsp) + 4,
1188 oparms->fid->persistent_fid = rsp->PersistentFileId;
1189 oparms->fid->volatile_fid = rsp->VolatileFileId;
/* copy the four timestamps (4 * 8 bytes) starting at CreationTime */
1192 memcpy(buf, &rsp->CreationTime, 32);
1193 buf->AllocationSize = rsp->AllocationSize;
1194 buf->EndOfFile = rsp->EndofFile;
1195 buf->Attributes = rsp->FileAttributes;
1196 buf->NumberOfLinks = cpu_to_le32(1);
1197 buf->DeletePending = 0;
1200 if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
1201 *oplock = parse_lease_state(server, rsp, &oparms->fid->epoch);
1203 *oplock = rsp->OplockLevel;
1206 free_rsp_buf(resp_buftype, rsp);
1211 * SMB2 IOCTL is used for both IOCTLs and FSCTLs
/*
 * Send an SMB2 IOCTL (or FSCTL) request against an open file and, if the
 * caller passed out_data/plen, copy the server's output buffer into a
 * freshly allocated *out_data of *plen bytes.  Caller must kfree(*out_data).
 * NOTE(review): this extract is missing intermediate source lines; comments
 * below describe only the statements visible here.
 */
1214 SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1215 u64 volatile_fid, u32 opcode, bool is_fsctl, char *in_data,
1216 u32 indatalen, char **out_data, u32 *plen /* returned data len */)
1218 struct smb2_ioctl_req *req;
1219 struct smb2_ioctl_rsp *rsp;
1220 struct TCP_Server_Info *server;
1221 struct cifs_ses *ses = tcon->ses;
1227 cifs_dbg(FYI, "SMB2 IOCTL\n");
1230 /* zero out returned data len, in case of error */
1234 if (ses && (ses->server))
1235 server = ses->server;
1239 rc = small_smb2_init(SMB2_IOCTL, tcon, (void **) &req);
1243 req->CtlCode = cpu_to_le32(opcode);
1244 req->PersistentFileId = persistent_fid;
1245 req->VolatileFileId = volatile_fid;
1248 req->InputCount = cpu_to_le32(indatalen);
1249 /* do not set InputOffset if no input data */
/* -4 drops the RFC1002 length field from the wire offset */
1251 cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer) - 4);
1252 iov[1].iov_base = in_data;
1253 iov[1].iov_len = indatalen;
1258 req->OutputOffset = 0;
1259 req->OutputCount = 0; /* MBZ */
1262 * Could increase MaxOutputResponse, but that would require more
1263 * than one credit. Windows typically sets this smaller, but for some
1264 * ioctls it may be useful to allow server to send more. No point
1265 * limiting what the server can send as long as fits in one credit
1267 req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */
1270 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
1274 iov[0].iov_base = (char *)req;
1277 * If no input data, the size of ioctl struct in
1278 * protocol spec still includes a 1 byte data buffer,
1279 * but if input data passed to ioctl, we do not
1280 * want to double count this, so we do not send
1281 * the dummy one byte of data in iovec[0] if sending
1282 * input data (in iovec[1]). We also must add 4 bytes
1283 * in first iovec to allow for rfc1002 length field.
1287 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
1288 inc_rfc1001_len(req, indatalen - 1);
1290 iov[0].iov_len = get_rfc1002_length(req) + 4;
1293 rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
1294 rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
/* -EINVAL is tolerated for the copychunk FSCTLs; see below */
1296 if ((rc != 0) && (rc != -EINVAL)) {
1298 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
1300 } else if (rc == -EINVAL) {
1301 if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
1302 (opcode != FSCTL_SRV_COPYCHUNK)) {
1304 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
1309 /* check if caller wants to look at return data or just return rc */
1310 if ((plen == NULL) || (out_data == NULL))
1313 *plen = le32_to_cpu(rsp->OutputCount);
1315 /* We check for obvious errors in the output buffer length and offset */
1317 goto ioctl_exit; /* server returned no data */
/* cap matches the MaxOutputResponse we advertised above */
1318 else if (*plen > 0xFF00) {
1319 cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
1325 if (get_rfc1002_length(rsp) < le32_to_cpu(rsp->OutputOffset) + *plen) {
1326 cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
1327 le32_to_cpu(rsp->OutputOffset));
1333 *out_data = kmalloc(*plen, GFP_KERNEL);
1334 if (*out_data == NULL) {
1339 memcpy(*out_data, rsp->hdr.ProtocolId + le32_to_cpu(rsp->OutputOffset),
1342 free_rsp_buf(resp_buftype, rsp);
1347 * Individual callers to ioctl worker function follow
/*
 * Turn on default compression for an open file by issuing
 * FSCTL_SET_COMPRESSION through SMB2_ioctl().
 */
1351 SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
1352 u64 persistent_fid, u64 volatile_fid)
/* res_key is unused in the lines visible here — TODO confirm in full source */
1355 char *res_key = NULL;
1356 struct compress_ioctl fsctl_input;
1357 char *ret_data = NULL;
1359 fsctl_input.CompressionState =
1360 __constant_cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
1362 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
1363 FSCTL_SET_COMPRESSION, true /* is_fsctl */,
1364 (char *)&fsctl_input /* data input */,
1365 2 /* in data len */, &ret_data /* out data */, NULL);
1367 cifs_dbg(FYI, "set compression rc %d\n", rc);
/*
 * Send an SMB2 CLOSE for the given persistent/volatile file id pair.
 */
1374 SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
1375 u64 persistent_fid, u64 volatile_fid)
1377 struct smb2_close_req *req;
1378 struct smb2_close_rsp *rsp;
1379 struct TCP_Server_Info *server;
1380 struct cifs_ses *ses = tcon->ses;
1385 cifs_dbg(FYI, "Close\n");
1387 if (ses && (ses->server))
1388 server = ses->server;
1392 rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req);
1396 req->PersistentFileId = persistent_fid;
1397 req->VolatileFileId = volatile_fid;
1399 iov[0].iov_base = (char *)req;
1400 /* 4 for rfc1002 length field */
1401 iov[0].iov_len = get_rfc1002_length(req) + 4;
1403 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
1404 rsp = (struct smb2_close_rsp *)iov[0].iov_base;
1408 cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
1412 /* BB FIXME - decode close response, update inode for caching */
1415 free_rsp_buf(resp_buftype, rsp);
/*
 * Validate that a variable-length region described by (offset, buffer_length)
 * lies entirely inside the received SMB2 response and is at least
 * min_buf_size bytes long.  Returns non-zero (error) on any violation.
 */
1420 validate_buf(unsigned int offset, unsigned int buffer_length,
1421 struct smb2_hdr *hdr, unsigned int min_buf_size)
1424 unsigned int smb_len = be32_to_cpu(hdr->smb2_buf_length);
1425 char *end_of_smb = smb_len + 4 /* RFC1001 length field */ + (char *)hdr;
1426 char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
1427 char *end_of_buf = begin_of_buf + buffer_length;
1430 if (buffer_length < min_buf_size) {
1431 cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
1432 buffer_length, min_buf_size);
1436 /* check if beyond RFC1001 maximum length */
1437 if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
1438 cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
1439 buffer_length, smb_len);
/* reject regions that start or end past the frame we actually received */
1443 if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
1444 cifs_dbg(VFS, "illegal server response, bad offset to data\n");
1452 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
1453 * Caller must free buffer.
/*
 * Validate the response region with validate_buf() and, on success, copy
 * buffer_length bytes of it into the caller-supplied buffer.
 */
1456 validate_and_copy_buf(unsigned int offset, unsigned int buffer_length,
1457 struct smb2_hdr *hdr, unsigned int minbufsize,
1461 char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
1467 rc = validate_buf(offset, buffer_length, hdr, minbufsize);
1471 memcpy(data, begin_of_buf, buffer_length);
/*
 * Worker for SMB2 QUERY_INFO (file class): send the request for the given
 * info_class and copy the validated response payload into *data.
 */
1477 query_info(const unsigned int xid, struct cifs_tcon *tcon,
1478 u64 persistent_fid, u64 volatile_fid, u8 info_class,
1479 size_t output_len, size_t min_len, void *data)
1481 struct smb2_query_info_req *req;
1482 struct smb2_query_info_rsp *rsp = NULL;
1486 struct TCP_Server_Info *server;
1487 struct cifs_ses *ses = tcon->ses;
1489 cifs_dbg(FYI, "Query Info\n");
1491 if (ses && (ses->server))
1492 server = ses->server;
1496 rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
1500 req->InfoType = SMB2_O_INFO_FILE;
1501 req->FileInfoClass = info_class;
1502 req->PersistentFileId = persistent_fid;
1503 req->VolatileFileId = volatile_fid;
1504 /* 4 for rfc1002 length field and 1 for Buffer */
1505 req->InputBufferOffset =
1506 cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
1507 req->OutputBufferLength = cpu_to_le32(output_len);
1509 iov[0].iov_base = (char *)req;
1510 /* 4 for rfc1002 length field */
1511 iov[0].iov_len = get_rfc1002_length(req) + 4;
1513 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
1514 rsp = (struct smb2_query_info_rsp *)iov[0].iov_base;
1517 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
/* bounds-check the reply before copying it out to the caller */
1521 rc = validate_and_copy_buf(le16_to_cpu(rsp->OutputBufferOffset),
1522 le32_to_cpu(rsp->OutputBufferLength),
1523 &rsp->hdr, min_len, data);
1526 free_rsp_buf(resp_buftype, rsp);
/*
 * Query FILE_ALL_INFORMATION for an open file; result lands in *data.
 * Output buffer is padded by MAX_NAME * 2 for the variable-length name.
 */
1531 SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
1532 u64 persistent_fid, u64 volatile_fid,
1533 struct smb2_file_all_info *data)
1535 return query_info(xid, tcon, persistent_fid, volatile_fid,
1536 FILE_ALL_INFORMATION,
1537 sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
1538 sizeof(struct smb2_file_all_info), data);
/*
 * Fetch the server-assigned unique file id (FILE_INTERNAL_INFORMATION)
 * into *uniqueid.
 */
1542 SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
1543 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
1545 return query_info(xid, tcon, persistent_fid, volatile_fid,
1546 FILE_INTERNAL_INFORMATION,
1547 sizeof(struct smb2_file_internal_info),
1548 sizeof(struct smb2_file_internal_info), uniqueid);
1552 * This is a no-op for now. We're not really interested in the reply, but
1553 * rather in the fact that the server sent one and that server->lstrp
1556 * FIXME: maybe we should consider checking that the reply matches request?
/*
 * Completion callback for the async ECHO: harvest the credits granted in
 * the response (default 1 if no response arrived) and free the mid.
 */
1559 smb2_echo_callback(struct mid_q_entry *mid)
1561 struct TCP_Server_Info *server = mid->callback_data;
1562 struct smb2_echo_rsp *smb2 = (struct smb2_echo_rsp *)mid->resp_buf;
1563 unsigned int credits_received = 1;
1565 if (mid->mid_state == MID_RESPONSE_RECEIVED)
1566 credits_received = le16_to_cpu(smb2->hdr.CreditRequest);
1568 DeleteMidQEntry(mid);
1569 add_credits(server, credits_received, CIFS_ECHO_OP);
/*
 * Send an asynchronous SMB2 ECHO (keepalive) to the server; completion is
 * handled by smb2_echo_callback().
 */
1573 SMB2_echo(struct TCP_Server_Info *server)
1575 struct smb2_echo_req *req;
1578 struct smb_rqst rqst = { .rq_iov = &iov,
1581 cifs_dbg(FYI, "In echo request\n");
1583 rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
/* ask for one credit back so echoes do not drain the credit pool */
1587 req->hdr.CreditRequest = cpu_to_le16(1);
1589 iov.iov_base = (char *)req;
1590 /* 4 for rfc1002 length field */
1591 iov.iov_len = get_rfc1002_length(req) + 4;
1593 rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server,
1596 cifs_dbg(FYI, "Echo request failed: %d\n", rc);
1598 cifs_small_buf_release(req);
/*
 * Send an SMB2 FLUSH to force the server to commit cached data for the
 * given open file to stable storage.
 */
1603 SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1606 struct smb2_flush_req *req;
1607 struct TCP_Server_Info *server;
1608 struct cifs_ses *ses = tcon->ses;
1613 cifs_dbg(FYI, "Flush\n");
1615 if (ses && (ses->server))
1616 server = ses->server;
1620 rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req);
1624 req->PersistentFileId = persistent_fid;
1625 req->VolatileFileId = volatile_fid;
1627 iov[0].iov_base = (char *)req;
1628 /* 4 for rfc1002 length field */
1629 iov[0].iov_len = get_rfc1002_length(req) + 4;
1631 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
1633 if ((rc != 0) && tcon)
1634 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
1636 free_rsp_buf(resp_buftype, iov[0].iov_base);
1641 * To form a chain of read requests, any read requests after the first should
1642 * have the end_of_chain boolean set to true.
/*
 * Build (but do not send) an SMB2 READ request into iov[0] from io_parms.
 * request_type flags (CHAINED_REQUEST/END_OF_CHAIN/RELATED_REQUEST) control
 * compound-chain header fields for multi-request reads.
 */
1645 smb2_new_read_req(struct kvec *iov, struct cifs_io_parms *io_parms,
1646 unsigned int remaining_bytes, int request_type)
1649 struct smb2_read_req *req = NULL;
1651 rc = small_smb2_init(SMB2_READ, io_parms->tcon, (void **) &req);
1654 if (io_parms->tcon->ses->server == NULL)
1655 return -ECONNABORTED;
1657 req->hdr.ProcessId = cpu_to_le32(io_parms->pid);
1659 req->PersistentFileId = io_parms->persistent_fid;
1660 req->VolatileFileId = io_parms->volatile_fid;
1661 req->ReadChannelInfoOffset = 0; /* reserved */
1662 req->ReadChannelInfoLength = 0; /* reserved */
1663 req->Channel = 0; /* reserved */
1664 req->MinimumCount = 0;
1665 req->Length = cpu_to_le32(io_parms->length);
1666 req->Offset = cpu_to_le64(io_parms->offset);
1668 if (request_type & CHAINED_REQUEST) {
1669 if (!(request_type & END_OF_CHAIN)) {
1670 /* 4 for rfc1002 length field */
1671 req->hdr.NextCommand =
1672 cpu_to_le32(get_rfc1002_length(req) + 4);
1673 } else /* END_OF_CHAIN */
1674 req->hdr.NextCommand = 0;
1675 if (request_type & RELATED_REQUEST) {
1676 req->hdr.Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
1678 * Related requests use info from previous read request
/* all-ones ids tell the server to reuse the previous request's values */
1681 req->hdr.SessionId = 0xFFFFFFFF;
1682 req->hdr.TreeId = 0xFFFFFFFF;
1683 req->PersistentFileId = 0xFFFFFFFF;
1684 req->VolatileFileId = 0xFFFFFFFF;
1687 if (remaining_bytes > io_parms->length)
1688 req->RemainingBytes = cpu_to_le32(remaining_bytes);
1690 req->RemainingBytes = 0;
1692 iov[0].iov_base = (char *)req;
1693 /* 4 for rfc1002 length field */
1694 iov[0].iov_len = get_rfc1002_length(req) + 4;
/*
 * Completion callback for async reads: verify the response signature,
 * account the bytes read, translate mid state to rdata->result, then queue
 * the read-completion work and return the granted credits.
 */
1699 smb2_readv_callback(struct mid_q_entry *mid)
1701 struct cifs_readdata *rdata = mid->callback_data;
1702 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
1703 struct TCP_Server_Info *server = tcon->ses->server;
1704 struct smb2_hdr *buf = (struct smb2_hdr *)rdata->iov.iov_base;
1705 unsigned int credits_received = 1;
1706 struct smb_rqst rqst = { .rq_iov = &rdata->iov,
1708 .rq_pages = rdata->pages,
1709 .rq_npages = rdata->nr_pages,
1710 .rq_pagesz = rdata->pagesz,
1711 .rq_tailsz = rdata->tailsz };
1713 cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
1714 __func__, mid->mid, mid->mid_state, rdata->result,
1717 switch (mid->mid_state) {
1718 case MID_RESPONSE_RECEIVED:
1719 credits_received = le16_to_cpu(buf->CreditRequest);
1720 /* result already set, check signature */
1724 rc = smb2_verify_signature(&rqst, server);
1726 cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
1729 /* FIXME: should this be counted toward the initiating task? */
1730 task_io_account_read(rdata->bytes);
1731 cifs_stats_bytes_read(tcon, rdata->bytes);
1733 case MID_REQUEST_SUBMITTED:
1734 case MID_RETRY_NEEDED:
/* transport-level retry: let the caller resubmit */
1735 rdata->result = -EAGAIN;
1738 if (rdata->result != -ENODATA)
1739 rdata->result = -EIO;
1743 cifs_stats_fail_inc(tcon, SMB2_READ_HE);
1745 queue_work(cifsiod_wq, &rdata->work);
1746 DeleteMidQEntry(mid);
1747 add_credits(server, credits_received, 0);
1750 /* smb2_async_readv - send an async read, and set up mid to handle result */
1752 smb2_async_readv(struct cifs_readdata *rdata)
1755 struct smb2_hdr *buf;
1756 struct cifs_io_parms io_parms;
1757 struct smb_rqst rqst = { .rq_iov = &rdata->iov,
1760 cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
1761 __func__, rdata->offset, rdata->bytes);
/* build io_parms from the cached file handle and the readdata request */
1763 io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
1764 io_parms.offset = rdata->offset;
1765 io_parms.length = rdata->bytes;
1766 io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
1767 io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
1768 io_parms.pid = rdata->pid;
1769 rc = smb2_new_read_req(&rdata->iov, &io_parms, 0, 0);
1773 buf = (struct smb2_hdr *)rdata->iov.iov_base;
1774 /* 4 for rfc1002 length field */
1775 rdata->iov.iov_len = get_rfc1002_length(rdata->iov.iov_base) + 4;
/* extra ref held for the callback; dropped there or on send failure */
1777 kref_get(&rdata->refcount);
1778 rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
1779 cifs_readv_receive, smb2_readv_callback,
1782 kref_put(&rdata->refcount, cifs_readdata_release);
1783 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
1786 cifs_small_buf_release(buf);
/*
 * Synchronous SMB2 READ.  On success sets *nbytes and either copies into
 * the caller's *buf or hands back the response buffer (with *buf_type set
 * to CIFS_SMALL_BUFFER/CIFS_LARGE_BUFFER) for the caller to free.
 */
1791 SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
1792 unsigned int *nbytes, char **buf, int *buf_type)
1794 int resp_buftype, rc = -EACCES;
1795 struct smb2_read_rsp *rsp = NULL;
1799 rc = smb2_new_read_req(iov, io_parms, 0, 0);
1803 rc = SendReceive2(xid, io_parms->tcon->ses, iov, 1,
1804 &resp_buftype, CIFS_LOG_ERROR);
1806 rsp = (struct smb2_read_rsp *)iov[0].iov_base;
/* EOF is an expected condition, not an error worth logging */
1808 if (rsp->hdr.Status == STATUS_END_OF_FILE) {
1809 free_rsp_buf(resp_buftype, iov[0].iov_base);
1814 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
1815 cifs_dbg(VFS, "Send error in read = %d\n", rc);
1817 *nbytes = le32_to_cpu(rsp->DataLength);
/* reject lengths beyond the message cap or more than we asked for */
1818 if ((*nbytes > CIFS_MAX_MSGSIZE) ||
1819 (*nbytes > io_parms->length)) {
1820 cifs_dbg(FYI, "bad length %d for count %d\n",
1821 *nbytes, io_parms->length);
1828 memcpy(*buf, (char *)rsp->hdr.ProtocolId + rsp->DataOffset,
1830 free_rsp_buf(resp_buftype, iov[0].iov_base);
1831 } else if (resp_buftype != CIFS_NO_BUFFER) {
1832 *buf = iov[0].iov_base;
1833 if (resp_buftype == CIFS_SMALL_BUFFER)
1834 *buf_type = CIFS_SMALL_BUFFER;
1835 else if (resp_buftype == CIFS_LARGE_BUFFER)
1836 *buf_type = CIFS_LARGE_BUFFER;
1842 * Check the mid_state and signature on received buffer (if any), and queue the
1843 * workqueue completion task.
/*
 * Completion callback for async writes: validate the response, record how
 * many bytes the server actually wrote, queue the write-completion work,
 * and return granted credits to the server's pool.
 */
1846 smb2_writev_callback(struct mid_q_entry *mid)
1848 struct cifs_writedata *wdata = mid->callback_data;
1849 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
1850 unsigned int written;
1851 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
1852 unsigned int credits_received = 1;
1854 switch (mid->mid_state) {
1855 case MID_RESPONSE_RECEIVED:
1856 credits_received = le16_to_cpu(rsp->hdr.CreditRequest);
1857 wdata->result = smb2_check_receive(mid, tcon->ses->server, 0);
1858 if (wdata->result != 0)
1861 written = le32_to_cpu(rsp->DataLength);
1863 * Mask off high 16 bits when bytes written as returned
1864 * by the server is greater than bytes requested by the
1865 * client. OS/2 servers are known to set incorrect
1868 if (written > wdata->bytes)
/* short write: report ENOSPC so the caller can react */
1871 if (written < wdata->bytes)
1872 wdata->result = -ENOSPC;
1874 wdata->bytes = written;
1876 case MID_REQUEST_SUBMITTED:
1877 case MID_RETRY_NEEDED:
1878 wdata->result = -EAGAIN;
1881 wdata->result = -EIO;
1886 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1888 queue_work(cifsiod_wq, &wdata->work);
1889 DeleteMidQEntry(mid);
1890 add_credits(tcon->ses->server, credits_received, 0);
1893 /* smb2_async_writev - send an async write, and set up mid to handle result */
/*
 * Build and submit an asynchronous SMB2 WRITE from wdata's page list;
 * completion is handled by smb2_writev_callback().  'release' frees wdata
 * if the send itself fails.
 */
1895 smb2_async_writev(struct cifs_writedata *wdata,
1896 void (*release)(struct kref *kref))
1899 struct smb2_write_req *req = NULL;
1900 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
1902 struct smb_rqst rqst;
1904 rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
1906 goto async_writev_out;
1908 req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid);
1910 req->PersistentFileId = wdata->cfile->fid.persistent_fid;
1911 req->VolatileFileId = wdata->cfile->fid.volatile_fid;
1912 req->WriteChannelInfoOffset = 0;
1913 req->WriteChannelInfoLength = 0;
1915 req->Offset = cpu_to_le64(wdata->offset);
1916 /* 4 for rfc1002 length field */
1917 req->DataOffset = cpu_to_le16(
1918 offsetof(struct smb2_write_req, Buffer) - 4);
1919 req->RemainingBytes = 0;
1921 /* 4 for rfc1002 length field and 1 for Buffer */
1922 iov.iov_len = get_rfc1002_length(req) + 4 - 1;
/* payload travels as pages attached to the rqst, not in the iov */
1927 rqst.rq_pages = wdata->pages;
1928 rqst.rq_npages = wdata->nr_pages;
1929 rqst.rq_pagesz = wdata->pagesz;
1930 rqst.rq_tailsz = wdata->tailsz;
1932 cifs_dbg(FYI, "async write at %llu %u bytes\n",
1933 wdata->offset, wdata->bytes);
1935 req->Length = cpu_to_le32(wdata->bytes);
1937 inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);
/* extra ref for the callback; dropped there or on send failure below */
1939 kref_get(&wdata->refcount);
1940 rc = cifs_call_async(tcon->ses->server, &rqst, NULL,
1941 smb2_writev_callback, wdata, 0);
1944 kref_put(&wdata->refcount, release);
1945 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1949 cifs_small_buf_release(req);
1954 * SMB2_write function gets iov pointer to kvec array with n_vec as a length.
1955 * The length field from io_parms must be at least 1 and indicates a number of
1956 * elements with data to write that begins with position 1 in iov array. All
1957 * data length is specified by count.
/*
 * Synchronous SMB2 WRITE.  iov[1..n_vec] carry the data to write (total
 * io_parms->length bytes); on return *nbytes holds the server-reported
 * byte count.
 */
1960 SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
1961 unsigned int *nbytes, struct kvec *iov, int n_vec)
1964 struct smb2_write_req *req = NULL;
1965 struct smb2_write_rsp *rsp = NULL;
1972 rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req);
1976 if (io_parms->tcon->ses->server == NULL)
1977 return -ECONNABORTED;
1979 req->hdr.ProcessId = cpu_to_le32(io_parms->pid);
1981 req->PersistentFileId = io_parms->persistent_fid;
1982 req->VolatileFileId = io_parms->volatile_fid;
1983 req->WriteChannelInfoOffset = 0;
1984 req->WriteChannelInfoLength = 0;
1986 req->Length = cpu_to_le32(io_parms->length);
1987 req->Offset = cpu_to_le64(io_parms->offset);
1988 /* 4 for rfc1002 length field */
1989 req->DataOffset = cpu_to_le16(
1990 offsetof(struct smb2_write_req, Buffer) - 4);
1991 req->RemainingBytes = 0;
1993 iov[0].iov_base = (char *)req;
1994 /* 4 for rfc1002 length field and 1 for Buffer */
1995 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
1997 /* length of entire message including data to be written */
1998 inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */);
2000 rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1,
2002 rsp = (struct smb2_write_rsp *)iov[0].iov_base;
2005 cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
2006 cifs_dbg(VFS, "Send error in write = %d\n", rc);
2008 *nbytes = le32_to_cpu(rsp->DataLength);
2010 free_rsp_buf(resp_buftype, rsp);
/*
 * Count directory entries in the [bufstart, end_of_buf) response region,
 * following NextEntryOffset links; sets *lastentry to the final entry.
 * 'size' is the fixed (pre-filename) size of one entry, used for bounds
 * checks against malformed server responses.
 */
2015 num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
2018 unsigned int entrycount = 0;
2019 unsigned int next_offset = 0;
2020 FILE_DIRECTORY_INFO *entryptr;
2022 if (bufstart == NULL)
2025 entryptr = (FILE_DIRECTORY_INFO *)bufstart;
2028 entryptr = (FILE_DIRECTORY_INFO *)
2029 ((char *)entryptr + next_offset);
/* entry header must fit before we trust any of its fields */
2031 if ((char *)entryptr + size > end_of_buf) {
2032 cifs_dbg(VFS, "malformed search entry would overflow\n");
2036 len = le32_to_cpu(entryptr->FileNameLength);
2037 if ((char *)entryptr + len + size > end_of_buf) {
2038 cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
2043 *lastentry = (char *)entryptr;
2046 next_offset = le32_to_cpu(entryptr->NextEntryOffset);
/*
 * Send an SMB2 QUERY_DIRECTORY ("*" pattern) starting at 'index' and fill
 * srch_inf with the raw response buffer, entry count, and end-of-search
 * state.  The response buffer is kept (ntwrk_buf_start) for the caller to
 * walk, replacing any previously held one.
 */
2058 SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
2059 u64 persistent_fid, u64 volatile_fid, int index,
2060 struct cifs_search_info *srch_inf)
2062 struct smb2_query_directory_req *req;
2063 struct smb2_query_directory_rsp *rsp = NULL;
2068 unsigned char *bufptr;
2069 struct TCP_Server_Info *server;
2070 struct cifs_ses *ses = tcon->ses;
2071 __le16 asteriks = cpu_to_le16('*');
2073 unsigned int output_size = CIFSMaxBufSize;
2074 size_t info_buf_size;
2076 if (ses && (ses->server))
2077 server = ses->server;
2081 rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
/* map the CIFS-level info level to the SMB2 information class */
2085 switch (srch_inf->info_level) {
2086 case SMB_FIND_FILE_DIRECTORY_INFO:
2087 req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
2088 info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
2090 case SMB_FIND_FILE_ID_FULL_DIR_INFO:
2091 req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
2092 info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
2095 cifs_dbg(VFS, "info level %u isn't supported\n",
2096 srch_inf->info_level);
2101 req->FileIndex = cpu_to_le32(index);
2102 req->PersistentFileId = persistent_fid;
2103 req->VolatileFileId = volatile_fid;
2106 bufptr = req->Buffer;
2107 memcpy(bufptr, &asteriks, len);
2109 req->FileNameOffset =
2110 cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
2111 req->FileNameLength = cpu_to_le16(len);
2113 * BB could be 30 bytes or so longer if we used SMB2 specific
2114 * buffer lengths, but this is safe and close enough.
2116 output_size = min_t(unsigned int, output_size, server->maxBuf);
2117 output_size = min_t(unsigned int, output_size, 2 << 15);
2118 req->OutputBufferLength = cpu_to_le32(output_size);
2120 iov[0].iov_base = (char *)req;
2121 /* 4 for RFC1001 length and 1 for Buffer */
2122 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
2124 iov[1].iov_base = (char *)(req->Buffer);
2125 iov[1].iov_len = len;
2127 inc_rfc1001_len(req, len - 1 /* Buffer */);
2129 rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
2130 rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;
2133 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
2137 rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
2138 le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
2143 srch_inf->unicode = true;
/* drop any buffer held from a previous search round before keeping this one */
2145 if (srch_inf->ntwrk_buf_start) {
2146 if (srch_inf->smallBuf)
2147 cifs_small_buf_release(srch_inf->ntwrk_buf_start);
2149 cifs_buf_release(srch_inf->ntwrk_buf_start);
2151 srch_inf->ntwrk_buf_start = (char *)rsp;
2152 srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ +
2153 (char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset);
2154 /* 4 for rfc1002 length field */
2155 end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr;
2156 srch_inf->entries_in_buffer =
2157 num_entries(srch_inf->srch_entries_start, end_of_smb,
2158 &srch_inf->last_entry, info_buf_size);
2159 srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
2160 cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
2161 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
2162 srch_inf->srch_entries_start, srch_inf->last_entry);
2163 if (resp_buftype == CIFS_LARGE_BUFFER)
2164 srch_inf->smallBuf = false;
2165 else if (resp_buftype == CIFS_SMALL_BUFFER)
2166 srch_inf->smallBuf = true;
2168 cifs_dbg(VFS, "illegal search buffer type\n");
2170 if (rsp->hdr.Status == STATUS_NO_MORE_FILES)
2171 srch_inf->endOfSearch = 1;
2173 srch_inf->endOfSearch = 0;
2178 free_rsp_buf(resp_buftype, rsp);
/*
 * Worker for SMB2 SET_INFO: sends 'num' data chunks (data[i] of size[i])
 * as one request; chunk 0 is copied into the request buffer, later chunks
 * ride in additional iovecs.  Used by rename/hardlink/eof/basic-info ops.
 */
2183 send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
2184 u64 persistent_fid, u64 volatile_fid, u32 pid, int info_class,
2185 unsigned int num, void **data, unsigned int *size)
2187 struct smb2_set_info_req *req;
2188 struct smb2_set_info_rsp *rsp = NULL;
2193 struct TCP_Server_Info *server;
2194 struct cifs_ses *ses = tcon->ses;
2196 if (ses && (ses->server))
2197 server = ses->server;
2204 iov = kmalloc(sizeof(struct kvec) * num, GFP_KERNEL);
2208 rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req);
2214 req->hdr.ProcessId = cpu_to_le32(pid);
2216 req->InfoType = SMB2_O_INFO_FILE;
2217 req->FileInfoClass = info_class;
2218 req->PersistentFileId = persistent_fid;
2219 req->VolatileFileId = volatile_fid;
2221 /* 4 for RFC1001 length and 1 for Buffer */
2223 cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4);
2224 req->BufferLength = cpu_to_le32(*size);
2226 inc_rfc1001_len(req, *size - 1 /* Buffer */);
2228 memcpy(req->Buffer, *data, *size);
2230 iov[0].iov_base = (char *)req;
2231 /* 4 for RFC1001 length */
2232 iov[0].iov_len = get_rfc1002_length(req) + 4;
/* extra chunks extend BufferLength and the wire length accordingly */
2234 for (i = 1; i < num; i++) {
2235 inc_rfc1001_len(req, size[i]);
2236 le32_add_cpu(&req->BufferLength, size[i]);
2237 iov[i].iov_base = (char *)data[i];
2238 iov[i].iov_len = size[i];
2241 rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0);
2242 rsp = (struct smb2_set_info_rsp *)iov[0].iov_base;
2245 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
2247 free_rsp_buf(resp_buftype, rsp);
/*
 * Rename an open file to 'target_file' (UTF-16) via
 * FILE_RENAME_INFORMATION, replacing any existing target.
 */
2253 SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
2254 u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
2256 struct smb2_file_rename_info info;
2258 unsigned int size[2];
2260 int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
2262 data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
2266 info.ReplaceIfExists = 1; /* 1 = replace existing target with new */
2267 /* 0 = fail if target already exists */
2268 info.RootDirectory = 0; /* MBZ for network ops (why does spec say?) */
2269 info.FileNameLength = cpu_to_le32(len);
2272 size[0] = sizeof(struct smb2_file_rename_info);
2274 data[1] = target_file;
2275 size[1] = len + 2 /* null */;
2277 rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
2278 current->tgid, FILE_RENAME_INFORMATION, 2, data,
/*
 * Create a hard link named 'target_file' (UTF-16) to an open file via
 * FILE_LINK_INFORMATION; fails if the link name already exists.
 */
2285 SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
2286 u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
2288 struct smb2_file_link_info info;
2290 unsigned int size[2];
2292 int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
2294 data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
2298 info.ReplaceIfExists = 0; /* 1 = replace existing link with new */
2299 /* 0 = fail if link already exists */
2300 info.RootDirectory = 0; /* MBZ for network ops (why does spec say?) */
2301 info.FileNameLength = cpu_to_le32(len);
2304 size[0] = sizeof(struct smb2_file_link_info);
2306 data[1] = target_file;
2307 size[1] = len + 2 /* null */;
2309 rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
2310 current->tgid, FILE_LINK_INFORMATION, 2, data, size);
/*
 * Set an open file's end-of-file position (truncate/extend) via
 * FILE_END_OF_FILE_INFORMATION.
 */
2316 SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
2317 u64 volatile_fid, u32 pid, __le64 *eof)
2319 struct smb2_file_eof_info info;
2323 info.EndOfFile = *eof;
2326 size = sizeof(struct smb2_file_eof_info);
2328 return send_set_info(xid, tcon, persistent_fid, volatile_fid, pid,
2329 FILE_END_OF_FILE_INFORMATION, 1, &data, &size);
/*
 * Set basic file attributes/timestamps on an open file via
 * FILE_BASIC_INFORMATION.
 */
2333 SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
2334 u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf)
2337 size = sizeof(FILE_BASIC_INFO);
2338 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
2339 current->tgid, FILE_BASIC_INFORMATION, 1,
2340 (void **)&buf, &size);
/*
 * Acknowledge a server oplock break, informing it of the (reduced) oplock
 * level we now hold.  Fire-and-forget: no response buffer is consumed.
 */
2344 SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
2345 const u64 persistent_fid, const u64 volatile_fid,
2349 struct smb2_oplock_break *req = NULL;
2351 cifs_dbg(FYI, "SMB2_oplock_break\n");
2352 rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
2357 req->VolatileFid = volatile_fid;
2358 req->PersistentFid = persistent_fid;
2359 req->OplockLevel = oplock_level;
2360 req->hdr.CreditRequest = cpu_to_le16(1);
2362 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
2363 /* SMB2 buffer freed by function above */
2366 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
2367 cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
/*
 * Translate an SMB2 FS_FULL_SIZE_INFORMATION response into the VFS
 * kstatfs fields (block size, total/free/available blocks).
 */
2374 copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
2375 struct kstatfs *kst)
2377 kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
2378 le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
2379 kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
2380 kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
2381 kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
/*
 * Build (but do not send) a filesystem-class SMB2 QUERY_INFO request for
 * the given info level into *iov; caller sends it and frees the buffer.
 */
2386 build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
2387 int outbuf_len, u64 persistent_fid, u64 volatile_fid)
2390 struct smb2_query_info_req *req;
2392 cifs_dbg(FYI, "Query FSInfo level %d\n", level);
2394 if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
2397 rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
2401 req->InfoType = SMB2_O_INFO_FILESYSTEM;
2402 req->FileInfoClass = level;
2403 req->PersistentFileId = persistent_fid;
2404 req->VolatileFileId = volatile_fid;
2405 /* 4 for rfc1002 length field and 1 for pad */
2406 req->InputBufferOffset =
2407 cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
2408 req->OutputBufferLength = cpu_to_le32(
2409 outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4);
2411 iov->iov_base = (char *)req;
2412 /* 4 for rfc1002 length field */
2413 iov->iov_len = get_rfc1002_length(req) + 4;
/*
 * Query FS_FULL_SIZE_INFORMATION for the share and fill the caller's
 * kstatfs (for statfs(2)).
 */
2418 SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
2419 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
2421 struct smb2_query_info_rsp *rsp = NULL;
2425 struct cifs_ses *ses = tcon->ses;
2426 struct smb2_fs_full_size_info *info = NULL;
2428 rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION,
2429 sizeof(struct smb2_fs_full_size_info),
2430 persistent_fid, volatile_fid);
2434 rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
2436 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
2439 rsp = (struct smb2_query_info_rsp *)iov.iov_base;
2441 info = (struct smb2_fs_full_size_info *)(4 /* RFC1001 len */ +
2442 le16_to_cpu(rsp->OutputBufferOffset) + (char *)&rsp->hdr);
/* validate the region before trusting the fields we just pointed at */
2443 rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
2444 le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
2445 sizeof(struct smb2_fs_full_size_info));
2447 copy_fs_info_to_kstatfs(info, fsdata);
2450 free_rsp_buf(resp_buftype, iov.iov_base);
/*
 * Query one of three filesystem info levels (device, attribute, or sector
 * size) and cache the validated result in the tcon.
 */
2455 SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
2456 u64 persistent_fid, u64 volatile_fid, int level)
2458 struct smb2_query_info_rsp *rsp = NULL;
2461 int resp_buftype, max_len, min_len;
2462 struct cifs_ses *ses = tcon->ses;
2463 unsigned int rsp_len, offset;
/* per-level expected size bounds used for request build and validation */
2465 if (level == FS_DEVICE_INFORMATION) {
2466 max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
2467 min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
2468 } else if (level == FS_ATTRIBUTE_INFORMATION) {
2469 max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
2470 min_len = MIN_FS_ATTR_INFO_SIZE;
2471 } else if (level == FS_SECTOR_SIZE_INFORMATION) {
2472 max_len = sizeof(struct smb3_fs_ss_info);
2473 min_len = sizeof(struct smb3_fs_ss_info);
2475 cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
2479 rc = build_qfs_info_req(&iov, tcon, level, max_len,
2480 persistent_fid, volatile_fid);
2484 rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
2486 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
2489 rsp = (struct smb2_query_info_rsp *)iov.iov_base;
2491 rsp_len = le32_to_cpu(rsp->OutputBufferLength);
2492 offset = le16_to_cpu(rsp->OutputBufferOffset);
2493 rc = validate_buf(offset, rsp_len, &rsp->hdr, min_len);
2497 if (level == FS_ATTRIBUTE_INFORMATION)
2498 memcpy(&tcon->fsAttrInfo, 4 /* RFC1001 len */ + offset
2499 + (char *)&rsp->hdr, min_t(unsigned int,
2501 else if (level == FS_DEVICE_INFORMATION)
2502 memcpy(&tcon->fsDevInfo, 4 /* RFC1001 len */ + offset
2503 + (char *)&rsp->hdr, sizeof(FILE_SYSTEM_DEVICE_INFO));
2504 else if (level == FS_SECTOR_SIZE_INFORMATION) {
2505 struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
2506 (4 /* RFC1001 len */ + offset + (char *)&rsp->hdr);
2507 tcon->ss_flags = le32_to_cpu(ss_info->Flags);
2508 tcon->perf_sector_size =
2509 le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
2513 free_rsp_buf(resp_buftype, iov.iov_base);
/*
 * smb2_lockv - send a single SMB2_LOCK request carrying @num_lock lock
 * elements for the file identified by (@persist_fid, @volatile_fid).
 *
 * @pid is placed in the PDU header as the owning process id. @buf points
 * to the caller-supplied array of smb2_lock_element entries; it is sent
 * as a second iovec so the array is not copied into the request buffer.
 *
 * NOTE(review): the rc/iov/count declarations and the error/exit path are
 * elided in this view.
 */
2518 smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
2519 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
2520 const __u32 num_lock, struct smb2_lock_element *buf)
2523 struct smb2_lock_req *req = NULL;
2528 cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
	/* allocate and initialize a small SMB2_LOCK request PDU */
2530 rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req);
2534 req->hdr.ProcessId = cpu_to_le32(pid);
2535 req->LockCount = cpu_to_le16(num_lock);
	/* file ids are already little-endian on-the-wire values */
2537 req->PersistentFileId = persist_fid;
2538 req->VolatileFileId = volatile_fid;
2540 count = num_lock * sizeof(struct smb2_lock_element);
	/*
	 * The request template already accounts for one lock element, so
	 * grow the RFC1001 length by the remaining (num_lock - 1) elements.
	 */
2541 inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element));
	/* iov[0]: fixed request header; iov[1]: the lock element array */
2543 iov[0].iov_base = (char *)req;
2544 /* 4 for rfc1002 length field and count for all locks */
2545 iov[0].iov_len = get_rfc1002_length(req) + 4 - count;
2546 iov[1].iov_base = (char *)buf;
2547 iov[1].iov_len = count;
2549 cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
	/* CIFS_NO_RESP: we do not need the response body, only the status */
2550 rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP);
2552 cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
2553 cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
/*
 * SMB2_lock - convenience wrapper around smb2_lockv() for a single byte
 * range (@offset, @length) with the given @lock_flags.
 *
 * If the (elided in this view) wait flag is false and this is not an
 * unlock, SMB2_LOCKFLAG_FAIL_IMMEDIATELY is set so the server fails the
 * request instead of blocking on a conflicting lock.
 */
2560 SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
2561 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
2562 const __u64 length, const __u64 offset, const __u32 lock_flags,
	/* build the single on-the-wire lock element (little-endian fields) */
2565 struct smb2_lock_element lock;
2567 lock.Offset = cpu_to_le64(offset);
2568 lock.Length = cpu_to_le64(length);
2569 lock.Flags = cpu_to_le32(lock_flags);
2570 if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
2571 lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
	/* delegate to the vectored path with a one-element array */
2573 return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
2577 SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
2578 __u8 *lease_key, const __le32 lease_state)
2581 struct smb2_lease_ack *req = NULL;
2583 cifs_dbg(FYI, "SMB2_lease_break\n");
2584 rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
2589 req->hdr.CreditRequest = cpu_to_le16(1);
2590 req->StructureSize = cpu_to_le16(36);
2591 inc_rfc1001_len(req, 12);
2593 memcpy(req->LeaseKey, lease_key, 16);
2594 req->LeaseState = lease_state;
2596 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
2597 /* SMB2 buffer freed by function above */
2600 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
2601 cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);