fs/ksmbd/connection.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Namjae Jeon <namjae.jeon@protocolfreedom.org>
 * Copyright (C) 2018 Samsung Electronics Co., Ltd.
 */

#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/module.h>

#include "server.h"
#include "smb_common.h"
#include "mgmt/ksmbd_ida.h"
#include "connection.h"
#include "transport_tcp.h"
#include "transport_rdma.h"

static DEFINE_MUTEX(init_lock);

static struct ksmbd_conn_ops default_conn_ops;

LIST_HEAD(conn_list);
DECLARE_RWSEM(conn_list_lock);

/**
 * ksmbd_conn_free() - free resources of the connection instance
 *
 * @conn:	connection instance to be cleaned up
 *
 * During thread termination, the corresponding conn instance resources
 * (sock/memory) are released and finally the conn object is freed.
 */
void ksmbd_conn_free(struct ksmbd_conn *conn)
{
	down_write(&conn_list_lock);
	list_del(&conn->conns_list);
	up_write(&conn_list_lock);

	xa_destroy(&conn->sessions);
	kvfree(conn->request_buf);
	kfree(conn->preauth_info);
	kfree(conn);
}

/**
 * ksmbd_conn_alloc() - initialize a new connection instance
 *
 * Return: ksmbd_conn struct on success, otherwise NULL
 */
struct ksmbd_conn *ksmbd_conn_alloc(void)
{
	struct ksmbd_conn *conn;

	conn = kzalloc(sizeof(struct ksmbd_conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	conn->need_neg = true;
	ksmbd_conn_set_new(conn);
	conn->local_nls = load_nls("utf8");
	if (!conn->local_nls)
		conn->local_nls = load_nls_default();
	if (IS_ENABLED(CONFIG_UNICODE))
		conn->um = utf8_load(UNICODE_AGE(12, 1, 0));
	else
		conn->um = ERR_PTR(-EOPNOTSUPP);
	if (IS_ERR(conn->um))
		conn->um = NULL;
	atomic_set(&conn->req_running, 0);
	atomic_set(&conn->r_count, 0);
	conn->total_credits = 1;
	conn->outstanding_credits = 0;

	init_waitqueue_head(&conn->req_running_q);
	init_waitqueue_head(&conn->r_count_q);
	INIT_LIST_HEAD(&conn->conns_list);
	INIT_LIST_HEAD(&conn->requests);
	INIT_LIST_HEAD(&conn->async_requests);
	spin_lock_init(&conn->request_lock);
	spin_lock_init(&conn->credits_lock);
	ida_init(&conn->async_ida);
	xa_init(&conn->sessions);

	spin_lock_init(&conn->llist_lock);
	INIT_LIST_HEAD(&conn->lock_list);

	down_write(&conn_list_lock);
	list_add(&conn->conns_list, &conn_list);
	up_write(&conn_list_lock);
	return conn;
}

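/**
 * ksmbd_conn_lookup_dialect() - check whether a connection with the same ClientGUID exists
 * @c:	connection whose ClientGUID is compared against the global conn_list
 *
 * Return: true if a tracked connection carries the same ClientGUID as @c,
 * otherwise false
 */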
bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
{
	struct ksmbd_conn *t;
	bool ret = false;

	down_read(&conn_list_lock);
	list_for_each_entry(t, &conn_list, conns_list) {
		if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
			continue;

		ret = true;
		break;
	}
	up_read(&conn_list_lock);
	return ret;
}

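/**
 * ksmbd_conn_enqueue_request() - queue an smb work on its connection
 * @work:	smb work (request) to be queued
 *
 * Every command except SMB2_CANCEL is linked into conn->requests and
 * accounted in conn->req_running while it is being processed.
 */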
void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	struct list_head *requests_queue = NULL;

	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
		requests_queue = &conn->requests;

	if (requests_queue) {
		atomic_inc(&conn->req_running);
		spin_lock(&conn->request_lock);
		list_add_tail(&work->request_entry, requests_queue);
		spin_unlock(&conn->request_lock);
	}
}

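/**
 * ksmbd_conn_try_dequeue_request() - unlink an smb work from its connection
 * @work:	smb work to be dequeued
 *
 * Return: 0 if the work was dequeued or was never queued, 1 if it stays
 * queued because a multi-response command is still in progress
 */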
int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	int ret = 1;

	if (list_empty(&work->request_entry) &&
	    list_empty(&work->async_request_entry))
		return 0;

	if (!work->multiRsp)
		atomic_dec(&conn->req_running);
	if (!work->multiRsp) {
		spin_lock(&conn->request_lock);
		list_del_init(&work->request_entry);
		spin_unlock(&conn->request_lock);
		if (work->asynchronous)
			release_async_work(work);
		ret = 0;
	}

	wake_up_all(&conn->req_running_q);
	return ret;
}

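/* Serialize work on a connection through conn->srv_mutex. */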
void ksmbd_conn_lock(struct ksmbd_conn *conn)
{
	mutex_lock(&conn->srv_mutex);
}

void ksmbd_conn_unlock(struct ksmbd_conn *conn)
{
	mutex_unlock(&conn->srv_mutex);
}

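/**
 * ksmbd_all_conn_set_status() - set the status of every connection tied to a session
 * @sess_id:	id of the session whose connections should be updated
 * @status:	new value for conn->status
 *
 * Connections that are currently binding, or that have @sess_id in their
 * session table, get their status updated.
 */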
void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
{
	struct ksmbd_conn *conn;

	down_read(&conn_list_lock);
	list_for_each_entry(conn, &conn_list, conns_list) {
		if (conn->binding || xa_load(&conn->sessions, sess_id))
			WRITE_ONCE(conn->status, status);
	}
	up_read(&conn_list_lock);
}

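/**
 * ksmbd_conn_wait_idle() - wait until this and all bound connections go idle
 * @conn:	connection issuing the wait
 * @sess_id:	session id used to find other connections bound to the session
 *
 * Wait until @conn has at most one request running (the caller itself), then
 * wait for every other connection bound to @sess_id to finish its in-flight
 * requests.
 */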
void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id)
{
	struct ksmbd_conn *bind_conn;

	wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);

	down_read(&conn_list_lock);
	list_for_each_entry(bind_conn, &conn_list, conns_list) {
		if (bind_conn == conn)
			continue;

		if ((bind_conn->binding || xa_load(&bind_conn->sessions, sess_id)) &&
		    !ksmbd_conn_releasing(bind_conn) &&
		    atomic_read(&bind_conn->req_running)) {
			wait_event(bind_conn->req_running_q,
				   atomic_read(&bind_conn->req_running) == 0);
		}
	}
	up_read(&conn_list_lock);
}

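/**
 * ksmbd_conn_write() - send a response over the connection's transport
 * @work:	smb work carrying the response buffer(s) to transmit
 *
 * Build an iovec from the optional transform header, the response header and
 * any auxiliary payload, then hand it to the transport's writev op under the
 * connection lock.
 *
 * Return: 0 on success, otherwise error code
 */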
int ksmbd_conn_write(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	size_t len = 0;
	int sent;
	struct kvec iov[3];
	int iov_idx = 0;

	if (!work->response_buf) {
		pr_err("NULL response header\n");
		return -EINVAL;
	}

	if (work->tr_buf) {
		iov[iov_idx] = (struct kvec) { work->tr_buf,
				sizeof(struct smb2_transform_hdr) + 4 };
		len += iov[iov_idx++].iov_len;
	}

	if (work->aux_payload_sz) {
		iov[iov_idx] = (struct kvec) { work->response_buf, work->resp_hdr_sz };
		len += iov[iov_idx++].iov_len;
		iov[iov_idx] = (struct kvec) { work->aux_payload_buf, work->aux_payload_sz };
		len += iov[iov_idx++].iov_len;
	} else {
		if (work->tr_buf)
			iov[iov_idx].iov_len = work->resp_hdr_sz;
		else
			iov[iov_idx].iov_len = get_rfc1002_len(work->response_buf) + 4;
		iov[iov_idx].iov_base = work->response_buf;
		len += iov[iov_idx++].iov_len;
	}

	ksmbd_conn_lock(conn);
	sent = conn->transport->ops->writev(conn->transport, &iov[0],
					    iov_idx, len,
					    work->need_invalidate_rkey,
					    work->remote_key);
	ksmbd_conn_unlock(conn);

	if (sent < 0) {
		pr_err("Failed to send message: %d\n", sent);
		return sent;
	}

	return 0;
}

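/**
 * ksmbd_conn_rdma_read() - read data from the peer via the transport's RDMA op
 * @conn:	connection instance
 * @buf:	local buffer receiving the data
 * @buflen:	length of @buf
 * @desc:	buffer descriptor(s) passed through to the transport
 * @desc_len:	size of @desc, passed through to the transport
 *
 * Return: the transport's rdma_read result, or -EINVAL if the transport
 * does not support RDMA
 */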
int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
			 void *buf, unsigned int buflen,
			 struct smb2_buffer_desc_v1 *desc,
			 unsigned int desc_len)
{
	int ret = -EINVAL;

	if (conn->transport->ops->rdma_read)
		ret = conn->transport->ops->rdma_read(conn->transport,
						      buf, buflen,
						      desc, desc_len);
	return ret;
}

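/**
 * ksmbd_conn_rdma_write() - write data to the peer via the transport's RDMA op
 * @conn:	connection instance
 * @buf:	local buffer holding the data to send
 * @buflen:	length of @buf
 * @desc:	buffer descriptor(s) passed through to the transport
 * @desc_len:	size of @desc, passed through to the transport
 *
 * Return: the transport's rdma_write result, or -EINVAL if the transport
 * does not support RDMA
 */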
int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
			  void *buf, unsigned int buflen,
			  struct smb2_buffer_desc_v1 *desc,
			  unsigned int desc_len)
{
	int ret = -EINVAL;

	if (conn->transport->ops->rdma_write)
		ret = conn->transport->ops->rdma_write(conn->transport,
						       buf, buflen,
						       desc, desc_len);
	return ret;
}

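/**
 * ksmbd_conn_alive() - check whether the connection handler should keep running
 * @conn:	connection instance to check
 *
 * Return: false if the server is stopping, the connection is exiting, the
 * kthread was asked to stop, or the configured deadtime expired with no open
 * files; true otherwise
 */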
bool ksmbd_conn_alive(struct ksmbd_conn *conn)
{
	if (!ksmbd_server_running())
		return false;

	if (ksmbd_conn_exiting(conn))
		return false;

	if (kthread_should_stop())
		return false;

	if (atomic_read(&conn->stats.open_files_count) > 0)
		return true;

	/*
	 * Stop the current session if the time since the last client request
	 * exceeds the user-configured deadtime and no files are open.
	 */
	if (server_conf.deadtime > 0 &&
	    time_after(jiffies, conn->last_active + server_conf.deadtime)) {
		ksmbd_debug(CONN, "No response from client in %lu minutes\n",
			    server_conf.deadtime / SMB_ECHO_INTERVAL);
		return false;
	}
	return true;
}

/**
 * ksmbd_conn_handler_loop() - session thread that listens for new SMB requests
 * @p: connection instance
 *
 * One thread per connection.
 *
 * Return: 0 on success
 */
int ksmbd_conn_handler_loop(void *p)
{
	struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
	struct ksmbd_transport *t = conn->transport;
	unsigned int pdu_size, max_allowed_pdu_size;
	char hdr_buf[4] = {0,};
	int size;

	mutex_init(&conn->srv_mutex);
	__module_get(THIS_MODULE);

	if (t->ops->prepare && t->ops->prepare(t))
		goto out;

	conn->last_active = jiffies;
	while (ksmbd_conn_alive(conn)) {
		if (try_to_freeze())
			continue;

		kvfree(conn->request_buf);
		conn->request_buf = NULL;

		size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
		if (size != sizeof(hdr_buf))
			break;

		pdu_size = get_rfc1002_len(hdr_buf);
		ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);

		if (ksmbd_conn_good(conn))
			max_allowed_pdu_size =
				SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
		else
			max_allowed_pdu_size = SMB3_MAX_MSGSIZE;

		if (pdu_size > max_allowed_pdu_size) {
			pr_err_ratelimited("PDU length(%u) exceeded maximum allowed pdu size(%u) on connection(%d)\n",
					   pdu_size, max_allowed_pdu_size,
					   READ_ONCE(conn->status));
			break;
		}

		/*
		 * Check the maximum PDU size (0x00FFFFFF).
		 */
		if (pdu_size > MAX_STREAM_PROT_LEN)
			break;

		/* 4 for rfc1002 length field */
		size = pdu_size + 4;
		conn->request_buf = kvmalloc(size, GFP_KERNEL);
		if (!conn->request_buf)
			break;

		memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));
		if (!ksmbd_smb_request(conn))
			break;

		/*
		 * We already read 4 bytes to find out the PDU size; now
		 * read in the rest of the PDU.
		 */
		size = t->ops->read(t, conn->request_buf + 4, pdu_size, 2);
		if (size < 0) {
			pr_err("sock_read failed: %d\n", size);
			break;
		}

		if (size != pdu_size) {
			pr_err("PDU error. Read: %d, Expected: %d\n",
			       size, pdu_size);
			continue;
		}

		if (!default_conn_ops.process_fn) {
			pr_err("No connection request callback\n");
			break;
		}

		if (default_conn_ops.process_fn(conn)) {
			pr_err("Cannot handle request\n");
			break;
		}
	}

out:
	ksmbd_conn_set_releasing(conn);
	/* Wait till all references to the server object are dropped */
	wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);

	if (IS_ENABLED(CONFIG_UNICODE))
		utf8_unload(conn->um);
	unload_nls(conn->local_nls);
	if (default_conn_ops.terminate_fn)
		default_conn_ops.terminate_fn(conn);
	t->ops->disconnect(t);
	module_put(THIS_MODULE);
	return 0;
}

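/**
 * ksmbd_conn_init_server_callbacks() - register connection callbacks
 * @ops:	callbacks used to process a received request (process_fn) and
 *		to clean up when the handler loop exits (terminate_fn)
 */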
void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
{
	default_conn_ops.process_fn = ops->process_fn;
	default_conn_ops.terminate_fn = ops->terminate_fn;
}

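/**
 * ksmbd_conn_transport_init() - bring up the TCP and RDMA transport layers
 *
 * Return: 0 on success, otherwise error code
 */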
int ksmbd_conn_transport_init(void)
{
	int ret;

	mutex_lock(&init_lock);
	ret = ksmbd_tcp_init();
	if (ret) {
		pr_err("Failed to init TCP subsystem: %d\n", ret);
		goto out;
	}

	ret = ksmbd_rdma_init();
	if (ret) {
		pr_err("Failed to init RDMA subsystem: %d\n", ret);
		goto out;
	}
out:
	mutex_unlock(&init_lock);
	return ret;
}

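/*
 * Ask every tracked connection to exit, shut down its transport, and keep
 * polling (every 100ms) until conn_list is empty.
 */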
static void stop_sessions(void)
{
	struct ksmbd_conn *conn;
	struct ksmbd_transport *t;

again:
	down_read(&conn_list_lock);
	list_for_each_entry(conn, &conn_list, conns_list) {
		struct task_struct *task;

		t = conn->transport;
		task = t->handler;
		if (task)
			ksmbd_debug(CONN, "Stop session handler %s/%d\n",
				    task->comm, task_pid_nr(task));
		ksmbd_conn_set_exiting(conn);
		if (t->ops->shutdown) {
			up_read(&conn_list_lock);
			t->ops->shutdown(t);
			down_read(&conn_list_lock);
		}
	}
	up_read(&conn_list_lock);

	if (!list_empty(&conn_list)) {
		schedule_timeout_interruptible(HZ / 10); /* 100ms */
		goto again;
	}
}

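/**
 * ksmbd_conn_transport_destroy() - tear down the transports and stop all sessions
 */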
void ksmbd_conn_transport_destroy(void)
{
	mutex_lock(&init_lock);
	ksmbd_tcp_destroy();
	ksmbd_rdma_destroy();
	stop_sessions();
	mutex_unlock(&init_lock);
}