1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* In-kernel rxperf server for testing purposes.
4 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
 */
8 #define pr_fmt(fmt) "rxperf: " fmt
9 #include <linux/module.h>
10 #include <linux/slab.h>
12 #include <net/af_rxrpc.h>
13 #define RXRPC_TRACE_ONLY_DEFINE_ENUMS
14 #include <trace/events/rxrpc.h>
16 MODULE_DESCRIPTION("rxperf test server (afs)");
17 MODULE_AUTHOR("Red Hat, Inc.");
18 MODULE_LICENSE("GPL");
/* Protocol constants for the rxperf test service. */
20 #define RXPERF_PORT 7009	/* UDP port the server binds to */
21 #define RX_PERF_SERVICE 147	/* rx service ID served here */
22 #define RX_PERF_VERSION 3	/* Parameter-block version accepted */
/* Operation IDs carried in the parameter block (see rxperf_deliver_param_block). */
23 #define RX_PERF_SEND 0
24 #define RX_PERF_RECV 1
26 #define RX_PERF_FILE 4
27 #define RX_PERF_MAGIC_COOKIE 0x4711
/* Parameter block received at the start of each call.
 * NOTE(review): the struct body is elided in this extract; version and type
 * fields are read elsewhere — confirm full layout against the original file.
 */
29 struct rxperf_proto_params {
/* Trailer appended to each reply (see rxperf_process_call). */
36 static const u8 rxperf_magic_cookie[] = { 0x00, 0x00, 0x47, 0x11 };
/* Shared secret used to build the server security key (see rxperf_add_key). */
37 static const u8 secret[8] = { 0xa7, 0x83, 0x8a, 0xcb, 0xc7, 0x83, 0xec, 0x94 };
/* State machine for a server-side rxperf call.
 * NOTE(review): closing brace of the enum is elided in this extract.
 */
39 enum rxperf_call_state {
40 RXPERF_CALL_SV_AWAIT_PARAMS, /* Server: Awaiting parameter block */
41 RXPERF_CALL_SV_AWAIT_REQUEST, /* Server: Awaiting request data */
42 RXPERF_CALL_SV_REPLYING, /* Server: Replying */
43 RXPERF_CALL_SV_AWAIT_ACK, /* Server: Awaiting final ACK */
44 RXPERF_CALL_COMPLETE, /* Completed or failed */
/* Per-call record (struct rxperf_call; header line elided in this extract). */
48 struct rxrpc_call *rxcall; /* Underlying rxrpc call handle (set in rxperf_rx_attach) */
51 struct work_struct work; /* Work item driving rxperf_deliver_to_call() */
54 size_t req_len; /* Size of request blob */
55 size_t reply_len; /* Size of reply blob */
56 unsigned int debug_id; /* Unique ID from rxrpc_debug_id, for logging */
57 unsigned int operation_id; /* RX_PERF_* op taken from the parameter block */
58 struct rxperf_proto_params params; /* Parameter block received from the client */
61 enum rxperf_call_state state; /* Current position in the call state machine */
63 unsigned short unmarshal; /* Unmarshalling phase within the current deliver step */
65 int (*deliver)(struct rxperf_call *call); /* Next data-delivery step for this call */
/* NOTE(review): no user of ->processor is visible in this extract — confirm. */
66 void (*processor)(struct work_struct *work);
static struct socket *rxperf_socket; /* The server's AF_RXRPC socket */
70 static struct key *rxperf_sec_keyring; /* Ring of security/crypto keys */
71 static struct workqueue_struct *rxperf_workqueue; /* Runs call work and preallocation */
/* Forward declarations. */
73 static void rxperf_deliver_to_call(struct work_struct *work);
74 static int rxperf_deliver_param_block(struct rxperf_call *call);
75 static int rxperf_deliver_request(struct rxperf_call *call);
76 static int rxperf_process_call(struct rxperf_call *call);
77 static void rxperf_charge_preallocation(struct work_struct *work);
/* Work item used to top up the incoming-call preallocation pool. */
79 static DECLARE_WORK(rxperf_charge_preallocation_work,
80 rxperf_charge_preallocation);
/* Move @call to state @to.
 * NOTE(review): function body is elided in this extract.
 */
82 static inline void rxperf_set_call_state(struct rxperf_call *call,
83 enum rxperf_call_state to)
/* Mark @call complete, recording any remote abort code; a no-op if the call
 * has already completed.
 * NOTE(review): the line storing @error and any barriers are elided in this
 * extract — confirm against the original file.
 */
88 static inline void rxperf_set_call_complete(struct rxperf_call *call,
89 int error, s32 remote_abort)
91 if (call->state != RXPERF_CALL_COMPLETE) {
92 call->abort_code = remote_abort;
94 call->state = RXPERF_CALL_COMPLETE;
/* rxrpc callback: a preallocated call is being discarded; free the
 * rxperf_call record we stashed in the user call ID.
 */
98 static void rxperf_rx_discard_new_call(struct rxrpc_call *rxcall,
99 unsigned long user_call_ID)
101 kfree((struct rxperf_call *)user_call_ID);
/* rxrpc callback: a new call arrived; schedule work to replenish the
 * preallocation pool it consumed.
 */
104 static void rxperf_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall,
105 unsigned long user_call_ID)
107 queue_work(rxperf_workqueue, &rxperf_charge_preallocation_work);
/* Queue @call's work item on the rxperf workqueue. */
110 static void rxperf_queue_call_work(struct rxperf_call *call)
112 queue_work(rxperf_workqueue, &call->work);
/* rxrpc callback: data awaits on the call; kick its work item unless the
 * call has already completed.
 */
115 static void rxperf_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
116 unsigned long call_user_ID)
118 struct rxperf_call *call = (struct rxperf_call *)call_user_ID;
120 if (call->state != RXPERF_CALL_COMPLETE)
121 rxperf_queue_call_work(call);
/* rxrpc callback: a preallocated call record has been attached to an
 * incoming call; remember the rxrpc handle on our record.
 */
124 static void rxperf_rx_attach(struct rxrpc_call *rxcall, unsigned long user_call_ID)
126 struct rxperf_call *call = (struct rxperf_call *)user_call_ID;
128 call->rxcall = rxcall;
/* rxrpc callback: the reply has been fully transmitted; move the call to
 * awaiting the client's final ACK.
 */
131 static void rxperf_notify_end_reply_tx(struct sock *sock,
132 struct rxrpc_call *rxcall,
133 unsigned long call_user_ID)
135 rxperf_set_call_state((struct rxperf_call *)call_user_ID,
136 RXPERF_CALL_SV_AWAIT_ACK);
/*
 * Charge the incoming call preallocation: allocate and initialise call
 * records and hand them to rxrpc to back future incoming calls.
 * NOTE(review): the surrounding loop, allocation-failure check and braces
 * are elided in this extract.
 */
142 static void rxperf_charge_preallocation(struct work_struct *work)
144 struct rxperf_call *call;
147 call = kzalloc(sizeof(*call), GFP_KERNEL);
151 call->type = "unset";
152 call->debug_id = atomic_inc_return(&rxrpc_debug_id);
/* First delivery step: read the parameter block. */
153 call->deliver = rxperf_deliver_param_block;
154 call->state = RXPERF_CALL_SV_AWAIT_PARAMS;
155 call->service_id = RX_PERF_SERVICE;
/* Point the iterator at call->params so the block lands there. */
156 call->iov_len = sizeof(call->params);
157 call->kvec[0].iov_len = sizeof(call->params);
158 call->kvec[0].iov_base = &call->params;
159 iov_iter_kvec(&call->iter, READ, call->kvec, 1, call->iov_len);
160 INIT_WORK(&call->work, rxperf_deliver_to_call);
/* Hand the record to rxrpc (remaining arguments elided in this extract). */
162 if (rxrpc_kernel_charge_accept(rxperf_socket,
/*
 * Open an rxrpc socket and bind it to be a server for callback notifications
 * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
 * NOTE(review): error-check lines and goto labels are elided in this extract.
 */
179 static int rxperf_open_socket(void)
181 struct sockaddr_rxrpc srx;
182 struct socket *socket;
/* Create a kernel AF_RXRPC socket in the initial network namespace. */
185 ret = sock_create_kern(&init_net, AF_RXRPC, SOCK_DGRAM, PF_INET6,
/* Socket allocations must not recurse into filesystem reclaim. */
190 socket->sk->sk_allocation = GFP_NOFS;
192 /* bind the callback manager's address to make this a server socket */
193 memset(&srx, 0, sizeof(srx));
194 srx.srx_family = AF_RXRPC;
195 srx.srx_service = RX_PERF_SERVICE;
196 srx.transport_type = SOCK_DGRAM;
197 srx.transport_len = sizeof(srx.transport.sin6);
198 srx.transport.sin6.sin6_family = AF_INET6;
199 srx.transport.sin6.sin6_port = htons(RXPERF_PORT);
/* Require at least encrypted security on accepted calls. */
201 ret = rxrpc_sock_set_min_security_level(socket->sk,
202 RXRPC_SECURITY_ENCRYPT);
/* Attach the keyring that rxrpc will search for server keys. */
206 ret = rxrpc_sock_set_security_keyring(socket->sk, rxperf_sec_keyring);
208 ret = kernel_bind(socket, (struct sockaddr *)&srx, sizeof(srx));
/* Register callbacks for new and discarded incoming calls. */
212 rxrpc_kernel_new_call_notification(socket, rxperf_rx_new_call,
213 rxperf_rx_discard_new_call);
215 ret = kernel_listen(socket, INT_MAX);
219 rxperf_socket = socket;
/* Prime the incoming-call preallocation pool synchronously. */
220 rxperf_charge_preallocation(&rxperf_charge_preallocation_work);
/* Error path: release the socket and report. */
224 sock_release(socket);
226 pr_err("Can't set up rxperf socket: %d\n", ret);
/*
 * Close the rxrpc socket rxperf was using: stop listening, shut the socket
 * down, drain any outstanding call work, then release it.
 */
233 static void rxperf_close_socket(void)
235 kernel_listen(rxperf_socket, 0);
236 kernel_sock_shutdown(rxperf_socket, SHUT_RDWR);
237 flush_workqueue(rxperf_workqueue);
238 sock_release(rxperf_socket);
/*
 * Log remote abort codes that indicate that we have a protocol disagreement
 * with the client, translating the code to a human-readable message.
 * NOTE(review): the default case and intervening lines are elided in this
 * extract.
 */
245 static void rxperf_log_error(struct rxperf_call *call, s32 remote_abort)
251 switch (remote_abort) {
252 case RX_EOF: msg = "unexpected EOF"; break;
253 case RXGEN_CC_MARSHAL: msg = "client marshalling"; break;
254 case RXGEN_CC_UNMARSHAL: msg = "client unmarshalling"; break;
255 case RXGEN_SS_MARSHAL: msg = "server marshalling"; break;
256 case RXGEN_SS_UNMARSHAL: msg = "server unmarshalling"; break;
257 case RXGEN_DECODE: msg = "opcode decode"; break;
258 case RXGEN_SS_XDRFREE: msg = "server XDR cleanup"; break;
259 case RXGEN_CC_XDRFREE: msg = "client XDR cleanup"; break;
260 case -32: msg = "insufficient data"; break;
268 pr_info("Peer reported %s failure on %s\n", msg, call->type);
/*
 * Deliver messages to a call: drive the state machine until the call blocks
 * waiting for data or completes, handling error/abort outcomes.
 * NOTE(review): switch labels, gotos and several braces are elided in this
 * extract — control-flow comments below are best-effort.
 */
275 static void rxperf_deliver_to_call(struct work_struct *work)
277 struct rxperf_call *call = container_of(work, struct rxperf_call, work);
278 enum rxperf_call_state state;
279 u32 abort_code, remote_abort = 0;
/* Nothing to do for an already-finished call. */
282 if (call->state == RXPERF_CALL_COMPLETE)
/* Loop while the call is in a state that awaits input or the final ACK. */
285 while (state = call->state,
286 state == RXPERF_CALL_SV_AWAIT_PARAMS ||
287 state == RXPERF_CALL_SV_AWAIT_REQUEST ||
288 state == RXPERF_CALL_SV_AWAIT_ACK
/* Whilst awaiting the final ACK, just poll the call's liveness. */
290 if (state == RXPERF_CALL_SV_AWAIT_ACK) {
291 if (!rxrpc_kernel_check_life(rxperf_socket, call->rxcall))
/* Run the current delivery step; process the call once fully delivered. */
296 ret = call->deliver(call);
298 ret = rxperf_process_call(call);
/* Error handling: log and abort with a code matching the failure. */
307 rxperf_log_error(call, call->abort_code);
310 abort_code = RXGEN_OPCODE;
311 rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
313 rxperf_abort_op_not_supported);
316 abort_code = RX_USER_ABORT;
317 rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
319 rxperf_abort_op_not_supported);
322 pr_err("Call %u in bad state %u\n",
323 call->debug_id, call->state);
/* Unmarshalling failures abort with RXGEN_SS_UNMARSHAL... */
330 rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
331 RXGEN_SS_UNMARSHAL, ret,
332 rxperf_abort_unmarshal_error);
/* ...anything else is a general error. */
335 rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
337 rxperf_abort_general_error);
/* Completion: record the outcome and detach from rxrpc. */
343 rxperf_set_call_complete(call, ret, remote_abort);
344 /* The call may have been requeued */
345 rxrpc_kernel_end_call(rxperf_socket, call->rxcall);
346 cancel_work(&call->work);
/*
 * Extract a piece of data from the received data socket buffers into the
 * call's iterator.  @want_more indicates whether further data is expected
 * after this piece.
 * NOTE(review): return statements and some braces are elided in this
 * extract.
 */
353 static int rxperf_extract_data(struct rxperf_call *call, bool want_more)
355 u32 remote_abort = 0;
358 ret = rxrpc_kernel_recv_data(rxperf_socket, call->rxcall, &call->iter,
359 &call->iov_len, want_more, &remote_abort,
361 pr_debug("Extract i=%zu l=%zu m=%u ret=%d\n",
362 iov_iter_count(&call->iter), call->iov_len, want_more, ret);
/* 0 = got what we wanted; -EAGAIN = more will arrive later. */
363 if (ret == 0 || ret == -EAGAIN)
/* All data received: advance the state machine accordingly. */
367 switch (call->state) {
368 case RXPERF_CALL_SV_AWAIT_REQUEST:
369 rxperf_set_call_state(call, RXPERF_CALL_SV_REPLYING);
371 case RXPERF_CALL_COMPLETE:
372 pr_debug("premature completion %d", call->error);
/* Receive error: complete the call with the remote abort code. */
380 rxperf_set_call_complete(call, ret, remote_abort);
/*
 * Grab the operation ID from an incoming manager call: read the parameter
 * block, validate the version and decide how many extra size bytes to
 * expect for the selected operation.
 * NOTE(review): case labels and error returns are elided in this extract.
 */
387 static int rxperf_deliver_param_block(struct rxperf_call *call)
392 /* Extract the parameter block */
393 ret = rxperf_extract_data(call, true);
397 version = ntohl(call->params.version);
398 call->operation_id = ntohl(call->params.type);
/* Next delivery step: the request data itself. */
399 call->deliver = rxperf_deliver_request;
401 if (version != RX_PERF_VERSION) {
402 pr_info("Version mismatch %x\n", version);
/* Per-operation size words that follow the parameter block. */
406 switch (call->operation_id) {
410 call->iov_len = 4; /* Expect req size */
415 call->iov_len = 4; /* Expect reply size */
419 call->iov_len = 8; /* Expect req size and reply size */
/* Tail-call straight into request delivery. */
428 rxperf_set_call_state(call, RXPERF_CALL_SV_AWAIT_REQUEST);
429 return call->deliver(call);
/*
 * Deliver the request data: first unmarshal the size word(s) into the
 * call's scratch buffer, then discard the request payload itself.
 * ->unmarshal tracks which phase we are in across -EAGAIN restarts.
 * NOTE(review): case labels, returns and phase increments are elided in
 * this extract.
 */
435 static int rxperf_deliver_request(struct rxperf_call *call)
439 switch (call->unmarshal) {
/* Phase: point the iterator at the scratch buffer for the size words. */
441 call->kvec[0].iov_len = call->iov_len;
442 call->kvec[0].iov_base = call->tmp;
443 iov_iter_kvec(&call->iter, READ, call->kvec, 1, call->iov_len);
447 ret = rxperf_extract_data(call, true);
/* Decode the size words according to the operation. */
451 switch (call->operation_id) {
454 call->req_len = ntohl(call->tmp[0]);
460 call->reply_len = ntohl(call->tmp[0]);
464 call->req_len = ntohl(call->tmp[0]);
465 call->reply_len = ntohl(call->tmp[1]);
468 pr_info("Can't parse extra params\n");
472 pr_debug("CALL op=%s rq=%zx rp=%zx\n",
473 call->type, call->req_len, call->reply_len);
/* Phase: throw away the request payload without copying it. */
475 call->iov_len = call->req_len;
476 iov_iter_discard(&call->iter, READ, call->req_len);
480 ret = rxperf_extract_data(call, false);
/*
 * Process a call for which we've received the request: transmit a reply of
 * reply_len zero bytes followed by the magic cookie trailer.
 * NOTE(review): send-error checks and the declarations of bv/iov/n are
 * elided in this extract.
 */
493 static int rxperf_process_call(struct rxperf_call *call)
495 struct msghdr msg = {};
499 size_t reply_len = call->reply_len, len;
/* Tell rxrpc the total reply size up front. */
501 rxrpc_kernel_set_tx_length(rxperf_socket, call->rxcall,
502 reply_len + sizeof(rxperf_magic_cookie));
/* Stream the zero-filled body a page at a time, flagged MSG_MORE. */
504 while (reply_len > 0) {
505 len = min_t(size_t, reply_len, PAGE_SIZE);
506 bvec_set_page(&bv, ZERO_PAGE(0), len, 0);
507 iov_iter_bvec(&msg.msg_iter, WRITE, &bv, 1, len);
508 msg.msg_flags = MSG_MORE;
509 n = rxrpc_kernel_send_data(rxperf_socket, call->rxcall, &msg,
510 len, rxperf_notify_end_reply_tx);
/* Finish with the magic cookie (no MSG_MORE: ends the reply). */
518 len = sizeof(rxperf_magic_cookie);
519 iov[0].iov_base = (void *)rxperf_magic_cookie;
520 iov[0].iov_len = len;
521 iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
523 n = rxrpc_kernel_send_data(rxperf_socket, call->rxcall, &msg, len,
524 rxperf_notify_end_reply_tx);
526 return 0; /* Success */
/* Error path: abort the call as a server marshalling failure. */
529 rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
530 RXGEN_SS_MARSHAL, -ENOMEM,
/*
 * Add a key to the security keyring: create/update a server key described
 * as "<service>:2" carrying the shared secret, then link it into @keyring.
 * NOTE(review): the key type and payload arguments to
 * key_create_or_update(), and the key_ref_put/return lines, are elided in
 * this extract.
 */
538 static int rxperf_add_key(struct key *keyring)
543 kref = key_create_or_update(make_key_ref(keyring, true),
545 __stringify(RX_PERF_SERVICE) ":2",
548 KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH
550 KEY_ALLOC_NOT_IN_QUOTA);
553 pr_err("Can't allocate rxperf server key: %ld\n", PTR_ERR(kref));
554 return PTR_ERR(kref);
557 ret = key_link(keyring, key_ref_to_ptr(kref));
559 pr_err("Can't link rxperf server key: %d\n", ret);
/*
 * Initialise the rxperf server: set up the workqueue, the security keyring
 * and key, then open the server socket.
 * NOTE(review): error-check lines and unwind labels are elided in this
 * extract.
 */
567 static int __init rxperf_init(void)
572 pr_info("Server registering\n");
574 rxperf_workqueue = alloc_workqueue("rxperf", 0, 0);
575 if (!rxperf_workqueue)
576 goto error_workqueue;
/* Keyring holding the server security keys; searchable by possessor,
 * user and others for view/read/search.
 */
578 keyring = keyring_alloc("rxperf_server",
579 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
580 KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH |
582 KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH |
584 KEY_OTH_VIEW | KEY_OTH_READ | KEY_OTH_SEARCH,
585 KEY_ALLOC_NOT_IN_QUOTA,
587 if (IS_ERR(keyring)) {
588 pr_err("Can't allocate rxperf server keyring: %ld\n",
592 rxperf_sec_keyring = keyring;
593 ret = rxperf_add_key(keyring);
597 ret = rxperf_open_socket();
/* Unwind on failure: drop the keyring, then the workqueue. */
604 key_put(rxperf_sec_keyring);
606 destroy_workqueue(rxperf_workqueue);
609 pr_err("Failed to register: %d\n", ret);
612 late_initcall(rxperf_init); /* Must be called after net/ to create socket */
/* Tear down the rxperf server in reverse order of initialisation. */
614 static void __exit rxperf_exit(void)
616 pr_info("Server unregistering.\n");
618 rxperf_close_socket();
619 key_put(rxperf_sec_keyring);
620 destroy_workqueue(rxperf_workqueue);
623 module_exit(rxperf_exit);