Commit | Line | Data |
---|---|---|
09c434b8 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
eac3731b | 2 | /* |
eac3731b JH |
3 | * IUCV protocol stack for Linux on zSeries |
4 | * | |
c23cad92 | 5 | * Copyright IBM Corp. 2006, 2009 |
eac3731b JH |
6 | * |
7 | * Author(s): Jennifer Hunt <jenhunt@us.ibm.com> | |
c23cad92 UB |
8 | * Hendrik Brueckner <brueckner@linux.vnet.ibm.com> |
9 | * PM functions: | |
10 | * Ursula Braun <ursula.braun@de.ibm.com> | |
eac3731b JH |
11 | */ |
12 | ||
8f7c502c UB |
13 | #define KMSG_COMPONENT "af_iucv" |
14 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | |
15 | ||
b6459415 | 16 | #include <linux/filter.h> |
eac3731b | 17 | #include <linux/module.h> |
238965b7 | 18 | #include <linux/netdevice.h> |
eac3731b | 19 | #include <linux/types.h> |
0d1c7664 | 20 | #include <linux/limits.h> |
eac3731b JH |
21 | #include <linux/list.h> |
22 | #include <linux/errno.h> | |
23 | #include <linux/kernel.h> | |
174cd4b1 | 24 | #include <linux/sched/signal.h> |
eac3731b JH |
25 | #include <linux/slab.h> |
26 | #include <linux/skbuff.h> | |
27 | #include <linux/init.h> | |
28 | #include <linux/poll.h> | |
02f06918 | 29 | #include <linux/security.h> |
eac3731b | 30 | #include <net/sock.h> |
52109a06 | 31 | #include <asm/machine.h> |
eac3731b JH |
32 | #include <asm/ebcdic.h> |
33 | #include <asm/cpcmd.h> | |
34 | #include <linux/kmod.h> | |
35 | ||
eac3731b JH |
36 | #include <net/iucv/af_iucv.h> |
37 | ||
3881ac44 | 38 | #define VERSION "1.2" |
eac3731b JH |
39 | |
40 | static char iucv_userid[80]; | |
41 | ||
eac3731b JH |
42 | static struct proto iucv_proto = { |
43 | .name = "AF_IUCV", | |
44 | .owner = THIS_MODULE, | |
45 | .obj_size = sizeof(struct iucv_sock), | |
46 | }; | |
47 | ||
6fcd61f7 | 48 | static struct iucv_interface *pr_iucv; |
87c272c6 | 49 | static struct iucv_handler af_iucv_handler; |
6fcd61f7 | 50 | |
b8942e3b HB |
51 | /* special AF_IUCV IPRM messages */ |
52 | static const u8 iprm_shutdown[8] = | |
53 | {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}; | |
54 | ||
c593642c | 55 | #define TRGCLS_SIZE sizeof_field(struct iucv_message, class) |
44b1e6b5 | 56 | |
0ea920d2 HB |
57 | #define __iucv_sock_wait(sk, condition, timeo, ret) \ |
58 | do { \ | |
59 | DEFINE_WAIT(__wait); \ | |
60 | long __timeo = timeo; \ | |
61 | ret = 0; \ | |
aa395145 | 62 | prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \ |
0ea920d2 | 63 | while (!(condition)) { \ |
0ea920d2 HB |
64 | if (!__timeo) { \ |
65 | ret = -EAGAIN; \ | |
66 | break; \ | |
67 | } \ | |
68 | if (signal_pending(current)) { \ | |
69 | ret = sock_intr_errno(__timeo); \ | |
70 | break; \ | |
71 | } \ | |
72 | release_sock(sk); \ | |
73 | __timeo = schedule_timeout(__timeo); \ | |
74 | lock_sock(sk); \ | |
75 | ret = sock_error(sk); \ | |
76 | if (ret) \ | |
77 | break; \ | |
78 | } \ | |
aa395145 | 79 | finish_wait(sk_sleep(sk), &__wait); \ |
0ea920d2 HB |
80 | } while (0) |
81 | ||
82 | #define iucv_sock_wait(sk, condition, timeo) \ | |
83 | ({ \ | |
84 | int __ret = 0; \ | |
85 | if (!(condition)) \ | |
86 | __iucv_sock_wait(sk, condition, timeo, __ret); \ | |
87 | __ret; \ | |
88 | }) | |
44b1e6b5 | 89 | |
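The two macros above are this file's blocking-wait helper: the caller, holding the socket lock, sleeps until the condition becomes true, the timeout expires, a signal arrives, or a socket error is pending, dropping and re-taking the lock around each sleep. A minimal sketch of how the file itself invokes them (mirroring the call site in iucv_sock_connect() further down; iucv_sock_in_state() is defined just below):

```c
/* Sketch only: with the socket locked, wait until the connection is
 * either established or torn down; timeo is derived from the socket's
 * send timeout.  This mirrors the existing call in iucv_sock_connect().
 */
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
err = iucv_sock_wait(sk,
		     iucv_sock_in_state(sk, IUCV_CONNECTED, IUCV_DISCONN),
		     timeo);
```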
e9a36ca5 JW |
90 | static struct sock *iucv_accept_dequeue(struct sock *parent, |
91 | struct socket *newsock); | |
57f20448 HC |
92 | static void iucv_sock_kill(struct sock *sk); |
93 | static void iucv_sock_close(struct sock *sk); | |
94 | ||
80bc97aa | 95 | static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify); |
3881ac44 | 96 | |
eac3731b | 97 | static struct iucv_sock_list iucv_sk_list = { |
3db8ce35 | 98 | .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock), |
eac3731b JH |
99 | .autobind_name = ATOMIC_INIT(0) |
100 | }; | |
101 | ||
eac3731b JH |
102 | static inline void high_nmcpy(unsigned char *dst, char *src) |
103 | { | |
104 | memcpy(dst, src, 8); | |
105 | } | |
106 | ||
107 | static inline void low_nmcpy(unsigned char *dst, char *src) | |
108 | { | |
109 | memcpy(&dst[8], src, 8); | |
110 | } | |
111 | ||
b8942e3b HB |
112 | /** |
113 | * iucv_msg_length() - Returns the length of an iucv message. | |
114 | * @msg: Pointer to struct iucv_message, MUST NOT be NULL | |
115 | * | |
116 | * The function returns the length of the specified iucv message @msg, whether | 
117 | * the data is stored in a buffer or in the parameter list (PRMDATA). | 
118 | * | |
119 | * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket | |
120 | * data: | |
121 | * PRMDATA[0..6] socket data (max 7 bytes); | |
122 | * PRMDATA[7] socket data length value (len is 0xff - PRMDATA[7]) | |
123 | * | |
25985edc | 124 | * The socket data length is computed by subtracting the socket data length |
b8942e3b HB |
125 | * value from 0xFF. |
126 | * If the socket data length is greater than 7, then PRMDATA can be used for | 
127 | * special notifications (see iucv_sock_shutdown); in that case, | 
128 | * the function returns 8. | 
129 | * | |
130 | * Use this function to allocate socket buffers to store iucv message data. | |
131 | */ | |
132 | static inline size_t iucv_msg_length(struct iucv_message *msg) | |
133 | { | |
134 | size_t datalen; | |
135 | ||
136 | if (msg->flags & IUCV_IPRMDATA) { | |
137 | datalen = 0xff - msg->rmmsg[7]; | |
138 | return (datalen < 8) ? datalen : 8; | |
139 | } | |
140 | return msg->length; | |
141 | } | |
142 | ||
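To make the IPRM convention documented above concrete, here is a small worked example of the length byte (a sketch, not kernel code): the sender stores 0xff minus the payload length in PRMDATA[7] (see iucv_send_iprm() later in this file), and iucv_msg_length() reverses that, clamping anything larger than 7 to 8.

```c
/* Worked example of the PRMDATA length encoding.
 * Sender places 5 bytes of socket data in the parameter list:
 *   PRMDATA[0..4] = payload, PRMDATA[7] = 0xff - 5 = 0xfa
 * Receiver (iucv_msg_length) recovers the length:
 *   datalen = 0xff - 0xfa = 5
 * A decoded value greater than 7 is clamped to 8 and indicates a
 * special notification such as iprm_shutdown.
 */
u8 prmdata[8] = { 'h', 'e', 'l', 'l', 'o', 0x00, 0x00, 0xff - 5 };
size_t datalen = 0xff - prmdata[7];	/* == 5 */
```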
0ea920d2 HB |
143 | /** |
144 | * iucv_sock_in_state() - check for specific states | |
145 | * @sk: sock structure | |
146 | * @state: first iucv sk state | |
7c8e1a91 | 147 | * @state2: second iucv sk state |
0ea920d2 HB |
148 | * |
149 | * Returns true if the socket is in either the first or the second state. | 
150 | */ | |
151 | static int iucv_sock_in_state(struct sock *sk, int state, int state2) | |
152 | { | |
153 | return (sk->sk_state == state || sk->sk_state == state2); | |
154 | } | |
155 | ||
156 | /** | |
157 | * iucv_below_msglim() - function to check if messages can be sent | |
158 | * @sk: sock structure | |
159 | * | |
160 | * Returns true if the send queue length is lower than the message limit. | |
161 | * Always returns true if the socket is not connected (no iucv path for | |
162 | * checking the message limit). | |
163 | */ | |
164 | static inline int iucv_below_msglim(struct sock *sk) | |
165 | { | |
166 | struct iucv_sock *iucv = iucv_sk(sk); | |
167 | ||
168 | if (sk->sk_state != IUCV_CONNECTED) | |
169 | return 1; | |
3881ac44 | 170 | if (iucv->transport == AF_IUCV_TRANS_IUCV) |
ef6af7bd | 171 | return (atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim); |
3881ac44 UB |
172 | else |
173 | return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) && | |
174 | (atomic_read(&iucv->pendings) <= 0)); | |
0ea920d2 HB |
175 | } |
176 | ||
7c8e1a91 | 177 | /* |
0ea920d2 HB |
178 | * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit |
179 | */ | |
180 | static void iucv_sock_wake_msglim(struct sock *sk) | |
181 | { | |
43815482 ED |
182 | struct socket_wq *wq; |
183 | ||
184 | rcu_read_lock(); | |
185 | wq = rcu_dereference(sk->sk_wq); | |
1ce0bf50 | 186 | if (skwq_has_sleeper(wq)) |
43815482 | 187 | wake_up_interruptible_all(&wq->wait); |
1abe267f | 188 | sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT); |
43815482 | 189 | rcu_read_unlock(); |
0ea920d2 HB |
190 | } |
191 | ||
7c8e1a91 | 192 | /* |
3881ac44 UB |
193 | * afiucv_hs_send() - send a message through HiperSockets transport |
194 | */ | |
195 | static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, | |
196 | struct sk_buff *skb, u8 flags) | |
197 | { | |
3881ac44 UB |
198 | struct iucv_sock *iucv = iucv_sk(sock); |
199 | struct af_iucv_trans_hdr *phs_hdr; | |
3881ac44 UB |
200 | int err, confirm_recv = 0; |
201 | ||
cd11d112 JW |
202 | phs_hdr = skb_push(skb, sizeof(*phs_hdr)); |
203 | memset(phs_hdr, 0, sizeof(*phs_hdr)); | |
3881ac44 | 204 | skb_reset_network_header(skb); |
3881ac44 UB |
205 | |
206 | phs_hdr->magic = ETH_P_AF_IUCV; | |
207 | phs_hdr->version = 1; | |
208 | phs_hdr->flags = flags; | |
209 | if (flags == AF_IUCV_FLAG_SYN) | |
210 | phs_hdr->window = iucv->msglimit; | |
211 | else if ((flags == AF_IUCV_FLAG_WIN) || !flags) { | |
212 | confirm_recv = atomic_read(&iucv->msg_recv); | |
213 | phs_hdr->window = confirm_recv; | |
214 | if (confirm_recv) | |
215 | phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN; | |
216 | } | |
217 | memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8); | |
218 | memcpy(phs_hdr->destAppName, iucv->dst_name, 8); | |
219 | memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8); | |
220 | memcpy(phs_hdr->srcAppName, iucv->src_name, 8); | |
221 | ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID)); | |
222 | ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName)); | |
223 | ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID)); | |
224 | ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName)); | |
225 | if (imsg) | |
226 | memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message)); | |
227 | ||
800c5eb7 | 228 | skb->dev = iucv->hs_dev; |
b2f54394 JW |
229 | if (!skb->dev) { |
230 | err = -ENODEV; | |
231 | goto err_free; | |
232 | } | |
238965b7 JW |
233 | |
234 | dev_hard_header(skb, skb->dev, ETH_P_AF_IUCV, NULL, NULL, skb->len); | |
235 | ||
b2f54394 JW |
236 | if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) { |
237 | err = -ENETDOWN; | |
238 | goto err_free; | |
239 | } | |
3881ac44 | 240 | if (skb->len > skb->dev->mtu) { |
b2f54394 JW |
241 | if (sock->sk_type == SOCK_SEQPACKET) { |
242 | err = -EMSGSIZE; | |
243 | goto err_free; | |
244 | } | |
2c3b4456 JW |
245 | err = pskb_trim(skb, skb->dev->mtu); |
246 | if (err) | |
247 | goto err_free; | |
3881ac44 | 248 | } |
2e56c26b | 249 | skb->protocol = cpu_to_be16(ETH_P_AF_IUCV); |
238965b7 | 250 | |
ef6af7bd | 251 | atomic_inc(&iucv->skbs_in_xmit); |
3881ac44 | 252 | err = dev_queue_xmit(skb); |
800c5eb7 | 253 | if (net_xmit_eval(err)) { |
ef6af7bd | 254 | atomic_dec(&iucv->skbs_in_xmit); |
3881ac44 UB |
255 | } else { |
256 | atomic_sub(confirm_recv, &iucv->msg_recv); | |
257 | WARN_ON(atomic_read(&iucv->msg_recv) < 0); | |
258 | } | |
800c5eb7 | 259 | return net_xmit_eval(err); |
b2f54394 JW |
260 | |
261 | err_free: | |
262 | kfree_skb(skb); | |
263 | return err; | |
3881ac44 UB |
264 | } |
265 | ||
eac3731b JH |
266 | static struct sock *__iucv_get_sock_by_name(char *nm) |
267 | { | |
268 | struct sock *sk; | |
eac3731b | 269 | |
b67bfe0d | 270 | sk_for_each(sk, &iucv_sk_list.head) |
eac3731b JH |
271 | if (!memcmp(&iucv_sk(sk)->src_name, nm, 8)) |
272 | return sk; | |
273 | ||
274 | return NULL; | |
275 | } | |
276 | ||
277 | static void iucv_sock_destruct(struct sock *sk) | |
278 | { | |
279 | skb_queue_purge(&sk->sk_receive_queue); | |
82492a35 UB |
280 | skb_queue_purge(&sk->sk_error_queue); |
281 | ||
82492a35 UB |
282 | if (!sock_flag(sk, SOCK_DEAD)) { |
283 | pr_err("Attempt to release alive iucv socket %p\n", sk); | |
284 | return; | |
285 | } | |
286 | ||
287 | WARN_ON(atomic_read(&sk->sk_rmem_alloc)); | |
b2c9c5df | 288 | WARN_ON(refcount_read(&sk->sk_wmem_alloc)); |
82492a35 UB |
289 | WARN_ON(sk->sk_wmem_queued); |
290 | WARN_ON(sk->sk_forward_alloc); | |
eac3731b JH |
291 | } |
292 | ||
293 | /* Cleanup Listen */ | |
294 | static void iucv_sock_cleanup_listen(struct sock *parent) | |
295 | { | |
296 | struct sock *sk; | |
297 | ||
298 | /* Close non-accepted connections */ | |
299 | while ((sk = iucv_accept_dequeue(parent, NULL))) { | |
300 | iucv_sock_close(sk); | |
301 | iucv_sock_kill(sk); | |
302 | } | |
303 | ||
304 | parent->sk_state = IUCV_CLOSED; | |
eac3731b JH |
305 | } |
306 | ||
e9a36ca5 JW |
307 | static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk) |
308 | { | |
309 | write_lock_bh(&l->lock); | |
310 | sk_add_node(sk, &l->head); | |
311 | write_unlock_bh(&l->lock); | |
312 | } | |
313 | ||
314 | static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk) | |
315 | { | |
316 | write_lock_bh(&l->lock); | |
317 | sk_del_node_init(sk); | |
318 | write_unlock_bh(&l->lock); | |
319 | } | |
320 | ||
7514bab0 | 321 | /* Kill socket (only if zapped and orphaned) */ |
eac3731b JH |
322 | static void iucv_sock_kill(struct sock *sk) |
323 | { | |
324 | if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) | |
325 | return; | |
326 | ||
327 | iucv_sock_unlink(&iucv_sk_list, sk); | |
328 | sock_set_flag(sk, SOCK_DEAD); | |
329 | sock_put(sk); | |
330 | } | |
331 | ||
7d316b94 UB |
332 | /* Terminate an IUCV path */ |
333 | static void iucv_sever_path(struct sock *sk, int with_user_data) | |
334 | { | |
335 | unsigned char user_data[16]; | |
336 | struct iucv_sock *iucv = iucv_sk(sk); | |
337 | struct iucv_path *path = iucv->path; | |
338 | ||
f558120c AW |
339 | /* Whoever resets the path pointer must sever and free it. */ | 
340 | if (xchg(&iucv->path, NULL)) { | |
7d316b94 UB |
341 | if (with_user_data) { |
342 | low_nmcpy(user_data, iucv->src_name); | |
343 | high_nmcpy(user_data, iucv->dst_name); | |
344 | ASCEBC(user_data, sizeof(user_data)); | |
345 | pr_iucv->path_sever(path, user_data); | |
346 | } else | |
347 | pr_iucv->path_sever(path, NULL); | |
348 | iucv_path_free(path); | |
349 | } | |
350 | } | |
351 | ||
4e0ad322 | 352 | /* Send controlling flags through an IUCV socket for HIPER transport */ |
9fbd87d4 UB |
353 | static int iucv_send_ctrl(struct sock *sk, u8 flags) |
354 | { | |
238965b7 | 355 | struct iucv_sock *iucv = iucv_sk(sk); |
9fbd87d4 UB |
356 | int err = 0; |
357 | int blen; | |
358 | struct sk_buff *skb; | |
4e0ad322 | 359 | u8 shutdown = 0; |
9fbd87d4 | 360 | |
238965b7 JW |
361 | blen = sizeof(struct af_iucv_trans_hdr) + |
362 | LL_RESERVED_SPACE(iucv->hs_dev); | |
4e0ad322 UB |
363 | if (sk->sk_shutdown & SEND_SHUTDOWN) { |
364 | /* controlling flags should be sent anyway */ | |
365 | shutdown = sk->sk_shutdown; | |
366 | sk->sk_shutdown &= RCV_SHUTDOWN; | |
367 | } | |
9fbd87d4 UB |
368 | skb = sock_alloc_send_skb(sk, blen, 1, &err); |
369 | if (skb) { | |
370 | skb_reserve(skb, blen); | |
371 | err = afiucv_hs_send(NULL, sk, skb, flags); | |
372 | } | |
4e0ad322 UB |
373 | if (shutdown) |
374 | sk->sk_shutdown = shutdown; | |
9fbd87d4 UB |
375 | return err; |
376 | } | |
377 | ||
eac3731b JH |
378 | /* Close an IUCV socket */ |
379 | static void iucv_sock_close(struct sock *sk) | |
380 | { | |
eac3731b | 381 | struct iucv_sock *iucv = iucv_sk(sk); |
561e0360 | 382 | unsigned long timeo; |
800c5eb7 | 383 | int err = 0; |
eac3731b | 384 | |
eac3731b JH |
385 | lock_sock(sk); |
386 | ||
da99f056 | 387 | switch (sk->sk_state) { |
eac3731b JH |
388 | case IUCV_LISTEN: |
389 | iucv_sock_cleanup_listen(sk); | |
390 | break; | |
391 | ||
392 | case IUCV_CONNECTED: | |
3881ac44 | 393 | if (iucv->transport == AF_IUCV_TRANS_HIPER) { |
9fbd87d4 | 394 | err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); |
3881ac44 UB |
395 | sk->sk_state = IUCV_DISCONN; |
396 | sk->sk_state_change(sk); | |
397 | } | |
df561f66 | 398 | fallthrough; |
05bba1ed GS |
399 | |
400 | case IUCV_DISCONN: | |
561e0360 JH |
401 | sk->sk_state = IUCV_CLOSING; |
402 | sk->sk_state_change(sk); | |
403 | ||
ef6af7bd | 404 | if (!err && atomic_read(&iucv->skbs_in_xmit) > 0) { |
561e0360 JH |
405 | if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) |
406 | timeo = sk->sk_lingertime; | |
407 | else | |
408 | timeo = IUCV_DISCONN_TIMEOUT; | |
9f6298a6 | 409 | iucv_sock_wait(sk, |
0ea920d2 HB |
410 | iucv_sock_in_state(sk, IUCV_CLOSED, 0), |
411 | timeo); | |
561e0360 | 412 | } |
df561f66 | 413 | fallthrough; |
561e0360 | 414 | |
05bba1ed | 415 | case IUCV_CLOSING: |
561e0360 JH |
416 | sk->sk_state = IUCV_CLOSED; |
417 | sk->sk_state_change(sk); | |
418 | ||
eac3731b JH |
419 | sk->sk_err = ECONNRESET; |
420 | sk->sk_state_change(sk); | |
421 | ||
800c5eb7 | 422 | skb_queue_purge(&iucv->send_skb_q); |
561e0360 | 423 | skb_queue_purge(&iucv->backlog_skb_q); |
df561f66 | 424 | fallthrough; |
eac3731b | 425 | |
05bba1ed | 426 | default: |
7d316b94 | 427 | iucv_sever_path(sk, 1); |
3ff50b79 | 428 | } |
eac3731b | 429 | |
800c5eb7 UB |
430 | if (iucv->hs_dev) { |
431 | dev_put(iucv->hs_dev); | |
432 | iucv->hs_dev = NULL; | |
433 | sk->sk_bound_dev_if = 0; | |
434 | } | |
435 | ||
7514bab0 HB |
436 | /* mark socket for deletion by iucv_sock_kill() */ |
437 | sock_set_flag(sk, SOCK_ZAPPED); | |
438 | ||
eac3731b | 439 | release_sock(sk); |
eac3731b JH |
440 | } |
441 | ||
442 | static void iucv_sock_init(struct sock *sk, struct sock *parent) | |
443 | { | |
02f06918 | 444 | if (parent) { |
eac3731b | 445 | sk->sk_type = parent->sk_type; |
02f06918 PM |
446 | security_sk_clone(parent, sk); |
447 | } | |
eac3731b JH |
448 | } |
449 | ||
11aa9c28 | 450 | static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern) |
eac3731b JH |
451 | { |
452 | struct sock *sk; | |
493d3971 | 453 | struct iucv_sock *iucv; |
eac3731b | 454 | |
11aa9c28 | 455 | sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern); |
eac3731b JH |
456 | if (!sk) |
457 | return NULL; | |
493d3971 | 458 | iucv = iucv_sk(sk); |
eac3731b JH |
459 | |
460 | sock_init_data(sock, sk); | |
493d3971 UB |
461 | INIT_LIST_HEAD(&iucv->accept_q); |
462 | spin_lock_init(&iucv->accept_q_lock); | |
463 | skb_queue_head_init(&iucv->send_skb_q); | |
464 | INIT_LIST_HEAD(&iucv->message_q.list); | |
465 | spin_lock_init(&iucv->message_q.lock); | |
466 | skb_queue_head_init(&iucv->backlog_skb_q); | |
467 | iucv->send_tag = 0; | |
3881ac44 | 468 | atomic_set(&iucv->pendings, 0); |
493d3971 | 469 | iucv->flags = 0; |
3881ac44 | 470 | iucv->msglimit = 0; |
ef6af7bd | 471 | atomic_set(&iucv->skbs_in_xmit, 0); |
3881ac44 UB |
472 | atomic_set(&iucv->msg_sent, 0); |
473 | atomic_set(&iucv->msg_recv, 0); | |
493d3971 | 474 | iucv->path = NULL; |
3881ac44 | 475 | iucv->sk_txnotify = afiucv_hs_callback_txnotify; |
b5d8cf0a | 476 | memset(&iucv->init, 0, sizeof(iucv->init)); |
3881ac44 UB |
477 | if (pr_iucv) |
478 | iucv->transport = AF_IUCV_TRANS_IUCV; | |
479 | else | |
480 | iucv->transport = AF_IUCV_TRANS_HIPER; | |
eac3731b JH |
481 | |
482 | sk->sk_destruct = iucv_sock_destruct; | |
483 | sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; | |
eac3731b JH |
484 | |
485 | sock_reset_flag(sk, SOCK_ZAPPED); | |
486 | ||
487 | sk->sk_protocol = proto; | |
488 | sk->sk_state = IUCV_OPEN; | |
489 | ||
eac3731b JH |
490 | iucv_sock_link(&iucv_sk_list, sk); |
491 | return sk; | |
492 | } | |
493 | ||
e9a36ca5 | 494 | static void iucv_accept_enqueue(struct sock *parent, struct sock *sk) |
eac3731b | 495 | { |
febca281 UB |
496 | unsigned long flags; |
497 | struct iucv_sock *par = iucv_sk(parent); | |
498 | ||
eac3731b | 499 | sock_hold(sk); |
febca281 UB |
500 | spin_lock_irqsave(&par->accept_q_lock, flags); |
501 | list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q); | |
502 | spin_unlock_irqrestore(&par->accept_q_lock, flags); | |
eac3731b | 503 | iucv_sk(sk)->parent = parent; |
49f5eba7 | 504 | sk_acceptq_added(parent); |
eac3731b JH |
505 | } |
506 | ||
e9a36ca5 | 507 | static void iucv_accept_unlink(struct sock *sk) |
eac3731b | 508 | { |
febca281 UB |
509 | unsigned long flags; |
510 | struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent); | |
511 | ||
512 | spin_lock_irqsave(&par->accept_q_lock, flags); | |
eac3731b | 513 | list_del_init(&iucv_sk(sk)->accept_q); |
febca281 | 514 | spin_unlock_irqrestore(&par->accept_q_lock, flags); |
49f5eba7 | 515 | sk_acceptq_removed(iucv_sk(sk)->parent); |
eac3731b JH |
516 | iucv_sk(sk)->parent = NULL; |
517 | sock_put(sk); | |
518 | } | |
519 | ||
e9a36ca5 JW |
520 | static struct sock *iucv_accept_dequeue(struct sock *parent, |
521 | struct socket *newsock) | |
eac3731b JH |
522 | { |
523 | struct iucv_sock *isk, *n; | |
524 | struct sock *sk; | |
525 | ||
da99f056 | 526 | list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) { |
eac3731b JH |
527 | sk = (struct sock *) isk; |
528 | lock_sock(sk); | |
529 | ||
530 | if (sk->sk_state == IUCV_CLOSED) { | |
eac3731b | 531 | iucv_accept_unlink(sk); |
febca281 | 532 | release_sock(sk); |
eac3731b JH |
533 | continue; |
534 | } | |
535 | ||
536 | if (sk->sk_state == IUCV_CONNECTED || | |
aac6399c | 537 | sk->sk_state == IUCV_DISCONN || |
eac3731b JH |
538 | !newsock) { |
539 | iucv_accept_unlink(sk); | |
540 | if (newsock) | |
541 | sock_graft(sk, newsock); | |
542 | ||
eac3731b JH |
543 | release_sock(sk); |
544 | return sk; | |
545 | } | |
546 | ||
547 | release_sock(sk); | |
548 | } | |
549 | return NULL; | |
550 | } | |
551 | ||
53a4b499 PH |
552 | static void __iucv_auto_name(struct iucv_sock *iucv) |
553 | { | |
554 | char name[12]; | |
555 | ||
556 | sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name)); | |
557 | while (__iucv_get_sock_by_name(name)) { | |
558 | sprintf(name, "%08x", | |
559 | atomic_inc_return(&iucv_sk_list.autobind_name)); | |
560 | } | |
561 | memcpy(iucv->src_name, name, 8); | |
562 | } | |
563 | ||
eac3731b JH |
564 | /* Bind an unbound socket */ |
565 | static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, | |
566 | int addr_len) | |
567 | { | |
97f8841e | 568 | DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr); |
398999ba | 569 | char uid[sizeof(sa->siucv_user_id)]; |
eac3731b JH |
570 | struct sock *sk = sock->sk; |
571 | struct iucv_sock *iucv; | |
3881ac44 UB |
572 | int err = 0; |
573 | struct net_device *dev; | |
eac3731b JH |
574 | |
575 | /* Verify the input sockaddr */ | |
e3c42b61 MJ |
576 | if (addr_len < sizeof(struct sockaddr_iucv) || |
577 | addr->sa_family != AF_IUCV) | |
52a82e23 UB |
578 | return -EINVAL; |
579 | ||
eac3731b JH |
580 | lock_sock(sk); |
581 | if (sk->sk_state != IUCV_OPEN) { | |
582 | err = -EBADFD; | |
583 | goto done; | |
584 | } | |
585 | ||
586 | write_lock_bh(&iucv_sk_list.lock); | |
587 | ||
588 | iucv = iucv_sk(sk); | |
589 | if (__iucv_get_sock_by_name(sa->siucv_name)) { | |
590 | err = -EADDRINUSE; | |
591 | goto done_unlock; | |
592 | } | |
3881ac44 | 593 | if (iucv->path) |
eac3731b | 594 | goto done_unlock; |
eac3731b JH |
595 | |
596 | /* Bind the socket */ | |
3881ac44 UB |
597 | if (pr_iucv) |
598 | if (!memcmp(sa->siucv_user_id, iucv_userid, 8)) | |
599 | goto vm_bind; /* VM IUCV transport */ | |
eac3731b | 600 | |
3881ac44 UB |
601 | /* try hiper transport */ |
602 | memcpy(uid, sa->siucv_user_id, sizeof(uid)); | |
603 | ASCEBC(uid, 8); | |
604 | rcu_read_lock(); | |
605 | for_each_netdev_rcu(&init_net, dev) { | |
606 | if (!memcmp(dev->perm_addr, uid, 8)) { | |
3881ac44 | 607 | memcpy(iucv->src_user_id, sa->siucv_user_id, 8); |
bf05d48d | 608 | /* Check for uninitialized siucv_name */ |
53a4b499 PH |
609 | if (strncmp(sa->siucv_name, " ", 8) == 0) |
610 | __iucv_auto_name(iucv); | |
611 | else | |
612 | memcpy(iucv->src_name, sa->siucv_name, 8); | |
816abbad | 613 | sk->sk_bound_dev_if = dev->ifindex; |
800c5eb7 UB |
614 | iucv->hs_dev = dev; |
615 | dev_hold(dev); | |
3881ac44 UB |
616 | sk->sk_state = IUCV_BOUND; |
617 | iucv->transport = AF_IUCV_TRANS_HIPER; | |
618 | if (!iucv->msglimit) | |
619 | iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT; | |
620 | rcu_read_unlock(); | |
621 | goto done_unlock; | |
622 | } | |
623 | } | |
624 | rcu_read_unlock(); | |
625 | vm_bind: | |
626 | if (pr_iucv) { | |
627 | /* use local userid for backward compat */ | |
628 | memcpy(iucv->src_name, sa->siucv_name, 8); | |
629 | memcpy(iucv->src_user_id, iucv_userid, 8); | |
630 | sk->sk_state = IUCV_BOUND; | |
631 | iucv->transport = AF_IUCV_TRANS_IUCV; | |
fdbf6326 | 632 | sk->sk_allocation |= GFP_DMA; |
3881ac44 UB |
633 | if (!iucv->msglimit) |
634 | iucv->msglimit = IUCV_QUEUELEN_DEFAULT; | |
635 | goto done_unlock; | |
636 | } | |
637 | /* found no dev to bind */ | |
638 | err = -ENODEV; | |
eac3731b JH |
639 | done_unlock: |
640 | /* Release the socket list lock */ | |
641 | write_unlock_bh(&iucv_sk_list.lock); | |
642 | done: | |
643 | release_sock(sk); | |
644 | return err; | |
645 | } | |
646 | ||
647 | /* Automatically bind an unbound socket */ | |
648 | static int iucv_sock_autobind(struct sock *sk) | |
649 | { | |
650 | struct iucv_sock *iucv = iucv_sk(sk); | |
eac3731b JH |
651 | int err = 0; |
652 | ||
aac6399c | 653 | if (unlikely(!pr_iucv)) |
eac3731b JH |
654 | return -EPROTO; |
655 | ||
aac6399c | 656 | memcpy(iucv->src_user_id, iucv_userid, 8); |
fdbf6326 JW |
657 | iucv->transport = AF_IUCV_TRANS_IUCV; |
658 | sk->sk_allocation |= GFP_DMA; | |
eac3731b JH |
659 | |
660 | write_lock_bh(&iucv_sk_list.lock); | |
53a4b499 | 661 | __iucv_auto_name(iucv); |
eac3731b JH |
662 | write_unlock_bh(&iucv_sk_list.lock); |
663 | ||
3881ac44 UB |
664 | if (!iucv->msglimit) |
665 | iucv->msglimit = IUCV_QUEUELEN_DEFAULT; | |
666 | ||
eac3731b JH |
667 | return err; |
668 | } | |
669 | ||
3881ac44 | 670 | static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr) |
eac3731b | 671 | { |
97f8841e | 672 | DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr); |
eac3731b | 673 | struct sock *sk = sock->sk; |
493d3971 | 674 | struct iucv_sock *iucv = iucv_sk(sk); |
eac3731b JH |
675 | unsigned char user_data[16]; |
676 | int err; | |
677 | ||
eac3731b | 678 | high_nmcpy(user_data, sa->siucv_name); |
493d3971 | 679 | low_nmcpy(user_data, iucv->src_name); |
eac3731b JH |
680 | ASCEBC(user_data, sizeof(user_data)); |
681 | ||
eac3731b | 682 | /* Create path. */ |
09488e2e | 683 | iucv->path = iucv_path_alloc(iucv->msglimit, |
b8942e3b | 684 | IUCV_IPRMDATA, GFP_KERNEL); |
d4444722 UB |
685 | if (!iucv->path) { |
686 | err = -ENOMEM; | |
687 | goto done; | |
688 | } | |
6fcd61f7 FB |
689 | err = pr_iucv->path_connect(iucv->path, &af_iucv_handler, |
690 | sa->siucv_user_id, NULL, user_data, | |
691 | sk); | |
eac3731b JH |
692 | if (err) { |
693 | iucv_path_free(iucv->path); | |
694 | iucv->path = NULL; | |
55cdea9e HB |
695 | switch (err) { |
696 | case 0x0b: /* Target communicator is not logged on */ | |
697 | err = -ENETUNREACH; | |
698 | break; | |
699 | case 0x0d: /* Max connections for this guest exceeded */ | |
700 | case 0x0e: /* Max connections for target guest exceeded */ | |
701 | err = -EAGAIN; | |
702 | break; | |
703 | case 0x0f: /* Missing IUCV authorization */ | |
704 | err = -EACCES; | |
705 | break; | |
706 | default: | |
707 | err = -ECONNREFUSED; | |
708 | break; | |
709 | } | |
eac3731b | 710 | } |
3881ac44 UB |
711 | done: |
712 | return err; | |
713 | } | |
eac3731b | 714 | |
3881ac44 UB |
715 | /* Connect an unconnected socket */ |
716 | static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, | |
717 | int alen, int flags) | |
718 | { | |
97f8841e | 719 | DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr); |
3881ac44 UB |
720 | struct sock *sk = sock->sk; |
721 | struct iucv_sock *iucv = iucv_sk(sk); | |
722 | int err; | |
723 | ||
e3c42b61 | 724 | if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV) |
3881ac44 UB |
725 | return -EINVAL; |
726 | ||
727 | if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) | |
728 | return -EBADFD; | |
729 | ||
730 | if (sk->sk_state == IUCV_OPEN && | |
731 | iucv->transport == AF_IUCV_TRANS_HIPER) | |
732 | return -EBADFD; /* explicit bind required */ | |
733 | ||
734 | if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET) | |
735 | return -EINVAL; | |
736 | ||
737 | if (sk->sk_state == IUCV_OPEN) { | |
738 | err = iucv_sock_autobind(sk); | |
739 | if (unlikely(err)) | |
740 | return err; | |
741 | } | |
742 | ||
743 | lock_sock(sk); | |
744 | ||
745 | /* Set the destination information */ | |
746 | memcpy(iucv->dst_user_id, sa->siucv_user_id, 8); | |
747 | memcpy(iucv->dst_name, sa->siucv_name, 8); | |
748 | ||
749 | if (iucv->transport == AF_IUCV_TRANS_HIPER) | |
9fbd87d4 | 750 | err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN); |
3881ac44 UB |
751 | else |
752 | err = afiucv_path_connect(sock, addr); | |
753 | if (err) | |
754 | goto done; | |
755 | ||
756 | if (sk->sk_state != IUCV_CONNECTED) | |
0ea920d2 HB |
757 | err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED, |
758 | IUCV_DISCONN), | |
759 | sock_sndtimeo(sk, flags & O_NONBLOCK)); | |
eac3731b | 760 | |
3881ac44 | 761 | if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED) |
b8942e3b | 762 | err = -ECONNREFUSED; |
18becbc5 | 763 | |
7d316b94 UB |
764 | if (err && iucv->transport == AF_IUCV_TRANS_IUCV) |
765 | iucv_sever_path(sk, 0); | |
18becbc5 | 766 | |
eac3731b JH |
767 | done: |
768 | release_sock(sk); | |
769 | return err; | |
770 | } | |
771 | ||
772 | /* Move a socket into listening state. */ | |
773 | static int iucv_sock_listen(struct socket *sock, int backlog) | |
774 | { | |
775 | struct sock *sk = sock->sk; | |
776 | int err; | |
777 | ||
778 | lock_sock(sk); | |
779 | ||
780 | err = -EINVAL; | |
aa8e71f5 HB |
781 | if (sk->sk_state != IUCV_BOUND) |
782 | goto done; | |
783 | ||
784 | if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) | |
eac3731b JH |
785 | goto done; |
786 | ||
787 | sk->sk_max_ack_backlog = backlog; | |
788 | sk->sk_ack_backlog = 0; | |
789 | sk->sk_state = IUCV_LISTEN; | |
790 | err = 0; | |
791 | ||
792 | done: | |
793 | release_sock(sk); | |
794 | return err; | |
795 | } | |
796 | ||
797 | /* Accept a pending connection */ | |
798 | static int iucv_sock_accept(struct socket *sock, struct socket *newsock, | |
92ef0fd5 | 799 | struct proto_accept_arg *arg) |
eac3731b JH |
800 | { |
801 | DECLARE_WAITQUEUE(wait, current); | |
802 | struct sock *sk = sock->sk, *nsk; | |
803 | long timeo; | |
804 | int err = 0; | |
805 | ||
561e0360 | 806 | lock_sock_nested(sk, SINGLE_DEPTH_NESTING); |
eac3731b JH |
807 | |
808 | if (sk->sk_state != IUCV_LISTEN) { | |
809 | err = -EBADFD; | |
810 | goto done; | |
811 | } | |
812 | ||
92ef0fd5 | 813 | timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK); |
eac3731b JH |
814 | |
815 | /* Wait for an incoming connection */ | |
aa395145 | 816 | add_wait_queue_exclusive(sk_sleep(sk), &wait); |
da99f056 | 817 | while (!(nsk = iucv_accept_dequeue(sk, newsock))) { |
eac3731b JH |
818 | set_current_state(TASK_INTERRUPTIBLE); |
819 | if (!timeo) { | |
820 | err = -EAGAIN; | |
821 | break; | |
822 | } | |
823 | ||
824 | release_sock(sk); | |
825 | timeo = schedule_timeout(timeo); | |
561e0360 | 826 | lock_sock_nested(sk, SINGLE_DEPTH_NESTING); |
eac3731b JH |
827 | |
828 | if (sk->sk_state != IUCV_LISTEN) { | |
829 | err = -EBADFD; | |
830 | break; | |
831 | } | |
832 | ||
833 | if (signal_pending(current)) { | |
834 | err = sock_intr_errno(timeo); | |
835 | break; | |
836 | } | |
837 | } | |
838 | ||
839 | set_current_state(TASK_RUNNING); | |
aa395145 | 840 | remove_wait_queue(sk_sleep(sk), &wait); |
eac3731b JH |
841 | |
842 | if (err) | |
843 | goto done; | |
844 | ||
845 | newsock->state = SS_CONNECTED; | |
846 | ||
847 | done: | |
848 | release_sock(sk); | |
849 | return err; | |
850 | } | |
851 | ||
852 | static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr, | |
9b2c45d4 | 853 | int peer) |
eac3731b | 854 | { |
97f8841e | 855 | DECLARE_SOCKADDR(struct sockaddr_iucv *, siucv, addr); |
eac3731b | 856 | struct sock *sk = sock->sk; |
493d3971 | 857 | struct iucv_sock *iucv = iucv_sk(sk); |
eac3731b JH |
858 | |
859 | addr->sa_family = AF_IUCV; | |
eac3731b JH |
860 | |
861 | if (peer) { | |
493d3971 UB |
862 | memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8); |
863 | memcpy(siucv->siucv_name, iucv->dst_name, 8); | |
eac3731b | 864 | } else { |
493d3971 UB |
865 | memcpy(siucv->siucv_user_id, iucv->src_user_id, 8); |
866 | memcpy(siucv->siucv_name, iucv->src_name, 8); | |
eac3731b JH |
867 | } |
868 | memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port)); | |
869 | memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr)); | |
493d3971 | 870 | memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid)); |
eac3731b | 871 | |
9b2c45d4 | 872 | return sizeof(struct sockaddr_iucv); |
eac3731b JH |
873 | } |
874 | ||
b8942e3b HB |
875 | /** |
876 | * iucv_send_iprm() - Send socket data in parameter list of an iucv message. | |
877 | * @path: IUCV path | |
878 | * @msg: Pointer to a struct iucv_message | |
879 | * @skb: The socket data to send, skb->len MUST BE <= 7 | |
880 | * | |
881 | * Send the socket data in the parameter list of the iucv message | 
882 | * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 of the parameter | 
883 | * list and the socket data length at index 7 (last byte). | 
884 | * See also iucv_msg_length(). | |
885 | * | |
886 | * Returns the error code from the iucv_message_send() call. | |
887 | */ | |
888 | static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg, | |
889 | struct sk_buff *skb) | |
890 | { | |
891 | u8 prmdata[8]; | |
892 | ||
893 | memcpy(prmdata, (void *) skb->data, skb->len); | |
894 | prmdata[7] = 0xff - (u8) skb->len; | |
6fcd61f7 | 895 | return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0, |
b8942e3b HB |
896 | (void *) prmdata, 8); |
897 | } | |
898 | ||
1b784140 YX |
899 | static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg, |
900 | size_t len) | |
eac3731b JH |
901 | { |
902 | struct sock *sk = sock->sk; | |
903 | struct iucv_sock *iucv = iucv_sk(sk); | |
dc5367bc JW |
904 | size_t headroom = 0; |
905 | size_t linear; | |
eac3731b | 906 | struct sk_buff *skb; |
0506eb01 | 907 | struct iucv_message txmsg = {0}; |
44b1e6b5 HB |
908 | struct cmsghdr *cmsg; |
909 | int cmsg_done; | |
0ea920d2 | 910 | long timeo; |
8f7c502c UB |
911 | char user_id[9]; |
912 | char appl_id[9]; | |
eac3731b | 913 | int err; |
0ea920d2 | 914 | int noblock = msg->msg_flags & MSG_DONTWAIT; |
eac3731b JH |
915 | |
916 | err = sock_error(sk); | |
917 | if (err) | |
918 | return err; | |
919 | ||
920 | if (msg->msg_flags & MSG_OOB) | |
921 | return -EOPNOTSUPP; | |
922 | ||
aa8e71f5 HB |
923 | /* SOCK_SEQPACKET: we do not support segmented records */ |
924 | if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR)) | |
925 | return -EOPNOTSUPP; | |
926 | ||
eac3731b JH |
927 | lock_sock(sk); |
928 | ||
929 | if (sk->sk_shutdown & SEND_SHUTDOWN) { | |
930 | err = -EPIPE; | |
931 | goto out; | |
932 | } | |
933 | ||
bb664f49 HB |
934 | /* Return if the socket is not in connected state */ |
935 | if (sk->sk_state != IUCV_CONNECTED) { | |
936 | err = -ENOTCONN; | |
937 | goto out; | |
938 | } | |
44b1e6b5 | 939 | |
bb664f49 HB |
940 | /* initialize defaults */ |
941 | cmsg_done = 0; /* check for duplicate headers */ | |
44b1e6b5 | 942 | |
bb664f49 | 943 | /* iterate over control messages */ |
f95b414e | 944 | for_each_cmsghdr(cmsg, msg) { |
bb664f49 HB |
945 | if (!CMSG_OK(msg, cmsg)) { |
946 | err = -EINVAL; | |
947 | goto out; | |
948 | } | |
44b1e6b5 | 949 | |
bb664f49 HB |
950 | if (cmsg->cmsg_level != SOL_IUCV) |
951 | continue; | |
44b1e6b5 | 952 | |
bb664f49 HB |
953 | if (cmsg->cmsg_type & cmsg_done) { |
954 | err = -EINVAL; | |
955 | goto out; | |
956 | } | |
957 | cmsg_done |= cmsg->cmsg_type; | |
44b1e6b5 | 958 | |
bb664f49 HB |
959 | switch (cmsg->cmsg_type) { |
960 | case SCM_IUCV_TRGCLS: | |
961 | if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) { | |
44b1e6b5 HB |
962 | err = -EINVAL; |
963 | goto out; | |
964 | } | |
44b1e6b5 | 965 | |
bb664f49 HB |
966 | /* set iucv message target class */ |
967 | memcpy(&txmsg.class, | |
968 | (void *) CMSG_DATA(cmsg), TRGCLS_SIZE); | |
44b1e6b5 | 969 | |
bb664f49 | 970 | break; |
44b1e6b5 | 971 | |
bb664f49 HB |
972 | default: |
973 | err = -EINVAL; | |
974 | goto out; | |
44b1e6b5 | 975 | } |
bb664f49 | 976 | } |
44b1e6b5 | 977 | |
bb664f49 HB |
978 | /* allocate one skb for each iucv message: |
979 | * this is fine for SOCK_SEQPACKET (unless we want to support | |
980 | * segmented records using the MSG_EOR flag), but | |
981 | * for SOCK_STREAM we might want to improve it in the future */ | 
dc5367bc | 982 | if (iucv->transport == AF_IUCV_TRANS_HIPER) { |
238965b7 JW |
983 | headroom = sizeof(struct af_iucv_trans_hdr) + |
984 | LL_RESERVED_SPACE(iucv->hs_dev); | |
2c3b4456 | 985 | linear = min(len, PAGE_SIZE - headroom); |
e5374399 | 986 | } else { |
dc5367bc JW |
987 | if (len < PAGE_SIZE) { |
988 | linear = len; | |
989 | } else { | |
990 | /* In nonlinear "classic" iucv skb, | |
991 | * reserve space for iucv_array | |
992 | */ | |
993 | headroom = sizeof(struct iucv_array) * | |
994 | (MAX_SKB_FRAGS + 1); | |
995 | linear = PAGE_SIZE - headroom; | |
996 | } | |
e5374399 EC |
997 | } |
998 | skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear, | |
999 | noblock, &err, 0); | |
ed4ac422 | 1000 | if (!skb) |
bb664f49 | 1001 | goto out; |
e5374399 EC |
1002 | if (headroom) |
1003 | skb_reserve(skb, headroom); | |
1004 | skb_put(skb, linear); | |
1005 | skb->len = len; | |
1006 | skb->data_len = len - linear; | |
1007 | err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len); | |
1008 | if (err) | |
bb664f49 | 1009 | goto fail; |
eac3731b | 1010 | |
0ea920d2 HB |
1011 | /* wait if the number of outstanding messages for the iucv path has reached the message limit */ | 
1012 | timeo = sock_sndtimeo(sk, noblock); | |
1013 | err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo); | |
1014 | if (err) | |
1015 | goto fail; | |
eac3731b | 1016 | |
0ea920d2 HB |
1017 | /* return -ECONNRESET if the socket is no longer connected */ |
1018 | if (sk->sk_state != IUCV_CONNECTED) { | |
1019 | err = -ECONNRESET; | |
1020 | goto fail; | |
1021 | } | |
b8942e3b | 1022 | |
bb664f49 HB |
1023 | /* increment and save iucv message tag for msg_completion cbk */ |
1024 | txmsg.tag = iucv->send_tag++; | |
f9c41a62 | 1025 | IUCV_SKB_CB(skb)->tag = txmsg.tag; |
800c5eb7 | 1026 | |
3881ac44 UB |
1027 | if (iucv->transport == AF_IUCV_TRANS_HIPER) { |
1028 | atomic_inc(&iucv->msg_sent); | |
1029 | err = afiucv_hs_send(&txmsg, sk, skb, 0); | |
1030 | if (err) { | |
1031 | atomic_dec(&iucv->msg_sent); | |
b2f54394 | 1032 | goto out; |
3881ac44 | 1033 | } |
e5374399 EC |
1034 | } else { /* Classic VM IUCV transport */ |
1035 | skb_queue_tail(&iucv->send_skb_q, skb); | |
ef6af7bd | 1036 | atomic_inc(&iucv->skbs_in_xmit); |
e5374399 EC |
1037 | |
1038 | if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) && | |
1039 | skb->len <= 7) { | |
1040 | err = iucv_send_iprm(iucv->path, &txmsg, skb); | |
1041 | ||
1042 | /* on success: there is no message_complete callback */ | |
1043 | /* for an IPRMDATA msg; remove skb from send queue */ | |
1044 | if (err == 0) { | |
ef6af7bd | 1045 | atomic_dec(&iucv->skbs_in_xmit); |
e5374399 | 1046 | skb_unlink(skb, &iucv->send_skb_q); |
10d6393d | 1047 | consume_skb(skb); |
e5374399 | 1048 | } |
b8942e3b | 1049 | |
e5374399 EC |
1050 | /* this error should never happen since the */ |
1051 | /* IUCV_IPRMDATA path flag is set... sever path */ | |
1052 | if (err == 0x15) { | |
1053 | pr_iucv->path_sever(iucv->path, NULL); | |
ef6af7bd | 1054 | atomic_dec(&iucv->skbs_in_xmit); |
e5374399 EC |
1055 | skb_unlink(skb, &iucv->send_skb_q); |
1056 | err = -EPIPE; | |
1057 | goto fail; | |
1058 | } | |
1059 | } else if (skb_is_nonlinear(skb)) { | |
1060 | struct iucv_array *iba = (struct iucv_array *)skb->head; | |
1061 | int i; | |
1062 | ||
1063 | /* skip iucv_array lying in the headroom */ | |
2aca9eaf | 1064 | iba[0].address = virt_to_dma32(skb->data); |
e5374399 EC |
1065 | iba[0].length = (u32)skb_headlen(skb); |
1066 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
1067 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
1068 | ||
2aca9eaf | 1069 | iba[i + 1].address = virt_to_dma32(skb_frag_address(frag)); |
e5374399 EC |
1070 | iba[i + 1].length = (u32)skb_frag_size(frag); |
1071 | } | |
1072 | err = pr_iucv->message_send(iucv->path, &txmsg, | |
1073 | IUCV_IPBUFLST, 0, | |
1074 | (void *)iba, skb->len); | |
1075 | } else { /* non-IPRM Linear skb */ | |
1076 | err = pr_iucv->message_send(iucv->path, &txmsg, | |
1077 | 0, 0, (void *)skb->data, skb->len); | |
bb664f49 | 1078 | } |
e5374399 EC |
1079 | if (err) { |
1080 | if (err == 3) { | |
1081 | user_id[8] = 0; | |
1082 | memcpy(user_id, iucv->dst_user_id, 8); | |
1083 | appl_id[8] = 0; | |
1084 | memcpy(appl_id, iucv->dst_name, 8); | |
1085 | pr_err( | |
1086 | "Application %s on z/VM guest %s exceeds message limit\n", | |
1087 | appl_id, user_id); | |
1088 | err = -EAGAIN; | |
1089 | } else { | |
1090 | err = -EPIPE; | |
1091 | } | |
ef6af7bd JW |
1092 | |
1093 | atomic_dec(&iucv->skbs_in_xmit); | |
eac3731b | 1094 | skb_unlink(skb, &iucv->send_skb_q); |
eac3731b JH |
1095 | goto fail; |
1096 | } | |
eac3731b JH |
1097 | } |
1098 | ||
1099 | release_sock(sk); | |
1100 | return len; | |
1101 | ||
1102 | fail: | |
1103 | kfree_skb(skb); | |
1104 | out: | |
1105 | release_sock(sk); | |
1106 | return err; | |
1107 | } | |
1108 | ||
a006353a EC |
1109 | static struct sk_buff *alloc_iucv_recv_skb(unsigned long len) |
1110 | { | |
1111 | size_t headroom, linear; | |
1112 | struct sk_buff *skb; | |
1113 | int err; | |
1114 | ||
1115 | if (len < PAGE_SIZE) { | |
1116 | headroom = 0; | |
1117 | linear = len; | |
1118 | } else { | |
1119 | headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1); | |
1120 | linear = PAGE_SIZE - headroom; | |
1121 | } | |
1122 | skb = alloc_skb_with_frags(headroom + linear, len - linear, | |
1123 | 0, &err, GFP_ATOMIC | GFP_DMA); | |
1124 | WARN_ONCE(!skb, | |
1125 | "alloc of recv iucv skb len=%lu failed with errcode=%d\n", | |
1126 | len, err); | |
1127 | if (skb) { | |
1128 | if (headroom) | |
1129 | skb_reserve(skb, headroom); | |
1130 | skb_put(skb, linear); | |
1131 | skb->len = len; | |
1132 | skb->data_len = len - linear; | |
1133 | } | |
1134 | return skb; | |
1135 | } | |
1136 | ||
bf95d20f HB |
1137 | /* iucv_process_message() - Receive a single outstanding IUCV message |
1138 | * | |
1139 | * Locking: must be called with message_q.lock held | |
1140 | */ | |
f0703c80 UB |
1141 | static void iucv_process_message(struct sock *sk, struct sk_buff *skb, |
1142 | struct iucv_path *path, | |
1143 | struct iucv_message *msg) | |
1144 | { | |
1145 | int rc; | |
b8942e3b HB |
1146 | unsigned int len; |
1147 | ||
1148 | len = iucv_msg_length(msg); | |
f0703c80 | 1149 | |
44b1e6b5 HB |
1150 | /* store msg target class in the second 4 bytes of skb ctrl buffer */ |
1151 | /* Note: the first 4 bytes are reserved for msg tag */ | |
f9c41a62 | 1152 | IUCV_SKB_CB(skb)->class = msg->class; |
44b1e6b5 | 1153 | |
b8942e3b HB |
1154 | /* check for special IPRM messages (e.g. iucv_sock_shutdown) */ |
1155 | if ((msg->flags & IUCV_IPRMDATA) && len > 7) { | |
1156 | if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) { | |
1157 | skb->data = NULL; | |
1158 | skb->len = 0; | |
1159 | } | |
f0703c80 | 1160 | } else { |
a006353a EC |
1161 | if (skb_is_nonlinear(skb)) { |
1162 | struct iucv_array *iba = (struct iucv_array *)skb->head; | |
1163 | int i; | |
1164 | ||
2aca9eaf | 1165 | iba[0].address = virt_to_dma32(skb->data); |
a006353a EC |
1166 | iba[0].length = (u32)skb_headlen(skb); |
1167 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
1168 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
1169 | ||
2aca9eaf | 1170 | iba[i + 1].address = virt_to_dma32(skb_frag_address(frag)); |
a006353a EC |
1171 | iba[i + 1].length = (u32)skb_frag_size(frag); |
1172 | } | |
1173 | rc = pr_iucv->message_receive(path, msg, | |
1174 | IUCV_IPBUFLST, | |
1175 | (void *)iba, len, NULL); | |
1176 | } else { | |
1177 | rc = pr_iucv->message_receive(path, msg, | |
6fcd61f7 FB |
1178 | msg->flags & IUCV_IPRMDATA, |
1179 | skb->data, len, NULL); | |
a006353a | 1180 | } |
f0703c80 UB |
1181 | if (rc) { |
1182 | kfree_skb(skb); | |
1183 | return; | |
1184 | } | |
a006353a | 1185 | WARN_ON_ONCE(skb->len != len); |
f0703c80 UB |
1186 | } |
1187 | ||
f9c41a62 | 1188 | IUCV_SKB_CB(skb)->offset = 0; |
8c68b1a0 UB |
1189 | if (sk_filter(sk, skb)) { |
1190 | atomic_inc(&sk->sk_drops); /* skb rejected by filter */ | |
1191 | kfree_skb(skb); | |
1192 | return; | |
1193 | } | |
1194 | if (__sock_queue_rcv_skb(sk, skb)) /* handle rcv queue full */ | |
1195 | skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); | |
f0703c80 UB |
1196 | } |
1197 | ||
bf95d20f HB |
1198 | /* iucv_process_message_q() - Process outstanding IUCV messages |
1199 | * | |
1200 | * Locking: must be called with message_q.lock held | |
1201 | */ | |
f0703c80 UB |
1202 | static void iucv_process_message_q(struct sock *sk) |
1203 | { | |
1204 | struct iucv_sock *iucv = iucv_sk(sk); | |
1205 | struct sk_buff *skb; | |
1206 | struct sock_msg_q *p, *n; | |
1207 | ||
1208 | list_for_each_entry_safe(p, n, &iucv->message_q.list, list) { | |
a006353a | 1209 | skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg)); |
f0703c80 UB |
1210 | if (!skb) |
1211 | break; | |
1212 | iucv_process_message(sk, skb, p->path, &p->msg); | |
1213 | list_del(&p->list); | |
1214 | kfree(p); | |
1215 | if (!skb_queue_empty(&iucv->backlog_skb_q)) | |
1216 | break; | |
1217 | } | |
1218 | } | |
1219 | ||
1b784140 YX |
1220 | static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg, |
1221 | size_t len, int flags) | |
eac3731b | 1222 | { |
eac3731b | 1223 | struct sock *sk = sock->sk; |
561e0360 | 1224 | struct iucv_sock *iucv = iucv_sk(sk); |
aa8e71f5 | 1225 | unsigned int copied, rlen; |
9fbd87d4 | 1226 | struct sk_buff *skb, *rskb, *cskb; |
eac3731b | 1227 | int err = 0; |
f9c41a62 | 1228 | u32 offset; |
eac3731b | 1229 | |
aac6399c | 1230 | if ((sk->sk_state == IUCV_DISCONN) && |
f0703c80 UB |
1231 | skb_queue_empty(&iucv->backlog_skb_q) && |
1232 | skb_queue_empty(&sk->sk_receive_queue) && | |
1233 | list_empty(&iucv->message_q.list)) | |
561e0360 JH |
1234 | return 0; |
1235 | ||
eac3731b JH |
1236 | if (flags & (MSG_OOB)) |
1237 | return -EOPNOTSUPP; | |
1238 | ||
60d3705f | 1239 | /* receive/dequeue next skb: |
ebaf8131 SJ |
1240 | * the function understands MSG_PEEK and, thus, does not dequeue the skb; | 
1241 | * only its refcount is increased. | 
1242 | */ | |
f4b41f06 | 1243 | skb = skb_recv_datagram(sk, flags, &err); |
eac3731b JH |
1244 | if (!skb) { |
1245 | if (sk->sk_shutdown & RCV_SHUTDOWN) | |
1246 | return 0; | |
1247 | return err; | |
1248 | } | |
1249 | ||
f9c41a62 UB |
1250 | offset = IUCV_SKB_CB(skb)->offset; |
1251 | rlen = skb->len - offset; /* real length of skb */ | |
aa8e71f5 | 1252 | copied = min_t(unsigned int, rlen, len); |
82492a35 UB |
1253 | if (!rlen) |
1254 | sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; | |
eac3731b | 1255 | |
561e0360 | 1256 | cskb = skb; |
51f3d02b | 1257 | if (skb_copy_datagram_msg(cskb, offset, msg, copied)) { |
ebaf8131 SJ |
1258 | err = -EFAULT; |
1259 | goto err_out; | |
eac3731b JH |
1260 | } |
1261 | ||
aa8e71f5 HB |
1262 | /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */ |
1263 | if (sk->sk_type == SOCK_SEQPACKET) { | |
1264 | if (copied < rlen) | |
1265 | msg->msg_flags |= MSG_TRUNC; | |
1266 | /* each iucv message contains a complete record */ | |
1267 | msg->msg_flags |= MSG_EOR; | |
1268 | } | |
eac3731b | 1269 | |
44b1e6b5 HB |
1270 | /* create control message to store iucv msg target class: |
1271 | * get the trgcls from the control buffer of the skb due to | |
1272 | * fragmentation of the original iucv message. */ | 
1273 | err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, | |
f9c41a62 UB |
1274 | sizeof(IUCV_SKB_CB(skb)->class), |
1275 | (void *)&IUCV_SKB_CB(skb)->class); | |
ebaf8131 SJ |
1276 | if (err) |
1277 | goto err_out; | |
44b1e6b5 | 1278 | |
eac3731b JH |
1279 | /* Mark read part of skb as used */ |
1280 | if (!(flags & MSG_PEEK)) { | |
eac3731b | 1281 | |
aa8e71f5 HB |
1282 | /* SOCK_STREAM: re-queue skb if it contains unreceived data */ |
1283 | if (sk->sk_type == SOCK_STREAM) { | |
f9c41a62 UB |
1284 | if (copied < rlen) { |
1285 | IUCV_SKB_CB(skb)->offset = offset + copied; | |
2f139a5d | 1286 | skb_queue_head(&sk->sk_receive_queue, skb); |
aa8e71f5 HB |
1287 | goto done; |
1288 | } | |
eac3731b JH |
1289 | } |
1290 | ||
10d6393d | 1291 | consume_skb(skb); |
800c5eb7 UB |
1292 | if (iucv->transport == AF_IUCV_TRANS_HIPER) { |
1293 | atomic_inc(&iucv->msg_recv); | |
1294 | if (atomic_read(&iucv->msg_recv) > iucv->msglimit) { | |
1295 | WARN_ON(1); | |
1296 | iucv_sock_close(sk); | |
1297 | return -EFAULT; | |
1298 | } | |
1299 | } | |
561e0360 JH |
1300 | |
1301 | /* Queue backlog skbs */ | |
bf95d20f | 1302 | spin_lock_bh(&iucv->message_q.lock); |
f0703c80 | 1303 | rskb = skb_dequeue(&iucv->backlog_skb_q); |
da99f056 | 1304 | while (rskb) { |
f9c41a62 | 1305 | IUCV_SKB_CB(rskb)->offset = 0; |
8c68b1a0 UB |
1306 | if (__sock_queue_rcv_skb(sk, rskb)) { |
1307 | /* handle rcv queue full */ | |
f0703c80 | 1308 | skb_queue_head(&iucv->backlog_skb_q, |
561e0360 JH |
1309 | rskb); |
1310 | break; | |
561e0360 | 1311 | } |
8c68b1a0 | 1312 | rskb = skb_dequeue(&iucv->backlog_skb_q); |
561e0360 | 1313 | } |
f0703c80 | 1314 | if (skb_queue_empty(&iucv->backlog_skb_q)) { |
f0703c80 UB |
1315 | if (!list_empty(&iucv->message_q.list)) |
1316 | iucv_process_message_q(sk); | |
3881ac44 UB |
1317 | if (atomic_read(&iucv->msg_recv) >= |
1318 | iucv->msglimit / 2) { | |
9fbd87d4 | 1319 | err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN); |
3881ac44 UB |
1320 | if (err) { |
1321 | sk->sk_state = IUCV_DISCONN; | |
1322 | sk->sk_state_change(sk); | |
1323 | } | |
1324 | } | |
f0703c80 | 1325 | } |
bf95d20f | 1326 | spin_unlock_bh(&iucv->message_q.lock); |
60d3705f | 1327 | } |
eac3731b JH |
1328 | |
1329 | done: | |
aa8e71f5 HB |
1330 | /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */ |
1331 | if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) | |
1332 | copied = rlen; | |
ebaf8131 SJ |
1333 | if (flags & MSG_PEEK) |
1334 | skb_unref(skb); | |
aa8e71f5 HB |
1335 | |
1336 | return copied; | |
ebaf8131 SJ |
1337 | |
1338 | err_out: | |
1339 | if (!(flags & MSG_PEEK)) | |
1340 | skb_queue_head(&sk->sk_receive_queue, skb); | |
1341 | else | |
1342 | skb_unref(skb); | |
1343 | ||
1344 | return err; | |
eac3731b JH |
1345 | } |
1346 | ||
ade994f4 | 1347 | static inline __poll_t iucv_accept_poll(struct sock *parent) |
eac3731b JH |
1348 | { |
1349 | struct iucv_sock *isk, *n; | |
1350 | struct sock *sk; | |
1351 | ||
da99f056 | 1352 | list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) { |
eac3731b JH |
1353 | sk = (struct sock *) isk; |
1354 | ||
1355 | if (sk->sk_state == IUCV_CONNECTED) | |
a9a08845 | 1356 | return EPOLLIN | EPOLLRDNORM; |
eac3731b JH |
1357 | } |
1358 | ||
1359 | return 0; | |
1360 | } | |
1361 | ||
e9a36ca5 JW |
1362 | static __poll_t iucv_sock_poll(struct file *file, struct socket *sock, |
1363 | poll_table *wait) | |
eac3731b JH |
1364 | { |
1365 | struct sock *sk = sock->sk; | |
ade994f4 | 1366 | __poll_t mask = 0; |
eac3731b | 1367 | |
89ab066d | 1368 | sock_poll_wait(file, sock, wait); |
a11e1d43 | 1369 | |
eac3731b JH |
1370 | if (sk->sk_state == IUCV_LISTEN) |
1371 | return iucv_accept_poll(sk); | |
1372 | ||
1373 | if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) | |
a9a08845 LT |
1374 | mask |= EPOLLERR | |
1375 | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); | |
eac3731b JH |
1376 | |
1377 | if (sk->sk_shutdown & RCV_SHUTDOWN) | |
a9a08845 | 1378 | mask |= EPOLLRDHUP; |
eac3731b JH |
1379 | |
1380 | if (sk->sk_shutdown == SHUTDOWN_MASK) | |
a9a08845 | 1381 | mask |= EPOLLHUP; |
eac3731b JH |
1382 | |
1383 | if (!skb_queue_empty(&sk->sk_receive_queue) || | |
da99f056 | 1384 | (sk->sk_shutdown & RCV_SHUTDOWN)) |
a9a08845 | 1385 | mask |= EPOLLIN | EPOLLRDNORM; |
eac3731b JH |
1386 | |
1387 | if (sk->sk_state == IUCV_CLOSED) | |
a9a08845 | 1388 | mask |= EPOLLHUP; |
eac3731b | 1389 | |
aac6399c | 1390 | if (sk->sk_state == IUCV_DISCONN) |
a9a08845 | 1391 | mask |= EPOLLIN; |
561e0360 | 1392 | |
7f1b0ea4 | 1393 | if (sock_writeable(sk) && iucv_below_msglim(sk)) |
a9a08845 | 1394 | mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; |
eac3731b | 1395 | else |
9cd3e072 | 1396 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
eac3731b JH |
1397 | |
1398 | return mask; | |
1399 | } | |
1400 | ||
1401 | static int iucv_sock_shutdown(struct socket *sock, int how) | |
1402 | { | |
1403 | struct sock *sk = sock->sk; | |
1404 | struct iucv_sock *iucv = iucv_sk(sk); | |
1405 | struct iucv_message txmsg; | |
1406 | int err = 0; | |
eac3731b JH |
1407 | |
1408 | how++; | |
1409 | ||
1410 | if ((how & ~SHUTDOWN_MASK) || !how) | |
1411 | return -EINVAL; | |
1412 | ||
1413 | lock_sock(sk); | |
da99f056 | 1414 | switch (sk->sk_state) { |
82492a35 | 1415 | case IUCV_LISTEN: |
e14ad5fa HB |
1416 | case IUCV_DISCONN: |
1417 | case IUCV_CLOSING: | |
eac3731b JH |
1418 | case IUCV_CLOSED: |
1419 | err = -ENOTCONN; | |
1420 | goto fail; | |
eac3731b | 1421 | default: |
eac3731b JH |
1422 | break; |
1423 | } | |
1424 | ||
4031eeaf UB |
1425 | if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) && |
1426 | sk->sk_state == IUCV_CONNECTED) { | |
82492a35 UB |
1427 | if (iucv->transport == AF_IUCV_TRANS_IUCV) { |
1428 | txmsg.class = 0; | |
1429 | txmsg.tag = 0; | |
1430 | err = pr_iucv->message_send(iucv->path, &txmsg, | |
1431 | IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8); | |
1432 | if (err) { | |
1433 | switch (err) { | |
1434 | case 1: | |
1435 | err = -ENOTCONN; | |
1436 | break; | |
1437 | case 2: | |
1438 | err = -ECONNRESET; | |
1439 | break; | |
1440 | default: | |
1441 | err = -ENOTCONN; | |
1442 | break; | |
1443 | } | |
eac3731b | 1444 | } |
82492a35 UB |
1445 | } else |
1446 | iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT); | |
eac3731b JH |
1447 | } |
1448 | ||
82492a35 | 1449 | sk->sk_shutdown |= how; |
eac3731b | 1450 | if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) { |
1042cab8 UB |
1451 | if ((iucv->transport == AF_IUCV_TRANS_IUCV) && |
1452 | iucv->path) { | |
82492a35 UB |
1453 | err = pr_iucv->path_quiesce(iucv->path, NULL); |
1454 | if (err) | |
1455 | err = -ENOTCONN; | |
1456 | /* skb_queue_purge(&sk->sk_receive_queue); */ | |
1457 | } | |
eac3731b JH |
1458 | skb_queue_purge(&sk->sk_receive_queue); |
1459 | } | |
1460 | ||
1461 | /* Wake up anyone sleeping in poll */ | |
1462 | sk->sk_state_change(sk); | |
1463 | ||
1464 | fail: | |
1465 | release_sock(sk); | |
1466 | return err; | |
1467 | } | |
1468 | ||
1469 | static int iucv_sock_release(struct socket *sock) | |
1470 | { | |
1471 | struct sock *sk = sock->sk; | |
1472 | int err = 0; | |
1473 | ||
1474 | if (!sk) | |
1475 | return 0; | |
1476 | ||
1477 | iucv_sock_close(sk); | |
1478 | ||
eac3731b JH |
1479 | sock_orphan(sk); |
1480 | iucv_sock_kill(sk); | |
1481 | return err; | |
1482 | } | |
1483 | ||
9d5c5d8f HB |
1484 | /* getsockopt and setsockopt */ |
1485 | static int iucv_sock_setsockopt(struct socket *sock, int level, int optname, | |
a7b75c5a | 1486 | sockptr_t optval, unsigned int optlen) |
9d5c5d8f HB |
1487 | { |
1488 | struct sock *sk = sock->sk; | |
1489 | struct iucv_sock *iucv = iucv_sk(sk); | |
1490 | int val; | |
1491 | int rc; | |
1492 | ||
1493 | if (level != SOL_IUCV) | |
1494 | return -ENOPROTOOPT; | |
1495 | ||
1496 | if (optlen < sizeof(int)) | |
1497 | return -EINVAL; | |
1498 | ||
a7b75c5a | 1499 | if (copy_from_sockptr(&val, optval, sizeof(int))) |
9d5c5d8f HB |
1500 | return -EFAULT; |
1501 | ||
1502 | rc = 0; | |
1503 | ||
1504 | lock_sock(sk); | |
1505 | switch (optname) { | |
1506 | case SO_IPRMDATA_MSG: | |
1507 | if (val) | |
1508 | iucv->flags |= IUCV_IPRMDATA; | |
1509 | else | |
1510 | iucv->flags &= ~IUCV_IPRMDATA; | |
1511 | break; | |
09488e2e HB |
1512 | case SO_MSGLIMIT: |
1513 | switch (sk->sk_state) { | |
1514 | case IUCV_OPEN: | |
1515 | case IUCV_BOUND: | |
0d1c7664 | 1516 | if (val < 1 || val > U16_MAX) |
09488e2e HB |
1517 | rc = -EINVAL; |
1518 | else | |
1519 | iucv->msglimit = val; | |
1520 | break; | |
1521 | default: | |
1522 | rc = -EINVAL; | |
1523 | break; | |
1524 | } | |
1525 | break; | |
9d5c5d8f HB |
1526 | default: |
1527 | rc = -ENOPROTOOPT; | |
1528 | break; | |
1529 | } | |
1530 | release_sock(sk); | |
1531 | ||
1532 | return rc; | |
1533 | } | |
1534 | ||
1535 | static int iucv_sock_getsockopt(struct socket *sock, int level, int optname, | |
1536 | char __user *optval, int __user *optlen) | |
1537 | { | |
1538 | struct sock *sk = sock->sk; | |
1539 | struct iucv_sock *iucv = iucv_sk(sk); | |
51363b87 UB |
1540 | unsigned int val; |
1541 | int len; | |
9d5c5d8f HB |
1542 | |
1543 | if (level != SOL_IUCV) | |
1544 | return -ENOPROTOOPT; | |
1545 | ||
1546 | if (get_user(len, optlen)) | |
1547 | return -EFAULT; | |
1548 | ||
1549 | if (len < 0) | |
1550 | return -EINVAL; | |
1551 | ||
1552 | len = min_t(unsigned int, len, sizeof(int)); | |
1553 | ||
1554 | switch (optname) { | |
1555 | case SO_IPRMDATA_MSG: | |
1556 | val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0; | |
1557 | break; | |
09488e2e HB |
1558 | case SO_MSGLIMIT: |
1559 | lock_sock(sk); | |
1560 | val = (iucv->path != NULL) ? iucv->path->msglim /* connected */ | |
1561 | : iucv->msglimit; /* default */ | |
1562 | release_sock(sk); | |
1563 | break; | |
51363b87 UB |
1564 | case SO_MSGSIZE: |
1565 | if (sk->sk_state == IUCV_OPEN) | |
1566 | return -EBADFD; | |
1567 | val = (iucv->hs_dev) ? iucv->hs_dev->mtu - | |
1568 | sizeof(struct af_iucv_trans_hdr) - ETH_HLEN : | |
1569 | 0x7fffffff; | |
1570 | break; | |
9d5c5d8f HB |
1571 | default: |
1572 | return -ENOPROTOOPT; | |
1573 | } | |
1574 | ||
1575 | if (put_user(len, optlen)) | |
1576 | return -EFAULT; | |
1577 | if (copy_to_user(optval, &val, len)) | |
1578 | return -EFAULT; | |
1579 | ||
1580 | return 0; | |
1581 | } | |
1582 | ||
1583 | ||
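As an illustration of the SOL_IUCV option handling above, a small user-space sketch follows. The semantics mirror the kernel code (SO_MSGLIMIT may only be changed while the socket is open or bound; SO_MSGSIZE is readable only once the socket is bound), but the numeric constants and fallback #defines are assumptions that real code should take from <sys/socket.h> and the kernel's af_iucv.h.

#include <stdio.h>
#include <sys/socket.h>

#ifndef AF_IUCV
#define AF_IUCV		32	/* assumed, see <linux/socket.h> */
#endif
#ifndef SOL_IUCV
#define SOL_IUCV	277	/* assumed, see <linux/socket.h> */
#endif
#define SO_MSGLIMIT	0x1000	/* assumed value from af_iucv.h */
#define SO_MSGSIZE	0x0800	/* assumed value from af_iucv.h */

/* hedged sketch: raise the message limit on a not yet connected socket
 * and query the maximum message size once the socket is bound/connected
 */
static int tune_iucv_socket(int sk)
{
	int limit = 512;		/* 1..65535 while OPEN or BOUND */
	int msgsize;
	socklen_t optlen = sizeof(msgsize);

	if (setsockopt(sk, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit)))
		return -1;		/* -EINVAL once connected */

	if (getsockopt(sk, SOL_IUCV, SO_MSGSIZE, &msgsize, &optlen))
		return -1;		/* -EBADFD while still unbound */

	printf("msglimit=%d msgsize=%d\n", limit, msgsize);
	return 0;
}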
eac3731b JH |
1584 | /* Callback wrappers - called from iucv base support */ |
1585 | static int iucv_callback_connreq(struct iucv_path *path, | |
1586 | u8 ipvmid[8], u8 ipuser[16]) | |
1587 | { | |
1588 | unsigned char user_data[16]; | |
1589 | unsigned char nuser_data[16]; | |
1590 | unsigned char src_name[8]; | |
eac3731b JH |
1591 | struct sock *sk, *nsk; |
1592 | struct iucv_sock *iucv, *niucv; | |
1593 | int err; | |
1594 | ||
1595 | memcpy(src_name, ipuser, 8); | |
1596 | EBCASC(src_name, 8); | |
1597 | /* Find out if this path belongs to af_iucv. */ | |
1598 | read_lock(&iucv_sk_list.lock); | |
1599 | iucv = NULL; | |
febca281 | 1600 | sk = NULL; |
b67bfe0d | 1601 | sk_for_each(sk, &iucv_sk_list.head) |
eac3731b JH |
1602 | if (sk->sk_state == IUCV_LISTEN && |
1603 | !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { | |
1604 | /* | |
1605 | * Found a listening socket with | |
1606 | * src_name == ipuser[0-7]. | |
1607 | */ | |
1608 | iucv = iucv_sk(sk); | |
1609 | break; | |
1610 | } | |
1611 | read_unlock(&iucv_sk_list.lock); | |
1612 | if (!iucv) | |
1613 | /* No socket found, not one of our paths. */ | |
1614 | return -EINVAL; | |
1615 | ||
1616 | bh_lock_sock(sk); | |
1617 | ||
1618 | /* Check if parent socket is listening */ | |
1619 | low_nmcpy(user_data, iucv->src_name); | |
1620 | high_nmcpy(user_data, iucv->dst_name); | |
1621 | ASCEBC(user_data, sizeof(user_data)); | |
1622 | if (sk->sk_state != IUCV_LISTEN) { | |
6fcd61f7 | 1623 | err = pr_iucv->path_sever(path, user_data); |
65dbd7c2 | 1624 | iucv_path_free(path); |
eac3731b JH |
1625 | goto fail; |
1626 | } | |
1627 | ||
1628 | /* Check for backlog size */ | |
1629 | if (sk_acceptq_is_full(sk)) { | |
6fcd61f7 | 1630 | err = pr_iucv->path_sever(path, user_data); |
65dbd7c2 | 1631 | iucv_path_free(path); |
eac3731b JH |
1632 | goto fail; |
1633 | } | |
1634 | ||
1635 | /* Create the new socket */ | |
c5dab094 | 1636 | nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0); |
da99f056 | 1637 | if (!nsk) { |
6fcd61f7 | 1638 | err = pr_iucv->path_sever(path, user_data); |
65dbd7c2 | 1639 | iucv_path_free(path); |
eac3731b JH |
1640 | goto fail; |
1641 | } | |
1642 | ||
1643 | niucv = iucv_sk(nsk); | |
1644 | iucv_sock_init(nsk, sk); | |
fdbf6326 JW |
1645 | niucv->transport = AF_IUCV_TRANS_IUCV; |
1646 | nsk->sk_allocation |= GFP_DMA; | |
eac3731b JH |
1647 | |
1648 | /* Set the new iucv_sock */ | |
1649 | memcpy(niucv->dst_name, ipuser + 8, 8); | |
1650 | EBCASC(niucv->dst_name, 8); | |
1651 | memcpy(niucv->dst_user_id, ipvmid, 8); | |
1652 | memcpy(niucv->src_name, iucv->src_name, 8); | |
1653 | memcpy(niucv->src_user_id, iucv->src_user_id, 8); | |
1654 | niucv->path = path; | |
1655 | ||
1656 | /* Call iucv_accept */ | |
1657 | high_nmcpy(nuser_data, ipuser + 8); | |
1658 | memcpy(nuser_data + 8, niucv->src_name, 8); | |
1659 | ASCEBC(nuser_data + 8, 8); | |
1660 | ||
09488e2e HB |
1661 | /* set message limit for path based on msglimit of accepting socket */ |
1662 | niucv->msglimit = iucv->msglimit; | |
1663 | path->msglim = iucv->msglimit; | |
6fcd61f7 | 1664 | err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk); |
da99f056 | 1665 | if (err) { |
7d316b94 | 1666 | iucv_sever_path(nsk, 1); |
65dbd7c2 | 1667 | iucv_sock_kill(nsk); |
eac3731b JH |
1668 | goto fail; |
1669 | } | |
1670 | ||
1671 | iucv_accept_enqueue(sk, nsk); | |
1672 | ||
1673 | /* Wake up accept */ | |
1674 | nsk->sk_state = IUCV_CONNECTED; | |
676d2369 | 1675 | sk->sk_data_ready(sk); |
eac3731b JH |
1676 | err = 0; |
1677 | fail: | |
1678 | bh_unlock_sock(sk); | |
1679 | return 0; | |
1680 | } | |
1681 | ||
1682 | static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16]) | |
1683 | { | |
1684 | struct sock *sk = path->private; | |
1685 | ||
1686 | sk->sk_state = IUCV_CONNECTED; | |
1687 | sk->sk_state_change(sk); | |
1688 | } | |
1689 | ||
1690 | static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) | |
1691 | { | |
1692 | struct sock *sk = path->private; | |
561e0360 | 1693 | struct iucv_sock *iucv = iucv_sk(sk); |
f0703c80 UB |
1694 | struct sk_buff *skb; |
1695 | struct sock_msg_q *save_msg; | |
1696 | int len; | |
561e0360 | 1697 | |
fe86e54e | 1698 | if (sk->sk_shutdown & RCV_SHUTDOWN) { |
6fcd61f7 | 1699 | pr_iucv->message_reject(path, msg); |
eac3731b | 1700 | return; |
fe86e54e | 1701 | } |
eac3731b | 1702 | |
3fa6b5ad | 1703 | spin_lock(&iucv->message_q.lock); |
eac3731b | 1704 | |
f0703c80 UB |
1705 | if (!list_empty(&iucv->message_q.list) || |
1706 | !skb_queue_empty(&iucv->backlog_skb_q)) | |
1707 | goto save_message; | |
1708 | ||
1709 | len = atomic_read(&sk->sk_rmem_alloc); | |
87fb4b7b | 1710 | len += SKB_TRUESIZE(iucv_msg_length(msg)); |
f0703c80 UB |
1711 | if (len > sk->sk_rcvbuf) |
1712 | goto save_message; | |
1713 | ||
a006353a | 1714 | skb = alloc_iucv_recv_skb(iucv_msg_length(msg)); |
f0703c80 UB |
1715 | if (!skb) |
1716 | goto save_message; | |
eac3731b | 1717 | |
f0703c80 | 1718 | iucv_process_message(sk, skb, path, msg); |
3fa6b5ad | 1719 | goto out_unlock; |
eac3731b | 1720 | |
f0703c80 UB |
1721 | save_message: |
1722 | save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); | |
d4444722 | 1723 | if (!save_msg) |
a56635a5 | 1724 | goto out_unlock; |
f0703c80 UB |
1725 | save_msg->path = path; |
1726 | save_msg->msg = *msg; | |
eac3731b | 1727 | |
f0703c80 | 1728 | list_add_tail(&save_msg->list, &iucv->message_q.list); |
3fa6b5ad HB |
1729 | |
1730 | out_unlock: | |
f0703c80 | 1731 | spin_unlock(&iucv->message_q.lock); |
eac3731b JH |
1732 | } |
1733 | ||
1734 | static void iucv_callback_txdone(struct iucv_path *path, | |
1735 | struct iucv_message *msg) | |
1736 | { | |
1737 | struct sock *sk = path->private; | |
f2a77991 | 1738 | struct sk_buff *this = NULL; |
ef6af7bd | 1739 | struct sk_buff_head *list; |
9e733177 | 1740 | struct sk_buff *list_skb; |
ef6af7bd | 1741 | struct iucv_sock *iucv; |
eac3731b JH |
1742 | unsigned long flags; |
1743 | ||
ef6af7bd JW |
1744 | iucv = iucv_sk(sk); |
1745 | list = &iucv->send_skb_q; | |
1746 | ||
7d316b94 | 1747 | bh_lock_sock(sk); |
561e0360 | 1748 | |
9e733177 DM |
1749 | spin_lock_irqsave(&list->lock, flags); |
1750 | skb_queue_walk(list, list_skb) { | |
1751 | if (msg->tag == IUCV_SKB_CB(list_skb)->tag) { | |
1752 | this = list_skb; | |
1753 | break; | |
f2a77991 | 1754 | } |
9e733177 | 1755 | } |
ef6af7bd JW |
1756 | if (this) { |
1757 | atomic_dec(&iucv->skbs_in_xmit); | |
9e733177 | 1758 | __skb_unlink(this, list); |
ef6af7bd JW |
1759 | } |
1760 | ||
9e733177 | 1761 | spin_unlock_irqrestore(&list->lock, flags); |
eac3731b | 1762 | |
9e733177 | 1763 | if (this) { |
10d6393d | 1764 | consume_skb(this); |
9e733177 DM |
1765 | /* wake up any process waiting to send */
1766 | iucv_sock_wake_msglim(sk); | |
561e0360 | 1767 | } |
eac3731b | 1768 | |
da99f056 | 1769 | if (sk->sk_state == IUCV_CLOSING) { |
ef6af7bd | 1770 | if (atomic_read(&iucv->skbs_in_xmit) == 0) { |
561e0360 JH |
1771 | sk->sk_state = IUCV_CLOSED; |
1772 | sk->sk_state_change(sk); | |
1773 | } | |
1774 | } | |
7d316b94 | 1775 | bh_unlock_sock(sk); |
eac3731b | 1776 | |
eac3731b JH |
1777 | } |
1778 | ||
1779 | static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16]) | |
1780 | { | |
1781 | struct sock *sk = path->private; | |
1782 | ||
7d316b94 UB |
1783 | if (sk->sk_state == IUCV_CLOSED) |
1784 | return; | |
1785 | ||
1786 | bh_lock_sock(sk); | |
1787 | iucv_sever_path(sk, 1); | |
aac6399c | 1788 | sk->sk_state = IUCV_DISCONN; |
eac3731b JH |
1789 | |
1790 | sk->sk_state_change(sk); | |
7d316b94 | 1791 | bh_unlock_sock(sk); |
eac3731b JH |
1792 | } |
1793 | ||
af88b52d HB |
1794 | /* called if the other communication side shuts down its RECV direction; |
1795 | * in turn, the callback sets SEND_SHUTDOWN to disable sending of data. | |
1796 | */ | |
1797 | static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16]) | |
1798 | { | |
1799 | struct sock *sk = path->private; | |
1800 | ||
1801 | bh_lock_sock(sk); | |
1802 | if (sk->sk_state != IUCV_CLOSED) { | |
1803 | sk->sk_shutdown |= SEND_SHUTDOWN; | |
1804 | sk->sk_state_change(sk); | |
1805 | } | |
1806 | bh_unlock_sock(sk); | |
1807 | } | |
1808 | ||
87c272c6 JW |
1809 | static struct iucv_handler af_iucv_handler = { |
1810 | .path_pending = iucv_callback_connreq, | |
1811 | .path_complete = iucv_callback_connack, | |
1812 | .path_severed = iucv_callback_connrej, | |
1813 | .message_pending = iucv_callback_rx, | |
1814 | .message_complete = iucv_callback_txdone, | |
1815 | .path_quiesced = iucv_callback_shutdown, | |
1816 | }; | |
1817 | ||
3881ac44 UB |
1818 | /***************** HiperSockets transport callbacks ********************/ |
1819 | static void afiucv_swap_src_dest(struct sk_buff *skb) | |
1820 | { | |
cd11d112 | 1821 | struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb); |
3881ac44 UB |
1822 | char tmpID[8]; |
1823 | char tmpName[8]; | |
1824 | ||
1825 | ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID)); | |
1826 | ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName)); | |
1827 | ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID)); | |
1828 | ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName)); | |
1829 | memcpy(tmpID, trans_hdr->srcUserID, 8); | |
1830 | memcpy(tmpName, trans_hdr->srcAppName, 8); | |
1831 | memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8); | |
1832 | memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8); | |
1833 | memcpy(trans_hdr->destUserID, tmpID, 8); | |
1834 | memcpy(trans_hdr->destAppName, tmpName, 8); | |
1835 | skb_push(skb, ETH_HLEN); | |
1836 | memset(skb->data, 0, ETH_HLEN); | |
1837 | } | |
1838 | ||
7c8e1a91 | 1839 | /* |
3881ac44 | 1840 | * afiucv_hs_callback_syn - react to a received SYN
7c8e1a91 | 1841 | */ |
3881ac44 UB |
1842 | static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) |
1843 | { | |
cd11d112 | 1844 | struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb); |
3881ac44 UB |
1845 | struct sock *nsk; |
1846 | struct iucv_sock *iucv, *niucv; | |
3881ac44 UB |
1847 | int err; |
1848 | ||
1849 | iucv = iucv_sk(sk); | |
3881ac44 UB |
1850 | if (!iucv) { |
1851 | /* no sock - connection refused */ | |
1852 | afiucv_swap_src_dest(skb); | |
1853 | trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN; | |
1854 | err = dev_queue_xmit(skb); | |
1855 | goto out; | |
1856 | } | |
1857 | ||
c5dab094 | 1858 | nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0); |
3881ac44 UB |
1859 | bh_lock_sock(sk); |
1860 | if ((sk->sk_state != IUCV_LISTEN) || | |
1861 | sk_acceptq_is_full(sk) || | |
1862 | !nsk) { | |
1863 | /* error on server socket - connection refused */ | |
3881ac44 UB |
1864 | afiucv_swap_src_dest(skb); |
1865 | trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN; | |
1866 | err = dev_queue_xmit(skb); | |
4d520f62 | 1867 | iucv_sock_kill(nsk); |
3881ac44 UB |
1868 | bh_unlock_sock(sk); |
1869 | goto out; | |
1870 | } | |
1871 | ||
1872 | niucv = iucv_sk(nsk); | |
1873 | iucv_sock_init(nsk, sk); | |
1874 | niucv->transport = AF_IUCV_TRANS_HIPER; | |
1875 | niucv->msglimit = iucv->msglimit; | |
1876 | if (!trans_hdr->window) | |
1877 | niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT; | |
1878 | else | |
1879 | niucv->msglimit_peer = trans_hdr->window; | |
1880 | memcpy(niucv->dst_name, trans_hdr->srcAppName, 8); | |
1881 | memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8); | |
1882 | memcpy(niucv->src_name, iucv->src_name, 8); | |
1883 | memcpy(niucv->src_user_id, iucv->src_user_id, 8); | |
1884 | nsk->sk_bound_dev_if = sk->sk_bound_dev_if; | |
800c5eb7 UB |
1885 | niucv->hs_dev = iucv->hs_dev; |
1886 | dev_hold(niucv->hs_dev); | |
3881ac44 UB |
1887 | afiucv_swap_src_dest(skb); |
1888 | trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK; | |
1889 | trans_hdr->window = niucv->msglimit; | |
1890 | /* if the receiver acks the xmit, the connection is established */ | |
1891 | err = dev_queue_xmit(skb); | |
1892 | if (!err) { | |
1893 | iucv_accept_enqueue(sk, nsk); | |
1894 | nsk->sk_state = IUCV_CONNECTED; | |
676d2369 | 1895 | sk->sk_data_ready(sk); |
3881ac44 UB |
1896 | } else |
1897 | iucv_sock_kill(nsk); | |
1898 | bh_unlock_sock(sk); | |
1899 | ||
1900 | out: | |
1901 | return NET_RX_SUCCESS; | |
1902 | } | |
1903 | ||
7c8e1a91 | 1904 | /* |
3881ac44 | 1905 | * afiucv_hs_callback_synack() - react to a received SYN-ACK
7c8e1a91 | 1906 | */ |
3881ac44 UB |
1907 | static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) |
1908 | { | |
1909 | struct iucv_sock *iucv = iucv_sk(sk); | |
3881ac44 | 1910 | |
10d6393d JW |
1911 | if (!iucv || sk->sk_state != IUCV_BOUND) { |
1912 | kfree_skb(skb); | |
1913 | return NET_RX_SUCCESS; | |
1914 | } | |
1915 | ||
3881ac44 | 1916 | bh_lock_sock(sk); |
cd11d112 | 1917 | iucv->msglimit_peer = iucv_trans_hdr(skb)->window; |
3881ac44 UB |
1918 | sk->sk_state = IUCV_CONNECTED; |
1919 | sk->sk_state_change(sk); | |
1920 | bh_unlock_sock(sk); | |
10d6393d | 1921 | consume_skb(skb); |
3881ac44 UB |
1922 | return NET_RX_SUCCESS; |
1923 | } | |
1924 | ||
7c8e1a91 | 1925 | /* |
3881ac44 | 1926 | * afiucv_hs_callback_synfin() - react to a received SYN_FIN
7c8e1a91 | 1927 | */ |
3881ac44 UB |
1928 | static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb) |
1929 | { | |
1930 | struct iucv_sock *iucv = iucv_sk(sk); | |
1931 | ||
10d6393d JW |
1932 | if (!iucv || sk->sk_state != IUCV_BOUND) { |
1933 | kfree_skb(skb); | |
1934 | return NET_RX_SUCCESS; | |
1935 | } | |
1936 | ||
3881ac44 UB |
1937 | bh_lock_sock(sk); |
1938 | sk->sk_state = IUCV_DISCONN; | |
1939 | sk->sk_state_change(sk); | |
1940 | bh_unlock_sock(sk); | |
10d6393d | 1941 | consume_skb(skb); |
3881ac44 UB |
1942 | return NET_RX_SUCCESS; |
1943 | } | |
1944 | ||
7c8e1a91 | 1945 | /* |
3881ac44 | 1946 | * afiucv_hs_callback_fin() - react to a received FIN
7c8e1a91 | 1947 | */ |
3881ac44 UB |
1948 | static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) |
1949 | { | |
1950 | struct iucv_sock *iucv = iucv_sk(sk); | |
1951 | ||
1952 | /* other end of connection closed */ | |
10d6393d JW |
1953 | if (!iucv) { |
1954 | kfree_skb(skb); | |
1955 | return NET_RX_SUCCESS; | |
1956 | } | |
1957 | ||
800c5eb7 UB |
1958 | bh_lock_sock(sk); |
1959 | if (sk->sk_state == IUCV_CONNECTED) { | |
aac6399c | 1960 | sk->sk_state = IUCV_DISCONN; |
3881ac44 | 1961 | sk->sk_state_change(sk); |
3881ac44 | 1962 | } |
800c5eb7 | 1963 | bh_unlock_sock(sk); |
10d6393d | 1964 | consume_skb(skb); |
3881ac44 UB |
1965 | return NET_RX_SUCCESS; |
1966 | } | |
1967 | ||
7c8e1a91 | 1968 | /* |
3881ac44 | 1969 | * afiucv_hs_callback_win() - react to a received WIN
7c8e1a91 | 1970 | */ |
3881ac44 UB |
1971 | static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb) |
1972 | { | |
1973 | struct iucv_sock *iucv = iucv_sk(sk); | |
3881ac44 UB |
1974 | |
1975 | if (!iucv) | |
1976 | return NET_RX_SUCCESS; | |
1977 | ||
1978 | if (sk->sk_state != IUCV_CONNECTED) | |
1979 | return NET_RX_SUCCESS; | |
1980 | ||
cd11d112 | 1981 | atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent); |
3881ac44 UB |
1982 | iucv_sock_wake_msglim(sk); |
1983 | return NET_RX_SUCCESS; | |
1984 | } | |
1985 | ||
7c8e1a91 | 1986 | /* |
3881ac44 | 1987 | * afiucv_hs_callback_rx() - react to received data
7c8e1a91 | 1988 | */ |
3881ac44 UB |
1989 | static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) |
1990 | { | |
1991 | struct iucv_sock *iucv = iucv_sk(sk); | |
1992 | ||
1993 | if (!iucv) { | |
1994 | kfree_skb(skb); | |
1995 | return NET_RX_SUCCESS; | |
1996 | } | |
1997 | ||
1998 | if (sk->sk_state != IUCV_CONNECTED) { | |
1999 | kfree_skb(skb); | |
2000 | return NET_RX_SUCCESS; | |
2001 | } | |
2002 | ||
82492a35 UB |
2003 | if (sk->sk_shutdown & RCV_SHUTDOWN) { |
2004 | kfree_skb(skb); | |
2005 | return NET_RX_SUCCESS; | |
2006 | } | |
2007 | ||
979f66b3 | 2008 | /* record message information from the transport header in the skb cb */
3881ac44 UB |
2009 | skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); |
2010 | skb_reset_transport_header(skb); | |
2011 | skb_reset_network_header(skb); | |
f9c41a62 | 2012 | IUCV_SKB_CB(skb)->offset = 0; |
8c68b1a0 UB |
2013 | if (sk_filter(sk, skb)) { |
2014 | atomic_inc(&sk->sk_drops); /* skb rejected by filter */ | |
2015 | kfree_skb(skb); | |
2016 | return NET_RX_SUCCESS; | |
2017 | } | |
2018 | ||
3881ac44 UB |
2019 | spin_lock(&iucv->message_q.lock); |
2020 | if (skb_queue_empty(&iucv->backlog_skb_q)) { | |
8c68b1a0 | 2021 | if (__sock_queue_rcv_skb(sk, skb)) |
3881ac44 UB |
2022 | /* handle rcv queue full */ |
2023 | skb_queue_tail(&iucv->backlog_skb_q, skb); | |
3881ac44 UB |
2024 | } else |
2025 | skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); | |
2026 | spin_unlock(&iucv->message_q.lock); | |
2027 | return NET_RX_SUCCESS; | |
2028 | } | |
2029 | ||
7c8e1a91 | 2030 | /* |
3881ac44 UB |
2031 | * afiucv_hs_rcv() - entry point for data arriving through the HiperSockets
2032 | * transport
2033 | * called from netif RX softirq | |
7c8e1a91 | 2034 | */ |
3881ac44 UB |
2035 | static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, |
2036 | struct packet_type *pt, struct net_device *orig_dev) | |
2037 | { | |
3881ac44 UB |
2038 | struct sock *sk; |
2039 | struct iucv_sock *iucv; | |
2040 | struct af_iucv_trans_hdr *trans_hdr; | |
22244099 | 2041 | int err = NET_RX_SUCCESS; |
3881ac44 | 2042 | char nullstring[8]; |
3881ac44 | 2043 | |
cd11d112 | 2044 | if (!pskb_may_pull(skb, sizeof(*trans_hdr))) { |
979f66b3 EC |
2045 | kfree_skb(skb); |
2046 | return NET_RX_SUCCESS; | |
2047 | } | |
cd11d112 JW |
2048 | |
2049 | trans_hdr = iucv_trans_hdr(skb); | |
3881ac44 UB |
2050 | EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName)); |
2051 | EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID)); | |
2052 | EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName)); | |
2053 | EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID)); | |
2054 | memset(nullstring, 0, sizeof(nullstring)); | |
2055 | iucv = NULL; | |
2056 | sk = NULL; | |
2057 | read_lock(&iucv_sk_list.lock); | |
b67bfe0d | 2058 | sk_for_each(sk, &iucv_sk_list.head) { |
3881ac44 UB |
2059 | if (trans_hdr->flags == AF_IUCV_FLAG_SYN) { |
2060 | if ((!memcmp(&iucv_sk(sk)->src_name, | |
2061 | trans_hdr->destAppName, 8)) && | |
2062 | (!memcmp(&iucv_sk(sk)->src_user_id, | |
2063 | trans_hdr->destUserID, 8)) && | |
2064 | (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) && | |
2065 | (!memcmp(&iucv_sk(sk)->dst_user_id, | |
2066 | nullstring, 8))) { | |
2067 | iucv = iucv_sk(sk); | |
2068 | break; | |
2069 | } | |
2070 | } else { | |
2071 | if ((!memcmp(&iucv_sk(sk)->src_name, | |
2072 | trans_hdr->destAppName, 8)) && | |
2073 | (!memcmp(&iucv_sk(sk)->src_user_id, | |
2074 | trans_hdr->destUserID, 8)) && | |
2075 | (!memcmp(&iucv_sk(sk)->dst_name, | |
2076 | trans_hdr->srcAppName, 8)) && | |
2077 | (!memcmp(&iucv_sk(sk)->dst_user_id, | |
2078 | trans_hdr->srcUserID, 8))) { | |
2079 | iucv = iucv_sk(sk); | |
2080 | break; | |
2081 | } | |
2082 | } | |
2083 | } | |
2084 | read_unlock(&iucv_sk_list.lock); | |
2085 | if (!iucv) | |
2086 | sk = NULL; | |
2087 | ||
2088 | /* no socket found
2089 | how should we respond when there is no socket?
2090 | 1) send without a socket and skip send rc checking?
2091 | 2) introduce a default socket to handle these cases
2092 | ||
2093 | SYN -> send SYN|ACK in good case, send SYN|FIN in bad case | |
2094 | data -> send FIN | |
2095 | SYN|ACK, SYN|FIN, FIN -> no action? */ | |
2096 | ||
2097 | switch (trans_hdr->flags) { | |
2098 | case AF_IUCV_FLAG_SYN: | |
2099 | /* connect request */ | |
2100 | err = afiucv_hs_callback_syn(sk, skb); | |
2101 | break; | |
2102 | case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK): | |
2103 | /* connect request confirmed */ | |
2104 | err = afiucv_hs_callback_synack(sk, skb); | |
2105 | break; | |
2106 | case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN): | |
2107 | /* connect request refused */ | |
2108 | err = afiucv_hs_callback_synfin(sk, skb); | |
2109 | break; | |
2110 | case (AF_IUCV_FLAG_FIN): | |
2111 | /* close request */ | |
2112 | err = afiucv_hs_callback_fin(sk, skb); | |
2113 | break; | |
2114 | case (AF_IUCV_FLAG_WIN): | |
2115 | err = afiucv_hs_callback_win(sk, skb); | |
800c5eb7 | 2116 | if (skb->len == sizeof(struct af_iucv_trans_hdr)) { |
10d6393d | 2117 | consume_skb(skb); |
800c5eb7 UB |
2118 | break; |
2119 | } | |
df561f66 | 2120 | fallthrough; /* and receive non-zero length data */ |
82492a35 UB |
2121 | case (AF_IUCV_FLAG_SHT): |
2122 | /* shutdown request */ | |
df561f66 | 2123 | fallthrough; /* and receive zero length data */ |
3881ac44 UB |
2124 | case 0: |
2125 | /* plain data frame */ | |
f9c41a62 | 2126 | IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class; |
3881ac44 UB |
2127 | err = afiucv_hs_callback_rx(sk, skb); |
2128 | break; | |
2129 | default: | |
22244099 | 2130 | kfree_skb(skb); |
3881ac44 UB |
2131 | } |
2132 | ||
2133 | return err; | |
2134 | } | |
2135 | ||
7c8e1a91 | 2136 | /* |
bf05d48d | 2137 | * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets |
3881ac44 | 2138 | * transport |
7c8e1a91 | 2139 | */ |
80bc97aa | 2140 | static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n) |
3881ac44 | 2141 | { |
80bc97aa | 2142 | struct iucv_sock *iucv = iucv_sk(sk); |
3881ac44 | 2143 | |
c464444f | 2144 | if (sock_flag(sk, SOCK_ZAPPED)) |
3881ac44 UB |
2145 | return; |
2146 | ||
80bc97aa JW |
2147 | switch (n) { |
2148 | case TX_NOTIFY_OK: | |
2149 | atomic_dec(&iucv->skbs_in_xmit); | |
2150 | iucv_sock_wake_msglim(sk); | |
2151 | break; | |
2152 | case TX_NOTIFY_PENDING: | |
2153 | atomic_inc(&iucv->pendings); | |
2154 | break; | |
2155 | case TX_NOTIFY_DELAYED_OK: | |
2156 | atomic_dec(&iucv->skbs_in_xmit); | |
2157 | if (atomic_dec_return(&iucv->pendings) <= 0) | |
2158 | iucv_sock_wake_msglim(sk); | |
2159 | break; | |
2160 | default: | |
2161 | atomic_dec(&iucv->skbs_in_xmit); | |
2162 | if (sk->sk_state == IUCV_CONNECTED) { | |
2163 | sk->sk_state = IUCV_DISCONN; | |
2164 | sk->sk_state_change(sk); | |
3881ac44 | 2165 | } |
3881ac44 | 2166 | } |
3881ac44 | 2167 | |
42bd48e0 | 2168 | if (sk->sk_state == IUCV_CLOSING) { |
ef6af7bd | 2169 | if (atomic_read(&iucv->skbs_in_xmit) == 0) { |
42bd48e0 UB |
2170 | sk->sk_state = IUCV_CLOSED; |
2171 | sk->sk_state_change(sk); | |
2172 | } | |
2173 | } | |
3881ac44 | 2174 | } |
9fbd87d4 UB |
2175 | |
2176 | /* | |
2177 | * afiucv_netdev_event: handle netdev notifier chain events | |
2178 | */ | |
2179 | static int afiucv_netdev_event(struct notifier_block *this, | |
2180 | unsigned long event, void *ptr) | |
2181 | { | |
351638e7 | 2182 | struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); |
9fbd87d4 UB |
2183 | struct sock *sk; |
2184 | struct iucv_sock *iucv; | |
2185 | ||
2186 | switch (event) { | |
2187 | case NETDEV_REBOOT: | |
2188 | case NETDEV_GOING_DOWN: | |
b67bfe0d | 2189 | sk_for_each(sk, &iucv_sk_list.head) { |
9fbd87d4 UB |
2190 | iucv = iucv_sk(sk); |
2191 | if ((iucv->hs_dev == event_dev) && | |
2192 | (sk->sk_state == IUCV_CONNECTED)) { | |
2193 | if (event == NETDEV_GOING_DOWN) | |
2194 | iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); | |
2195 | sk->sk_state = IUCV_DISCONN; | |
2196 | sk->sk_state_change(sk); | |
2197 | } | |
2198 | } | |
2199 | break; | |
2200 | case NETDEV_DOWN: | |
2201 | case NETDEV_UNREGISTER: | |
2202 | default: | |
2203 | break; | |
2204 | } | |
2205 | return NOTIFY_DONE; | |
2206 | } | |
2207 | ||
2208 | static struct notifier_block afiucv_netdev_notifier = { | |
2209 | .notifier_call = afiucv_netdev_event, | |
2210 | }; | |
2211 | ||
5708e868 | 2212 | static const struct proto_ops iucv_sock_ops = { |
eac3731b JH |
2213 | .family = PF_IUCV, |
2214 | .owner = THIS_MODULE, | |
2215 | .release = iucv_sock_release, | |
2216 | .bind = iucv_sock_bind, | |
2217 | .connect = iucv_sock_connect, | |
2218 | .listen = iucv_sock_listen, | |
2219 | .accept = iucv_sock_accept, | |
2220 | .getname = iucv_sock_getname, | |
2221 | .sendmsg = iucv_sock_sendmsg, | |
2222 | .recvmsg = iucv_sock_recvmsg, | |
a11e1d43 | 2223 | .poll = iucv_sock_poll, |
eac3731b JH |
2224 | .ioctl = sock_no_ioctl, |
2225 | .mmap = sock_no_mmap, | |
2226 | .socketpair = sock_no_socketpair, | |
2227 | .shutdown = iucv_sock_shutdown, | |
9d5c5d8f HB |
2228 | .setsockopt = iucv_sock_setsockopt, |
2229 | .getsockopt = iucv_sock_getsockopt, | |
eac3731b JH |
2230 | }; |
2231 | ||
e9a36ca5 JW |
2232 | static int iucv_sock_create(struct net *net, struct socket *sock, int protocol, |
2233 | int kern) | |
2234 | { | |
2235 | struct sock *sk; | |
2236 | ||
2237 | if (protocol && protocol != PF_IUCV) | |
2238 | return -EPROTONOSUPPORT; | |
2239 | ||
2240 | sock->state = SS_UNCONNECTED; | |
2241 | ||
2242 | switch (sock->type) { | |
2243 | case SOCK_STREAM: | |
2244 | case SOCK_SEQPACKET: | |
2245 | /* currently, proto ops can handle both sk types */ | |
2246 | sock->ops = &iucv_sock_ops; | |
2247 | break; | |
2248 | default: | |
2249 | return -ESOCKTNOSUPPORT; | |
2250 | } | |
2251 | ||
2252 | sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern); | |
2253 | if (!sk) | |
2254 | return -ENOMEM; | |
2255 | ||
2256 | iucv_sock_init(sk, NULL); | |
2257 | ||
2258 | return 0; | |
2259 | } | |
2260 | ||
ec1b4cf7 | 2261 | static const struct net_proto_family iucv_sock_family_ops = { |
eac3731b JH |
2262 | .family = AF_IUCV, |
2263 | .owner = THIS_MODULE, | |
2264 | .create = iucv_sock_create, | |
2265 | }; | |
2266 | ||
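To show how the socket family registered above is used, a minimal user-space client sketch follows. The struct sockaddr_iucv layout and the blank-padded 8-byte user ID / application name fields are assumptions modelled on the kernel's af_iucv.h; production code should use the definition from its own headers, and the helper names are hypothetical.

#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_IUCV
#define AF_IUCV	32			/* assumed, see <linux/socket.h> */
#endif

struct sockaddr_iucv {			/* assumed layout, cf. af_iucv.h */
	sa_family_t	siucv_family;
	unsigned short	siucv_port;	/* reserved */
	unsigned int	siucv_addr;	/* reserved */
	char		siucv_nodeid[8];	/* reserved */
	char		siucv_user_id[8];	/* z/VM user ID of the peer */
	char		siucv_name[8];	/* application name of the peer */
};

/* IUCV names are 8 bytes, blank padded and not NUL terminated */
static void iucv_set_name(char *dst, const char *src)
{
	size_t n = strnlen(src, 8);

	memset(dst, ' ', 8);
	memcpy(dst, src, n);
}

/* hedged sketch: connect to <peer_userid>/<peer_app>; if the socket was
 * not bound before, iucv_sock_connect() autobinds it on the classic
 * VM IUCV transport
 */
static int iucv_client_connect(const char *peer_userid, const char *peer_app)
{
	struct sockaddr_iucv addr;
	int sk;

	sk = socket(AF_IUCV, SOCK_STREAM, 0);
	if (sk < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.siucv_family = AF_IUCV;
	iucv_set_name(addr.siucv_user_id, peer_userid);
	iucv_set_name(addr.siucv_name, peer_app);

	if (connect(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(sk);
		return -1;
	}
	return sk;
}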
3881ac44 UB |
2267 | static struct packet_type iucv_packet_type = { |
2268 | .type = cpu_to_be16(ETH_P_AF_IUCV), | |
2269 | .func = afiucv_hs_rcv, | |
2270 | }; | |
2271 | ||
da99f056 | 2272 | static int __init afiucv_init(void) |
eac3731b JH |
2273 | { |
2274 | int err; | |
2275 | ||
52109a06 | 2276 | if (machine_is_vm() && IS_ENABLED(CONFIG_IUCV)) { |
3881ac44 UB |
2277 | cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err); |
2278 | if (unlikely(err)) { | |
2279 | WARN_ON(err); | |
2280 | err = -EPROTONOSUPPORT; | |
2281 | goto out; | |
2282 | } | |
eac3731b | 2283 | |
4eb9eda6 | 2284 | pr_iucv = &iucv_if; |
3881ac44 UB |
2285 | } else { |
2286 | memset(&iucv_userid, 0, sizeof(iucv_userid)); | |
2287 | pr_iucv = NULL; | |
6fcd61f7 FB |
2288 | } |
2289 | ||
eac3731b JH |
2290 | err = proto_register(&iucv_proto, 0); |
2291 | if (err) | |
6fcd61f7 | 2292 | goto out; |
eac3731b JH |
2293 | err = sock_register(&iucv_sock_family_ops); |
2294 | if (err) | |
2295 | goto out_proto; | |
6fcd61f7 | 2296 | |
3881ac44 | 2297 | if (pr_iucv) { |
ff8424be | 2298 | err = pr_iucv->iucv_register(&af_iucv_handler, 0); |
3881ac44 UB |
2299 | if (err) |
2300 | goto out_sock; | |
06996c1d JW |
2301 | } |
2302 | ||
2303 | err = register_netdevice_notifier(&afiucv_netdev_notifier); | |
2304 | if (err) | |
2305 | goto out_notifier; | |
2306 | ||
3881ac44 | 2307 | dev_add_pack(&iucv_packet_type); |
eac3731b JH |
2308 | return 0; |
2309 | ||
06996c1d JW |
2310 | out_notifier: |
2311 | if (pr_iucv) | |
ff8424be | 2312 | pr_iucv->iucv_unregister(&af_iucv_handler, 0); |
c23cad92 UB |
2313 | out_sock: |
2314 | sock_unregister(PF_IUCV); | |
eac3731b JH |
2315 | out_proto: |
2316 | proto_unregister(&iucv_proto); | |
eac3731b JH |
2317 | out: |
2318 | return err; | |
2319 | } | |
2320 | ||
2321 | static void __exit afiucv_exit(void) | |
2322 | { | |
4eb9eda6 | 2323 | if (pr_iucv) |
ff8424be | 2324 | pr_iucv->iucv_unregister(&af_iucv_handler, 0); |
06996c1d JW |
2325 | |
2326 | unregister_netdevice_notifier(&afiucv_netdev_notifier); | |
3881ac44 | 2327 | dev_remove_pack(&iucv_packet_type); |
eac3731b JH |
2328 | sock_unregister(PF_IUCV); |
2329 | proto_unregister(&iucv_proto); | |
eac3731b JH |
2330 | } |
2331 | ||
2332 | module_init(afiucv_init); | |
2333 | module_exit(afiucv_exit); | |
2334 | ||
2335 | MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>"); | |
2336 | MODULE_DESCRIPTION("IUCV Sockets ver " VERSION); | |
2337 | MODULE_VERSION(VERSION); | |
2338 | MODULE_LICENSE("GPL"); | |
2339 | MODULE_ALIAS_NETPROTO(PF_IUCV); |