Commit | Line | Data |
---|---|---|
ee5d8f4d | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
1da177e4 LT |
2 | /* |
3 | * X.25 Packet Layer release 002 | |
4 | * | |
5 | * This is ALPHA test software. This code may break your machine, | |
6 | * randomly fail to work with new releases, misbehave and/or generally | |
f8e1d201 | 7 | * screw up. It might even work. |
1da177e4 LT |
8 | * |
9 | * This code REQUIRES 2.1.15 or higher | |
10 | * | |
1da177e4 LT |
11 | * History |
12 | * X.25 001 Jonathan Naylor Started coding. | |
13 | * X.25 002 Jonathan Naylor New timer architecture. | |
f8e1d201 | 14 | * mar/20/00 Daniela Squassoni Disabling/enabling of facilities |
1da177e4 LT |
15 | * negotiation. |
16 | * 2000-09-04 Henner Eisen dev_hold() / dev_put() for x25_neigh. | |
17 | */ | |
18 | ||
b73e9e3c | 19 | #define pr_fmt(fmt) "X25: " fmt |
20 | ||
1da177e4 LT |
21 | #include <linux/kernel.h> |
22 | #include <linux/jiffies.h> | |
23 | #include <linux/timer.h> | |
5a0e3ad6 | 24 | #include <linux/slab.h> |
1da177e4 LT |
25 | #include <linux/netdevice.h> |
26 | #include <linux/skbuff.h> | |
7c0f6ba6 | 27 | #include <linux/uaccess.h> |
1da177e4 LT |
28 | #include <linux/init.h> |
29 | #include <net/x25.h> | |
30 | ||
/* All known X.25 neighbours, one per layer-2 device (see x25_link_device_up). */
LIST_HEAD(x25_neigh_list);
/* Protects x25_neigh_list and the per-neighbour tunables (extended, facil mask). */
DEFINE_RWLOCK(x25_neigh_list_lock);

static void x25_t20timer_expiry(struct timer_list *);

static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
static void x25_transmit_restart_request(struct x25_neigh *nb);
38 | ||
39 | /* | |
40 | * Linux set/reset timer routines | |
41 | */ | |
42 | static inline void x25_start_t20timer(struct x25_neigh *nb) | |
43 | { | |
44 | mod_timer(&nb->t20timer, jiffies + nb->t20); | |
45 | } | |
46 | ||
/*
 * T20 expired with no Restart Confirmation from the peer: retransmit
 * the Restart Request, then rearm the timer so we keep retrying until
 * an answer arrives (the confirmation path in x25_link_control() stops
 * the timer).
 */
static void x25_t20timer_expiry(struct timer_list *t)
{
	struct x25_neigh *nb = from_timer(nb, t, t20timer);

	x25_transmit_restart_request(nb);

	x25_start_t20timer(nb);
}
55 | ||
/* Stop the T20 retry timer; safe to call even when it is not pending. */
static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
	del_timer(&nb->t20timer);
}
60 | ||
1da177e4 LT |
/*
 * This handles all restart and diagnostic frames (frames arriving on
 * LCI 0).  A restart exchange clears every virtual call on the link;
 * queued data may only be flushed out once the link reaches state 3.
 */
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
		      unsigned short frametype)
{
	struct sk_buff *skbn;

	switch (frametype) {
	case X25_RESTART_REQUEST:
		switch (nb->state) {
		case X25_LINK_STATE_0:
			/* This can happen when the x25 module just gets loaded
			 * and doesn't know layer 2 has already connected
			 */
			nb->state = X25_LINK_STATE_3;
			x25_transmit_restart_confirmation(nb);
			break;
		case X25_LINK_STATE_2:
			/* Restart collision: our own request is outstanding;
			 * the peer's request completes the exchange.
			 */
			x25_stop_t20timer(nb);
			nb->state = X25_LINK_STATE_3;
			break;
		case X25_LINK_STATE_3:
			/* clear existing virtual calls */
			x25_kill_by_neigh(nb);

			x25_transmit_restart_confirmation(nb);
			break;
		}
		break;

	case X25_RESTART_CONFIRMATION:
		switch (nb->state) {
		case X25_LINK_STATE_2:
			/* Expected answer to our Restart Request. */
			x25_stop_t20timer(nb);
			nb->state = X25_LINK_STATE_3;
			break;
		case X25_LINK_STATE_3:
			/* Unsolicited confirmation: restart the link from
			 * scratch.
			 */
			/* clear existing virtual calls */
			x25_kill_by_neigh(nb);

			x25_transmit_restart_request(nb);
			nb->state = X25_LINK_STATE_2;
			x25_start_t20timer(nb);
			break;
		}
		break;

	case X25_DIAGNOSTIC:
		/* Need the four diagnostic octets after the standard header
		 * before dereferencing skb->data[3..6].
		 */
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
			break;

		pr_warn("diagnostic #%d - %02X %02X %02X\n",
			skb->data[3], skb->data[4],
			skb->data[5], skb->data[6]);
		break;

	default:
		pr_warn("received unknown %02X with LCI 000\n",
			frametype);
		break;
	}

	/* Link is up: flush frames queued while it was (re)starting. */
	if (nb->state == X25_LINK_STATE_3)
		while ((skbn = skb_dequeue(&nb->queue)) != NULL)
			x25_send_frame(skbn, nb);
}
128 | ||
129 | /* | |
130 | * This routine is called when a Restart Request is needed | |
131 | */ | |
132 | static void x25_transmit_restart_request(struct x25_neigh *nb) | |
133 | { | |
134 | unsigned char *dptr; | |
135 | int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2; | |
136 | struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC); | |
137 | ||
138 | if (!skb) | |
139 | return; | |
140 | ||
141 | skb_reserve(skb, X25_MAX_L2_LEN); | |
142 | ||
143 | dptr = skb_put(skb, X25_STD_MIN_LEN + 2); | |
144 | ||
145 | *dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ; | |
146 | *dptr++ = 0x00; | |
147 | *dptr++ = X25_RESTART_REQUEST; | |
148 | *dptr++ = 0x00; | |
149 | *dptr++ = 0; | |
150 | ||
151 | skb->sk = NULL; | |
152 | ||
153 | x25_send_frame(skb, nb); | |
154 | } | |
155 | ||
156 | /* | |
157 | * This routine is called when a Restart Confirmation is needed | |
158 | */ | |
159 | static void x25_transmit_restart_confirmation(struct x25_neigh *nb) | |
160 | { | |
161 | unsigned char *dptr; | |
162 | int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN; | |
163 | struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC); | |
164 | ||
165 | if (!skb) | |
166 | return; | |
167 | ||
168 | skb_reserve(skb, X25_MAX_L2_LEN); | |
169 | ||
170 | dptr = skb_put(skb, X25_STD_MIN_LEN); | |
171 | ||
172 | *dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ; | |
173 | *dptr++ = 0x00; | |
174 | *dptr++ = X25_RESTART_CONFIRMATION; | |
175 | ||
176 | skb->sk = NULL; | |
177 | ||
178 | x25_send_frame(skb, nb); | |
179 | } | |
180 | ||
181 | /* | |
182 | * This routine is called when a Clear Request is needed outside of the context | |
183 | * of a connected socket. | |
184 | */ | |
185 | void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci, | |
186 | unsigned char cause) | |
187 | { | |
188 | unsigned char *dptr; | |
189 | int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2; | |
190 | struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC); | |
191 | ||
192 | if (!skb) | |
193 | return; | |
194 | ||
195 | skb_reserve(skb, X25_MAX_L2_LEN); | |
196 | ||
197 | dptr = skb_put(skb, X25_STD_MIN_LEN + 2); | |
198 | ||
199 | *dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ? | |
200 | X25_GFI_EXTSEQ : | |
201 | X25_GFI_STDSEQ); | |
202 | *dptr++ = (lci >> 0) & 0xFF; | |
203 | *dptr++ = X25_CLEAR_REQUEST; | |
204 | *dptr++ = cause; | |
205 | *dptr++ = 0x00; | |
206 | ||
207 | skb->sk = NULL; | |
208 | ||
209 | x25_send_frame(skb, nb); | |
210 | } | |
211 | ||
/*
 * Hand a frame to the link: send it straight out when the link is up
 * (state 3), otherwise queue it and, from the idle state, kick off
 * link-layer establishment.
 */
void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
		/* Link down: hold the frame and start bringing it up. */
		skb_queue_tail(&nb->queue, skb);
		nb->state = X25_LINK_STATE_1;
		x25_establish_link(nb);
		break;
	case X25_LINK_STATE_1:
	case X25_LINK_STATE_2:
		/* Link coming up: hold the frame until state 3. */
		skb_queue_tail(&nb->queue, skb);
		break;
	case X25_LINK_STATE_3:
		x25_send_frame(skb, nb);
		break;
	}
}
229 | ||
230 | /* | |
231 | * Called when the link layer has become established. | |
232 | */ | |
233 | void x25_link_established(struct x25_neigh *nb) | |
234 | { | |
235 | switch (nb->state) { | |
fddc5f3e | 236 | case X25_LINK_STATE_0: |
fddc5f3e JP |
237 | case X25_LINK_STATE_1: |
238 | x25_transmit_restart_request(nb); | |
239 | nb->state = X25_LINK_STATE_2; | |
240 | x25_start_t20timer(nb); | |
241 | break; | |
1da177e4 LT |
242 | } |
243 | } | |
244 | ||
/*
 * Called when the link layer has terminated, or an establishment
 * request has failed.
 */

void x25_link_terminated(struct x25_neigh *nb)
{
	nb->state = X25_LINK_STATE_0;
	/* Drop frames queued while the link was (re)starting and stop
	 * any pending restart retry.
	 */
	skb_queue_purge(&nb->queue);
	x25_stop_t20timer(nb);

	/* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
	x25_kill_by_neigh(nb);
}
259 | ||
/*
 * Add a new device: allocate and initialise an x25_neigh for @dev and
 * link it onto x25_neigh_list.  Allocation failure is silently ignored
 * (no neighbour is created).
 */
void x25_link_device_up(struct net_device *dev)
{
	struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);

	if (!nb)
		return;

	skb_queue_head_init(&nb->queue);
	timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);

	/* Hold the device for the lifetime of the neighbour; released in
	 * x25_link_device_down() / x25_link_free().
	 */
	dev_hold(dev);
	nb->dev      = dev;
	nb->state    = X25_LINK_STATE_0;
	nb->extended = 0;
	/*
	 * Enables negotiation
	 */
	nb->global_facil_mask = X25_MASK_REVERSE |
				       X25_MASK_THROUGHPUT |
				       X25_MASK_PACKET_SIZE |
				       X25_MASK_WINDOW_SIZE;
	nb->t20      = sysctl_x25_restart_request_timeout;
	/* The list owns this initial reference. */
	refcount_set(&nb->refcnt, 1);

	write_lock_bh(&x25_neigh_list_lock);
	list_add(&nb->node, &x25_neigh_list);
	write_unlock_bh(&x25_neigh_list_lock);
}
291 | ||
/**
 * __x25_remove_neigh - remove neighbour from x25_neigh_list
 * @nb: - neigh to remove
 *
 * Remove neighbour from x25_neigh_list. If it was there.
 * Caller must hold x25_neigh_list_lock.
 *
 * Drops the list's reference via x25_neigh_put() on removal.
 */
static void __x25_remove_neigh(struct x25_neigh *nb)
{
	/* NOTE(review): the node.next test guards against entries that are
	 * not (or no longer) linked — confirm the unlinked sentinel used here.
	 */
	if (nb->node.next) {
		list_del(&nb->node);
		x25_neigh_put(nb);
	}
}
306 | ||
307 | /* | |
308 | * A device has been removed, remove its links. | |
309 | */ | |
310 | void x25_link_device_down(struct net_device *dev) | |
311 | { | |
312 | struct x25_neigh *nb; | |
313 | struct list_head *entry, *tmp; | |
314 | ||
315 | write_lock_bh(&x25_neigh_list_lock); | |
316 | ||
317 | list_for_each_safe(entry, tmp, &x25_neigh_list) { | |
318 | nb = list_entry(entry, struct x25_neigh, node); | |
319 | ||
320 | if (nb->dev == dev) { | |
321 | __x25_remove_neigh(nb); | |
322 | dev_put(dev); | |
323 | } | |
324 | } | |
325 | ||
326 | write_unlock_bh(&x25_neigh_list_lock); | |
327 | } | |
328 | ||
329 | /* | |
330 | * Given a device, return the neighbour address. | |
331 | */ | |
332 | struct x25_neigh *x25_get_neigh(struct net_device *dev) | |
333 | { | |
334 | struct x25_neigh *nb, *use = NULL; | |
1da177e4 LT |
335 | |
336 | read_lock_bh(&x25_neigh_list_lock); | |
3835a661 | 337 | list_for_each_entry(nb, &x25_neigh_list, node) { |
1da177e4 LT |
338 | if (nb->dev == dev) { |
339 | use = nb; | |
340 | break; | |
341 | } | |
342 | } | |
343 | ||
344 | if (use) | |
345 | x25_neigh_hold(use); | |
346 | read_unlock_bh(&x25_neigh_list_lock); | |
347 | return use; | |
348 | } | |
349 | ||
/*
 * Handle the ioctls that control the subscription functions:
 * SIOCX25GSUBSCRIP reads, SIOCX25SSUBSCRIP writes the per-neighbour
 * extended-sequencing flag and global facility mask.
 *
 * Returns 0 on success, -EINVAL for a bad command/device/value,
 * -EFAULT on a failed user-space copy.
 */
int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
{
	struct x25_subscrip_struct x25_subscr;
	struct x25_neigh *nb;
	struct net_device *dev;
	int rc = -EINVAL;

	if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
		goto out;

	rc = -EFAULT;
	if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
		goto out;

	rc = -EINVAL;
	if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
		goto out;

	if ((nb = x25_get_neigh(dev)) == NULL)
		goto out_dev_put;

	/* x25_get_neigh() took its own reference; drop the device one now. */
	dev_put(dev);

	if (cmd == SIOCX25GSUBSCRIP) {
		/* Snapshot both fields under the lock for a consistent pair. */
		read_lock_bh(&x25_neigh_list_lock);
		x25_subscr.extended	     = nb->extended;
		x25_subscr.global_facil_mask = nb->global_facil_mask;
		read_unlock_bh(&x25_neigh_list_lock);
		rc = copy_to_user(arg, &x25_subscr,
				  sizeof(x25_subscr)) ? -EFAULT : 0;
	} else {
		rc = -EINVAL;
		/* Only 0 and 1 are valid for the extended flag. */
		if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
			rc = 0;
			write_lock_bh(&x25_neigh_list_lock);
			nb->extended	     = x25_subscr.extended;
			nb->global_facil_mask = x25_subscr.global_facil_mask;
			write_unlock_bh(&x25_neigh_list_lock);
		}
	}
	x25_neigh_put(nb);
out:
	return rc;
out_dev_put:
	dev_put(dev);
	goto out;
}
400 | ||
401 | ||
402 | /* | |
403 | * Release all memory associated with X.25 neighbour structures. | |
404 | */ | |
405 | void __exit x25_link_free(void) | |
406 | { | |
407 | struct x25_neigh *nb; | |
408 | struct list_head *entry, *tmp; | |
409 | ||
410 | write_lock_bh(&x25_neigh_list_lock); | |
411 | ||
412 | list_for_each_safe(entry, tmp, &x25_neigh_list) { | |
96642d42 DM |
413 | struct net_device *dev; |
414 | ||
1da177e4 | 415 | nb = list_entry(entry, struct x25_neigh, node); |
96642d42 | 416 | dev = nb->dev; |
1da177e4 | 417 | __x25_remove_neigh(nb); |
96642d42 | 418 | dev_put(dev); |
1da177e4 LT |
419 | } |
420 | write_unlock_bh(&x25_neigh_list_lock); | |
421 | } |