drivers/s390/net/netiucv.c
1da177e4 1/*
1da177e4
LT
2 * IUCV network driver
3 *
1175b257 4 * Copyright IBM Corp. 2001, 2009
1da177e4 5 *
1175b257
UB
6 * Author(s):
7 * Original netiucv driver:
8 * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
9 * Sysfs integration and all bugs therein:
10 * Cornelia Huck (cornelia.huck@de.ibm.com)
11 * PM functions:
12 * Ursula Braun (ursula.braun@de.ibm.com)
1da177e4
LT
13 *
14 * Documentation used:
15 * the source of the original IUCV driver by:
16 * Stefan Hegewald <hegewald@de.ibm.com>
17 * Hartmut Penner <hpenner@de.ibm.com>
18 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
19 * Martin Schwidefsky (schwidefsky@de.ibm.com)
20 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
21 *
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License as published by
24 * the Free Software Foundation; either version 2, or (at your option)
25 * any later version.
26 *
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU General Public License for more details.
31 *
32 * You should have received a copy of the GNU General Public License
33 * along with this program; if not, write to the Free Software
34 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
35 *
1da177e4 36 */
e82b0f2c 37
8f7c502c
UB
38#define KMSG_COMPONENT "netiucv"
39#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
40
1da177e4
LT
41#undef DEBUG
42
43#include <linux/module.h>
44#include <linux/init.h>
45#include <linux/kernel.h>
46#include <linux/slab.h>
47#include <linux/errno.h>
48#include <linux/types.h>
49#include <linux/interrupt.h>
50#include <linux/timer.h>
1da177e4
LT
51#include <linux/bitops.h>
52
53#include <linux/signal.h>
54#include <linux/string.h>
55#include <linux/device.h>
56
57#include <linux/ip.h>
58#include <linux/if_arp.h>
59#include <linux/tcp.h>
60#include <linux/skbuff.h>
61#include <linux/ctype.h>
62#include <net/dst.h>
63
64#include <asm/io.h>
65#include <asm/uaccess.h>
08e3356c 66#include <asm/ebcdic.h>
1da177e4 67
eebce385 68#include <net/iucv/iucv.h>
1da177e4
LT
69#include "fsm.h"
70
71MODULE_AUTHOR
72 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
73MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
74
eebce385
MS
75/**
76 * Debug Facility stuff
77 */
78#define IUCV_DBF_SETUP_NAME "iucv_setup"
08e3356c 79#define IUCV_DBF_SETUP_LEN 64
eebce385
MS
80#define IUCV_DBF_SETUP_PAGES 2
81#define IUCV_DBF_SETUP_NR_AREAS 1
82#define IUCV_DBF_SETUP_LEVEL 3
83
84#define IUCV_DBF_DATA_NAME "iucv_data"
85#define IUCV_DBF_DATA_LEN 128
86#define IUCV_DBF_DATA_PAGES 2
87#define IUCV_DBF_DATA_NR_AREAS 1
88#define IUCV_DBF_DATA_LEVEL 2
89
90#define IUCV_DBF_TRACE_NAME "iucv_trace"
91#define IUCV_DBF_TRACE_LEN 16
92#define IUCV_DBF_TRACE_PAGES 4
93#define IUCV_DBF_TRACE_NR_AREAS 1
94#define IUCV_DBF_TRACE_LEVEL 3
95
96#define IUCV_DBF_TEXT(name,level,text) \
97 do { \
98 debug_text_event(iucv_dbf_##name,level,text); \
99 } while (0)
100
101#define IUCV_DBF_HEX(name,level,addr,len) \
102 do { \
103 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
104 } while (0)
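/*
 * These macros log into the s390 debug facility (DBF). The debug areas
 * registered in iucv_register_dbf_views() below can typically be read
 * at runtime through debugfs (s390dbf).
 */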
105
106DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
107
f33780d3
PT
108#define IUCV_DBF_TEXT_(name, level, text...) \
109 do { \
8e6a8285 110 if (debug_level_enabled(iucv_dbf_##name, level)) { \
390dfd95
TH
111 char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
112 sprintf(__buf, text); \
113 debug_text_event(iucv_dbf_##name, level, __buf); \
f33780d3
PT
114 put_cpu_var(iucv_dbf_txt_buf); \
115 } \
eebce385
MS
116 } while (0)
117
118#define IUCV_DBF_SPRINTF(name,level,text...) \
119 do { \
120 debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
121 debug_sprintf_event(iucv_dbf_trace, level, text ); \
122 } while (0)
123
124/**
125 * some more debug stuff
126 */
1da177e4
LT
127#define PRINTK_HEADER " iucv: " /* for debugging */
128
1175b257
UB
129/* dummy device to make sure netiucv_pm functions are called */
130static struct device *netiucv_dev;
131
132static int netiucv_pm_prepare(struct device *);
133static void netiucv_pm_complete(struct device *);
134static int netiucv_pm_freeze(struct device *);
135static int netiucv_pm_restore_thaw(struct device *);
136
47145210 137static const struct dev_pm_ops netiucv_pm_ops = {
1175b257
UB
138 .prepare = netiucv_pm_prepare,
139 .complete = netiucv_pm_complete,
140 .freeze = netiucv_pm_freeze,
141 .thaw = netiucv_pm_restore_thaw,
142 .restore = netiucv_pm_restore_thaw,
143};
144
1da177e4 145static struct device_driver netiucv_driver = {
2219510f 146 .owner = THIS_MODULE,
1da177e4
LT
147 .name = "netiucv",
148 .bus = &iucv_bus,
1175b257 149 .pm = &netiucv_pm_ops,
1da177e4
LT
150};
151
eebce385
MS
152static int netiucv_callback_connreq(struct iucv_path *,
153 u8 ipvmid[8], u8 ipuser[16]);
154static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
155static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
156static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
157static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
158static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
159static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
160
161static struct iucv_handler netiucv_handler = {
162 .path_pending = netiucv_callback_connreq,
163 .path_complete = netiucv_callback_connack,
164 .path_severed = netiucv_callback_connrej,
165 .path_quiesced = netiucv_callback_connsusp,
166 .path_resumed = netiucv_callback_connres,
167 .message_pending = netiucv_callback_rx,
168 .message_complete = netiucv_callback_txdone
169};
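/*
 * All of these callbacks are invoked by the IUCV base layer; apart from
 * the connection lookup in netiucv_callback_connreq() they simply
 * forward the event to the connection's state machine.
 */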
170
1da177e4
LT
171/**
172 * Per connection profiling data
173 */
174struct connection_profile {
175 unsigned long maxmulti;
176 unsigned long maxcqueue;
177 unsigned long doios_single;
178 unsigned long doios_multi;
179 unsigned long txlen;
180 unsigned long tx_time;
181 struct timespec send_stamp;
182 unsigned long tx_pending;
183 unsigned long tx_max_pending;
184};
185
186/**
187 * Representation of one iucv connection
188 */
189struct iucv_connection {
eebce385
MS
190 struct list_head list;
191 struct iucv_path *path;
1da177e4
LT
192 struct sk_buff *rx_buff;
193 struct sk_buff *tx_buff;
194 struct sk_buff_head collect_queue;
195 struct sk_buff_head commit_queue;
196 spinlock_t collect_lock;
197 int collect_len;
198 int max_buffsize;
199 fsm_timer timer;
200 fsm_instance *fsm;
201 struct net_device *netdev;
202 struct connection_profile prof;
203 char userid[9];
08e3356c 204 char userdata[17];
1da177e4
LT
205};
206
207/**
208 * Linked list of all connection structs.
209 */
c11ca97e 210static LIST_HEAD(iucv_connection_list);
bfac0d0b 211static DEFINE_RWLOCK(iucv_connection_rwlock);
1da177e4
LT
212
213/**
214 * Representation of event-data for the
215 * connection state machine.
216 */
217struct iucv_event {
218 struct iucv_connection *conn;
219 void *data;
220};
221
222/**
223 * Private part of the network device structure
224 */
225struct netiucv_priv {
226 struct net_device_stats stats;
227 unsigned long tbusy;
228 fsm_instance *fsm;
229 struct iucv_connection *conn;
230 struct device *dev;
1175b257 231 int pm_state;
1da177e4
LT
232};
233
234/**
235 * Link level header for a packet.
236 */
eebce385
MS
237struct ll_header {
238 u16 next;
239};
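/*
 * Each IUCV message buffer carries one or more packets. Every packet is
 * preceded by an ll_header whose 'next' field holds the offset, counted
 * from the start of the buffer, of the following header; a trailing
 * header with next == 0 terminates the buffer (see netiucv_unpack_skb()
 * and netiucv_transmit_skb()).
 */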
1da177e4 240
eebce385 241#define NETIUCV_HDRLEN (sizeof(struct ll_header))
08e3356c 242#define NETIUCV_BUFSIZE_MAX 65537
1da177e4
LT
243#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
244#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
245#define NETIUCV_MTU_DEFAULT 9216
246#define NETIUCV_QUEUELEN_DEFAULT 50
247#define NETIUCV_TIMEOUT_5SEC 5000
248
249/**
 250 * Compatibility helpers for busy handling
251 * of network devices.
252 */
eebce385 253static inline void netiucv_clear_busy(struct net_device *dev)
1da177e4 254{
eebce385
MS
255 struct netiucv_priv *priv = netdev_priv(dev);
256 clear_bit(0, &priv->tbusy);
1da177e4
LT
257 netif_wake_queue(dev);
258}
259
eebce385 260static inline int netiucv_test_and_set_busy(struct net_device *dev)
1da177e4 261{
eebce385 262 struct netiucv_priv *priv = netdev_priv(dev);
1da177e4 263 netif_stop_queue(dev);
eebce385 264 return test_and_set_bit(0, &priv->tbusy);
1da177e4
LT
265}
266
08e3356c
UB
267static u8 iucvMagic_ascii[16] = {
268 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
269 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
270};
271
272static u8 iucvMagic_ebcdic[16] = {
1da177e4
LT
273 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
274 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
275};
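/*
 * Default connection user data: "0" padded with blanks, twice, in ASCII
 * and in EBCDIC. A connection whose userdata still equals the EBCDIC
 * pattern is displayed by user ID only (see netiucv_printuser()).
 */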
276
1da177e4
LT
277/**
278 * Convert an iucv userId to its printable
279 * form (strip whitespace at end).
280 *
281 * @param An iucv userId
282 *
283 * @returns The printable string (static data!!)
284 */
08e3356c 285static char *netiucv_printname(char *name, int len)
1da177e4 286{
08e3356c 287 static char tmp[17];
1da177e4 288 char *p = tmp;
08e3356c
UB
289 memcpy(tmp, name, len);
290 tmp[len] = '\0';
291 while (*p && ((p - tmp) < len) && (!isspace(*p)))
1da177e4
LT
292 p++;
293 *p = '\0';
294 return tmp;
295}
e82b0f2c 296
08e3356c
UB
297static char *netiucv_printuser(struct iucv_connection *conn)
298{
299 static char tmp_uid[9];
300 static char tmp_udat[17];
301 static char buf[100];
302
303 if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
304 tmp_uid[8] = '\0';
305 tmp_udat[16] = '\0';
306 memcpy(tmp_uid, conn->userid, 8);
307 memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
308 memcpy(tmp_udat, conn->userdata, 16);
309 EBCASC(tmp_udat, 16);
310 memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
311 sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
312 return buf;
313 } else
314 return netiucv_printname(conn->userid, 8);
315}
316
1da177e4
LT
317/**
318 * States of the interface statemachine.
319 */
320enum dev_states {
321 DEV_STATE_STOPPED,
322 DEV_STATE_STARTWAIT,
323 DEV_STATE_STOPWAIT,
324 DEV_STATE_RUNNING,
325 /**
 326 * MUST always be the last element!!
327 */
328 NR_DEV_STATES
329};
330
331static const char *dev_state_names[] = {
332 "Stopped",
333 "StartWait",
334 "StopWait",
335 "Running",
336};
337
338/**
339 * Events of the interface statemachine.
340 */
341enum dev_events {
342 DEV_EVENT_START,
343 DEV_EVENT_STOP,
344 DEV_EVENT_CONUP,
345 DEV_EVENT_CONDOWN,
346 /**
 347 * MUST always be the last element!!
348 */
349 NR_DEV_EVENTS
350};
351
352static const char *dev_event_names[] = {
353 "Start",
354 "Stop",
355 "Connection up",
356 "Connection down",
357};
e82b0f2c 358
1da177e4
LT
359/**
360 * Events of the connection statemachine
361 */
362enum conn_events {
363 /**
 364 * Events representing callbacks from
 365 * the lowlevel iucv layer
366 */
367 CONN_EVENT_CONN_REQ,
368 CONN_EVENT_CONN_ACK,
369 CONN_EVENT_CONN_REJ,
370 CONN_EVENT_CONN_SUS,
371 CONN_EVENT_CONN_RES,
372 CONN_EVENT_RX,
373 CONN_EVENT_TXDONE,
374
375 /**
 376 * Events representing error return codes from
 377 * calls to the lowlevel iucv layer
378 */
379
380 /**
381 * Event, representing timer expiry.
382 */
383 CONN_EVENT_TIMER,
384
385 /**
386 * Events, representing commands from upper levels.
387 */
388 CONN_EVENT_START,
389 CONN_EVENT_STOP,
390
391 /**
 392 * MUST always be the last element!!
393 */
394 NR_CONN_EVENTS,
395};
396
397static const char *conn_event_names[] = {
398 "Remote connection request",
399 "Remote connection acknowledge",
400 "Remote connection reject",
401 "Connection suspended",
402 "Connection resumed",
403 "Data received",
404 "Data sent",
405
406 "Timer",
407
408 "Start",
409 "Stop",
410};
411
412/**
413 * States of the connection statemachine.
414 */
415enum conn_states {
416 /**
417 * Connection not assigned to any device,
418 * initial state, invalid
419 */
420 CONN_STATE_INVALID,
421
422 /**
423 * Userid assigned but not operating
424 */
425 CONN_STATE_STOPPED,
426
427 /**
428 * Connection registered,
429 * no connection request sent yet,
430 * no connection request received
431 */
432 CONN_STATE_STARTWAIT,
433
434 /**
435 * Connection registered and connection request sent,
436 * no acknowledge and no connection request received yet.
437 */
438 CONN_STATE_SETUPWAIT,
439
440 /**
441 * Connection up and running idle
442 */
443 CONN_STATE_IDLE,
444
445 /**
446 * Data sent, awaiting CONN_EVENT_TXDONE
447 */
448 CONN_STATE_TX,
449
450 /**
451 * Error during registration.
452 */
453 CONN_STATE_REGERR,
454
455 /**
 456 * Error during connection setup.
457 */
458 CONN_STATE_CONNERR,
459
460 /**
 461 * MUST always be the last element!!
462 */
463 NR_CONN_STATES,
464};
465
466static const char *conn_state_names[] = {
467 "Invalid",
468 "Stopped",
469 "StartWait",
470 "SetupWait",
471 "Idle",
472 "TX",
473 "Terminating",
474 "Registration error",
475 "Connect error",
476};
477
e82b0f2c 478
1da177e4
LT
479/**
480 * Debug Facility Stuff
481 */
482static debug_info_t *iucv_dbf_setup = NULL;
483static debug_info_t *iucv_dbf_data = NULL;
484static debug_info_t *iucv_dbf_trace = NULL;
485
486DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
487
eebce385 488static void iucv_unregister_dbf_views(void)
1da177e4
LT
489{
490 if (iucv_dbf_setup)
491 debug_unregister(iucv_dbf_setup);
492 if (iucv_dbf_data)
493 debug_unregister(iucv_dbf_data);
494 if (iucv_dbf_trace)
495 debug_unregister(iucv_dbf_trace);
496}
eebce385 497static int iucv_register_dbf_views(void)
1da177e4
LT
498{
499 iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
66a464db 500 IUCV_DBF_SETUP_PAGES,
1da177e4
LT
501 IUCV_DBF_SETUP_NR_AREAS,
502 IUCV_DBF_SETUP_LEN);
503 iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
66a464db 504 IUCV_DBF_DATA_PAGES,
1da177e4
LT
505 IUCV_DBF_DATA_NR_AREAS,
506 IUCV_DBF_DATA_LEN);
507 iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
66a464db 508 IUCV_DBF_TRACE_PAGES,
1da177e4
LT
509 IUCV_DBF_TRACE_NR_AREAS,
510 IUCV_DBF_TRACE_LEN);
511
512 if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
513 (iucv_dbf_trace == NULL)) {
514 iucv_unregister_dbf_views();
515 return -ENOMEM;
516 }
517 debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
518 debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
519
520 debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
521 debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
522
523 debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
524 debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
525
526 return 0;
527}
528
eebce385 529/*
1da177e4 530 * Callback-wrappers, called from lowlevel iucv layer.
eebce385 531 */
1da177e4 532
eebce385
MS
533static void netiucv_callback_rx(struct iucv_path *path,
534 struct iucv_message *msg)
1da177e4 535{
eebce385 536 struct iucv_connection *conn = path->private;
1da177e4
LT
537 struct iucv_event ev;
538
539 ev.conn = conn;
eebce385 540 ev.data = msg;
1da177e4
LT
541 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
542}
543
eebce385
MS
544static void netiucv_callback_txdone(struct iucv_path *path,
545 struct iucv_message *msg)
1da177e4 546{
eebce385 547 struct iucv_connection *conn = path->private;
1da177e4
LT
548 struct iucv_event ev;
549
550 ev.conn = conn;
eebce385 551 ev.data = msg;
1da177e4
LT
552 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
553}
554
eebce385 555static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
1da177e4 556{
eebce385 557 struct iucv_connection *conn = path->private;
1da177e4 558
eebce385 559 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
1da177e4
LT
560}
561
eebce385
MS
562static int netiucv_callback_connreq(struct iucv_path *path,
563 u8 ipvmid[8], u8 ipuser[16])
1da177e4 564{
eebce385 565 struct iucv_connection *conn = path->private;
1da177e4 566 struct iucv_event ev;
08e3356c
UB
567 static char tmp_user[9];
568 static char tmp_udat[17];
eebce385 569 int rc;
1da177e4 570
eebce385 571 rc = -EINVAL;
08e3356c
UB
572 memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
573 memcpy(tmp_udat, ipuser, 16);
574 EBCASC(tmp_udat, 16);
eebce385
MS
575 read_lock_bh(&iucv_connection_rwlock);
576 list_for_each_entry(conn, &iucv_connection_list, list) {
08e3356c
UB
577 if (strncmp(ipvmid, conn->userid, 8) ||
578 strncmp(ipuser, conn->userdata, 16))
eebce385
MS
579 continue;
580 /* Found a matching connection for this path. */
581 conn->path = path;
582 ev.conn = conn;
583 ev.data = path;
584 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
585 rc = 0;
586 }
08e3356c
UB
587 IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
588 tmp_user, netiucv_printname(tmp_udat, 16));
eebce385
MS
589 read_unlock_bh(&iucv_connection_rwlock);
590 return rc;
1da177e4
LT
591}
592
eebce385 593static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
1da177e4 594{
eebce385 595 struct iucv_connection *conn = path->private;
1da177e4 596
eebce385 597 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
1da177e4
LT
598}
599
eebce385 600static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
1da177e4 601{
eebce385 602 struct iucv_connection *conn = path->private;
1da177e4 603
eebce385 604 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
1da177e4
LT
605}
606
eebce385 607static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
1da177e4 608{
eebce385 609 struct iucv_connection *conn = path->private;
1da177e4 610
eebce385
MS
611 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
612}
1da177e4
LT
613
614/**
21b26f2f 615 * NOP action for statemachines
1da177e4 616 */
21b26f2f 617static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
1da177e4
LT
618{
619}
e82b0f2c 620
eebce385 621/*
1da177e4 622 * Actions of the connection statemachine
eebce385 623 */
1da177e4
LT
624
625/**
eebce385
MS
626 * netiucv_unpack_skb
627 * @conn: The connection where this skb has been received.
628 * @pskb: The received skb.
1da177e4 629 *
eebce385
MS
630 * Unpack a just received skb and hand it over to upper layers.
631 * Helper function for conn_action_rx.
1da177e4 632 */
eebce385
MS
633static void netiucv_unpack_skb(struct iucv_connection *conn,
634 struct sk_buff *pskb)
1da177e4
LT
635{
636 struct net_device *dev = conn->netdev;
eebce385
MS
637 struct netiucv_priv *privptr = netdev_priv(dev);
638 u16 offset = 0;
1da177e4
LT
639
640 skb_put(pskb, NETIUCV_HDRLEN);
641 pskb->dev = dev;
642 pskb->ip_summed = CHECKSUM_NONE;
643 pskb->protocol = ntohs(ETH_P_IP);
644
645 while (1) {
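 /* Walk the packets chained in this buffer; a header with next == 0 terminates the chain. */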
646 struct sk_buff *skb;
eebce385 647 struct ll_header *header = (struct ll_header *) pskb->data;
1da177e4
LT
648
649 if (!header->next)
650 break;
651
652 skb_pull(pskb, NETIUCV_HDRLEN);
653 header->next -= offset;
654 offset += header->next;
655 header->next -= NETIUCV_HDRLEN;
656 if (skb_tailroom(pskb) < header->next) {
1da177e4
LT
657 IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
658 header->next, skb_tailroom(pskb));
659 return;
660 }
661 skb_put(pskb, header->next);
459a98ed 662 skb_reset_mac_header(pskb);
1da177e4
LT
663 skb = dev_alloc_skb(pskb->len);
664 if (!skb) {
1da177e4
LT
665 IUCV_DBF_TEXT(data, 2,
666 "Out of memory in netiucv_unpack_skb\n");
667 privptr->stats.rx_dropped++;
668 return;
669 }
d626f62b
ACM
670 skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
671 pskb->len);
459a98ed 672 skb_reset_mac_header(skb);
1da177e4
LT
673 skb->dev = pskb->dev;
674 skb->protocol = pskb->protocol;
675 pskb->ip_summed = CHECKSUM_UNNECESSARY;
9b3efc01
JL
676 privptr->stats.rx_packets++;
677 privptr->stats.rx_bytes += skb->len;
1da177e4
LT
678 /*
679 * Since receiving is always initiated from a tasklet (in iucv.c),
680 * we must use netif_rx_ni() instead of netif_rx()
681 */
682 netif_rx_ni(skb);
1da177e4
LT
683 skb_pull(pskb, header->next);
684 skb_put(pskb, NETIUCV_HDRLEN);
685 }
686}
687
eebce385 688static void conn_action_rx(fsm_instance *fi, int event, void *arg)
1da177e4 689{
eebce385 690 struct iucv_event *ev = arg;
1da177e4 691 struct iucv_connection *conn = ev->conn;
eebce385
MS
692 struct iucv_message *msg = ev->data;
693 struct netiucv_priv *privptr = netdev_priv(conn->netdev);
1da177e4
LT
694 int rc;
695
2a2cf6b1 696 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
697
698 if (!conn->netdev) {
eebce385 699 iucv_message_reject(conn->path, msg);
1da177e4 700 IUCV_DBF_TEXT(data, 2,
eebce385 701 "Received data for unlinked connection\n");
1da177e4
LT
702 return;
703 }
eebce385
MS
704 if (msg->length > conn->max_buffsize) {
705 iucv_message_reject(conn->path, msg);
1da177e4 706 privptr->stats.rx_dropped++;
1da177e4 707 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
eebce385 708 msg->length, conn->max_buffsize);
1da177e4
LT
709 return;
710 }
27a884dc
ACM
711 conn->rx_buff->data = conn->rx_buff->head;
712 skb_reset_tail_pointer(conn->rx_buff);
1da177e4 713 conn->rx_buff->len = 0;
eebce385
MS
714 rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
715 msg->length, NULL);
716 if (rc || msg->length < 5) {
1da177e4 717 privptr->stats.rx_errors++;
1da177e4
LT
718 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
719 return;
720 }
721 netiucv_unpack_skb(conn, conn->rx_buff);
722}
723
eebce385 724static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
1da177e4 725{
eebce385 726 struct iucv_event *ev = arg;
1da177e4 727 struct iucv_connection *conn = ev->conn;
eebce385
MS
728 struct iucv_message *msg = ev->data;
729 struct iucv_message txmsg;
1da177e4 730 struct netiucv_priv *privptr = NULL;
eebce385
MS
731 u32 single_flag = msg->tag;
732 u32 txbytes = 0;
733 u32 txpackets = 0;
734 u32 stat_maxcq = 0;
1da177e4
LT
735 struct sk_buff *skb;
736 unsigned long saveflags;
eebce385
MS
737 struct ll_header header;
738 int rc;
1da177e4 739
2a2cf6b1 740 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4 741
d239ae33
UB
742 if (!conn || !conn->netdev) {
743 IUCV_DBF_TEXT(data, 2,
744 "Send confirmation for unlinked connection\n");
745 return;
746 }
747 privptr = netdev_priv(conn->netdev);
1da177e4
LT
748 conn->prof.tx_pending--;
749 if (single_flag) {
750 if ((skb = skb_dequeue(&conn->commit_queue))) {
751 atomic_dec(&skb->users);
1da177e4
LT
752 if (privptr) {
753 privptr->stats.tx_packets++;
754 privptr->stats.tx_bytes +=
755 (skb->len - NETIUCV_HDRLEN
998221c2 756 - NETIUCV_HDRLEN);
1da177e4 757 }
998221c2 758 dev_kfree_skb_any(skb);
1da177e4
LT
759 }
760 }
27a884dc
ACM
761 conn->tx_buff->data = conn->tx_buff->head;
762 skb_reset_tail_pointer(conn->tx_buff);
1da177e4
LT
763 conn->tx_buff->len = 0;
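 /* Flush everything queued on the collect queue while the previous send was pending into one multi-packet buffer. */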
764 spin_lock_irqsave(&conn->collect_lock, saveflags);
765 while ((skb = skb_dequeue(&conn->collect_queue))) {
766 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
767 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
768 NETIUCV_HDRLEN);
d626f62b
ACM
769 skb_copy_from_linear_data(skb,
770 skb_put(conn->tx_buff, skb->len),
771 skb->len);
1da177e4
LT
772 txbytes += skb->len;
773 txpackets++;
774 stat_maxcq++;
775 atomic_dec(&skb->users);
776 dev_kfree_skb_any(skb);
777 }
778 if (conn->collect_len > conn->prof.maxmulti)
779 conn->prof.maxmulti = conn->collect_len;
780 conn->collect_len = 0;
781 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
eebce385
MS
782 if (conn->tx_buff->len == 0) {
783 fsm_newstate(fi, CONN_STATE_IDLE);
784 return;
785 }
1da177e4 786
eebce385
MS
787 header.next = 0;
788 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
2c6b47de 789 conn->prof.send_stamp = current_kernel_time();
eebce385
MS
790 txmsg.class = 0;
791 txmsg.tag = 0;
792 rc = iucv_message_send(conn->path, &txmsg, 0, 0,
1da177e4 793 conn->tx_buff->data, conn->tx_buff->len);
eebce385
MS
794 conn->prof.doios_multi++;
795 conn->prof.txlen += conn->tx_buff->len;
796 conn->prof.tx_pending++;
797 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
798 conn->prof.tx_max_pending = conn->prof.tx_pending;
799 if (rc) {
800 conn->prof.tx_pending--;
1da177e4 801 fsm_newstate(fi, CONN_STATE_IDLE);
eebce385
MS
802 if (privptr)
803 privptr->stats.tx_errors += txpackets;
eebce385
MS
804 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
805 } else {
806 if (privptr) {
807 privptr->stats.tx_packets += txpackets;
808 privptr->stats.tx_bytes += txbytes;
809 }
810 if (stat_maxcq > conn->prof.maxcqueue)
811 conn->prof.maxcqueue = stat_maxcq;
812 }
1da177e4
LT
813}
814
eebce385 815static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
1da177e4 816{
eebce385 817 struct iucv_event *ev = arg;
1da177e4 818 struct iucv_connection *conn = ev->conn;
eebce385 819 struct iucv_path *path = ev->data;
1da177e4 820 struct net_device *netdev = conn->netdev;
eebce385 821 struct netiucv_priv *privptr = netdev_priv(netdev);
1da177e4 822 int rc;
1da177e4 823
2a2cf6b1 824 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4 825
eebce385
MS
826 conn->path = path;
827 path->msglim = NETIUCV_QUEUELEN_DEFAULT;
828 path->flags = 0;
08e3356c 829 rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
1da177e4 830 if (rc) {
1da177e4
LT
831 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
832 return;
833 }
834 fsm_newstate(fi, CONN_STATE_IDLE);
eebce385 835 netdev->tx_queue_len = conn->path->msglim;
1da177e4
LT
836 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
837}
838
eebce385 839static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
1da177e4 840{
eebce385
MS
841 struct iucv_event *ev = arg;
842 struct iucv_path *path = ev->data;
1da177e4 843
2a2cf6b1 844 IUCV_DBF_TEXT(trace, 3, __func__);
eebce385 845 iucv_path_sever(path, NULL);
1da177e4
LT
846}
847
eebce385 848static void conn_action_connack(fsm_instance *fi, int event, void *arg)
1da177e4 849{
eebce385 850 struct iucv_connection *conn = arg;
1da177e4 851 struct net_device *netdev = conn->netdev;
eebce385 852 struct netiucv_priv *privptr = netdev_priv(netdev);
1da177e4 853
2a2cf6b1 854 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4
LT
855 fsm_deltimer(&conn->timer);
856 fsm_newstate(fi, CONN_STATE_IDLE);
eebce385 857 netdev->tx_queue_len = conn->path->msglim;
1da177e4
LT
858 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
859}
860
eebce385 861static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
1da177e4 862{
eebce385 863 struct iucv_connection *conn = arg;
1da177e4 864
2a2cf6b1 865 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4 866 fsm_deltimer(&conn->timer);
08e3356c 867 iucv_path_sever(conn->path, conn->userdata);
1da177e4
LT
868 fsm_newstate(fi, CONN_STATE_STARTWAIT);
869}
870
eebce385 871static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
1da177e4 872{
eebce385 873 struct iucv_connection *conn = arg;
1da177e4 874 struct net_device *netdev = conn->netdev;
eebce385 875 struct netiucv_priv *privptr = netdev_priv(netdev);
1da177e4 876
2a2cf6b1 877 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4
LT
878
879 fsm_deltimer(&conn->timer);
08e3356c
UB
880 iucv_path_sever(conn->path, conn->userdata);
881 dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
882 "connection\n", netiucv_printuser(conn));
1da177e4 883 IUCV_DBF_TEXT(data, 2,
eebce385 884 "conn_action_connsever: Remote dropped connection\n");
1da177e4
LT
885 fsm_newstate(fi, CONN_STATE_STARTWAIT);
886 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
887}
888
eebce385 889static void conn_action_start(fsm_instance *fi, int event, void *arg)
1da177e4 890{
eebce385 891 struct iucv_connection *conn = arg;
8f7c502c
UB
892 struct net_device *netdev = conn->netdev;
893 struct netiucv_priv *privptr = netdev_priv(netdev);
1da177e4
LT
894 int rc;
895
2a2cf6b1 896 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4 897
eebce385 898 fsm_newstate(fi, CONN_STATE_STARTWAIT);
1da177e4 899
eebce385
MS
900 /*
901 * We must set the state before calling iucv_connect because the
902 * callback handler could be called at any point after the connection
903 * request is sent
904 */
1da177e4
LT
905
906 fsm_newstate(fi, CONN_STATE_SETUPWAIT);
eebce385 907 conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
08e3356c
UB
908 IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
909 netdev->name, netiucv_printuser(conn));
910
eebce385 911 rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
08e3356c 912 NULL, conn->userdata, conn);
1da177e4 913 switch (rc) {
eebce385 914 case 0:
8f7c502c 915 netdev->tx_queue_len = conn->path->msglim;
eebce385
MS
916 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
917 CONN_EVENT_TIMER, conn);
918 return;
919 case 11:
8f7c502c
UB
920 dev_warn(privptr->dev,
921 "The IUCV device failed to connect to z/VM guest %s\n",
08e3356c 922 netiucv_printname(conn->userid, 8));
eebce385
MS
923 fsm_newstate(fi, CONN_STATE_STARTWAIT);
924 break;
925 case 12:
8f7c502c
UB
926 dev_warn(privptr->dev,
927 "The IUCV device failed to connect to the peer on z/VM"
08e3356c 928 " guest %s\n", netiucv_printname(conn->userid, 8));
eebce385
MS
929 fsm_newstate(fi, CONN_STATE_STARTWAIT);
930 break;
931 case 13:
8f7c502c
UB
932 dev_err(privptr->dev,
933 "Connecting the IUCV device would exceed the maximum"
934 " number of IUCV connections\n");
eebce385
MS
935 fsm_newstate(fi, CONN_STATE_CONNERR);
936 break;
937 case 14:
8f7c502c
UB
938 dev_err(privptr->dev,
939 "z/VM guest %s has too many IUCV connections"
940 " to connect with the IUCV device\n",
08e3356c 941 netiucv_printname(conn->userid, 8));
eebce385
MS
942 fsm_newstate(fi, CONN_STATE_CONNERR);
943 break;
944 case 15:
8f7c502c
UB
945 dev_err(privptr->dev,
946 "The IUCV device cannot connect to a z/VM guest with no"
947 " IUCV authorization\n");
eebce385
MS
948 fsm_newstate(fi, CONN_STATE_CONNERR);
949 break;
950 default:
8f7c502c
UB
951 dev_err(privptr->dev,
952 "Connecting the IUCV device failed with error %d\n",
953 rc);
eebce385
MS
954 fsm_newstate(fi, CONN_STATE_CONNERR);
955 break;
1da177e4
LT
956 }
957 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
eebce385
MS
958 kfree(conn->path);
959 conn->path = NULL;
1da177e4
LT
960}
961
eebce385 962static void netiucv_purge_skb_queue(struct sk_buff_head *q)
1da177e4
LT
963{
964 struct sk_buff *skb;
965
966 while ((skb = skb_dequeue(q))) {
967 atomic_dec(&skb->users);
968 dev_kfree_skb_any(skb);
969 }
970}
971
eebce385 972static void conn_action_stop(fsm_instance *fi, int event, void *arg)
1da177e4 973{
eebce385 974 struct iucv_event *ev = arg;
1da177e4
LT
975 struct iucv_connection *conn = ev->conn;
976 struct net_device *netdev = conn->netdev;
eebce385 977 struct netiucv_priv *privptr = netdev_priv(netdev);
1da177e4 978
2a2cf6b1 979 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4
LT
980
981 fsm_deltimer(&conn->timer);
982 fsm_newstate(fi, CONN_STATE_STOPPED);
983 netiucv_purge_skb_queue(&conn->collect_queue);
eebce385
MS
984 if (conn->path) {
985 IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
08e3356c 986 iucv_path_sever(conn->path, conn->userdata);
eebce385
MS
987 kfree(conn->path);
988 conn->path = NULL;
989 }
1da177e4
LT
990 netiucv_purge_skb_queue(&conn->commit_queue);
991 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
992}
993
eebce385 994static void conn_action_inval(fsm_instance *fi, int event, void *arg)
1da177e4 995{
eebce385 996 struct iucv_connection *conn = arg;
1da177e4
LT
997 struct net_device *netdev = conn->netdev;
998
f082bcae
UB
999 IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
1000 netdev->name, conn->userid);
1da177e4
LT
1001}
1002
1003static const fsm_node conn_fsm[] = {
1004 { CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval },
1005 { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start },
1006
1007 { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop },
1008 { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop },
1009 { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop },
1010 { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop },
1011 { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop },
1012 { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop },
1013 { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop },
1014
1015 { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject },
1016 { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
1017 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
1018 { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject },
1019 { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject },
1020
1021 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack },
1022 { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev },
1023
1024 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever },
1025 { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever },
1026 { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever },
1027
1028 { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx },
1029 { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx },
1030
1031 { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone },
1032 { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone },
1033};
1034
1035static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
1036
e82b0f2c 1037
eebce385 1038/*
1da177e4 1039 * Actions for interface - statemachine.
eebce385 1040 */
1da177e4
LT
1041
1042/**
eebce385
MS
1043 * dev_action_start
1044 * @fi: An instance of an interface statemachine.
 1045 * @event: The event that just happened.
 1046 * @arg: Generic pointer, cast from struct net_device * upon call.
1da177e4 1047 *
eebce385 1048 * Start up the connection by sending CONN_EVENT_START to it.
1da177e4 1049 */
eebce385 1050static void dev_action_start(fsm_instance *fi, int event, void *arg)
1da177e4 1051{
eebce385
MS
1052 struct net_device *dev = arg;
1053 struct netiucv_priv *privptr = netdev_priv(dev);
1da177e4 1054
2a2cf6b1 1055 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4 1056
1da177e4 1057 fsm_newstate(fi, DEV_STATE_STARTWAIT);
eebce385 1058 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1da177e4
LT
1059}
1060
1061/**
 1062 * Shut down the connection by sending CONN_EVENT_STOP to it.
1063 *
1064 * @param fi An instance of an interface statemachine.
 1065 * @param event The event that just happened.
 1066 * @param arg Generic pointer, cast from struct net_device * upon call.
1067 */
1068static void
1069dev_action_stop(fsm_instance *fi, int event, void *arg)
1070{
eebce385
MS
1071 struct net_device *dev = arg;
1072 struct netiucv_priv *privptr = netdev_priv(dev);
1da177e4
LT
1073 struct iucv_event ev;
1074
2a2cf6b1 1075 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4
LT
1076
1077 ev.conn = privptr->conn;
1078
1079 fsm_newstate(fi, DEV_STATE_STOPWAIT);
1080 fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1081}
1082
1083/**
1084 * Called from connection statemachine
1085 * when a connection is up and running.
1086 *
1087 * @param fi An instance of an interface statemachine.
 1088 * @param event The event that just happened.
 1089 * @param arg Generic pointer, cast from struct net_device * upon call.
1090 */
1091static void
1092dev_action_connup(fsm_instance *fi, int event, void *arg)
1093{
eebce385
MS
1094 struct net_device *dev = arg;
1095 struct netiucv_priv *privptr = netdev_priv(dev);
1da177e4 1096
2a2cf6b1 1097 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4
LT
1098
1099 switch (fsm_getstate(fi)) {
1100 case DEV_STATE_STARTWAIT:
1101 fsm_newstate(fi, DEV_STATE_RUNNING);
8f7c502c
UB
1102 dev_info(privptr->dev,
1103 "The IUCV device has been connected"
08e3356c
UB
1104 " successfully to %s\n",
1105 netiucv_printuser(privptr->conn));
1da177e4
LT
1106 IUCV_DBF_TEXT(setup, 3,
1107 "connection is up and running\n");
1108 break;
1109 case DEV_STATE_STOPWAIT:
1da177e4
LT
1110 IUCV_DBF_TEXT(data, 2,
1111 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1112 break;
1113 }
1114}
1115
1116/**
1117 * Called from connection statemachine
 1118 * when a connection has been shut down.
1119 *
1120 * @param fi An instance of an interface statemachine.
 1121 * @param event The event that just happened.
 1122 * @param arg Generic pointer, cast from struct net_device * upon call.
1123 */
1124static void
1125dev_action_conndown(fsm_instance *fi, int event, void *arg)
1126{
2a2cf6b1 1127 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4
LT
1128
1129 switch (fsm_getstate(fi)) {
1130 case DEV_STATE_RUNNING:
1131 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1132 break;
1133 case DEV_STATE_STOPWAIT:
1134 fsm_newstate(fi, DEV_STATE_STOPPED);
1135 IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1136 break;
1137 }
1138}
1139
1140static const fsm_node dev_fsm[] = {
1141 { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
1142
1143 { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start },
1144 { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
1145
1146 { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
1147 { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
1148
1149 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
1150 { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
21b26f2f 1151 { DEV_STATE_RUNNING, DEV_EVENT_CONUP, netiucv_action_nop },
1da177e4
LT
1152};
1153
1154static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1155
1156/**
1157 * Transmit a packet.
1158 * This is a helper function for netiucv_tx().
1159 *
1160 * @param conn Connection to be used for sending.
1161 * @param skb Pointer to struct sk_buff of packet to send.
1162 * The linklevel header has already been set up
1163 * by netiucv_tx().
1164 *
 1165 * @return 0 on success, -ERRNO on failure.
1166 */
eebce385
MS
1167static int netiucv_transmit_skb(struct iucv_connection *conn,
1168 struct sk_buff *skb)
1169{
1170 struct iucv_message msg;
1da177e4 1171 unsigned long saveflags;
eebce385
MS
1172 struct ll_header header;
1173 int rc;
1da177e4
LT
1174
1175 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
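 /* A send is still in flight: queue the skb on the collect queue (or reject it with -EBUSY once max_buffsize worth of data is pending); conn_action_txdone() bundles the queued skbs into the next multi-packet buffer. */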
1176 int l = skb->len + NETIUCV_HDRLEN;
1177
1178 spin_lock_irqsave(&conn->collect_lock, saveflags);
1179 if (conn->collect_len + l >
1180 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1181 rc = -EBUSY;
1182 IUCV_DBF_TEXT(data, 2,
eebce385 1183 "EBUSY from netiucv_transmit_skb\n");
1da177e4
LT
1184 } else {
1185 atomic_inc(&skb->users);
1186 skb_queue_tail(&conn->collect_queue, skb);
1187 conn->collect_len += l;
eebce385 1188 rc = 0;
1da177e4
LT
1189 }
1190 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1191 } else {
1192 struct sk_buff *nskb = skb;
1193 /**
 1194 * Copy the skb to a newly allocated skb in lowmem only if the
1195 * data is located above 2G in memory or tailroom is < 2.
1196 */
27a884dc
ACM
1197 unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1198 NETIUCV_HDRLEN)) >> 31;
1da177e4
LT
1199 int copied = 0;
1200 if (hi || (skb_tailroom(skb) < 2)) {
1201 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1202 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1203 if (!nskb) {
1da177e4
LT
1204 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1205 rc = -ENOMEM;
1206 return rc;
1207 } else {
1208 skb_reserve(nskb, NETIUCV_HDRLEN);
1209 memcpy(skb_put(nskb, skb->len),
1210 skb->data, skb->len);
1211 }
1212 copied = 1;
1213 }
1214 /**
1215 * skb now is below 2G and has enough room. Add headers.
1216 */
1217 header.next = nskb->len + NETIUCV_HDRLEN;
1218 memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1219 header.next = 0;
1220 memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1221
1222 fsm_newstate(conn->fsm, CONN_STATE_TX);
2c6b47de 1223 conn->prof.send_stamp = current_kernel_time();
e82b0f2c 1224
eebce385
MS
1225 msg.tag = 1;
1226 msg.class = 0;
1227 rc = iucv_message_send(conn->path, &msg, 0, 0,
1228 nskb->data, nskb->len);
1da177e4
LT
1229 conn->prof.doios_single++;
1230 conn->prof.txlen += skb->len;
1231 conn->prof.tx_pending++;
1232 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1233 conn->prof.tx_max_pending = conn->prof.tx_pending;
1234 if (rc) {
1235 struct netiucv_priv *privptr;
1236 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1237 conn->prof.tx_pending--;
eebce385 1238 privptr = netdev_priv(conn->netdev);
1da177e4
LT
1239 if (privptr)
1240 privptr->stats.tx_errors++;
1241 if (copied)
1242 dev_kfree_skb(nskb);
1243 else {
1244 /**
1245 * Remove our headers. They get added
1246 * again on retransmit.
1247 */
1248 skb_pull(skb, NETIUCV_HDRLEN);
1249 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1250 }
1da177e4
LT
1251 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1252 } else {
1253 if (copied)
1254 dev_kfree_skb(skb);
1255 atomic_inc(&nskb->users);
1256 skb_queue_tail(&conn->commit_queue, nskb);
1257 }
1258 }
1259
1260 return rc;
1261}
e82b0f2c 1262
eebce385 1263/*
1da177e4 1264 * Interface API for upper network layers
eebce385 1265 */
1da177e4
LT
1266
1267/**
1268 * Open an interface.
1269 * Called from generic network layer when ifconfig up is run.
1270 *
1271 * @param dev Pointer to interface struct.
1272 *
1273 * @return 0 on success, -ERRNO on failure. (Never fails.)
1274 */
eebce385
MS
1275static int netiucv_open(struct net_device *dev)
1276{
1277 struct netiucv_priv *priv = netdev_priv(dev);
1278
1279 fsm_event(priv->fsm, DEV_EVENT_START, dev);
1da177e4
LT
1280 return 0;
1281}
1282
1283/**
1284 * Close an interface.
1285 * Called from generic network layer when ifconfig down is run.
1286 *
1287 * @param dev Pointer to interface struct.
1288 *
1289 * @return 0 on success, -ERRNO on failure. (Never fails.)
1290 */
eebce385
MS
1291static int netiucv_close(struct net_device *dev)
1292{
1293 struct netiucv_priv *priv = netdev_priv(dev);
1294
1295 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1da177e4
LT
1296 return 0;
1297}
1298
1175b257
UB
1299static int netiucv_pm_prepare(struct device *dev)
1300{
1301 IUCV_DBF_TEXT(trace, 3, __func__);
1302 return 0;
1303}
1304
1305static void netiucv_pm_complete(struct device *dev)
1306{
1307 IUCV_DBF_TEXT(trace, 3, __func__);
1308 return;
1309}
1310
1311/**
1312 * netiucv_pm_freeze() - Freeze PM callback
1313 * @dev: netiucv device
1314 *
1315 * close open netiucv interfaces
1316 */
1317static int netiucv_pm_freeze(struct device *dev)
1318{
4f0076f7 1319 struct netiucv_priv *priv = dev_get_drvdata(dev);
1175b257
UB
1320 struct net_device *ndev = NULL;
1321 int rc = 0;
1322
1323 IUCV_DBF_TEXT(trace, 3, __func__);
1324 if (priv && priv->conn)
1325 ndev = priv->conn->netdev;
1326 if (!ndev)
1327 goto out;
1328 netif_device_detach(ndev);
1329 priv->pm_state = fsm_getstate(priv->fsm);
1330 rc = netiucv_close(ndev);
1331out:
1332 return rc;
1333}
1334
1335/**
1336 * netiucv_pm_restore_thaw() - Thaw and restore PM callback
1337 * @dev: netiucv device
1338 *
1339 * re-open netiucv interfaces closed during freeze
1340 */
1341static int netiucv_pm_restore_thaw(struct device *dev)
1342{
4f0076f7 1343 struct netiucv_priv *priv = dev_get_drvdata(dev);
1175b257
UB
1344 struct net_device *ndev = NULL;
1345 int rc = 0;
1346
1347 IUCV_DBF_TEXT(trace, 3, __func__);
1348 if (priv && priv->conn)
1349 ndev = priv->conn->netdev;
1350 if (!ndev)
1351 goto out;
1352 switch (priv->pm_state) {
1353 case DEV_STATE_RUNNING:
1354 case DEV_STATE_STARTWAIT:
1355 rc = netiucv_open(ndev);
1356 break;
1357 default:
1358 break;
1359 }
1360 netif_device_attach(ndev);
1361out:
1362 return rc;
1363}
1364
1da177e4
LT
1365/**
1366 * Start transmission of a packet.
1367 * Called from generic network device layer.
1368 *
1369 * @param skb Pointer to buffer containing the packet.
1370 * @param dev Pointer to interface struct.
1371 *
1372 * @return 0 if packet consumed, !0 if packet rejected.
1373 * Note: If we return !0, then the packet is free'd by
1374 * the generic network layer.
1375 */
1376static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1377{
eebce385
MS
1378 struct netiucv_priv *privptr = netdev_priv(dev);
1379 int rc;
1da177e4 1380
2a2cf6b1 1381 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1382 /**
1383 * Some sanity checks ...
1384 */
1385 if (skb == NULL) {
1da177e4
LT
1386 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1387 privptr->stats.tx_dropped++;
ec634fe3 1388 return NETDEV_TX_OK;
1da177e4
LT
1389 }
1390 if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1da177e4
LT
1391 IUCV_DBF_TEXT(data, 2,
1392 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1393 dev_kfree_skb(skb);
1394 privptr->stats.tx_dropped++;
ec634fe3 1395 return NETDEV_TX_OK;
1da177e4
LT
1396 }
1397
1398 /**
1399 * If connection is not running, try to restart it
e82b0f2c 1400 * and throw away packet.
1da177e4
LT
1401 */
1402 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1da177e4
LT
1403 dev_kfree_skb(skb);
1404 privptr->stats.tx_dropped++;
1405 privptr->stats.tx_errors++;
1406 privptr->stats.tx_carrier_errors++;
ec634fe3 1407 return NETDEV_TX_OK;
1da177e4
LT
1408 }
1409
1410 if (netiucv_test_and_set_busy(dev)) {
1411 IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
4e584d66 1412 return NETDEV_TX_BUSY;
1da177e4
LT
1413 }
1414 dev->trans_start = jiffies;
5b548140 1415 rc = netiucv_transmit_skb(privptr->conn, skb);
1da177e4 1416 netiucv_clear_busy(dev);
5b548140 1417 return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1da177e4
LT
1418}
1419
1420/**
eebce385
MS
1421 * netiucv_stats
1422 * @dev: Pointer to interface struct.
1da177e4 1423 *
eebce385 1424 * Returns interface statistics of a device.
1da177e4 1425 *
eebce385 1426 * Returns pointer to stats struct of this interface.
1da177e4 1427 */
eebce385 1428static struct net_device_stats *netiucv_stats (struct net_device * dev)
1da177e4 1429{
eebce385
MS
1430 struct netiucv_priv *priv = netdev_priv(dev);
1431
2a2cf6b1 1432 IUCV_DBF_TEXT(trace, 5, __func__);
eebce385 1433 return &priv->stats;
1da177e4
LT
1434}
1435
1436/**
eebce385
MS
1437 * netiucv_change_mtu
1438 * @dev: Pointer to interface struct.
1439 * @new_mtu: The new MTU to use for this interface.
1da177e4 1440 *
eebce385 1441 * Sets MTU of an interface.
1da177e4 1442 *
eebce385 1443 * Returns 0 on success, -EINVAL if MTU is out of valid range.
1da177e4
LT
1444 * (valid range is 576 .. NETIUCV_MTU_MAX).
1445 */
eebce385 1446static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1da177e4 1447{
2a2cf6b1 1448 IUCV_DBF_TEXT(trace, 3, __func__);
eebce385 1449 if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1da177e4
LT
1450 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1451 return -EINVAL;
1452 }
1453 dev->mtu = new_mtu;
1454 return 0;
1455}
1456
eebce385 1457/*
1da177e4 1458 * attributes in sysfs
eebce385 1459 */
1da177e4 1460
eebce385
MS
1461static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1462 char *buf)
1da177e4 1463{
dff59b64 1464 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1465
2a2cf6b1 1466 IUCV_DBF_TEXT(trace, 5, __func__);
08e3356c 1467 return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
1da177e4
LT
1468}
1469
08e3356c
UB
1470static int netiucv_check_user(const char *buf, size_t count, char *username,
1471 char *userdata)
1da177e4 1472{
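 /*
  * Accepts "USERID" or "USERID.userdata": up to 8 characters of z/VM
  * user ID, optionally followed by '.' and up to 16 characters of user
  * data. Both parts are blank-padded; the user data is converted to
  * EBCDIC (the default iucvMagic user data is used when none is given).
  */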
08e3356c
UB
1473 const char *p;
1474 int i;
1da177e4 1475
08e3356c
UB
1476 p = strchr(buf, '.');
1477 if ((p && ((count > 26) ||
1478 ((p - buf) > 8) ||
1479 (buf + count - p > 18))) ||
1480 (!p && (count > 9))) {
1481 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1da177e4
LT
1482 return -EINVAL;
1483 }
1484
08e3356c
UB
1485 for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
1486 if (isalnum(*p) || *p == '$') {
1487 username[i] = toupper(*p);
eebce385
MS
1488 continue;
1489 }
08e3356c 1490 if (*p == '\n')
1da177e4
LT
1491 /* trailing lf, grr */
1492 break;
eebce385 1493 IUCV_DBF_TEXT_(setup, 2,
08e3356c 1494 "conn_write: invalid character %02x\n", *p);
eebce385 1495 return -EINVAL;
1da177e4 1496 }
eebce385 1497 while (i < 8)
1da177e4 1498 username[i++] = ' ';
16a83b30 1499 username[8] = '\0';
1da177e4 1500
08e3356c
UB
1501 if (*p == '.') {
1502 p++;
1503 for (i = 0; i < 16 && *p; i++, p++) {
1504 if (*p == '\n')
1505 break;
1506 userdata[i] = toupper(*p);
1507 }
1508 while (i > 0 && i < 16)
1509 userdata[i++] = ' ';
1510 } else
1511 memcpy(userdata, iucvMagic_ascii, 16);
1512 userdata[16] = '\0';
1513 ASCEBC(userdata, 16);
1514
1515 return 0;
1516}
1517
1518static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1519 const char *buf, size_t count)
1520{
1521 struct netiucv_priv *priv = dev_get_drvdata(dev);
1522 struct net_device *ndev = priv->conn->netdev;
1523 char username[9];
1524 char userdata[17];
1525 int rc;
1526 struct iucv_connection *cp;
1527
1528 IUCV_DBF_TEXT(trace, 3, __func__);
1529 rc = netiucv_check_user(buf, count, username, userdata);
1530 if (rc)
1531 return rc;
1532
eebce385
MS
1533 if (memcmp(username, priv->conn->userid, 9) &&
1534 (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1535 /* username changed while the interface is active. */
eebce385 1536 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
f082bcae 1537 return -EPERM;
eebce385
MS
1538 }
1539 read_lock_bh(&iucv_connection_rwlock);
1540 list_for_each_entry(cp, &iucv_connection_list, list) {
08e3356c
UB
1541 if (!strncmp(username, cp->userid, 9) &&
1542 !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
eebce385 1543 read_unlock_bh(&iucv_connection_rwlock);
08e3356c
UB
1544 IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
1545 "already exists\n", netiucv_printuser(cp));
eebce385 1546 return -EEXIST;
1da177e4
LT
1547 }
1548 }
eebce385 1549 read_unlock_bh(&iucv_connection_rwlock);
1da177e4 1550 memcpy(priv->conn->userid, username, 9);
08e3356c 1551 memcpy(priv->conn->userdata, userdata, 17);
1da177e4 1552 return count;
1da177e4
LT
1553}
1554
1555static DEVICE_ATTR(user, 0644, user_show, user_write);
1556
eebce385
MS
1557static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1558 char *buf)
dff59b64
GKH
1559{
1560 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1561
2a2cf6b1 1562 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1563 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1564}
1565
eebce385
MS
1566static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1567 const char *buf, size_t count)
1da177e4 1568{
dff59b64 1569 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4
LT
1570 struct net_device *ndev = priv->conn->netdev;
1571 char *e;
1572 int bs1;
1573
2a2cf6b1 1574 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4
LT
1575 if (count >= 39)
1576 return -EINVAL;
1577
1578 bs1 = simple_strtoul(buf, &e, 0);
1579
1580 if (e && (!isspace(*e))) {
08e3356c
UB
1581 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
1582 *e);
1da177e4
LT
1583 return -EINVAL;
1584 }
1585 if (bs1 > NETIUCV_BUFSIZE_MAX) {
1da177e4
LT
1586 IUCV_DBF_TEXT_(setup, 2,
1587 "buffer_write: buffer size %d too large\n",
1588 bs1);
1589 return -EINVAL;
1590 }
1591 if ((ndev->flags & IFF_RUNNING) &&
1592 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1da177e4
LT
1593 IUCV_DBF_TEXT_(setup, 2,
1594 "buffer_write: buffer size %d too small\n",
1595 bs1);
1596 return -EINVAL;
1597 }
1598 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1da177e4
LT
1599 IUCV_DBF_TEXT_(setup, 2,
1600 "buffer_write: buffer size %d too small\n",
1601 bs1);
1602 return -EINVAL;
1603 }
1604
1605 priv->conn->max_buffsize = bs1;
1606 if (!(ndev->flags & IFF_RUNNING))
1607 ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1608
1609 return count;
1610
1611}
1612
1613static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1614
eebce385
MS
1615static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1616 char *buf)
1da177e4 1617{
dff59b64 1618 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1619
2a2cf6b1 1620 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1621 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1622}
1623
1624static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1625
eebce385
MS
1626static ssize_t conn_fsm_show (struct device *dev,
1627 struct device_attribute *attr, char *buf)
1da177e4 1628{
dff59b64 1629 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1630
2a2cf6b1 1631 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1632 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1633}
1634
1635static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1636
eebce385
MS
1637static ssize_t maxmulti_show (struct device *dev,
1638 struct device_attribute *attr, char *buf)
1da177e4 1639{
dff59b64 1640 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1641
2a2cf6b1 1642 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1643 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1644}
1645
eebce385
MS
1646static ssize_t maxmulti_write (struct device *dev,
1647 struct device_attribute *attr,
1648 const char *buf, size_t count)
1da177e4 1649{
dff59b64 1650 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1651
2a2cf6b1 1652 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1653 priv->conn->prof.maxmulti = 0;
1654 return count;
1655}
1656
1657static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1658
eebce385
MS
1659static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1660 char *buf)
1da177e4 1661{
dff59b64 1662 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1663
2a2cf6b1 1664 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1665 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1666}
1667
eebce385
MS
1668static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1669 const char *buf, size_t count)
1da177e4 1670{
dff59b64 1671 struct netiucv_priv *priv = dev_get_drvdata(dev);
e82b0f2c 1672
2a2cf6b1 1673 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1674 priv->conn->prof.maxcqueue = 0;
1675 return count;
1676}
1677
1678static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1679
eebce385
MS
1680static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1681 char *buf)
1da177e4 1682{
dff59b64 1683 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1684
2a2cf6b1 1685 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1686 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1687}
1688
eebce385
MS
1689static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1690 const char *buf, size_t count)
1da177e4 1691{
dff59b64 1692 struct netiucv_priv *priv = dev_get_drvdata(dev);
e82b0f2c 1693
2a2cf6b1 1694 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1695 priv->conn->prof.doios_single = 0;
1696 return count;
1697}
1698
1699static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1700
eebce385
MS
1701static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1702 char *buf)
1da177e4 1703{
dff59b64 1704 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1705
2a2cf6b1 1706 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1707 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1708}
1709
eebce385
MS
1710static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1711 const char *buf, size_t count)
1da177e4 1712{
dff59b64 1713 struct netiucv_priv *priv = dev_get_drvdata(dev);
e82b0f2c 1714
2a2cf6b1 1715 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1716 priv->conn->prof.doios_multi = 0;
1717 return count;
1718}
1719
1720static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1721
eebce385
MS
1722static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1723 char *buf)
1da177e4 1724{
dff59b64 1725 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1726
2a2cf6b1 1727 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1728 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1729}
1730
eebce385
MS
1731static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1732 const char *buf, size_t count)
1da177e4 1733{
dff59b64 1734 struct netiucv_priv *priv = dev_get_drvdata(dev);
e82b0f2c 1735
2a2cf6b1 1736 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1737 priv->conn->prof.txlen = 0;
1738 return count;
1739}
1740
1741static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1742
eebce385
MS
1743static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1744 char *buf)
1da177e4 1745{
dff59b64 1746 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1747
2a2cf6b1 1748 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1749 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1750}
1751
eebce385
MS
1752static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1753 const char *buf, size_t count)
1da177e4 1754{
dff59b64 1755 struct netiucv_priv *priv = dev_get_drvdata(dev);
e82b0f2c 1756
2a2cf6b1 1757 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1758 priv->conn->prof.tx_time = 0;
1759 return count;
1760}
1761
1762static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);

static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
}

static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);

static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
}

static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_max_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
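
/*
 * Note on the statistics attributes above: each *_write handler ignores the
 * value written and simply resets its counter to zero, so writing any value
 * to one of these attributes clears the corresponding statistic, while
 * reading it returns the current value.
 */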

static struct attribute *netiucv_attrs[] = {
	&dev_attr_buffer.attr,
	&dev_attr_user.attr,
	NULL,
};

static struct attribute_group netiucv_attr_group = {
	.attrs = netiucv_attrs,
};

static struct attribute *netiucv_stat_attrs[] = {
	&dev_attr_device_fsm_state.attr,
	&dev_attr_connection_fsm_state.attr,
	&dev_attr_max_tx_buffer_used.attr,
	&dev_attr_max_chained_skbs.attr,
	&dev_attr_tx_single_write_ops.attr,
	&dev_attr_tx_multi_write_ops.attr,
	&dev_attr_netto_bytes.attr,
	&dev_attr_max_tx_io_time.attr,
	&dev_attr_tx_pending.attr,
	&dev_attr_tx_max_pending.attr,
	NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
	.name = "stats",
	.attrs = netiucv_stat_attrs,
};

static const struct attribute_group *netiucv_attr_groups[] = {
	&netiucv_stat_attr_group,
	&netiucv_attr_group,
	NULL,
};
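
/*
 * Because netiucv_register_device() below sets dev->groups to
 * netiucv_attr_groups, every IUCV interface gets the buffer/user attributes
 * in its sysfs device directory and the statistics attributes in a "stats"
 * subdirectory.  A sketch of the resulting layout (exact path assumed here
 * for illustration only):
 *   /sys/bus/iucv/devices/netiucv0/user
 *   /sys/bus/iucv/devices/netiucv0/stats/tx_pending
 */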

static int netiucv_register_device(struct net_device *ndev)
{
	struct netiucv_priv *priv = netdev_priv(ndev);
	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	int ret;

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (dev) {
		dev_set_name(dev, "net%s", ndev->name);
		dev->bus = &iucv_bus;
		dev->parent = iucv_root;
		dev->groups = netiucv_attr_groups;
		/*
		 * The release function could be called after the
		 * module has been unloaded. Its _only_ task is to
		 * free the struct. Therefore, we specify kfree()
		 * directly here. (Probably a little bit obfuscating
		 * but legitimate ...).
		 */
		dev->release = (void (*)(struct device *))kfree;
		dev->driver = &netiucv_driver;
	} else
		return -ENOMEM;

	ret = device_register(dev);
	if (ret) {
		put_device(dev);
		return ret;
	}
	priv->dev = dev;
	dev_set_drvdata(dev, priv);
	return 0;
}

static void netiucv_unregister_device(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	device_unregister(dev);
}
1881
1882/**
1883 * Allocate and initialize a new connection structure.
1884 * Add it to the list of netiucv connections;
1885 */
static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
						      char *username,
						      char *userdata)
{
	struct iucv_connection *conn;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		goto out;
	skb_queue_head_init(&conn->collect_queue);
	skb_queue_head_init(&conn->commit_queue);
	spin_lock_init(&conn->collect_lock);
	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
	conn->netdev = dev;

	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->rx_buff)
		goto out_conn;
	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->tx_buff)
		goto out_rx;
	conn->fsm = init_fsm("netiucvconn", conn_state_names,
			     conn_event_names, NR_CONN_STATES,
			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
			     GFP_KERNEL);
	if (!conn->fsm)
		goto out_tx;

	fsm_settimer(conn->fsm, &conn->timer);
	fsm_newstate(conn->fsm, CONN_STATE_INVALID);

	if (userdata)
		memcpy(conn->userdata, userdata, 17);
	if (username) {
		memcpy(conn->userid, username, 9);
		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
	}

	write_lock_bh(&iucv_connection_rwlock);
	list_add_tail(&conn->list, &iucv_connection_list);
	write_unlock_bh(&iucv_connection_rwlock);
	return conn;

out_tx:
	kfree_skb(conn->tx_buff);
out_rx:
	kfree_skb(conn->rx_buff);
out_conn:
	kfree(conn);
out:
	return NULL;
}

/**
 * Release a connection structure and remove it from the
 * list of netiucv connections.
 */
static void netiucv_remove_connection(struct iucv_connection *conn)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	write_lock_bh(&iucv_connection_rwlock);
	list_del_init(&conn->list);
	write_unlock_bh(&iucv_connection_rwlock);
	fsm_deltimer(&conn->timer);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		iucv_path_sever(conn->path, conn->userdata);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	kfree_fsm(conn->fsm);
	kfree_skb(conn->rx_buff);
	kfree_skb(conn->tx_buff);
}

/**
 * Release all resources held by a net device.
 */
static void netiucv_free_netdevice(struct net_device *dev)
{
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (!dev)
		return;

	if (privptr) {
		if (privptr->conn)
			netiucv_remove_connection(privptr->conn);
		if (privptr->fsm)
			kfree_fsm(privptr->fsm);
		privptr->conn = NULL; privptr->fsm = NULL;
		/* privptr gets freed by free_netdev() */
	}
	free_netdev(dev);
}

/**
 * Initialize a net device. (Called from kernel in alloc_netdev())
 */
static const struct net_device_ops netiucv_netdev_ops = {
	.ndo_open		= netiucv_open,
	.ndo_stop		= netiucv_close,
	.ndo_get_stats		= netiucv_stats,
	.ndo_start_xmit		= netiucv_tx,
	.ndo_change_mtu		= netiucv_change_mtu,
};

static void netiucv_setup_netdevice(struct net_device *dev)
{
	dev->mtu = NETIUCV_MTU_DEFAULT;
	dev->destructor = netiucv_free_netdevice;
	dev->hard_header_len = NETIUCV_HDRLEN;
	dev->addr_len = 0;
	dev->type = ARPHRD_SLIP;
	dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->netdev_ops = &netiucv_netdev_ops;
}

/**
 * Allocate and initialize everything of a net device.
 */
static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
{
	struct netiucv_priv *privptr;
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
			   NET_NAME_UNKNOWN, netiucv_setup_netdevice);
	if (!dev)
		return NULL;
	rtnl_lock();
	if (dev_alloc_name(dev, dev->name) < 0)
		goto out_netdev;

	privptr = netdev_priv(dev);
	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
	if (!privptr->fsm)
		goto out_netdev;

	privptr->conn = netiucv_new_connection(dev, username, userdata);
	if (!privptr->conn) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
		goto out_fsm;
	}
	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
	return dev;

out_fsm:
	kfree_fsm(privptr->fsm);
out_netdev:
	rtnl_unlock();
	free_netdev(dev);
	return NULL;
}

static ssize_t conn_write(struct device_driver *drv,
			  const char *buf, size_t count)
{
	char username[9];
	char userdata[17];
	int rc;
	struct net_device *dev;
	struct netiucv_priv *priv;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	rc = netiucv_check_user(buf, count, username, userdata);
	if (rc)
		return rc;

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) &&
		    !strncmp(userdata, cp->userdata, 17)) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
				       "already exists\n",
				       netiucv_printuser(cp));
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);

	dev = netiucv_init_netdevice(username, userdata);
	if (!dev) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
		return -ENODEV;
	}

	rc = netiucv_register_device(dev);
	if (rc) {
		rtnl_unlock();
		IUCV_DBF_TEXT_(setup, 2,
			       "ret %d from netiucv_register_device\n", rc);
		goto out_free_ndev;
	}

	/* sysfs magic */
	priv = netdev_priv(dev);
	SET_NETDEV_DEV(dev, priv->dev);

	rc = register_netdevice(dev);
	rtnl_unlock();
	if (rc)
		goto out_unreg;

	dev_info(priv->dev, "The IUCV interface to %s has been established "
		 "successfully\n",
		 netiucv_printuser(priv->conn));

	return count;

out_unreg:
	netiucv_unregister_device(priv->dev);
out_free_ndev:
	netiucv_free_netdevice(dev);
	return rc;
}

static DRIVER_ATTR(connection, 0200, NULL, conn_write);
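
/*
 * conn_write() is the store routine of the driver's "connection" attribute:
 * writing a z/VM user ID (plus optional connection data, as parsed by
 * netiucv_check_user()) creates a new iucv%d interface peered with that
 * guest.  A usage sketch, with the sysfs path and guest name assumed here
 * purely for illustration:
 *   echo "VMUSER1" > /sys/bus/iucv/drivers/netiucv/connection
 */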

static ssize_t remove_write (struct device_driver *drv,
			     const char *buf, size_t count)
{
	struct iucv_connection *cp;
	struct net_device *ndev;
	struct netiucv_priv *priv;
	struct device *dev;
	char name[IFNAMSIZ];
	const char *p;
	int i;

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (count >= IFNAMSIZ)
		count = IFNAMSIZ - 1;

	for (i = 0, p = buf; i < count && *p; i++, p++) {
		if (*p == '\n' || *p == ' ')
			/* trailing lf, grr */
			break;
		name[i] = *p;
	}
	name[i] = '\0';

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		ndev = cp->netdev;
		priv = netdev_priv(ndev);
		dev = priv->dev;
		if (strncmp(name, ndev->name, count))
			continue;
		read_unlock_bh(&iucv_connection_rwlock);
		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
			dev_warn(dev, "The IUCV device is connected"
				 " to %s and cannot be removed\n",
				 priv->conn->userid);
			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
			return -EPERM;
		}
		unregister_netdev(ndev);
		netiucv_unregister_device(dev);
		return count;
	}
	read_unlock_bh(&iucv_connection_rwlock);
	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
	return -EINVAL;
}

static DRIVER_ATTR(remove, 0200, NULL, remove_write);
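
/*
 * remove_write() is the counterpart store routine: writing an interface name
 * to the "remove" attribute tears the interface down again, provided it is
 * not up.  Usage sketch (sysfs path assumed, as above):
 *   echo "iucv0" > /sys/bus/iucv/drivers/netiucv/remove
 */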

static struct attribute *netiucv_drv_attrs[] = {
	&driver_attr_connection.attr,
	&driver_attr_remove.attr,
	NULL,
};

static struct attribute_group netiucv_drv_attr_group = {
	.attrs = netiucv_drv_attrs,
};

static const struct attribute_group *netiucv_drv_attr_groups[] = {
	&netiucv_drv_attr_group,
	NULL,
};

static void netiucv_banner(void)
{
	pr_info("driver initialized\n");
}

static void __exit netiucv_exit(void)
{
	struct iucv_connection *cp;
	struct net_device *ndev;
	struct netiucv_priv *priv;
	struct device *dev;

	IUCV_DBF_TEXT(trace, 3, __func__);
	while (!list_empty(&iucv_connection_list)) {
		cp = list_entry(iucv_connection_list.next,
				struct iucv_connection, list);
		ndev = cp->netdev;
		priv = netdev_priv(ndev);
		dev = priv->dev;

		unregister_netdev(ndev);
		netiucv_unregister_device(dev);
	}

	device_unregister(netiucv_dev);
	driver_unregister(&netiucv_driver);
	iucv_unregister(&netiucv_handler, 1);
	iucv_unregister_dbf_views();

	pr_info("driver unloaded\n");
	return;
}

static int __init netiucv_init(void)
{
	int rc;

	rc = iucv_register_dbf_views();
	if (rc)
		goto out;
	rc = iucv_register(&netiucv_handler, 1);
	if (rc)
		goto out_dbf;
	IUCV_DBF_TEXT(trace, 3, __func__);
	netiucv_driver.groups = netiucv_drv_attr_groups;
	rc = driver_register(&netiucv_driver);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
		goto out_iucv;
	}
	/* establish dummy device */
	netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!netiucv_dev) {
		rc = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(netiucv_dev, "netiucv");
	netiucv_dev->bus = &iucv_bus;
	netiucv_dev->parent = iucv_root;
	netiucv_dev->release = (void (*)(struct device *))kfree;
	netiucv_dev->driver = &netiucv_driver;
	rc = device_register(netiucv_dev);
	if (rc) {
		put_device(netiucv_dev);
		goto out_driver;
	}
	netiucv_banner();
	return rc;

out_driver:
	driver_unregister(&netiucv_driver);
out_iucv:
	iucv_unregister(&netiucv_handler, 1);
out_dbf:
	iucv_unregister_dbf_views();
out:
	return rc;
}

module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");