mm: update get_user_pages_longterm to migrate pages allocated from CMA region
[linux-2.6-block.git] / drivers / s390 / net / netiucv.c
CommitLineData
ab9953ff 1// SPDX-License-Identifier: GPL-2.0+
1da177e4 2/*
1da177e4
LT
3 * IUCV network driver
4 *
1175b257 5 * Copyright IBM Corp. 2001, 2009
1da177e4 6 *
1175b257
UB
7 * Author(s):
8 * Original netiucv driver:
9 * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
10 * Sysfs integration and all bugs therein:
11 * Cornelia Huck (cornelia.huck@de.ibm.com)
12 * PM functions:
13 * Ursula Braun (ursula.braun@de.ibm.com)
1da177e4
LT
14 *
15 * Documentation used:
16 * the source of the original IUCV driver by:
17 * Stefan Hegewald <hegewald@de.ibm.com>
18 * Hartmut Penner <hpenner@de.ibm.com>
19 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
20 * Martin Schwidefsky (schwidefsky@de.ibm.com)
21 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
1da177e4 22 */
e82b0f2c 23
8f7c502c
UB
24#define KMSG_COMPONENT "netiucv"
25#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
26
1da177e4
LT
27#undef DEBUG
28
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/errno.h>
34#include <linux/types.h>
35#include <linux/interrupt.h>
36#include <linux/timer.h>
1da177e4
LT
37#include <linux/bitops.h>
38
39#include <linux/signal.h>
40#include <linux/string.h>
41#include <linux/device.h>
42
43#include <linux/ip.h>
44#include <linux/if_arp.h>
45#include <linux/tcp.h>
46#include <linux/skbuff.h>
47#include <linux/ctype.h>
48#include <net/dst.h>
49
50#include <asm/io.h>
7c0f6ba6 51#include <linux/uaccess.h>
08e3356c 52#include <asm/ebcdic.h>
1da177e4 53
eebce385 54#include <net/iucv/iucv.h>
1da177e4
LT
55#include "fsm.h"
56
57MODULE_AUTHOR
58 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
59MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
60
eebce385
MS
61/**
62 * Debug Facility stuff
63 */
64#define IUCV_DBF_SETUP_NAME "iucv_setup"
08e3356c 65#define IUCV_DBF_SETUP_LEN 64
eebce385
MS
66#define IUCV_DBF_SETUP_PAGES 2
67#define IUCV_DBF_SETUP_NR_AREAS 1
68#define IUCV_DBF_SETUP_LEVEL 3
69
70#define IUCV_DBF_DATA_NAME "iucv_data"
71#define IUCV_DBF_DATA_LEN 128
72#define IUCV_DBF_DATA_PAGES 2
73#define IUCV_DBF_DATA_NR_AREAS 1
74#define IUCV_DBF_DATA_LEVEL 2
75
76#define IUCV_DBF_TRACE_NAME "iucv_trace"
77#define IUCV_DBF_TRACE_LEN 16
78#define IUCV_DBF_TRACE_PAGES 4
79#define IUCV_DBF_TRACE_NR_AREAS 1
80#define IUCV_DBF_TRACE_LEVEL 3
81
82#define IUCV_DBF_TEXT(name,level,text) \
83 do { \
84 debug_text_event(iucv_dbf_##name,level,text); \
85 } while (0)
86
87#define IUCV_DBF_HEX(name,level,addr,len) \
88 do { \
89 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
90 } while (0)
91
92DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
93
f33780d3
PT
94#define IUCV_DBF_TEXT_(name, level, text...) \
95 do { \
8e6a8285 96 if (debug_level_enabled(iucv_dbf_##name, level)) { \
390dfd95
TH
97 char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
98 sprintf(__buf, text); \
99 debug_text_event(iucv_dbf_##name, level, __buf); \
f33780d3
PT
100 put_cpu_var(iucv_dbf_txt_buf); \
101 } \
eebce385
MS
102 } while (0)
103
104#define IUCV_DBF_SPRINTF(name,level,text...) \
105 do { \
106 debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
107 debug_sprintf_event(iucv_dbf_trace, level, text ); \
108 } while (0)
109
110/**
111 * some more debug stuff
112 */
1da177e4
LT
113#define PRINTK_HEADER " iucv: " /* for debugging */
114
1175b257
UB
115/* dummy device to make sure netiucv_pm functions are called */
116static struct device *netiucv_dev;
117
118static int netiucv_pm_prepare(struct device *);
119static void netiucv_pm_complete(struct device *);
120static int netiucv_pm_freeze(struct device *);
121static int netiucv_pm_restore_thaw(struct device *);
122
47145210 123static const struct dev_pm_ops netiucv_pm_ops = {
1175b257
UB
124 .prepare = netiucv_pm_prepare,
125 .complete = netiucv_pm_complete,
126 .freeze = netiucv_pm_freeze,
127 .thaw = netiucv_pm_restore_thaw,
128 .restore = netiucv_pm_restore_thaw,
129};
130
1da177e4 131static struct device_driver netiucv_driver = {
2219510f 132 .owner = THIS_MODULE,
1da177e4
LT
133 .name = "netiucv",
134 .bus = &iucv_bus,
1175b257 135 .pm = &netiucv_pm_ops,
1da177e4
LT
136};
137
91e60eb6
UB
138static int netiucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
139static void netiucv_callback_connack(struct iucv_path *, u8 *);
140static void netiucv_callback_connrej(struct iucv_path *, u8 *);
141static void netiucv_callback_connsusp(struct iucv_path *, u8 *);
142static void netiucv_callback_connres(struct iucv_path *, u8 *);
eebce385
MS
143static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
144static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
145
146static struct iucv_handler netiucv_handler = {
147 .path_pending = netiucv_callback_connreq,
148 .path_complete = netiucv_callback_connack,
149 .path_severed = netiucv_callback_connrej,
150 .path_quiesced = netiucv_callback_connsusp,
151 .path_resumed = netiucv_callback_connres,
152 .message_pending = netiucv_callback_rx,
153 .message_complete = netiucv_callback_txdone
154};
155
1da177e4
LT
156/**
157 * Per connection profiling data
158 */
159struct connection_profile {
160 unsigned long maxmulti;
161 unsigned long maxcqueue;
162 unsigned long doios_single;
163 unsigned long doios_multi;
164 unsigned long txlen;
165 unsigned long tx_time;
ee6edb97 166 unsigned long send_stamp;
1da177e4
LT
167 unsigned long tx_pending;
168 unsigned long tx_max_pending;
169};
170
171/**
172 * Representation of one iucv connection
173 */
174struct iucv_connection {
eebce385
MS
175 struct list_head list;
176 struct iucv_path *path;
1da177e4
LT
177 struct sk_buff *rx_buff;
178 struct sk_buff *tx_buff;
179 struct sk_buff_head collect_queue;
180 struct sk_buff_head commit_queue;
181 spinlock_t collect_lock;
182 int collect_len;
183 int max_buffsize;
184 fsm_timer timer;
185 fsm_instance *fsm;
186 struct net_device *netdev;
187 struct connection_profile prof;
188 char userid[9];
08e3356c 189 char userdata[17];
1da177e4
LT
190};
191
192/**
193 * Linked list of all connection structs.
194 */
c11ca97e 195static LIST_HEAD(iucv_connection_list);
bfac0d0b 196static DEFINE_RWLOCK(iucv_connection_rwlock);
1da177e4
LT
197
198/**
199 * Representation of event-data for the
200 * connection state machine.
201 */
202struct iucv_event {
203 struct iucv_connection *conn;
204 void *data;
205};
206
207/**
208 * Private part of the network device structure
209 */
210struct netiucv_priv {
211 struct net_device_stats stats;
212 unsigned long tbusy;
213 fsm_instance *fsm;
214 struct iucv_connection *conn;
215 struct device *dev;
1175b257 216 int pm_state;
1da177e4
LT
217};
218
219/**
220 * Link level header for a packet.
221 */
eebce385
MS
222struct ll_header {
223 u16 next;
224};
1da177e4 225
eebce385 226#define NETIUCV_HDRLEN (sizeof(struct ll_header))
08e3356c 227#define NETIUCV_BUFSIZE_MAX 65537
1da177e4
LT
228#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
229#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
230#define NETIUCV_MTU_DEFAULT 9216
231#define NETIUCV_QUEUELEN_DEFAULT 50
232#define NETIUCV_TIMEOUT_5SEC 5000
233
234/**
235 * Compatibility macros for busy handling
236 * of network devices.
237 */
cef6ff22 238static void netiucv_clear_busy(struct net_device *dev)
1da177e4 239{
eebce385
MS
240 struct netiucv_priv *priv = netdev_priv(dev);
241 clear_bit(0, &priv->tbusy);
1da177e4
LT
242 netif_wake_queue(dev);
243}
244
cef6ff22 245static int netiucv_test_and_set_busy(struct net_device *dev)
1da177e4 246{
eebce385 247 struct netiucv_priv *priv = netdev_priv(dev);
1da177e4 248 netif_stop_queue(dev);
eebce385 249 return test_and_set_bit(0, &priv->tbusy);
1da177e4
LT
250}
251
08e3356c
UB
252static u8 iucvMagic_ascii[16] = {
253 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
254 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
255};
256
257static u8 iucvMagic_ebcdic[16] = {
1da177e4
LT
258 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
259 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
260};
261
1da177e4
LT
/**
 * netiucv_printname - convert an IUCV userid/userdata to printable form
 * @name: buffer holding the blank-padded id; need not be NUL-terminated
 * @len:  number of significant bytes in @name (callers pass 8 or 16;
 *        must not exceed 16 because of the static buffer below)
 *
 * Copies @name into a static buffer and truncates it at the first
 * whitespace character, which strips the trailing blank padding.
 *
 * Returns a pointer to static storage that is overwritten by the next
 * call (not reentrant).
 */
static char *netiucv_printname(char *name, int len)
{
	static char tmp[17];
	char *p = tmp;

	memcpy(tmp, name, len);
	tmp[len] = '\0';
	/* Cast to unsigned char: passing a plain char with a negative
	 * value to isspace() is undefined behavior (CERT STR37-C). */
	while (*p && ((p - tmp) < len) && (!isspace((unsigned char)*p)))
		p++;
	*p = '\0';
	return tmp;
}
e82b0f2c 281
08e3356c
UB
/*
 * Return a printable identification of a connection's peer:
 * "userid.userdata" when the connection carries non-default userdata,
 * otherwise just the userid.  The default is the EBCDIC iucvMagic
 * pattern, compared against conn->userdata.
 *
 * Returns static storage that is overwritten on each call (also via the
 * static buffer inside netiucv_printname()) — not reentrant.
 */
static char *netiucv_printuser(struct iucv_connection *conn)
{
	static char tmp_uid[9];
	static char tmp_udat[17];
	static char buf[100];

	if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
		/* Pre-terminate; the memcpy()s below fill exactly 8/16 bytes. */
		tmp_uid[8] = '\0';
		tmp_udat[16] = '\0';
		memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8);
		memcpy(tmp_udat, conn->userdata, 16);
		EBCASC(tmp_udat, 16);	/* userdata is stored in EBCDIC */
		memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
		sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
		return buf;
	} else
		return netiucv_printname(conn->userid, 8);
}
300
1da177e4
LT
301/**
302 * States of the interface statemachine.
303 */
304enum dev_states {
305 DEV_STATE_STOPPED,
306 DEV_STATE_STARTWAIT,
307 DEV_STATE_STOPWAIT,
308 DEV_STATE_RUNNING,
309 /**
310 * MUST be always the last element!!
311 */
312 NR_DEV_STATES
313};
314
315static const char *dev_state_names[] = {
316 "Stopped",
317 "StartWait",
318 "StopWait",
319 "Running",
320};
321
322/**
323 * Events of the interface statemachine.
324 */
325enum dev_events {
326 DEV_EVENT_START,
327 DEV_EVENT_STOP,
328 DEV_EVENT_CONUP,
329 DEV_EVENT_CONDOWN,
330 /**
331 * MUST be always the last element!!
332 */
333 NR_DEV_EVENTS
334};
335
336static const char *dev_event_names[] = {
337 "Start",
338 "Stop",
339 "Connection up",
340 "Connection down",
341};
e82b0f2c 342
1da177e4
LT
343/**
344 * Events of the connection statemachine
345 */
346enum conn_events {
347 /**
348 * Events, representing callbacks from
349 * lowlevel iucv layer)
350 */
351 CONN_EVENT_CONN_REQ,
352 CONN_EVENT_CONN_ACK,
353 CONN_EVENT_CONN_REJ,
354 CONN_EVENT_CONN_SUS,
355 CONN_EVENT_CONN_RES,
356 CONN_EVENT_RX,
357 CONN_EVENT_TXDONE,
358
359 /**
360 * Events, representing errors return codes from
361 * calls to lowlevel iucv layer
362 */
363
364 /**
365 * Event, representing timer expiry.
366 */
367 CONN_EVENT_TIMER,
368
369 /**
370 * Events, representing commands from upper levels.
371 */
372 CONN_EVENT_START,
373 CONN_EVENT_STOP,
374
375 /**
376 * MUST be always the last element!!
377 */
378 NR_CONN_EVENTS,
379};
380
381static const char *conn_event_names[] = {
382 "Remote connection request",
383 "Remote connection acknowledge",
384 "Remote connection reject",
385 "Connection suspended",
386 "Connection resumed",
387 "Data received",
388 "Data sent",
389
390 "Timer",
391
392 "Start",
393 "Stop",
394};
395
396/**
397 * States of the connection statemachine.
398 */
399enum conn_states {
400 /**
401 * Connection not assigned to any device,
402 * initial state, invalid
403 */
404 CONN_STATE_INVALID,
405
406 /**
407 * Userid assigned but not operating
408 */
409 CONN_STATE_STOPPED,
410
411 /**
412 * Connection registered,
413 * no connection request sent yet,
414 * no connection request received
415 */
416 CONN_STATE_STARTWAIT,
417
418 /**
419 * Connection registered and connection request sent,
420 * no acknowledge and no connection request received yet.
421 */
422 CONN_STATE_SETUPWAIT,
423
424 /**
425 * Connection up and running idle
426 */
427 CONN_STATE_IDLE,
428
429 /**
430 * Data sent, awaiting CONN_EVENT_TXDONE
431 */
432 CONN_STATE_TX,
433
434 /**
435 * Error during registration.
436 */
437 CONN_STATE_REGERR,
438
439 /**
440 * Error during registration.
441 */
442 CONN_STATE_CONNERR,
443
444 /**
445 * MUST be always the last element!!
446 */
447 NR_CONN_STATES,
448};
449
450static const char *conn_state_names[] = {
451 "Invalid",
452 "Stopped",
453 "StartWait",
454 "SetupWait",
455 "Idle",
456 "TX",
457 "Terminating",
458 "Registration error",
459 "Connect error",
460};
461
e82b0f2c 462
1da177e4
LT
463/**
464 * Debug Facility Stuff
465 */
466static debug_info_t *iucv_dbf_setup = NULL;
467static debug_info_t *iucv_dbf_data = NULL;
468static debug_info_t *iucv_dbf_trace = NULL;
469
470DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
471
/* Tear down the three s390 debug feature areas.  Also called from the
 * partial-failure path of iucv_register_dbf_views(), so some of the
 * handles may still be NULL at this point. */
static void iucv_unregister_dbf_views(void)
{
	debug_unregister(iucv_dbf_setup);
	debug_unregister(iucv_dbf_data);
	debug_unregister(iucv_dbf_trace);
}
/* Create the three s390 debug feature areas (setup/data/trace), attach
 * a hex/ascii view to each and set the initial debug level.
 * Returns 0 on success or -ENOMEM when any area could not be created;
 * partially created areas are cleaned up before returning. */
static int iucv_register_dbf_views(void)
{
	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
					IUCV_DBF_SETUP_PAGES,
					IUCV_DBF_SETUP_NR_AREAS,
					IUCV_DBF_SETUP_LEN);
	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
				       IUCV_DBF_DATA_PAGES,
				       IUCV_DBF_DATA_NR_AREAS,
				       IUCV_DBF_DATA_LEN);
	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
					IUCV_DBF_TRACE_PAGES,
					IUCV_DBF_TRACE_NR_AREAS,
					IUCV_DBF_TRACE_LEN);

	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
	    (iucv_dbf_trace == NULL)) {
		iucv_unregister_dbf_views();
		return -ENOMEM;
	}
	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);

	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);

	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);

	return 0;
}
509
eebce385 510/*
1da177e4 511 * Callback-wrappers, called from lowlevel iucv layer.
eebce385 512 */
1da177e4 513
eebce385
MS
514static void netiucv_callback_rx(struct iucv_path *path,
515 struct iucv_message *msg)
1da177e4 516{
eebce385 517 struct iucv_connection *conn = path->private;
1da177e4
LT
518 struct iucv_event ev;
519
520 ev.conn = conn;
eebce385 521 ev.data = msg;
1da177e4
LT
522 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
523}
524
eebce385
MS
525static void netiucv_callback_txdone(struct iucv_path *path,
526 struct iucv_message *msg)
1da177e4 527{
eebce385 528 struct iucv_connection *conn = path->private;
1da177e4
LT
529 struct iucv_event ev;
530
531 ev.conn = conn;
eebce385 532 ev.data = msg;
1da177e4
LT
533 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
534}
535
/* IUCV layer callback (path_complete): our pending path is now
 * established.  @ipuser is unused; forward CONN_EVENT_CONN_ACK to the
 * connection state machine. */
static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
}
542
/* IUCV layer callback (path_pending): a peer wants to establish a path.
 * Walk the global connection list for connections whose userid and
 * userdata match the requester; for each match, attach the path and
 * feed CONN_EVENT_CONN_REQ to that connection's state machine.
 *
 * Returns 0 when at least one connection matched, -EINVAL otherwise
 * (presumably causing the IUCV core to refuse the path — confirm in the
 * iucv core).
 *
 * NOTE(review): tmp_user/tmp_udat are static, so this callback is not
 * reentrant; appears to rely on serialization by the IUCV layer —
 * confirm.
 */
static int netiucv_callback_connreq(struct iucv_path *path, u8 *ipvmid,
				    u8 *ipuser)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;
	static char tmp_user[9];
	static char tmp_udat[17];
	int rc;

	rc = -EINVAL;
	/* Printable copies for the trace message below. */
	memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
	memcpy(tmp_udat, ipuser, 16);
	EBCASC(tmp_udat, 16);
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(conn, &iucv_connection_list, list) {
		if (strncmp(ipvmid, conn->userid, 8) ||
		    strncmp(ipuser, conn->userdata, 16))
			continue;
		/* Found a matching connection for this path. */
		conn->path = path;
		ev.conn = conn;
		ev.data = path;
		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
		rc = 0;
	}
	IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
		       tmp_user, netiucv_printname(tmp_udat, 16));
	read_unlock_bh(&iucv_connection_rwlock);
	return rc;
}
573
/* IUCV layer callback (path_severed): the peer dropped the path.
 * Forward CONN_EVENT_CONN_REJ to the connection state machine. */
static void netiucv_callback_connrej(struct iucv_path *path, u8 *ipuser)
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
}
580
/* IUCV layer callback (path_quiesced): the peer suspended the path.
 * Forward CONN_EVENT_CONN_SUS to the connection state machine. */
static void netiucv_callback_connsusp(struct iucv_path *path, u8 *ipuser)
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
}
587
/* IUCV layer callback (path_resumed): the peer resumed the path.
 * Forward CONN_EVENT_CONN_RES to the connection state machine. */
static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser)
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
}
1da177e4
LT
594
/**
 * NOP action for statemachines
 *
 * Bound to state/event pairs that are valid but need no processing.
 */
static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
{
}
e82b0f2c 601
eebce385 602/*
1da177e4 603 * Actions of the connection statemachine
eebce385 604 */
1da177e4
LT
605
/**
 * netiucv_unpack_skb
 * @conn: The connection where this skb has been received.
 * @pskb: The received skb.
 *
 * Unpack a just received skb and hand it over to upper layers.
 * Helper function for conn_action_rx.
 *
 * The receive buffer is a sequence of packets, each preceded by an
 * ll_header whose 'next' field holds the offset (within the buffer) of
 * the following header; a zero 'next' terminates the sequence.  Each
 * packet is copied into a freshly allocated skb and passed to the
 * stack.
 */
static void netiucv_unpack_skb(struct iucv_connection *conn,
			       struct sk_buff *pskb)
{
	struct net_device *dev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(dev);
	u16 offset = 0;

	skb_put(pskb, NETIUCV_HDRLEN);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_NONE;
	pskb->protocol = cpu_to_be16(ETH_P_IP);

	while (1) {
		struct sk_buff *skb;
		struct ll_header *header = (struct ll_header *) pskb->data;

		/* A zero 'next' offset marks the end of the buffer. */
		if (!header->next)
			break;

		skb_pull(pskb, NETIUCV_HDRLEN);
		/* Convert the absolute offset into this packet's length. */
		header->next -= offset;
		offset += header->next;
		header->next -= NETIUCV_HDRLEN;
		if (skb_tailroom(pskb) < header->next) {
			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
				header->next, skb_tailroom(pskb));
			return;
		}
		skb_put(pskb, header->next);
		skb_reset_mac_header(pskb);
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			IUCV_DBF_TEXT(data, 2,
				"Out of memory in netiucv_unpack_skb\n");
			privptr->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		/* NOTE(review): this sets ip_summed on the scratch buffer
		 * pskb, not on the skb handed to the stack — looks like it
		 * was meant for 'skb'; confirm intent before changing. */
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		privptr->stats.rx_packets++;
		privptr->stats.rx_bytes += skb->len;
		/*
		 * Since receiving is always initiated from a tasklet (in iucv.c),
		 * we must use netif_rx_ni() instead of netif_rx()
		 */
		netif_rx_ni(skb);
		/* Advance to the next packet in the receive buffer. */
		skb_pull(pskb, header->next);
		skb_put(pskb, NETIUCV_HDRLEN);
	}
}
668
/* Connection FSM action for CONN_EVENT_RX: receive a pending IUCV
 * message into conn->rx_buff and unpack it.  Messages for unlinked
 * connections or larger than the negotiated buffer size are rejected. */
static void conn_action_rx(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	/* privptr is computed before the !conn->netdev check below, but it
	 * is only dereferenced after that check has passed. */
	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn->netdev) {
		iucv_message_reject(conn->path, msg);
		IUCV_DBF_TEXT(data, 2,
			      "Received data for unlinked connection\n");
		return;
	}
	if (msg->length > conn->max_buffsize) {
		iucv_message_reject(conn->path, msg);
		privptr->stats.rx_dropped++;
		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
			       msg->length, conn->max_buffsize);
		return;
	}
	/* Reset rx_buff to an empty state before receiving into it. */
	conn->rx_buff->data = conn->rx_buff->head;
	skb_reset_tail_pointer(conn->rx_buff);
	conn->rx_buff->len = 0;
	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
				  msg->length, NULL);
	/* Anything shorter than 5 bytes cannot hold a header plus data. */
	if (rc || msg->length < 5) {
		privptr->stats.rx_errors++;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
		return;
	}
	netiucv_unpack_skb(conn, conn->rx_buff);
}
704
/* Connection FSM action for CONN_EVENT_TXDONE: a transmission has been
 * confirmed by IUCV.  Release the confirmed skb (msg->tag non-zero
 * means it was sent stand-alone and sits on the commit queue), then
 * gather everything that accumulated on the collect queue into tx_buff
 * and send it as one multi-packet message.  Ends in CONN_STATE_IDLE
 * when nothing is left to send or the send failed. */
static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct iucv_message txmsg;
	struct netiucv_priv *privptr = NULL;
	u32 single_flag = msg->tag;
	u32 txbytes = 0;
	u32 txpackets = 0;
	u32 stat_maxcq = 0;
	struct sk_buff *skb;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn || !conn->netdev) {
		IUCV_DBF_TEXT(data, 2,
			"Send confirmation for unlinked connection\n");
		return;
	}
	privptr = netdev_priv(conn->netdev);
	conn->prof.tx_pending--;
	if (single_flag) {
		/* Drop the reference taken when the skb was queued and
		 * account the payload (minus both headers) as transmitted. */
		if ((skb = skb_dequeue(&conn->commit_queue))) {
			refcount_dec(&skb->users);
			if (privptr) {
				privptr->stats.tx_packets++;
				privptr->stats.tx_bytes +=
					(skb->len - NETIUCV_HDRLEN
					 - NETIUCV_HDRLEN);
			}
			dev_kfree_skb_any(skb);
		}
	}
	/* Reset tx_buff to empty before refilling it. */
	conn->tx_buff->data = conn->tx_buff->head;
	skb_reset_tail_pointer(conn->tx_buff);
	conn->tx_buff->len = 0;
	spin_lock_irqsave(&conn->collect_lock, saveflags);
	/* Concatenate all collected skbs, each prefixed with an ll_header
	 * whose 'next' holds the offset of the following header. */
	while ((skb = skb_dequeue(&conn->collect_queue))) {
		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
		skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
		skb_copy_from_linear_data(skb,
					  skb_put(conn->tx_buff, skb->len),
					  skb->len);
		txbytes += skb->len;
		txpackets++;
		stat_maxcq++;
		refcount_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
	if (conn->collect_len > conn->prof.maxmulti)
		conn->prof.maxmulti = conn->collect_len;
	conn->collect_len = 0;
	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	if (conn->tx_buff->len == 0) {
		fsm_newstate(fi, CONN_STATE_IDLE);
		return;
	}

	/* Terminating header: next == 0 marks the end of the buffer. */
	header.next = 0;
	skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
	conn->prof.send_stamp = jiffies;
	txmsg.class = 0;
	txmsg.tag = 0;
	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
			       conn->tx_buff->data, conn->tx_buff->len);
	conn->prof.doios_multi++;
	conn->prof.txlen += conn->tx_buff->len;
	conn->prof.tx_pending++;
	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
		conn->prof.tx_max_pending = conn->prof.tx_pending;
	if (rc) {
		conn->prof.tx_pending--;
		fsm_newstate(fi, CONN_STATE_IDLE);
		if (privptr)
			privptr->stats.tx_errors += txpackets;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
	} else {
		if (privptr) {
			privptr->stats.tx_packets += txpackets;
			privptr->stats.tx_bytes += txbytes;
		}
		if (stat_maxcq > conn->prof.maxcqueue)
			conn->prof.maxcqueue = stat_maxcq;
	}
}
794
/* Connection FSM action for CONN_EVENT_CONN_REQ in STARTWAIT/SETUPWAIT:
 * accept the pending path from the peer.  On success the connection
 * goes to CONN_STATE_IDLE and the device FSM is told the link is up;
 * on failure only a trace entry is written. */
static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_path *path = ev->data;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);

	/* Configure the path before accepting it. */
	conn->path = path;
	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
	path->flags = 0;
	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
		return;
	}
	fsm_newstate(fi, CONN_STATE_IDLE);
	/* Size the device queue to the negotiated message limit. */
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
818
/* Connection FSM action for CONN_EVENT_CONN_REQ in states that cannot
 * take a new path (STOPPED/IDLE/TX): sever the pending path. */
static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_path *path = ev->data;

	IUCV_DBF_TEXT(trace, 3, __func__);
	iucv_path_sever(path, NULL);
}
827
/* Connection FSM action for CONN_EVENT_CONN_ACK: the peer accepted our
 * connection request.  Cancel the setup timeout, enter IDLE and notify
 * the device FSM that the link is up. */
static void conn_action_connack(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_IDLE);
	/* Size the device queue to the negotiated message limit. */
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
840
/* Connection FSM action for CONN_EVENT_TIMER in SETUPWAIT: the peer did
 * not answer our connection request in time.  Sever the path and fall
 * back to STARTWAIT so a new attempt can be made. */
static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, conn->userdata);
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
}
850
/* Connection FSM action for CONN_EVENT_CONN_REJ: the peer severed the
 * connection.  Sever our end of the path, log it, fall back to
 * STARTWAIT and tell the device FSM the link is down. */
static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, conn->userdata);
	dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
		"connection\n", netiucv_printuser(conn));
	IUCV_DBF_TEXT(data, 2,
		      "conn_action_connsever: Remote dropped connection\n");
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
868
eebce385 869static void conn_action_start(fsm_instance *fi, int event, void *arg)
1da177e4 870{
eebce385 871 struct iucv_connection *conn = arg;
8f7c502c
UB
872 struct net_device *netdev = conn->netdev;
873 struct netiucv_priv *privptr = netdev_priv(netdev);
1da177e4
LT
874 int rc;
875
2a2cf6b1 876 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4 877
eebce385 878 fsm_newstate(fi, CONN_STATE_STARTWAIT);
1da177e4 879
eebce385
MS
880 /*
881 * We must set the state before calling iucv_connect because the
882 * callback handler could be called at any point after the connection
883 * request is sent
884 */
1da177e4
LT
885
886 fsm_newstate(fi, CONN_STATE_SETUPWAIT);
eebce385 887 conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
08e3356c
UB
888 IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
889 netdev->name, netiucv_printuser(conn));
890
eebce385 891 rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
08e3356c 892 NULL, conn->userdata, conn);
1da177e4 893 switch (rc) {
eebce385 894 case 0:
8f7c502c 895 netdev->tx_queue_len = conn->path->msglim;
eebce385
MS
896 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
897 CONN_EVENT_TIMER, conn);
898 return;
899 case 11:
8f7c502c
UB
900 dev_warn(privptr->dev,
901 "The IUCV device failed to connect to z/VM guest %s\n",
08e3356c 902 netiucv_printname(conn->userid, 8));
eebce385
MS
903 fsm_newstate(fi, CONN_STATE_STARTWAIT);
904 break;
905 case 12:
8f7c502c
UB
906 dev_warn(privptr->dev,
907 "The IUCV device failed to connect to the peer on z/VM"
08e3356c 908 " guest %s\n", netiucv_printname(conn->userid, 8));
eebce385
MS
909 fsm_newstate(fi, CONN_STATE_STARTWAIT);
910 break;
911 case 13:
8f7c502c
UB
912 dev_err(privptr->dev,
913 "Connecting the IUCV device would exceed the maximum"
914 " number of IUCV connections\n");
eebce385
MS
915 fsm_newstate(fi, CONN_STATE_CONNERR);
916 break;
917 case 14:
8f7c502c
UB
918 dev_err(privptr->dev,
919 "z/VM guest %s has too many IUCV connections"
920 " to connect with the IUCV device\n",
08e3356c 921 netiucv_printname(conn->userid, 8));
eebce385
MS
922 fsm_newstate(fi, CONN_STATE_CONNERR);
923 break;
924 case 15:
8f7c502c
UB
925 dev_err(privptr->dev,
926 "The IUCV device cannot connect to a z/VM guest with no"
927 " IUCV authorization\n");
eebce385
MS
928 fsm_newstate(fi, CONN_STATE_CONNERR);
929 break;
930 default:
8f7c502c
UB
931 dev_err(privptr->dev,
932 "Connecting the IUCV device failed with error %d\n",
933 rc);
eebce385
MS
934 fsm_newstate(fi, CONN_STATE_CONNERR);
935 break;
1da177e4
LT
936 }
937 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
eebce385
MS
938 kfree(conn->path);
939 conn->path = NULL;
1da177e4
LT
940}
941
eebce385 942static void netiucv_purge_skb_queue(struct sk_buff_head *q)
1da177e4
LT
943{
944 struct sk_buff *skb;
945
946 while ((skb = skb_dequeue(q))) {
63354797 947 refcount_dec(&skb->users);
1da177e4
LT
948 dev_kfree_skb_any(skb);
949 }
950}
951
/* Connection FSM action for CONN_EVENT_STOP: shut the connection down.
 * Cancel any pending timer, purge both skb queues, sever and free the
 * path if one exists, and tell the device FSM the link is down. */
static void conn_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_STOPPED);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
		iucv_path_sever(conn->path, conn->userdata);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
973
/* Connection FSM action for CONN_EVENT_START in CONN_STATE_INVALID:
 * starting a connection that was never assigned to a device is a
 * programming error; just record it in the debug trace. */
static void conn_action_inval(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;

	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
		       netdev->name, conn->userid);
}
982
/*
 * Transition table of the connection state machine: each entry maps a
 * (state, event) pair to the action routine handling it.
 */
static const fsm_node conn_fsm[] = {
	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },

	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },

	/* Incoming path requests are only accepted while (re)connecting. */
	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },

	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },

	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
};

/* Number of entries in conn_fsm, passed to the FSM framework. */
static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
1016
e82b0f2c 1017
eebce385 1018/*
1da177e4 1019 * Actions for interface - statemachine.
eebce385 1020 */
1da177e4
LT
1021
1022/**
eebce385
MS
1023 * dev_action_start
1024 * @fi: An instance of an interface statemachine.
1025 * @event: The event, just happened.
1026 * @arg: Generic pointer, casted from struct net_device * upon call.
1da177e4 1027 *
eebce385 1028 * Startup connection by sending CONN_EVENT_START to it.
1da177e4 1029 */
eebce385 1030static void dev_action_start(fsm_instance *fi, int event, void *arg)
1da177e4 1031{
eebce385
MS
1032 struct net_device *dev = arg;
1033 struct netiucv_priv *privptr = netdev_priv(dev);
1da177e4 1034
2a2cf6b1 1035 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4 1036
1da177e4 1037 fsm_newstate(fi, DEV_STATE_STARTWAIT);
eebce385 1038 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1da177e4
LT
1039}
1040
1041/**
1042 * Shutdown connection by sending CONN_EVENT_STOP to it.
1043 *
1044 * @param fi An instance of an interface statemachine.
1045 * @param event The event, just happened.
1046 * @param arg Generic pointer, casted from struct net_device * upon call.
1047 */
1048static void
1049dev_action_stop(fsm_instance *fi, int event, void *arg)
1050{
eebce385
MS
1051 struct net_device *dev = arg;
1052 struct netiucv_priv *privptr = netdev_priv(dev);
1da177e4
LT
1053 struct iucv_event ev;
1054
2a2cf6b1 1055 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4
LT
1056
1057 ev.conn = privptr->conn;
1058
1059 fsm_newstate(fi, DEV_STATE_STOPWAIT);
1060 fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1061}
1062
1063/**
1064 * Called from connection statemachine
1065 * when a connection is up and running.
1066 *
1067 * @param fi An instance of an interface statemachine.
1068 * @param event The event, just happened.
1069 * @param arg Generic pointer, casted from struct net_device * upon call.
1070 */
1071static void
1072dev_action_connup(fsm_instance *fi, int event, void *arg)
1073{
eebce385
MS
1074 struct net_device *dev = arg;
1075 struct netiucv_priv *privptr = netdev_priv(dev);
1da177e4 1076
2a2cf6b1 1077 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4
LT
1078
1079 switch (fsm_getstate(fi)) {
1080 case DEV_STATE_STARTWAIT:
1081 fsm_newstate(fi, DEV_STATE_RUNNING);
8f7c502c
UB
1082 dev_info(privptr->dev,
1083 "The IUCV device has been connected"
08e3356c
UB
1084 " successfully to %s\n",
1085 netiucv_printuser(privptr->conn));
1da177e4
LT
1086 IUCV_DBF_TEXT(setup, 3,
1087 "connection is up and running\n");
1088 break;
1089 case DEV_STATE_STOPWAIT:
1da177e4
LT
1090 IUCV_DBF_TEXT(data, 2,
1091 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1092 break;
1093 }
1094}
1095
1096/**
1097 * Called from connection statemachine
1098 * when a connection has been shutdown.
1099 *
1100 * @param fi An instance of an interface statemachine.
1101 * @param event The event, just happened.
1102 * @param arg Generic pointer, casted from struct net_device * upon call.
1103 */
1104static void
1105dev_action_conndown(fsm_instance *fi, int event, void *arg)
1106{
2a2cf6b1 1107 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4
LT
1108
1109 switch (fsm_getstate(fi)) {
1110 case DEV_STATE_RUNNING:
1111 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1112 break;
1113 case DEV_STATE_STOPWAIT:
1114 fsm_newstate(fi, DEV_STATE_STOPPED);
1115 IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1116 break;
1117 }
1118}
1119
1120static const fsm_node dev_fsm[] = {
1121 { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
1122
1123 { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start },
1124 { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
1125
1126 { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
1127 { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
1128
1129 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
1130 { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
21b26f2f 1131 { DEV_STATE_RUNNING, DEV_EVENT_CONUP, netiucv_action_nop },
1da177e4
LT
1132};
1133
1134static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1135
1136/**
1137 * Transmit a packet.
1138 * This is a helper function for netiucv_tx().
1139 *
1140 * @param conn Connection to be used for sending.
1141 * @param skb Pointer to struct sk_buff of packet to send.
1142 * The linklevel header has already been set up
1143 * by netiucv_tx().
1144 *
1145 * @return 0 on success, -ERRNO on failure. (Never fails.)
1146 */
eebce385
MS
1147static int netiucv_transmit_skb(struct iucv_connection *conn,
1148 struct sk_buff *skb)
1149{
1150 struct iucv_message msg;
1da177e4 1151 unsigned long saveflags;
eebce385
MS
1152 struct ll_header header;
1153 int rc;
1da177e4
LT
1154
1155 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1156 int l = skb->len + NETIUCV_HDRLEN;
1157
1158 spin_lock_irqsave(&conn->collect_lock, saveflags);
1159 if (conn->collect_len + l >
1160 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1161 rc = -EBUSY;
1162 IUCV_DBF_TEXT(data, 2,
eebce385 1163 "EBUSY from netiucv_transmit_skb\n");
1da177e4 1164 } else {
63354797 1165 refcount_inc(&skb->users);
1da177e4
LT
1166 skb_queue_tail(&conn->collect_queue, skb);
1167 conn->collect_len += l;
eebce385 1168 rc = 0;
1da177e4
LT
1169 }
1170 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1171 } else {
1172 struct sk_buff *nskb = skb;
1173 /**
1174 * Copy the skb to a new allocated skb in lowmem only if the
1175 * data is located above 2G in memory or tailroom is < 2.
1176 */
27a884dc
ACM
1177 unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1178 NETIUCV_HDRLEN)) >> 31;
1da177e4
LT
1179 int copied = 0;
1180 if (hi || (skb_tailroom(skb) < 2)) {
1181 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1182 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1183 if (!nskb) {
1da177e4
LT
1184 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1185 rc = -ENOMEM;
1186 return rc;
1187 } else {
1188 skb_reserve(nskb, NETIUCV_HDRLEN);
59ae1d12 1189 skb_put_data(nskb, skb->data, skb->len);
1da177e4
LT
1190 }
1191 copied = 1;
1192 }
1193 /**
1194 * skb now is below 2G and has enough room. Add headers.
1195 */
1196 header.next = nskb->len + NETIUCV_HDRLEN;
1197 memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1198 header.next = 0;
59ae1d12 1199 skb_put_data(nskb, &header, NETIUCV_HDRLEN);
1da177e4
LT
1200
1201 fsm_newstate(conn->fsm, CONN_STATE_TX);
ee6edb97 1202 conn->prof.send_stamp = jiffies;
e82b0f2c 1203
eebce385
MS
1204 msg.tag = 1;
1205 msg.class = 0;
1206 rc = iucv_message_send(conn->path, &msg, 0, 0,
1207 nskb->data, nskb->len);
1da177e4
LT
1208 conn->prof.doios_single++;
1209 conn->prof.txlen += skb->len;
1210 conn->prof.tx_pending++;
1211 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1212 conn->prof.tx_max_pending = conn->prof.tx_pending;
1213 if (rc) {
1214 struct netiucv_priv *privptr;
1215 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1216 conn->prof.tx_pending--;
eebce385 1217 privptr = netdev_priv(conn->netdev);
1da177e4
LT
1218 if (privptr)
1219 privptr->stats.tx_errors++;
1220 if (copied)
1221 dev_kfree_skb(nskb);
1222 else {
1223 /**
1224 * Remove our headers. They get added
1225 * again on retransmit.
1226 */
1227 skb_pull(skb, NETIUCV_HDRLEN);
1228 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1229 }
1da177e4
LT
1230 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1231 } else {
1232 if (copied)
1233 dev_kfree_skb(skb);
63354797 1234 refcount_inc(&nskb->users);
1da177e4
LT
1235 skb_queue_tail(&conn->commit_queue, nskb);
1236 }
1237 }
1238
1239 return rc;
1240}
e82b0f2c 1241
eebce385 1242/*
1da177e4 1243 * Interface API for upper network layers
eebce385 1244 */
1da177e4
LT
1245
1246/**
1247 * Open an interface.
1248 * Called from generic network layer when ifconfig up is run.
1249 *
1250 * @param dev Pointer to interface struct.
1251 *
1252 * @return 0 on success, -ERRNO on failure. (Never fails.)
1253 */
eebce385
MS
1254static int netiucv_open(struct net_device *dev)
1255{
1256 struct netiucv_priv *priv = netdev_priv(dev);
1257
1258 fsm_event(priv->fsm, DEV_EVENT_START, dev);
1da177e4
LT
1259 return 0;
1260}
1261
1262/**
1263 * Close an interface.
1264 * Called from generic network layer when ifconfig down is run.
1265 *
1266 * @param dev Pointer to interface struct.
1267 *
1268 * @return 0 on success, -ERRNO on failure. (Never fails.)
1269 */
eebce385
MS
1270static int netiucv_close(struct net_device *dev)
1271{
1272 struct netiucv_priv *priv = netdev_priv(dev);
1273
1274 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1da177e4
LT
1275 return 0;
1276}
1277
1175b257
UB
1278static int netiucv_pm_prepare(struct device *dev)
1279{
1280 IUCV_DBF_TEXT(trace, 3, __func__);
1281 return 0;
1282}
1283
1284static void netiucv_pm_complete(struct device *dev)
1285{
1286 IUCV_DBF_TEXT(trace, 3, __func__);
1287 return;
1288}
1289
1290/**
1291 * netiucv_pm_freeze() - Freeze PM callback
1292 * @dev: netiucv device
1293 *
1294 * close open netiucv interfaces
1295 */
1296static int netiucv_pm_freeze(struct device *dev)
1297{
4f0076f7 1298 struct netiucv_priv *priv = dev_get_drvdata(dev);
1175b257
UB
1299 struct net_device *ndev = NULL;
1300 int rc = 0;
1301
1302 IUCV_DBF_TEXT(trace, 3, __func__);
1303 if (priv && priv->conn)
1304 ndev = priv->conn->netdev;
1305 if (!ndev)
1306 goto out;
1307 netif_device_detach(ndev);
1308 priv->pm_state = fsm_getstate(priv->fsm);
1309 rc = netiucv_close(ndev);
1310out:
1311 return rc;
1312}
1313
1314/**
1315 * netiucv_pm_restore_thaw() - Thaw and restore PM callback
1316 * @dev: netiucv device
1317 *
1318 * re-open netiucv interfaces closed during freeze
1319 */
1320static int netiucv_pm_restore_thaw(struct device *dev)
1321{
4f0076f7 1322 struct netiucv_priv *priv = dev_get_drvdata(dev);
1175b257
UB
1323 struct net_device *ndev = NULL;
1324 int rc = 0;
1325
1326 IUCV_DBF_TEXT(trace, 3, __func__);
1327 if (priv && priv->conn)
1328 ndev = priv->conn->netdev;
1329 if (!ndev)
1330 goto out;
1331 switch (priv->pm_state) {
1332 case DEV_STATE_RUNNING:
1333 case DEV_STATE_STARTWAIT:
1334 rc = netiucv_open(ndev);
1335 break;
1336 default:
1337 break;
1338 }
1339 netif_device_attach(ndev);
1340out:
1341 return rc;
1342}
1343
1da177e4
LT
1344/**
1345 * Start transmission of a packet.
1346 * Called from generic network device layer.
1347 *
1348 * @param skb Pointer to buffer containing the packet.
1349 * @param dev Pointer to interface struct.
1350 *
1351 * @return 0 if packet consumed, !0 if packet rejected.
1352 * Note: If we return !0, then the packet is free'd by
1353 * the generic network layer.
1354 */
1355static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1356{
eebce385
MS
1357 struct netiucv_priv *privptr = netdev_priv(dev);
1358 int rc;
1da177e4 1359
2a2cf6b1 1360 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1361 /**
1362 * Some sanity checks ...
1363 */
1364 if (skb == NULL) {
1da177e4
LT
1365 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1366 privptr->stats.tx_dropped++;
ec634fe3 1367 return NETDEV_TX_OK;
1da177e4
LT
1368 }
1369 if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1da177e4
LT
1370 IUCV_DBF_TEXT(data, 2,
1371 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1372 dev_kfree_skb(skb);
1373 privptr->stats.tx_dropped++;
ec634fe3 1374 return NETDEV_TX_OK;
1da177e4
LT
1375 }
1376
1377 /**
1378 * If connection is not running, try to restart it
e82b0f2c 1379 * and throw away packet.
1da177e4
LT
1380 */
1381 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1da177e4
LT
1382 dev_kfree_skb(skb);
1383 privptr->stats.tx_dropped++;
1384 privptr->stats.tx_errors++;
1385 privptr->stats.tx_carrier_errors++;
ec634fe3 1386 return NETDEV_TX_OK;
1da177e4
LT
1387 }
1388
1389 if (netiucv_test_and_set_busy(dev)) {
1390 IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
4e584d66 1391 return NETDEV_TX_BUSY;
1da177e4 1392 }
860e9538 1393 netif_trans_update(dev);
5b548140 1394 rc = netiucv_transmit_skb(privptr->conn, skb);
1da177e4 1395 netiucv_clear_busy(dev);
5b548140 1396 return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1da177e4
LT
1397}
1398
1399/**
eebce385
MS
1400 * netiucv_stats
1401 * @dev: Pointer to interface struct.
1da177e4 1402 *
eebce385 1403 * Returns interface statistics of a device.
1da177e4 1404 *
eebce385 1405 * Returns pointer to stats struct of this interface.
1da177e4 1406 */
eebce385 1407static struct net_device_stats *netiucv_stats (struct net_device * dev)
1da177e4 1408{
eebce385
MS
1409 struct netiucv_priv *priv = netdev_priv(dev);
1410
2a2cf6b1 1411 IUCV_DBF_TEXT(trace, 5, __func__);
eebce385 1412 return &priv->stats;
1da177e4
LT
1413}
1414
eebce385 1415/*
1da177e4 1416 * attributes in sysfs
eebce385 1417 */
1da177e4 1418
eebce385
MS
1419static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1420 char *buf)
1da177e4 1421{
dff59b64 1422 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1423
2a2cf6b1 1424 IUCV_DBF_TEXT(trace, 5, __func__);
08e3356c 1425 return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
1da177e4
LT
1426}
1427
08e3356c
UB
/**
 * netiucv_check_user - parse and validate a "userid[.userdata]" string
 * @buf: input as written to the sysfs attribute (may end with '\n')
 * @count: number of bytes in @buf
 * @username: output, 8 chars uppercased and blank-padded plus NUL (9 bytes)
 * @userdata: output, 16 chars blank-padded plus NUL (17 bytes),
 *            converted to EBCDIC
 *
 * Returns 0 on success, -EINVAL for overlong input or an invalid
 * character in the userid part.
 */
static int netiucv_check_user(const char *buf, size_t count, char *username,
			      char *userdata)
{
	const char *p;
	int i;

	/* Length limits: <= 8 chars userid, optional '.' plus <= 16 chars
	 * userdata (the constants include the trailing newline/NUL). */
	p = strchr(buf, '.');
	if ((p && ((count > 26) ||
		   ((p - buf) > 8) ||
		   (buf + count - p > 18))) ||
	    (!p && (count > 9))) {
		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
		return -EINVAL;
	}

	/* Userid: uppercase copy; only alphanumerics and '$' allowed. */
	for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
		if (isalnum(*p) || *p == '$') {
			username[i] = toupper(*p);
			continue;
		}
		if (*p == '\n')
			/* trailing lf, grr */
			break;
		IUCV_DBF_TEXT_(setup, 2,
			       "conn_write: invalid character %02x\n", *p);
		return -EINVAL;
	}
	/* Blank-pad to the fixed 8-character length. */
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	if (*p == '.') {
		/* Optional userdata part, uppercased and blank-padded
		 * (only when at least one character was given). */
		p++;
		for (i = 0; i < 16 && *p; i++, p++) {
			if (*p == '\n')
				break;
			userdata[i] = toupper(*p);
		}
		while (i > 0 && i < 16)
			userdata[i++] = ' ';
	} else
		memcpy(userdata, iucvMagic_ascii, 16);
	userdata[16] = '\0';
	/* IUCV works with EBCDIC strings. */
	ASCEBC(userdata, 16);

	return 0;
}
1475
/**
 * user_write - sysfs "user" store: set the peer of this connection
 * @dev: the pseudo device of the interface
 * @attr: the device attribute (unused)
 * @buf: "userid[.userdata]" string
 * @count: length of @buf
 *
 * Rejects a change while the interface is up (-EPERM) and a peer that
 * another netiucv interface already uses (-EEXIST).
 *
 * Returns @count on success or a negative errno.
 */
static ssize_t user_write(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->conn->netdev;
	char username[9];
	char userdata[17];
	int rc;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	rc = netiucv_check_user(buf, count, username, userdata);
	if (rc)
		return rc;

	if (memcmp(username, priv->conn->userid, 9) &&
	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
		/* username changed while the interface is active. */
		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
		return -EPERM;
	}
	/* Reject a peer already claimed by another netiucv interface. */
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) &&
		    !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
				"already exists\n", netiucv_printuser(cp));
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);
	memcpy(priv->conn->userid, username, 9);
	memcpy(priv->conn->userdata, userdata, 17);
	return count;
}
1512
1513static DEVICE_ATTR(user, 0644, user_show, user_write);
1514
eebce385
MS
1515static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1516 char *buf)
dff59b64
GKH
1517{
1518 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1519
2a2cf6b1 1520 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1521 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1522}
1523
eebce385
MS
/**
 * buffer_write - sysfs "buffer" store: set the maximum buffer size
 * @dev: the pseudo device of the interface
 * @attr: the device attribute (unused)
 * @buf: decimal/hex/octal number (kstrtouint base 0)
 * @count: length of @buf
 *
 * The new size must fit NETIUCV_BUFSIZE_MAX, must not undercut the
 * current MTU while the interface is running, and must at least hold
 * a 576-byte packet plus both headers.  When the interface is down,
 * the MTU is adjusted to match the new buffer size.
 *
 * Returns @count on success or -EINVAL.
 */
static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->conn->netdev;
	unsigned int bs1;
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (count >= 39)
		return -EINVAL;

	rc = kstrtouint(buf, 0, &bs1);

	if (rc == -EINVAL) {
		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %s\n",
			buf);
		return -EINVAL;
	}
	/* -ERANGE from kstrtouint means the value overflowed. */
	if ((rc == -ERANGE) || (bs1 > NETIUCV_BUFSIZE_MAX)) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too large\n",
			bs1);
		return -EINVAL;
	}
	/* While running, the buffer must still hold MTU + header + 2. */
	if ((ndev->flags & IFF_RUNNING) &&
	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}
	/* Absolute minimum: a 576-byte packet plus both headers. */
	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}

	priv->conn->max_buffsize = bs1;
	if (!(ndev->flags & IFF_RUNNING))
		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;

	return count;

}
1570
1571static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1572
eebce385
MS
1573static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1574 char *buf)
1da177e4 1575{
dff59b64 1576 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1577
2a2cf6b1 1578 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1579 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1580}
1581
1582static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1583
eebce385
MS
1584static ssize_t conn_fsm_show (struct device *dev,
1585 struct device_attribute *attr, char *buf)
1da177e4 1586{
dff59b64 1587 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1588
2a2cf6b1 1589 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1590 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1591}
1592
1593static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1594
eebce385
MS
1595static ssize_t maxmulti_show (struct device *dev,
1596 struct device_attribute *attr, char *buf)
1da177e4 1597{
dff59b64 1598 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1599
2a2cf6b1 1600 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1601 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1602}
1603
eebce385
MS
1604static ssize_t maxmulti_write (struct device *dev,
1605 struct device_attribute *attr,
1606 const char *buf, size_t count)
1da177e4 1607{
dff59b64 1608 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1609
2a2cf6b1 1610 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1611 priv->conn->prof.maxmulti = 0;
1612 return count;
1613}
1614
1615static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1616
eebce385
MS
1617static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1618 char *buf)
1da177e4 1619{
dff59b64 1620 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1621
2a2cf6b1 1622 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1623 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1624}
1625
eebce385
MS
1626static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1627 const char *buf, size_t count)
1da177e4 1628{
dff59b64 1629 struct netiucv_priv *priv = dev_get_drvdata(dev);
e82b0f2c 1630
2a2cf6b1 1631 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1632 priv->conn->prof.maxcqueue = 0;
1633 return count;
1634}
1635
1636static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1637
eebce385
MS
1638static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1639 char *buf)
1da177e4 1640{
dff59b64 1641 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1642
2a2cf6b1 1643 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1644 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1645}
1646
eebce385
MS
1647static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1648 const char *buf, size_t count)
1da177e4 1649{
dff59b64 1650 struct netiucv_priv *priv = dev_get_drvdata(dev);
e82b0f2c 1651
2a2cf6b1 1652 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1653 priv->conn->prof.doios_single = 0;
1654 return count;
1655}
1656
1657static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1658
eebce385
MS
1659static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1660 char *buf)
1da177e4 1661{
dff59b64 1662 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1663
2a2cf6b1 1664 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1665 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1666}
1667
eebce385
MS
1668static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1669 const char *buf, size_t count)
1da177e4 1670{
dff59b64 1671 struct netiucv_priv *priv = dev_get_drvdata(dev);
e82b0f2c 1672
2a2cf6b1 1673 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1674 priv->conn->prof.doios_multi = 0;
1675 return count;
1676}
1677
1678static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1679
eebce385
MS
1680static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1681 char *buf)
1da177e4 1682{
dff59b64 1683 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1684
2a2cf6b1 1685 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1686 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1687}
1688
eebce385
MS
1689static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1690 const char *buf, size_t count)
1da177e4 1691{
dff59b64 1692 struct netiucv_priv *priv = dev_get_drvdata(dev);
e82b0f2c 1693
2a2cf6b1 1694 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1695 priv->conn->prof.txlen = 0;
1696 return count;
1697}
1698
1699static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1700
eebce385
MS
1701static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1702 char *buf)
1da177e4 1703{
dff59b64 1704 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1705
2a2cf6b1 1706 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1707 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1708}
1709
eebce385
MS
1710static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1711 const char *buf, size_t count)
1da177e4 1712{
dff59b64 1713 struct netiucv_priv *priv = dev_get_drvdata(dev);
e82b0f2c 1714
2a2cf6b1 1715 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1716 priv->conn->prof.tx_time = 0;
1717 return count;
1718}
1719
1720static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1721
eebce385
MS
1722static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1723 char *buf)
1da177e4 1724{
dff59b64 1725 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1726
2a2cf6b1 1727 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1728 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1729}
1730
eebce385
MS
1731static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1732 const char *buf, size_t count)
1da177e4 1733{
dff59b64 1734 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1735
2a2cf6b1 1736 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1737 priv->conn->prof.tx_pending = 0;
1738 return count;
1739}
1740
1741static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1742
eebce385
MS
1743static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1744 char *buf)
1da177e4 1745{
dff59b64 1746 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1747
2a2cf6b1 1748 IUCV_DBF_TEXT(trace, 5, __func__);
1da177e4
LT
1749 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1750}
1751
eebce385
MS
1752static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1753 const char *buf, size_t count)
1da177e4 1754{
dff59b64 1755 struct netiucv_priv *priv = dev_get_drvdata(dev);
1da177e4 1756
2a2cf6b1 1757 IUCV_DBF_TEXT(trace, 4, __func__);
1da177e4
LT
1758 priv->conn->prof.tx_max_pending = 0;
1759 return count;
1760}
1761
1762static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1763
/* Per-device configuration attributes (peer and buffer size). */
static struct attribute *netiucv_attrs[] = {
	&dev_attr_buffer.attr,
	&dev_attr_user.attr,
	NULL,
};

static struct attribute_group netiucv_attr_group = {
	.attrs = netiucv_attrs,
};

/* Transmit statistics, exposed in the "stats" sysfs subdirectory. */
static struct attribute *netiucv_stat_attrs[] = {
	&dev_attr_device_fsm_state.attr,
	&dev_attr_connection_fsm_state.attr,
	&dev_attr_max_tx_buffer_used.attr,
	&dev_attr_max_chained_skbs.attr,
	&dev_attr_tx_single_write_ops.attr,
	&dev_attr_tx_multi_write_ops.attr,
	&dev_attr_netto_bytes.attr,
	&dev_attr_max_tx_io_time.attr,
	&dev_attr_tx_pending.attr,
	&dev_attr_tx_max_pending.attr,
	NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
	.name = "stats",
	.attrs = netiucv_stat_attrs,
};

/* All groups, installed at device_register() via dev->groups. */
static const struct attribute_group *netiucv_attr_groups[] = {
	&netiucv_stat_attr_group,
	&netiucv_attr_group,
	NULL,
};
1da177e4 1798
eebce385 1799static int netiucv_register_device(struct net_device *ndev)
1da177e4 1800{
eebce385 1801 struct netiucv_priv *priv = netdev_priv(ndev);
88abaab4 1802 struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1da177e4
LT
1803 int ret;
1804
2a2cf6b1 1805 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4
LT
1806
1807 if (dev) {
1bf5b285 1808 dev_set_name(dev, "net%s", ndev->name);
1da177e4
LT
1809 dev->bus = &iucv_bus;
1810 dev->parent = iucv_root;
0b945293 1811 dev->groups = netiucv_attr_groups;
1da177e4
LT
1812 /*
1813 * The release function could be called after the
1814 * module has been unloaded. It's _only_ task is to
1815 * free the struct. Therefore, we specify kfree()
1816 * directly here. (Probably a little bit obfuscating
1817 * but legitime ...).
1818 */
1819 dev->release = (void (*)(struct device *))kfree;
1820 dev->driver = &netiucv_driver;
1821 } else
1822 return -ENOMEM;
1823
1824 ret = device_register(dev);
c6304933
SO
1825 if (ret) {
1826 put_device(dev);
1da177e4 1827 return ret;
c6304933 1828 }
1da177e4 1829 priv->dev = dev;
dff59b64 1830 dev_set_drvdata(dev, priv);
1da177e4 1831 return 0;
1da177e4
LT
1832}
1833
eebce385 1834static void netiucv_unregister_device(struct device *dev)
1da177e4 1835{
2a2cf6b1 1836 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4
LT
1837 device_unregister(dev);
1838}
1839
1840/**
1841 * Allocate and initialize a new connection structure.
1842 * Add it to the list of netiucv connections;
1843 */
eebce385 1844static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
08e3356c
UB
1845 char *username,
1846 char *userdata)
eebce385
MS
1847{
1848 struct iucv_connection *conn;
1da177e4 1849
eebce385
MS
1850 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1851 if (!conn)
1852 goto out;
1853 skb_queue_head_init(&conn->collect_queue);
1854 skb_queue_head_init(&conn->commit_queue);
1855 spin_lock_init(&conn->collect_lock);
1856 conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1857 conn->netdev = dev;
1858
1859 conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1860 if (!conn->rx_buff)
1861 goto out_conn;
1862 conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1863 if (!conn->tx_buff)
1864 goto out_rx;
1865 conn->fsm = init_fsm("netiucvconn", conn_state_names,
1866 conn_event_names, NR_CONN_STATES,
1867 NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1868 GFP_KERNEL);
1869 if (!conn->fsm)
1870 goto out_tx;
1871
1872 fsm_settimer(conn->fsm, &conn->timer);
1873 fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1874
08e3356c
UB
1875 if (userdata)
1876 memcpy(conn->userdata, userdata, 17);
eebce385
MS
1877 if (username) {
1878 memcpy(conn->userid, username, 9);
1879 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1da177e4 1880 }
eebce385
MS
1881
1882 write_lock_bh(&iucv_connection_rwlock);
1883 list_add_tail(&conn->list, &iucv_connection_list);
1884 write_unlock_bh(&iucv_connection_rwlock);
1da177e4 1885 return conn;
eebce385
MS
1886
1887out_tx:
1888 kfree_skb(conn->tx_buff);
1889out_rx:
1890 kfree_skb(conn->rx_buff);
1891out_conn:
1892 kfree(conn);
1893out:
1894 return NULL;
1da177e4
LT
1895}
1896
1897/**
1898 * Release a connection structure and remove it from the
1899 * list of netiucv connections.
1900 */
eebce385 1901static void netiucv_remove_connection(struct iucv_connection *conn)
1da177e4 1902{
08e3356c 1903
2a2cf6b1 1904 IUCV_DBF_TEXT(trace, 3, __func__);
eebce385
MS
1905 write_lock_bh(&iucv_connection_rwlock);
1906 list_del_init(&conn->list);
1907 write_unlock_bh(&iucv_connection_rwlock);
0be4acec
UB
1908 fsm_deltimer(&conn->timer);
1909 netiucv_purge_skb_queue(&conn->collect_queue);
eebce385 1910 if (conn->path) {
08e3356c 1911 iucv_path_sever(conn->path, conn->userdata);
eebce385
MS
1912 kfree(conn->path);
1913 conn->path = NULL;
1da177e4 1914 }
0be4acec 1915 netiucv_purge_skb_queue(&conn->commit_queue);
eebce385
MS
1916 kfree_fsm(conn->fsm);
1917 kfree_skb(conn->rx_buff);
1918 kfree_skb(conn->tx_buff);
1da177e4
LT
1919}
1920
1921/**
1922 * Release everything of a net device.
1923 */
eebce385 1924static void netiucv_free_netdevice(struct net_device *dev)
1da177e4 1925{
eebce385 1926 struct netiucv_priv *privptr = netdev_priv(dev);
1da177e4 1927
2a2cf6b1 1928 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4
LT
1929
1930 if (!dev)
1931 return;
1932
1da177e4
LT
1933 if (privptr) {
1934 if (privptr->conn)
1935 netiucv_remove_connection(privptr->conn);
1936 if (privptr->fsm)
1937 kfree_fsm(privptr->fsm);
1938 privptr->conn = NULL; privptr->fsm = NULL;
1939 /* privptr gets freed by free_netdev() */
1940 }
1da177e4
LT
1941}
1942
1943/**
1944 * Initialize a net device. (Called from kernel in alloc_netdev())
1945 */
4edd73b5
FB
/* net_device callbacks wired into the generic network layer. */
static const struct net_device_ops netiucv_netdev_ops = {
	.ndo_open		= netiucv_open,
	.ndo_stop		= netiucv_close,
	.ndo_get_stats		= netiucv_stats,
	.ndo_start_xmit		= netiucv_tx,
};
1952
eebce385 1953static void netiucv_setup_netdevice(struct net_device *dev)
1da177e4 1954{
1da177e4 1955 dev->mtu = NETIUCV_MTU_DEFAULT;
46b3ef4c
JW
1956 dev->min_mtu = 576;
1957 dev->max_mtu = NETIUCV_MTU_MAX;
cd1997f6
SR
1958 dev->needs_free_netdev = true;
1959 dev->priv_destructor = netiucv_free_netdevice;
1da177e4
LT
1960 dev->hard_header_len = NETIUCV_HDRLEN;
1961 dev->addr_len = 0;
1962 dev->type = ARPHRD_SLIP;
1963 dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
1964 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
4edd73b5 1965 dev->netdev_ops = &netiucv_netdev_ops;
1da177e4
LT
1966}
1967
1968/**
1969 * Allocate and initialize everything of a net device.
1970 */
08e3356c 1971static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
1da177e4
LT
1972{
1973 struct netiucv_priv *privptr;
1974 struct net_device *dev;
1975
1976 dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
c835a677 1977 NET_NAME_UNKNOWN, netiucv_setup_netdevice);
1da177e4
LT
1978 if (!dev)
1979 return NULL;
aaf9522d 1980 rtnl_lock();
1d503563
UB
1981 if (dev_alloc_name(dev, dev->name) < 0)
1982 goto out_netdev;
1da177e4 1983
eebce385 1984 privptr = netdev_priv(dev);
1da177e4
LT
1985 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1986 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1987 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
eebce385
MS
1988 if (!privptr->fsm)
1989 goto out_netdev;
1990
08e3356c 1991 privptr->conn = netiucv_new_connection(dev, username, userdata);
1da177e4 1992 if (!privptr->conn) {
1da177e4 1993 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
eebce385 1994 goto out_fsm;
1da177e4
LT
1995 }
1996 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1da177e4 1997 return dev;
eebce385
MS
1998
1999out_fsm:
2000 kfree_fsm(privptr->fsm);
2001out_netdev:
aaf9522d 2002 rtnl_unlock();
eebce385
MS
2003 free_netdev(dev);
2004 return NULL;
1da177e4
LT
2005}
2006
36369569
GKH
2007static ssize_t connection_store(struct device_driver *drv, const char *buf,
2008 size_t count)
1da177e4 2009{
16a83b30 2010 char username[9];
08e3356c
UB
2011 char userdata[17];
2012 int rc;
1da177e4 2013 struct net_device *dev;
eebce385
MS
2014 struct netiucv_priv *priv;
2015 struct iucv_connection *cp;
1da177e4 2016
2a2cf6b1 2017 IUCV_DBF_TEXT(trace, 3, __func__);
08e3356c
UB
2018 rc = netiucv_check_user(buf, count, username, userdata);
2019 if (rc)
2020 return rc;
16a83b30 2021
eebce385
MS
2022 read_lock_bh(&iucv_connection_rwlock);
2023 list_for_each_entry(cp, &iucv_connection_list, list) {
08e3356c
UB
2024 if (!strncmp(username, cp->userid, 9) &&
2025 !strncmp(userdata, cp->userdata, 17)) {
eebce385 2026 read_unlock_bh(&iucv_connection_rwlock);
08e3356c
UB
2027 IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
2028 "already exists\n", netiucv_printuser(cp));
eebce385
MS
2029 return -EEXIST;
2030 }
16a83b30 2031 }
eebce385
MS
2032 read_unlock_bh(&iucv_connection_rwlock);
2033
08e3356c 2034 dev = netiucv_init_netdevice(username, userdata);
1da177e4 2035 if (!dev) {
1da177e4
LT
2036 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2037 return -ENODEV;
2038 }
2039
eebce385
MS
2040 rc = netiucv_register_device(dev);
2041 if (rc) {
aaf9522d 2042 rtnl_unlock();
1da177e4 2043 IUCV_DBF_TEXT_(setup, 2,
eebce385 2044 "ret %d from netiucv_register_device\n", rc);
1da177e4
LT
2045 goto out_free_ndev;
2046 }
2047
2048 /* sysfs magic */
eebce385
MS
2049 priv = netdev_priv(dev);
2050 SET_NETDEV_DEV(dev, priv->dev);
1da177e4 2051
aaf9522d
BP
2052 rc = register_netdevice(dev);
2053 rtnl_unlock();
eebce385
MS
2054 if (rc)
2055 goto out_unreg;
1da177e4 2056
08e3356c
UB
2057 dev_info(priv->dev, "The IUCV interface to %s has been established "
2058 "successfully\n",
2059 netiucv_printuser(priv->conn));
e82b0f2c 2060
1da177e4
LT
2061 return count;
2062
eebce385
MS
2063out_unreg:
2064 netiucv_unregister_device(priv->dev);
1da177e4 2065out_free_ndev:
1da177e4 2066 netiucv_free_netdevice(dev);
eebce385 2067 return rc;
1da177e4 2068}
36369569 2069static DRIVER_ATTR_WO(connection);
1da177e4 2070
36369569
GKH
2071static ssize_t remove_store(struct device_driver *drv, const char *buf,
2072 size_t count)
1da177e4 2073{
eebce385 2074 struct iucv_connection *cp;
1da177e4
LT
2075 struct net_device *ndev;
2076 struct netiucv_priv *priv;
2077 struct device *dev;
2078 char name[IFNAMSIZ];
eebce385 2079 const char *p;
1da177e4
LT
2080 int i;
2081
2a2cf6b1 2082 IUCV_DBF_TEXT(trace, 3, __func__);
1da177e4
LT
2083
2084 if (count >= IFNAMSIZ)
a419aef8 2085 count = IFNAMSIZ - 1;
1da177e4 2086
eebce385
MS
2087 for (i = 0, p = buf; i < count && *p; i++, p++) {
2088 if (*p == '\n' || *p == ' ')
1da177e4
LT
2089 /* trailing lf, grr */
2090 break;
eebce385 2091 name[i] = *p;
1da177e4
LT
2092 }
2093 name[i] = '\0';
2094
eebce385
MS
2095 read_lock_bh(&iucv_connection_rwlock);
2096 list_for_each_entry(cp, &iucv_connection_list, list) {
2097 ndev = cp->netdev;
2098 priv = netdev_priv(ndev);
1da177e4 2099 dev = priv->dev;
eebce385
MS
2100 if (strncmp(name, ndev->name, count))
2101 continue;
2102 read_unlock_bh(&iucv_connection_rwlock);
1da177e4 2103 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
8f7c502c
UB
2104 dev_warn(dev, "The IUCV device is connected"
2105 " to %s and cannot be removed\n",
2106 priv->conn->userid);
1da177e4 2107 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
f082bcae 2108 return -EPERM;
1da177e4
LT
2109 }
2110 unregister_netdev(ndev);
2111 netiucv_unregister_device(dev);
2112 return count;
2113 }
eebce385 2114 read_unlock_bh(&iucv_connection_rwlock);
1da177e4
LT
2115 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2116 return -EINVAL;
2117}
36369569 2118static DRIVER_ATTR_WO(remove);
1da177e4 2119
eebce385
MS
/* Driver-level sysfs attributes: "connection" (create) and "remove". */
static struct attribute * netiucv_drv_attrs[] = {
	&driver_attr_connection.attr,
	&driver_attr_remove.attr,
	NULL,
};

static struct attribute_group netiucv_drv_attr_group = {
	.attrs = netiucv_drv_attrs,
};

/* NULL-terminated group list; assigned to netiucv_driver.groups in init. */
static const struct attribute_group *netiucv_drv_attr_groups[] = {
	&netiucv_drv_attr_group,
	NULL,
};
2134
eebce385 2135static void netiucv_banner(void)
1da177e4 2136{
8f7c502c 2137 pr_info("driver initialized\n");
1da177e4
LT
2138}
2139
eebce385 2140static void __exit netiucv_exit(void)
1da177e4 2141{
eebce385
MS
2142 struct iucv_connection *cp;
2143 struct net_device *ndev;
2144 struct netiucv_priv *priv;
2145 struct device *dev;
2146
2a2cf6b1 2147 IUCV_DBF_TEXT(trace, 3, __func__);
eebce385
MS
2148 while (!list_empty(&iucv_connection_list)) {
2149 cp = list_entry(iucv_connection_list.next,
2150 struct iucv_connection, list);
eebce385
MS
2151 ndev = cp->netdev;
2152 priv = netdev_priv(ndev);
2153 dev = priv->dev;
1da177e4
LT
2154
2155 unregister_netdev(ndev);
2156 netiucv_unregister_device(dev);
2157 }
2158
1175b257 2159 device_unregister(netiucv_dev);
1da177e4 2160 driver_unregister(&netiucv_driver);
eebce385 2161 iucv_unregister(&netiucv_handler, 1);
1da177e4
LT
2162 iucv_unregister_dbf_views();
2163
8f7c502c 2164 pr_info("driver unloaded\n");
1da177e4
LT
2165 return;
2166}
2167
eebce385 2168static int __init netiucv_init(void)
1da177e4 2169{
eebce385 2170 int rc;
e82b0f2c 2171
eebce385
MS
2172 rc = iucv_register_dbf_views();
2173 if (rc)
2174 goto out;
2175 rc = iucv_register(&netiucv_handler, 1);
2176 if (rc)
2177 goto out_dbf;
2a2cf6b1 2178 IUCV_DBF_TEXT(trace, 3, __func__);
0a0a8310 2179 netiucv_driver.groups = netiucv_drv_attr_groups;
eebce385
MS
2180 rc = driver_register(&netiucv_driver);
2181 if (rc) {
eebce385
MS
2182 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2183 goto out_iucv;
1da177e4 2184 }
1175b257
UB
2185 /* establish dummy device */
2186 netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2187 if (!netiucv_dev) {
2188 rc = -ENOMEM;
2189 goto out_driver;
2190 }
2191 dev_set_name(netiucv_dev, "netiucv");
2192 netiucv_dev->bus = &iucv_bus;
2193 netiucv_dev->parent = iucv_root;
2194 netiucv_dev->release = (void (*)(struct device *))kfree;
2195 netiucv_dev->driver = &netiucv_driver;
2196 rc = device_register(netiucv_dev);
c6304933
SO
2197 if (rc) {
2198 put_device(netiucv_dev);
1175b257 2199 goto out_driver;
c6304933 2200 }
eebce385
MS
2201 netiucv_banner();
2202 return rc;
2203
1175b257
UB
2204out_driver:
2205 driver_unregister(&netiucv_driver);
eebce385
MS
2206out_iucv:
2207 iucv_unregister(&netiucv_handler, 1);
2208out_dbf:
2209 iucv_unregister_dbf_views();
2210out:
2211 return rc;
1da177e4 2212}
e82b0f2c 2213
1da177e4
LT
/* Module entry/exit points and license. */
module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");