qeth: add OSA concurrent hardware trap
/*
 * drivers/s390/net/qeth_core_main.c
 *
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *	      Frank Pavlic <fpavlic@de.ibm.com>,
 *	      Thomas Spatzier <tspat@de.ibm.com>,
 *	      Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/sysinfo.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
			8, 1, 8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG] = {"qeth_msg",
			8, 1, 128, 3, &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL] = {"qeth_control",
			8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

struct qeth_card_list_struct qeth_core_card_list;
EXPORT_SYMBOL_GPL(qeth_core_card_list);
struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);

static struct device *qeth_core_root_dev;
static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_send_control_data_cb(struct qeth_channel *,
			struct qeth_cmd_buffer *);
static int qeth_issue_next_read(struct qeth_card *);
static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);


static inline const char *qeth_get_cardname(struct qeth_card *card)
{
	if (card->info.guestlan) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Guest LAN QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Guest LAN Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Guest LAN QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Guest LAN QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return " OSN QDIO";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (card->info.guestlan) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "GuestLAN QDIO";
		case QETH_CARD_TYPE_IQD:
			return "GuestLAN Hiper";
		case QETH_CARD_TYPE_OSM:
			return "GuestLAN OSM";
		case QETH_CARD_TYPE_OSX:
			return "GuestLAN OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return "OSN";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
{
	return wait_event_interruptible(card->wait_q,
			qeth_threads_running(card, threads) == 0);
}
EXPORT_SYMBOL_GPL(qeth_wait_for_threads);

void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
			&card->qdio.in_buf_pool.entry_list, list) {
		list_del(&pool_entry->list);
	}
}
EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry;
	void *ptr;
	int i, j;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
		if (!pool_entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}
		for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
			ptr = (void *) __get_free_page(GFP_KERNEL);
			if (!ptr) {
				while (j > 0)
					free_page((unsigned long)
						pool_entry->elements[--j]);
				kfree(pool_entry);
				qeth_free_buffer_pool(card);
				return -ENOMEM;
			}
			pool_entry->elements[j] = ptr;
		}
		list_add(&pool_entry->init_list,
			&card->qdio.init_pool.entry_list);
	}
	return 0;
}

int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
	QETH_CARD_TEXT(card, 2, "realcbp");

	if ((card->state != CARD_STATE_DOWN) &&
	    (card->state != CARD_STATE_RECOVER))
		return -EPERM;

	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
	qeth_clear_working_pool_list(card);
	qeth_free_buffer_pool(card);
	card->qdio.in_buf_pool.buf_count = bufcnt;
	card->qdio.init_pool.buf_count = bufcnt;
	return qeth_alloc_buffer_pool(card);
}
EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);

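/*
 * Start the next read CCW on the read channel: grab a free command
 * buffer and (re)issue the read so the device can deliver further
 * control data. On failure the read/write problem flag is set and a
 * recovery is scheduled.
 */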
static int qeth_issue_next_read(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (card->read.state != CH_STATE_UP)
		return -EIO;
	iob = qeth_get_buffer(&card->read);
	if (!iob) {
		dev_warn(&card->gdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob "
			"available\n", dev_name(&card->gdev->dev));
		return -ENOMEM;
	}
	qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
			      (addr_t) iob, 0, 0);
	if (rc) {
		QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
			"rc=%i\n", dev_name(&card->gdev->dev), rc);
		atomic_set(&card->read.irq_pending, 0);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
		wake_up(&card->wait_q);
	}
	return rc;
}

static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
{
	struct qeth_reply *reply;

	reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
	if (reply) {
		atomic_set(&reply->refcnt, 1);
		atomic_set(&reply->received, 0);
		reply->card = card;
	};
	return reply;
}

static void qeth_get_reply(struct qeth_reply *reply)
{
	WARN_ON(atomic_read(&reply->refcnt) <= 0);
	atomic_inc(&reply->refcnt);
}

static void qeth_put_reply(struct qeth_reply *reply)
{
	WARN_ON(atomic_read(&reply->refcnt) <= 0);
	if (atomic_dec_and_test(&reply->refcnt))
		kfree(reply);
}

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
		struct qeth_card *card)
{
	char *ipa_name;
	int com = cmd->hdr.command;
	ipa_name = qeth_get_ipa_cmd_name(com);
	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned "
				"x%X \"%s\"\n",
				ipa_name, com, dev_name(&card->gdev->dev),
				QETH_CARD_IFNAME(card), rc,
				qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n",
				ipa_name, com, dev_name(&card->gdev->dev),
				QETH_CARD_IFNAME(card));
}

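/*
 * Inspect a received control buffer. For IPA replies the return code is
 * logged and the command is handed back to the caller; unsolicited
 * commands such as STOPLAN/STARTLAN are handled here directly (carrier
 * state, hwtrap flag, recovery) and NULL is returned for them.
 */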
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_ipa_cmd *cmd = NULL;

	QETH_CARD_TEXT(card, 5, "chkipad");
	if (IS_IPA(iob->data)) {
		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
		if (IS_IPA_REPLY(cmd)) {
			if (cmd->hdr.command != IPA_CMD_SETCCID &&
			    cmd->hdr.command != IPA_CMD_DELCCID &&
			    cmd->hdr.command != IPA_CMD_MODCCID &&
			    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
				qeth_issue_ipa_msg(cmd,
						cmd->hdr.return_code, card);
			return cmd;
		} else {
			switch (cmd->hdr.command) {
			case IPA_CMD_STOPLAN:
				dev_warn(&card->gdev->dev,
					"The link for interface %s on CHPID"
					" 0x%X failed\n",
					QETH_CARD_IFNAME(card),
					card->info.chpid);
				card->lan_online = 0;
				if (card->dev && netif_carrier_ok(card->dev))
					netif_carrier_off(card->dev);
				return NULL;
			case IPA_CMD_STARTLAN:
				dev_info(&card->gdev->dev,
					"The link for %s on CHPID 0x%X has"
					" been restored\n",
					QETH_CARD_IFNAME(card),
					card->info.chpid);
				netif_carrier_on(card->dev);
				card->lan_online = 1;
				if (card->info.hwtrap)
					card->info.hwtrap = 2;
				qeth_schedule_recovery(card);
				return NULL;
			case IPA_CMD_MODCCID:
				return cmd;
			case IPA_CMD_REGISTER_LOCAL_ADDR:
				QETH_CARD_TEXT(card, 3, "irla");
				break;
			case IPA_CMD_UNREGISTER_LOCAL_ADDR:
				QETH_CARD_TEXT(card, 3, "urla");
				break;
			default:
				QETH_DBF_MESSAGE(2, "Received data is IPA "
					"but not a reply!\n");
				break;
			}
		}
	}
	return cmd;
}

void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_reply *reply, *r;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
		qeth_get_reply(reply);
		reply->rc = -EIO;
		atomic_inc(&reply->received);
		list_del_init(&reply->list);
		wake_up(&reply->wait_q);
		qeth_put_reply(reply);
	}
	spin_unlock_irqrestore(&card->lock, flags);
	atomic_set(&card->write.irq_pending, 0);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);

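/*
 * Check a buffer for an IDX TERMINATE indication and report the cause
 * code. Returns 0 if the response is fine, -EPERM for the z/VM OSI layer
 * configuration error (cause 0xf6) and -EIO for other terminations.
 */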
static int qeth_check_idx_response(struct qeth_card *card,
	unsigned char *buffer)
{
	if (!buffer)
		return 0;

	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & 0xc0) == 0xc0) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE "
			"with cause code 0x%02x%s\n",
			buffer[4],
			((buffer[4] == 0x22) ?
			" -- try another portname" : ""));
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		if (buffer[4] == 0xf6) {
			dev_err(&card->gdev->dev,
			"The qeth device is not configured "
			"for the OSI layer required by z/VM\n");
			return -EPERM;
		}
		return -EIO;
	}
	return 0;
}

static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob,
		__u32 len)
{
	struct qeth_card *card;

	card = CARD_FROM_CDEV(channel->ccwdev);
	QETH_CARD_TEXT(card, 4, "setupccw");
	if (channel == &card->read)
		memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
	else
		memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
	channel->ccw.count = len;
	channel->ccw.cda = (__u32) __pa(iob);
}

static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
{
	__u8 index;

	QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff");
	index = channel->io_buf_no;
	do {
		if (channel->iob[index].state == BUF_STATE_FREE) {
			channel->iob[index].state = BUF_STATE_LOCKED;
			channel->io_buf_no = (channel->io_buf_no + 1) %
				QETH_CMD_BUFFER_NO;
			memset(channel->iob[index].data, 0, QETH_BUFSIZE);
			return channel->iob + index;
		}
		index = (index + 1) % QETH_CMD_BUFFER_NO;
	} while (index != channel->io_buf_no);

	return NULL;
}

void qeth_release_buffer(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	unsigned long flags;

	QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff");
	spin_lock_irqsave(&channel->iob_lock, flags);
	memset(iob->data, 0, QETH_BUFSIZE);
	iob->state = BUF_STATE_FREE;
	iob->callback = qeth_send_control_data_cb;
	iob->rc = 0;
	spin_unlock_irqrestore(&channel->iob_lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_release_buffer);

static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
{
	struct qeth_cmd_buffer *buffer = NULL;
	unsigned long flags;

	spin_lock_irqsave(&channel->iob_lock, flags);
	buffer = __qeth_get_buffer(channel);
	spin_unlock_irqrestore(&channel->iob_lock, flags);
	return buffer;
}

struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
{
	struct qeth_cmd_buffer *buffer;
	wait_event(channel->wait_q,
		   ((buffer = qeth_get_buffer(channel)) != NULL));
	return buffer;
}
EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);

void qeth_clear_cmd_buffers(struct qeth_channel *channel)
{
	int cnt;

	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
		qeth_release_buffer(channel, &channel->iob[cnt]);
	channel->buf_no = 0;
	channel->io_buf_no = 0;
}
EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);

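/*
 * Completion callback for control commands: validate the IDX response,
 * match the buffer against the pending replies on cmd_waiter_list, run
 * the reply callback and wake up the waiter, then release the buffer.
 */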
static void qeth_send_control_data_cb(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	struct qeth_reply *reply, *r;
	struct qeth_ipa_cmd *cmd;
	unsigned long flags;
	int keep_reply;
	int rc = 0;

	card = CARD_FROM_CDEV(channel->ccwdev);
	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
		/* fall through */
	default:
		goto out;
	}

	cmd = qeth_check_ipa_data(card, iob);
	if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
		goto out;
	/*in case of OSN : check if cmd is set */
	if (card->info.type == QETH_CARD_TYPE_OSN &&
	    cmd &&
	    cmd->hdr.command != IPA_CMD_STARTLAN &&
	    card->osn_info.assist_cb != NULL) {
		card->osn_info.assist_cb(card->dev, cmd);
		goto out;
	}

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
		if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
		    ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
			qeth_get_reply(reply);
			list_del_init(&reply->list);
			spin_unlock_irqrestore(&card->lock, flags);
			keep_reply = 0;
			if (reply->callback != NULL) {
				if (cmd) {
					reply->offset = (__u16)((char *)cmd -
							(char *)iob->data);
					keep_reply = reply->callback(card,
							reply,
							(unsigned long)cmd);
				} else
					keep_reply = reply->callback(card,
							reply,
							(unsigned long)iob);
			}
			if (cmd)
				reply->rc = (u16) cmd->hdr.return_code;
			else if (iob->rc)
				reply->rc = iob->rc;
			if (keep_reply) {
				spin_lock_irqsave(&card->lock, flags);
				list_add_tail(&reply->list,
					      &card->cmd_waiter_list);
				spin_unlock_irqrestore(&card->lock, flags);
			} else {
				atomic_inc(&reply->received);
				wake_up(&reply->wait_q);
			}
			qeth_put_reply(reply);
			goto out;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
		QETH_PDU_HEADER_SEQ_NO(iob->data),
		QETH_SEQ_NO_LENGTH);
	qeth_release_buffer(channel, iob);
}

static int qeth_setup_channel(struct qeth_channel *channel)
{
	int cnt;

	QETH_DBF_TEXT(SETUP, 2, "setupch");
	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
		channel->iob[cnt].data =
			kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
		if (channel->iob[cnt].data == NULL)
			break;
		channel->iob[cnt].state = BUF_STATE_FREE;
		channel->iob[cnt].channel = channel;
		channel->iob[cnt].callback = qeth_send_control_data_cb;
		channel->iob[cnt].rc = 0;
	}
	if (cnt < QETH_CMD_BUFFER_NO) {
		while (cnt-- > 0)
			kfree(channel->iob[cnt].data);
		return -ENOMEM;
	}
	channel->buf_no = 0;
	channel->io_buf_no = 0;
	atomic_set(&channel->irq_pending, 0);
	spin_lock_init(&channel->iob_lock);

	init_waitqueue_head(&channel->wait_q);
	return 0;
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread) ||
	    (card->thread_start_mask & thread)) {
		spin_unlock_irqrestore(&card->thread_mask_lock, flags);
		return -EPERM;
	}
	card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return 0;
}

void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);

void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_run_thread);

void qeth_schedule_recovery(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "startrec");
	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
}
EXPORT_SYMBOL_GPL(qeth_schedule_recovery);

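/*
 * Evaluate the channel and device status of an interrupt. Returns 1 if
 * the condition requires a recovery (channel checks, resetting event,
 * command reject, AFFE sense), 0 if it can be ignored.
 */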
static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
{
	int dstat, cstat;
	char *sense;
	struct qeth_card *card;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;
	card = CARD_FROM_CDEV(cdev);

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
			dev_name(&cdev->dev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
			16, 1, irb, 64, 1);
		return 1;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return 1;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return 1;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return 1;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return 1;
	}
	return 0;
}

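/*
 * Check whether the irb pointer itself carries an error (-EIO,
 * -ETIMEDOUT, ...). Returns 0 for a valid irb, otherwise the error code
 * after logging it; a timeout during RCD additionally marks the data
 * channel as down.
 */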
static long __qeth_check_irb_error(struct ccw_device *cdev,
		unsigned long intparm, struct irb *irb)
{
	struct qeth_card *card;

	card = CARD_FROM_CDEV(cdev);

	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
			dev_name(&cdev->dev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		break;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
		if (intparm == QETH_RCD_PARM) {
			if (card && (card->data.ccwdev == cdev)) {
				card->data.state = CH_STATE_DOWN;
				wake_up(&card->wait_q);
			}
		}
		break;
	default:
		QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
			dev_name(&cdev->dev), PTR_ERR(irb));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, " rc???");
	}
	return PTR_ERR(irb);
}

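/*
 * Interrupt handler for all three qeth channels. Determines the channel
 * from the ccw device, updates the channel state, reports unit checks
 * and channel errors, restarts the long-running read and processes
 * completed command buffers through their callbacks.
 */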
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *buffer;
	struct qeth_channel *channel;
	struct qeth_card *card;
	struct qeth_cmd_buffer *iob;
	__u8 index;

	if (__qeth_check_irb_error(cdev, intparm, irb))
		return;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	card = CARD_FROM_CDEV(cdev);
	if (!card)
		return;

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}
	atomic_set(&channel->irq_pending, 0);

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
		channel->state = CH_STATE_STOPPED;

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
		channel->state = CH_STATE_HALTED;

	/*let's wake up immediately on data channel*/
	if ((channel == &card->data) && (intparm != 0) &&
	    (intparm != QETH_RCD_PARM))
		goto out;

	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
		QETH_CARD_TEXT(card, 6, "clrchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}
	if (intparm == QETH_HALT_CHANNEL_PARM) {
		QETH_CARD_TEXT(card, 6, "hltchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}
	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "%s sense data available. cstat "
				"0x%X dstat 0x%X\n",
				dev_name(&channel->ccwdev->dev), cstat, dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}
		if (intparm == QETH_RCD_PARM) {
			channel->state = CH_STATE_DOWN;
			goto out;
		}
		rc = qeth_get_problem(cdev, irb);
		if (rc) {
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			goto out;
		}
	}

	if (intparm == QETH_RCD_PARM) {
		channel->state = CH_STATE_RCD_DONE;
		goto out;
	}
	if (intparm) {
		buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
		buffer->state = BUF_STATE_PROCESSED;
	}
	if (channel == &card->data)
		return;
	if (channel == &card->read &&
	    channel->state == CH_STATE_UP)
		qeth_issue_next_read(card);

	iob = channel->iob;
	index = channel->buf_no;
	while (iob[index].state == BUF_STATE_PROCESSED) {
		if (iob[index].callback != NULL)
			iob[index].callback(channel, iob + index);

		index = (index + 1) % QETH_CMD_BUFFER_NO;
	}
	channel->buf_no = index;
out:
	wake_up(&card->wait_q);
	return;
}

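/*
 * Return an outbound QDIO buffer to the EMPTY state: free the queued
 * skbs and any header cache objects and clear all buffer elements.
 */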
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf)
{
	int i;
	struct sk_buff *skb;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].flags & 0x40)
		atomic_dec(&queue->set_pci_flags_count);

	skb = skb_dequeue(&buf->skb_list);
	while (skb) {
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
		skb = skb_dequeue(&buf->skb_list);
	}
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
		if (buf->buffer->element[i].addr && buf->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
				buf->buffer->element[i].addr);
		buf->is_header[i] = 0;
		buf->buffer->element[i].length = 0;
		buf->buffer->element[i].addr = NULL;
		buf->buffer->element[i].flags = 0;
	}
	buf->buffer->element[15].flags = 0;
	buf->next_element_to_fill = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

void qeth_clear_qdio_buffers(struct qeth_card *card)
{
	int i, j;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i)
		if (card->qdio.out_qs[i]) {
			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
				qeth_clear_output_buffer(card->qdio.out_qs[i],
						&card->qdio.out_qs[i]->bufs[j]);
		}
}
EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	int i = 0;
	list_for_each_entry_safe(pool_entry, tmp,
			&card->qdio.init_pool.entry_list, init_list) {
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
			free_page((unsigned long)pool_entry->elements[i]);
		list_del(&pool_entry->init_list);
		kfree(pool_entry);
	}
}

static void qeth_free_qdio_buffers(struct qeth_card *card)
{
	int i, j;

	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)
		return;
	kfree(card->qdio.in_q);
	card->qdio.in_q = NULL;
	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
	if (card->qdio.out_qs) {
		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
				qeth_clear_output_buffer(card->qdio.out_qs[i],
						&card->qdio.out_qs[i]->bufs[j]);
			kfree(card->qdio.out_qs[i]);
		}
		kfree(card->qdio.out_qs);
		card->qdio.out_qs = NULL;
	}
}

static void qeth_clean_channel(struct qeth_channel *channel)
{
	int cnt;

	QETH_DBF_TEXT(SETUP, 2, "freech");
	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
		kfree(channel->iob[cnt].data);
}

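/*
 * Read the channel path descriptor and derive the number of outbound
 * queues (1 or 4) and the function level from it; switching the queue
 * count releases previously allocated QDIO buffers.
 */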
static void qeth_get_channel_path_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channelPath_dsc {
		u8 flags;
		u8 lsn;
		u8 desc;
		u8 chpid;
		u8 swla;
		u8 zeroes;
		u8 chla;
		u8 chpp;
	} *chp_dsc;

	QETH_DBF_TEXT(SETUP, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
	if (chp_dsc != NULL) {
		/* CHPP field bit 6 == 1 -> single queue */
		if ((chp_dsc->chpp & 0x02) == 0x02) {
			if ((atomic_read(&card->qdio.state) !=
				QETH_QDIO_UNINITIALIZED) &&
			    (card->qdio.no_out_queues == 4))
				/* change from 4 to 1 outbound queues */
				qeth_free_qdio_buffers(card);
			card->qdio.no_out_queues = 1;
			if (card->qdio.default_out_queue != 0)
				dev_info(&card->gdev->dev,
					"Priority Queueing not supported\n");
			card->qdio.default_out_queue = 0;
		} else {
			if ((atomic_read(&card->qdio.state) !=
				QETH_QDIO_UNINITIALIZED) &&
			    (card->qdio.no_out_queues == 1)) {
				/* change from 1 to 4 outbound queues */
				qeth_free_qdio_buffers(card);
				card->qdio.default_out_queue = 2;
			}
			card->qdio.no_out_queues = 4;
		}
		card->info.func_level = 0x4100 + chp_dsc->desc;
		kfree(chp_dsc);
	}
	QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
	return;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_DBF_TEXT(SETUP, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	/* inbound */
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (card->info.type == QETH_CARD_TYPE_IQD)
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_intial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
	card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
	card->options.fake_broadcast = 0;
	card->options.add_hhlen = DEFAULT_ADD_HHLEN;
	card->options.performance_stats = 0;
	card->options.rx_sg_cb = QETH_RX_SG_CB;
	card->options.isolation = ISOLATION_MODE_NONE;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
		kthread_run(card->discipline.recover, (void *) card,
				"qeth_recover");
}

static int qeth_setup_card(struct qeth_card *card)
{

	QETH_DBF_TEXT(SETUP, 2, "setupcrd");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->read.state = CH_STATE_DOWN;
	card->write.state = CH_STATE_DOWN;
	card->data.state = CH_STATE_DOWN;
	card->state = CARD_STATE_DOWN;
	card->lan_online = 0;
	card->read_or_write_problem = 0;
	card->dev = NULL;
	spin_lock_init(&card->vlanlock);
	spin_lock_init(&card->mclock);
	card->vlangrp = NULL;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->ip_lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	card->thread_start_mask = 0;
	card->thread_allowed_mask = 0;
	card->thread_running_mask = 0;
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->ip_list);
	INIT_LIST_HEAD(card->ip_tbd_list);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	/* initial options */
	qeth_set_intial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	card->ipato.enabled = 0;
	card->ipato.invert4 = 0;
	card->ipato.invert6 = 0;
	/* init QDIO stuff */
	qeth_init_qdio_info(card);
	return 0;
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(void)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
	if (!card->ip_tbd_list) {
		QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
		goto out_card;
	}
	if (qeth_setup_channel(&card->read))
		goto out_ip;
	if (qeth_setup_channel(&card->write))
		goto out_channel;
	card->options.layer2 = -1;
	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_channel:
	qeth_clean_channel(&card->read);
out_ip:
	kfree(card->ip_tbd_list);
out_card:
	kfree(card);
out:
	return NULL;
}

static int qeth_determine_card_type(struct qeth_card *card)
{
	int i = 0;

	QETH_DBF_TEXT(SETUP, 2, "detcdtyp");

	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
	while (known_devices[i][QETH_DEV_MODEL_IND]) {
		if ((CARD_RDEV(card)->id.dev_type ==
				known_devices[i][QETH_DEV_TYPE_IND]) &&
		    (CARD_RDEV(card)->id.dev_model ==
				known_devices[i][QETH_DEV_MODEL_IND])) {
			card->info.type = known_devices[i][QETH_DEV_MODEL_IND];
			card->qdio.no_out_queues =
				known_devices[i][QETH_QUEUE_NO_IND];
			card->info.is_multicast_different =
				known_devices[i][QETH_MULTICAST_IND];
			qeth_get_channel_path_desc(card);
			return 0;
		}
		i++;
	}
	card->info.type = QETH_CARD_TYPE_UNKNOWN;
	dev_err(&card->gdev->dev, "The adapter hardware is of an "
		"unknown type\n");
	return -ENOENT;
}

static int qeth_clear_channel(struct qeth_channel *channel)
{
	unsigned long flags;
	struct qeth_card *card;
	int rc;

	card = CARD_FROM_CDEV(channel->ccwdev);
	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_channel *channel)
{
	unsigned long flags;
	struct qeth_card *card;
	int rc;

	card = CARD_FROM_CDEV(channel->ccwdev);
	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(&card->read);
	rc2 = qeth_halt_channel(&card->write);
	rc3 = qeth_halt_channel(&card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(&card->read);
	rc2 = qeth_clear_channel(&card->write);
	rc3 = qeth_clear_channel(&card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (card->info.type == QETH_CARD_TYPE_IQD)
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		qdio_free(CARD_DDEV(card));
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	card->state = CARD_STATE_DOWN;
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);

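/*
 * Read the device's configuration data via the RCD command found in the
 * extended SenseID data. On success *buffer and *length describe a
 * freshly allocated copy of the data.
 */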
static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
		int *length)
{
	struct ciw *ciw;
	char *rcd_buf;
	int ret;
	struct qeth_channel *channel = &card->data;
	unsigned long flags;

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf)
		return -ENOMEM;

	channel->ccw.cmd_code = ciw->cmd;
	channel->ccw.cda = (__u32) __pa(rcd_buf);
	channel->ccw.count = ciw->count;
	channel->ccw.flags = CCW_FLAG_SLI;
	channel->state = CH_STATE_RCD;
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
				       QETH_RCD_PARM, LPM_ANYPATH, 0,
				       QETH_RCD_TIMEOUT);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	if (!ret)
		wait_event(card->wait_q,
			   (channel->state == CH_STATE_RCD_DONE ||
			    channel->state == CH_STATE_DOWN));
	if (channel->state == CH_STATE_DOWN)
		ret = -EIO;
	else
		channel->state = CH_STATE_DOWN;
	if (ret) {
		kfree(rcd_buf);
		*buffer = NULL;
		*length = 0;
	} else {
		*length = ciw->count;
		*buffer = rcd_buf;
	}
	return ret;
}

static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
{
	QETH_DBF_TEXT(SETUP, 2, "cfgunit");
	card->info.chpid = prcd[30];
	card->info.unit_addr2 = prcd[31];
	card->info.cula = prcd[63];
	card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
			       (prcd[0x11] == _ascebc['M']));
}

static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
{
	QETH_DBF_TEXT(SETUP, 2, "cfgblkt");

	if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && prcd[76] == 0xF5) {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	} else {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	}
}

static void qeth_init_tokens(struct qeth_card *card)
{
	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;
}

static void qeth_init_func_level(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSN:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
		void (*idx_reply_cb)(struct qeth_channel *,
			struct qeth_cmd_buffer *))
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;
	int rc;
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "idxanswr");
	card = CARD_FROM_CDEV(channel->ccwdev);
	iob = qeth_get_buffer(channel);
	iob->callback = idx_reply_cb;
	memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
	channel->ccw.count = QETH_BUFSIZE;
	channel->ccw.cda = (__u32) __pa(iob->data);

	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      &channel->ccw, (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc) {
		QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_UP, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_UP) {
		rc = -ETIME;
		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
		qeth_clear_cmd_buffers(channel);
	} else
		rc = 0;
	return rc;
}

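/*
 * Build and issue the IDX ACTIVATE request for the read or write channel
 * (sequence number, port number, tokens, function level, device
 * addresses) and wait for the channel to reach the ACTIVATING state
 * before fetching the answer.
 */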
static int qeth_idx_activate_channel(struct qeth_channel *channel,
		void (*idx_reply_cb)(struct qeth_channel *,
			struct qeth_cmd_buffer *))
{
	struct qeth_card *card;
	struct qeth_cmd_buffer *iob;
	unsigned long flags;
	__u16 temp;
	__u8 tmp;
	int rc;
	struct ccw_dev_id temp_devid;

	card = CARD_FROM_CDEV(channel->ccwdev);

	QETH_DBF_TEXT(SETUP, 2, "idxactch");

	iob = qeth_get_buffer(channel);
	iob->callback = idx_reply_cb;
	memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
	channel->ccw.count = IDX_ACTIVATE_SIZE;
	channel->ccw.cda = (__u32) __pa(iob->data);
	if (channel == &card->write) {
		memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
		card->seqno.trans_hdr++;
	} else {
		memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	}
	tmp = ((__u8)card->info.portno) | 0x80;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, sizeof(__u16));
	ccw_device_get_id(CARD_DDEV(card), &temp_devid);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);

	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      &channel->ccw, (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc) {
		QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
			rc);
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_ACTIVATING) {
		dev_warn(&channel->ccwdev->dev, "The qeth device driver"
			" failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
			dev_name(&channel->ccwdev->dev));
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
		qeth_clear_cmd_buffers(channel);
		return -ETIME;
	}
	return qeth_idx_activate_get_answer(channel, idx_reply_cb);
}

static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

static void qeth_idx_write_cb(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	__u16 temp;

	QETH_DBF_TEXT(SETUP, 2, "idxwrcb");

	if (channel->state == CH_STATE_DOWN) {
		channel->state = CH_STATE_ACTIVATING;
		goto out;
	}
	card = CARD_FROM_CDEV(channel->ccwdev);

	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
		if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
			dev_err(&card->write.ccwdev->dev,
				"The adapter is used exclusively by another "
				"host\n");
		else
			QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:"
				" negative reply\n",
				dev_name(&card->write.ccwdev->dev));
		goto out;
	}
	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: "
			"function level mismatch (sent: 0x%x, received: "
			"0x%x)\n", dev_name(&card->write.ccwdev->dev),
			card->info.func_level, temp);
		goto out;
	}
	channel->state = CH_STATE_UP;
out:
	qeth_release_buffer(channel, iob);
}

static void qeth_idx_read_cb(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	__u16 temp;

	QETH_DBF_TEXT(SETUP, 2, "idxrdcb");
	if (channel->state == CH_STATE_DOWN) {
		channel->state = CH_STATE_ACTIVATING;
		goto out;
	}

	card = CARD_FROM_CDEV(channel->ccwdev);
	if (qeth_check_idx_response(card, iob->data))
		goto out;

	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
		switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
		case QETH_IDX_ACT_ERR_EXCL:
			dev_err(&card->write.ccwdev->dev,
				"The adapter is used exclusively by another "
				"host\n");
			break;
		case QETH_IDX_ACT_ERR_AUTH:
		case QETH_IDX_ACT_ERR_AUTH_USER:
			dev_err(&card->read.ccwdev->dev,
				"Setting the device online failed because of "
				"insufficient authorization\n");
			break;
		default:
			QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
				" negative reply\n",
				dev_name(&card->read.ccwdev->dev));
		}
		QETH_CARD_TEXT_(card, 2, "idxread%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));
		goto out;
	}

	/*
	 * temporary fix for microcode bug
	 * to revert it,replace OR by AND
	 */
	if ((!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
	    (card->info.type == QETH_CARD_TYPE_OSD))
		card->info.portname_required = 1;

	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (temp != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
			"level mismatch (sent: 0x%x, received: 0x%x)\n",
			dev_name(&card->read.ccwdev->dev),
			card->info.func_level, temp);
		goto out;
	}
	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
	channel->state = CH_STATE_UP;
out:
	qeth_release_buffer(channel, iob);
}

void qeth_prepare_control_data(struct qeth_card *card, int len,
		struct qeth_cmd_buffer *iob)
{
	qeth_setup_ccw(&card->write, iob->data, len);
	iob->callback = qeth_release_buffer;

	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
	       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.trans_hdr++;
	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
	QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
}
EXPORT_SYMBOL_GPL(qeth_prepare_control_data);

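/*
 * Send a control command on the write channel and wait for its reply.
 * A qeth_reply is queued on cmd_waiter_list so that the IRQ path can
 * match the answer; only SETIP (IPv4) may sleep while waiting, all
 * other commands are polled until the timeout expires.
 */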
1663int qeth_send_control_data(struct qeth_card *card, int len,
1664 struct qeth_cmd_buffer *iob,
1665 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
1666 unsigned long),
1667 void *reply_param)
1668{
1669 int rc;
1670 unsigned long flags;
1671 struct qeth_reply *reply = NULL;
7834cd5a 1672 unsigned long timeout, event_timeout;
5b54e16f 1673 struct qeth_ipa_cmd *cmd;
4a71df50 1674
847a50fd 1675 QETH_CARD_TEXT(card, 2, "sendctl");
4a71df50 1676
908abbb5
UB
1677 if (card->read_or_write_problem) {
1678 qeth_release_buffer(iob->channel, iob);
1679 return -EIO;
1680 }
4a71df50
FB
1681 reply = qeth_alloc_reply(card);
1682 if (!reply) {
4a71df50
FB
1683 return -ENOMEM;
1684 }
1685 reply->callback = reply_cb;
1686 reply->param = reply_param;
1687 if (card->state == CARD_STATE_DOWN)
1688 reply->seqno = QETH_IDX_COMMAND_SEQNO;
1689 else
1690 reply->seqno = card->seqno.ipa++;
1691 init_waitqueue_head(&reply->wait_q);
1692 spin_lock_irqsave(&card->lock, flags);
1693 list_add_tail(&reply->list, &card->cmd_waiter_list);
1694 spin_unlock_irqrestore(&card->lock, flags);
d11ba0c4 1695 QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
4a71df50
FB
1696
1697 while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
1698 qeth_prepare_control_data(card, len, iob);
1699
1700 if (IS_IPA(iob->data))
7834cd5a 1701 event_timeout = QETH_IPA_TIMEOUT;
4a71df50 1702 else
7834cd5a
HC
1703 event_timeout = QETH_TIMEOUT;
1704 timeout = jiffies + event_timeout;
4a71df50 1705
847a50fd 1706 QETH_CARD_TEXT(card, 6, "noirqpnd");
4a71df50
FB
1707 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1708 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1709 (addr_t) iob, 0, 0);
1710 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1711 if (rc) {
74eacdb9
FB
1712 QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
1713 "ccw_device_start rc = %i\n",
1714 dev_name(&card->write.ccwdev->dev), rc);
847a50fd 1715 QETH_CARD_TEXT_(card, 2, " err%d", rc);
4a71df50
FB
1716 spin_lock_irqsave(&card->lock, flags);
1717 list_del_init(&reply->list);
1718 qeth_put_reply(reply);
1719 spin_unlock_irqrestore(&card->lock, flags);
1720 qeth_release_buffer(iob->channel, iob);
1721 atomic_set(&card->write.irq_pending, 0);
1722 wake_up(&card->wait_q);
1723 return rc;
1724 }
5b54e16f
FB
1725
1726 /* we have only one long running ipassist, since we can ensure
1727 process context of this command we can sleep */
1728 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1729 if ((cmd->hdr.command == IPA_CMD_SETIP) &&
1730 (cmd->hdr.prot_version == QETH_PROT_IPV4)) {
1731 if (!wait_event_timeout(reply->wait_q,
7834cd5a 1732 atomic_read(&reply->received), event_timeout))
5b54e16f
FB
1733 goto time_err;
1734 } else {
1735 while (!atomic_read(&reply->received)) {
1736 if (time_after(jiffies, timeout))
1737 goto time_err;
1738 cpu_relax();
1739 };
1740 }
1741
70919e23
UB
1742 if (reply->rc == -EIO)
1743 goto error;
5b54e16f
FB
1744 rc = reply->rc;
1745 qeth_put_reply(reply);
1746 return rc;
1747
1748time_err:
70919e23 1749 reply->rc = -ETIME;
5b54e16f
FB
1750 spin_lock_irqsave(&reply->card->lock, flags);
1751 list_del_init(&reply->list);
1752 spin_unlock_irqrestore(&reply->card->lock, flags);
5b54e16f 1753 atomic_inc(&reply->received);
70919e23 1754error:
908abbb5
UB
1755 atomic_set(&card->write.irq_pending, 0);
1756 qeth_release_buffer(iob->channel, iob);
1757 card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
4a71df50
FB
1758 rc = reply->rc;
1759 qeth_put_reply(reply);
1760 return rc;
1761}
1762EXPORT_SYMBOL_GPL(qeth_send_control_data);
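/*
 * Usage sketch (illustrative only; my_reply_cb is a hypothetical
 * callback name): a caller obtains a command buffer, copies one of
 * the MPC templates into it and passes a reply callback, roughly:
 *
 *	iob = qeth_wait_for_buffer(&card->write);
 *	memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
 *	rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
 *				    my_reply_cb, NULL);
 *
 * The callback is invoked once the matching reply has been received
 * and typically copies tokens out of the response data, as the
 * qeth_cm_*() and qeth_ulp_*() helpers below do.
 */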
1763
1764static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1765 unsigned long data)
1766{
1767 struct qeth_cmd_buffer *iob;
1768
d11ba0c4 1769 QETH_DBF_TEXT(SETUP, 2, "cmenblcb");
4a71df50
FB
1770
1771 iob = (struct qeth_cmd_buffer *) data;
1772 memcpy(&card->token.cm_filter_r,
1773 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
1774 QETH_MPC_TOKEN_LENGTH);
d11ba0c4 1775 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
4a71df50
FB
1776 return 0;
1777}
1778
1779static int qeth_cm_enable(struct qeth_card *card)
1780{
1781 int rc;
1782 struct qeth_cmd_buffer *iob;
1783
d11ba0c4 1784 QETH_DBF_TEXT(SETUP, 2, "cmenable");
4a71df50
FB
1785
1786 iob = qeth_wait_for_buffer(&card->write);
1787 memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
1788 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
1789 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1790 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
1791 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
1792
1793 rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
1794 qeth_cm_enable_cb, NULL);
1795 return rc;
1796}
1797
1798static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
1799 unsigned long data)
1800{
1801
1802 struct qeth_cmd_buffer *iob;
1803
d11ba0c4 1804 QETH_DBF_TEXT(SETUP, 2, "cmsetpcb");
4a71df50
FB
1805
1806 iob = (struct qeth_cmd_buffer *) data;
1807 memcpy(&card->token.cm_connection_r,
1808 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
1809 QETH_MPC_TOKEN_LENGTH);
d11ba0c4 1810 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
4a71df50
FB
1811 return 0;
1812}
1813
1814static int qeth_cm_setup(struct qeth_card *card)
1815{
1816 int rc;
1817 struct qeth_cmd_buffer *iob;
1818
d11ba0c4 1819 QETH_DBF_TEXT(SETUP, 2, "cmsetup");
4a71df50
FB
1820
1821 iob = qeth_wait_for_buffer(&card->write);
1822 memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
1823 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
1824 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1825 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
1826 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
1827 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
1828 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
1829 rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
1830 qeth_cm_setup_cb, NULL);
1831 return rc;
1832
1833}
1834
1835static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
1836{
1837 switch (card->info.type) {
1838 case QETH_CARD_TYPE_UNKNOWN:
1839 return 1500;
1840 case QETH_CARD_TYPE_IQD:
1841 return card->info.max_mtu;
5113fec0 1842 case QETH_CARD_TYPE_OSD:
4a71df50
FB
1843 switch (card->info.link_type) {
1844 case QETH_LINK_TYPE_HSTR:
1845 case QETH_LINK_TYPE_LANE_TR:
1846 return 2000;
1847 default:
1848 return 1492;
1849 }
5113fec0
UB
1850 case QETH_CARD_TYPE_OSM:
1851 case QETH_CARD_TYPE_OSX:
1852 return 1492;
4a71df50
FB
1853 default:
1854 return 1500;
1855 }
1856}
1857
4a71df50
FB
1858static inline int qeth_get_mtu_outof_framesize(int framesize)
1859{
1860 switch (framesize) {
1861 case 0x4000:
1862 return 8192;
1863 case 0x6000:
1864 return 16384;
1865 case 0xa000:
1866 return 32768;
1867 case 0xffff:
1868 return 57344;
1869 default:
1870 return 0;
1871 }
1872}
1873
1874static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
1875{
1876 switch (card->info.type) {
5113fec0
UB
1877 case QETH_CARD_TYPE_OSD:
1878 case QETH_CARD_TYPE_OSM:
1879 case QETH_CARD_TYPE_OSX:
4a71df50
FB
1880 case QETH_CARD_TYPE_IQD:
1881 return ((mtu >= 576) &&
9853b97b 1882 (mtu <= card->info.max_mtu));
4a71df50
FB
1883 case QETH_CARD_TYPE_OSN:
1884 case QETH_CARD_TYPE_UNKNOWN:
1885 default:
1886 return 1;
1887 }
1888}
1889
1890static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1891 unsigned long data)
1892{
1893
1894 __u16 mtu, framesize;
1895 __u16 len;
1896 __u8 link_type;
1897 struct qeth_cmd_buffer *iob;
1898
d11ba0c4 1899 QETH_DBF_TEXT(SETUP, 2, "ulpenacb");
4a71df50
FB
1900
1901 iob = (struct qeth_cmd_buffer *) data;
1902 memcpy(&card->token.ulp_filter_r,
1903 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
1904 QETH_MPC_TOKEN_LENGTH);
9853b97b 1905 if (card->info.type == QETH_CARD_TYPE_IQD) {
4a71df50
FB
1906 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
1907 mtu = qeth_get_mtu_outof_framesize(framesize);
1908 if (!mtu) {
1909 iob->rc = -EINVAL;
d11ba0c4 1910 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
4a71df50
FB
1911 return 0;
1912 }
8b2e18f6
UB
1913 if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) {
1914 /* frame size has changed */
1915 if (card->dev &&
1916 ((card->dev->mtu == card->info.initial_mtu) ||
1917 (card->dev->mtu > mtu)))
1918 card->dev->mtu = mtu;
1919 qeth_free_qdio_buffers(card);
1920 }
4a71df50 1921 card->info.initial_mtu = mtu;
8b2e18f6 1922 card->info.max_mtu = mtu;
4a71df50
FB
1923 card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
1924 } else {
1925 card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
9853b97b
FB
1926 card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(
1927 iob->data);
4a71df50
FB
1928 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1929 }
1930
1931 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
1932 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
1933 memcpy(&link_type,
1934 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
1935 card->info.link_type = link_type;
1936 } else
1937 card->info.link_type = 0;
01fc3e86 1938 QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type);
d11ba0c4 1939 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
4a71df50
FB
1940 return 0;
1941}
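/*
 * MTU handling in the callback above: for HiperSockets (IQD) the ULP
 * ENABLE response carries a frame-size code that is translated via
 * qeth_get_mtu_outof_framesize(), and both initial and maximum MTU are
 * set to that value; for all other card types the initial MTU comes
 * from qeth_get_initial_mtu_for_card() while the response only
 * supplies the maximum MTU.
 */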
1942
1943static int qeth_ulp_enable(struct qeth_card *card)
1944{
1945 int rc;
1946 char prot_type;
1947 struct qeth_cmd_buffer *iob;
1948
1949 /*FIXME: trace view callbacks*/
d11ba0c4 1950 QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
4a71df50
FB
1951
1952 iob = qeth_wait_for_buffer(&card->write);
1953 memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
1954
1955 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
1956 (__u8) card->info.portno;
1957 if (card->options.layer2)
1958 if (card->info.type == QETH_CARD_TYPE_OSN)
1959 prot_type = QETH_PROT_OSN2;
1960 else
1961 prot_type = QETH_PROT_LAYER2;
1962 else
1963 prot_type = QETH_PROT_TCPIP;
1964
1965 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
1966 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
1967 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
1968 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
1969 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
1970 memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
1971 card->info.portname, 9);
1972 rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
1973 qeth_ulp_enable_cb, NULL);
1974 return rc;
1975
1976}
1977
1978static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
1979 unsigned long data)
1980{
1981 struct qeth_cmd_buffer *iob;
65a1f898 1982 int rc = 0;
4a71df50 1983
d11ba0c4 1984 QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
4a71df50
FB
1985
1986 iob = (struct qeth_cmd_buffer *) data;
1987 memcpy(&card->token.ulp_connection_r,
1988 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
1989 QETH_MPC_TOKEN_LENGTH);
65a1f898
UB
1990 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
1991 3)) {
1992 QETH_DBF_TEXT(SETUP, 2, "olmlimit");
1993 dev_err(&card->gdev->dev, "A connection could not be "
1994 "established because of an OLM limit\n");
bbb822a8 1995 iob->rc = -EMLINK;
65a1f898 1996 }
d11ba0c4 1997 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
65a1f898 1998 return rc;
4a71df50
FB
1999}
2000
2001static int qeth_ulp_setup(struct qeth_card *card)
2002{
2003 int rc;
2004 __u16 temp;
2005 struct qeth_cmd_buffer *iob;
2006 struct ccw_dev_id dev_id;
2007
d11ba0c4 2008 QETH_DBF_TEXT(SETUP, 2, "ulpsetup");
4a71df50
FB
2009
2010 iob = qeth_wait_for_buffer(&card->write);
2011 memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
2012
2013 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2014 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2015 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2016 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2017 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2018 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2019
2020 ccw_device_get_id(CARD_DDEV(card), &dev_id);
2021 memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
2022 temp = (card->info.cula << 8) + card->info.unit_addr2;
2023 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2024 rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
2025 qeth_ulp_setup_cb, NULL);
2026 return rc;
2027}
2028
2029static int qeth_alloc_qdio_buffers(struct qeth_card *card)
2030{
2031 int i, j;
2032
d11ba0c4 2033 QETH_DBF_TEXT(SETUP, 2, "allcqdbf");
4a71df50
FB
2034
2035 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2036 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2037 return 0;
2038
2039 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
508b3c4f 2040 GFP_KERNEL);
4a71df50
FB
2041 if (!card->qdio.in_q)
2042 goto out_nomem;
d11ba0c4
PT
2043 QETH_DBF_TEXT(SETUP, 2, "inq");
2044 QETH_DBF_HEX(SETUP, 2, &card->qdio.in_q, sizeof(void *));
4a71df50
FB
2045 memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
2046 /* give inbound qeth_qdio_buffers their qdio_buffers */
2047 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
2048 card->qdio.in_q->bufs[i].buffer =
2049 &card->qdio.in_q->qdio_bufs[i];
2050 /* inbound buffer pool */
2051 if (qeth_alloc_buffer_pool(card))
2052 goto out_freeinq;
2053 /* outbound */
2054 card->qdio.out_qs =
2055 kmalloc(card->qdio.no_out_queues *
2056 sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
2057 if (!card->qdio.out_qs)
2058 goto out_freepool;
2059 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2060 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
508b3c4f 2061 GFP_KERNEL);
4a71df50
FB
2062 if (!card->qdio.out_qs[i])
2063 goto out_freeoutq;
d11ba0c4
PT
2064 QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
2065 QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
4a71df50
FB
2066 memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
2067 card->qdio.out_qs[i]->queue_no = i;
2068 /* give outbound qeth_qdio_buffers their qdio_buffers */
2069 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2070 card->qdio.out_qs[i]->bufs[j].buffer =
2071 &card->qdio.out_qs[i]->qdio_bufs[j];
2072 skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
2073 skb_list);
2074 lockdep_set_class(
2075 &card->qdio.out_qs[i]->bufs[j].skb_list.lock,
2076 &qdio_out_skb_queue_key);
2077 INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
2078 }
2079 }
2080 return 0;
2081
2082out_freeoutq:
2083 while (i > 0)
2084 kfree(card->qdio.out_qs[--i]);
2085 kfree(card->qdio.out_qs);
2086 card->qdio.out_qs = NULL;
2087out_freepool:
2088 qeth_free_buffer_pool(card);
2089out_freeinq:
2090 kfree(card->qdio.in_q);
2091 card->qdio.in_q = NULL;
2092out_nomem:
2093 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2094 return -ENOMEM;
2095}
2096
2097static void qeth_create_qib_param_field(struct qeth_card *card,
2098 char *param_field)
2099{
2100
2101 param_field[0] = _ascebc['P'];
2102 param_field[1] = _ascebc['C'];
2103 param_field[2] = _ascebc['I'];
2104 param_field[3] = _ascebc['T'];
2105 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2106 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2107 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
2108}
2109
2110static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2111 char *param_field)
2112{
2113 param_field[16] = _ascebc['B'];
2114 param_field[17] = _ascebc['L'];
2115 param_field[18] = _ascebc['K'];
2116 param_field[19] = _ascebc['T'];
2117 *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
2118 *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
2119 *((unsigned int *) (&param_field[28])) =
2120 card->info.blkt.inter_packet_jumbo;
2121}
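/*
 * Layout of the QIB parm area as filled in by the two helpers above:
 *
 *	bytes  0- 3	'PCIT' (in EBCDIC)
 *	bytes  4- 7	QETH_PCI_THRESHOLD_A(card)
 *	bytes  8-11	QETH_PCI_THRESHOLD_B(card)
 *	bytes 12-15	QETH_PCI_TIMER_VALUE(card)
 *	bytes 16-19	'BLKT' (in EBCDIC)
 *	bytes 20-23	blkt.time_total
 *	bytes 24-27	blkt.inter_packet
 *	bytes 28-31	blkt.inter_packet_jumbo
 */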
2122
2123static int qeth_qdio_activate(struct qeth_card *card)
2124{
d11ba0c4 2125 QETH_DBF_TEXT(SETUP, 3, "qdioact");
779e6e1c 2126 return qdio_activate(CARD_DDEV(card));
4a71df50
FB
2127}
2128
2129static int qeth_dm_act(struct qeth_card *card)
2130{
2131 int rc;
2132 struct qeth_cmd_buffer *iob;
2133
d11ba0c4 2134 QETH_DBF_TEXT(SETUP, 2, "dmact");
4a71df50
FB
2135
2136 iob = qeth_wait_for_buffer(&card->write);
2137 memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
2138
2139 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2140 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2141 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2142 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2143 rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
2144 return rc;
2145}
2146
2147static int qeth_mpc_initialize(struct qeth_card *card)
2148{
2149 int rc;
2150
d11ba0c4 2151 QETH_DBF_TEXT(SETUP, 2, "mpcinit");
4a71df50
FB
2152
2153 rc = qeth_issue_next_read(card);
2154 if (rc) {
d11ba0c4 2155 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
4a71df50
FB
2156 return rc;
2157 }
2158 rc = qeth_cm_enable(card);
2159 if (rc) {
d11ba0c4 2160 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
4a71df50
FB
2161 goto out_qdio;
2162 }
2163 rc = qeth_cm_setup(card);
2164 if (rc) {
d11ba0c4 2165 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
4a71df50
FB
2166 goto out_qdio;
2167 }
2168 rc = qeth_ulp_enable(card);
2169 if (rc) {
d11ba0c4 2170 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
4a71df50
FB
2171 goto out_qdio;
2172 }
2173 rc = qeth_ulp_setup(card);
2174 if (rc) {
d11ba0c4 2175 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
4a71df50
FB
2176 goto out_qdio;
2177 }
2178 rc = qeth_alloc_qdio_buffers(card);
2179 if (rc) {
d11ba0c4 2180 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
4a71df50
FB
2181 goto out_qdio;
2182 }
2183 rc = qeth_qdio_establish(card);
2184 if (rc) {
d11ba0c4 2185 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
4a71df50
FB
2186 qeth_free_qdio_buffers(card);
2187 goto out_qdio;
2188 }
2189 rc = qeth_qdio_activate(card);
2190 if (rc) {
d11ba0c4 2191 QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
4a71df50
FB
2192 goto out_qdio;
2193 }
2194 rc = qeth_dm_act(card);
2195 if (rc) {
d11ba0c4 2196 QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
4a71df50
FB
2197 goto out_qdio;
2198 }
2199
2200 return 0;
2201out_qdio:
2202 qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
2203 return rc;
2204}
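/*
 * qeth_mpc_initialize() above performs the control-channel bring-up in
 * a fixed order: start the first read, CM enable, CM setup, ULP
 * enable, ULP setup, allocate the QDIO buffers, establish and activate
 * the QDIO queues, and finally DM_ACT.  Any failure unwinds through
 * out_qdio, which clears the QDIO state again.
 */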
2205
2206static void qeth_print_status_with_portname(struct qeth_card *card)
2207{
2208 char dbf_text[15];
2209 int i;
2210
2211 sprintf(dbf_text, "%s", card->info.portname + 1);
2212 for (i = 0; i < 8; i++)
2213 dbf_text[i] =
2214 (char) _ebcasc[(__u8) dbf_text[i]];
2215 dbf_text[8] = 0;
74eacdb9 2216 dev_info(&card->gdev->dev, "Device is a%s card%s%s%s\n"
4a71df50 2217 "with link type %s (portname: %s)\n",
4a71df50
FB
2218 qeth_get_cardname(card),
2219 (card->info.mcl_level[0]) ? " (level: " : "",
2220 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2221 (card->info.mcl_level[0]) ? ")" : "",
2222 qeth_get_cardname_short(card),
2223 dbf_text);
2224
2225}
2226
2227static void qeth_print_status_no_portname(struct qeth_card *card)
2228{
2229 if (card->info.portname[0])
74eacdb9 2230 dev_info(&card->gdev->dev, "Device is a%s "
4a71df50
FB
2231 "card%s%s%s\nwith link type %s "
2232 "(no portname needed by interface).\n",
4a71df50
FB
2233 qeth_get_cardname(card),
2234 (card->info.mcl_level[0]) ? " (level: " : "",
2235 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2236 (card->info.mcl_level[0]) ? ")" : "",
2237 qeth_get_cardname_short(card));
2238 else
74eacdb9 2239 dev_info(&card->gdev->dev, "Device is a%s "
4a71df50 2240 "card%s%s%s\nwith link type %s.\n",
4a71df50
FB
2241 qeth_get_cardname(card),
2242 (card->info.mcl_level[0]) ? " (level: " : "",
2243 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2244 (card->info.mcl_level[0]) ? ")" : "",
2245 qeth_get_cardname_short(card));
2246}
2247
2248void qeth_print_status_message(struct qeth_card *card)
2249{
2250 switch (card->info.type) {
5113fec0
UB
2251 case QETH_CARD_TYPE_OSD:
2252 case QETH_CARD_TYPE_OSM:
2253 case QETH_CARD_TYPE_OSX:
4a71df50
FB
2254		/* VM will use a non-zero first character
2255		 * to indicate a HiperSockets-like reporting
2256		 * of the level; OSA sets the first character to zero
2257		 */
2258 if (!card->info.mcl_level[0]) {
2259 sprintf(card->info.mcl_level, "%02x%02x",
2260 card->info.mcl_level[2],
2261 card->info.mcl_level[3]);
2262
2263 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2264 break;
2265 }
2266 /* fallthrough */
2267 case QETH_CARD_TYPE_IQD:
906f1f07
KDW
2268 if ((card->info.guestlan) ||
2269 (card->info.mcl_level[0] & 0x80)) {
4a71df50
FB
2270 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2271 card->info.mcl_level[0]];
2272 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2273 card->info.mcl_level[1]];
2274 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2275 card->info.mcl_level[2]];
2276 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2277 card->info.mcl_level[3]];
2278 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2279 }
2280 break;
2281 default:
2282 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2283 }
2284 if (card->info.portname_required)
2285 qeth_print_status_with_portname(card);
2286 else
2287 qeth_print_status_no_portname(card);
2288}
2289EXPORT_SYMBOL_GPL(qeth_print_status_message);
2290
4a71df50
FB
2291static void qeth_initialize_working_pool_list(struct qeth_card *card)
2292{
2293 struct qeth_buffer_pool_entry *entry;
2294
847a50fd 2295 QETH_CARD_TEXT(card, 5, "inwrklst");
4a71df50
FB
2296
2297 list_for_each_entry(entry,
2298 &card->qdio.init_pool.entry_list, init_list) {
2299 qeth_put_buffer_pool_entry(card, entry);
2300 }
2301}
2302
2303static inline struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2304 struct qeth_card *card)
2305{
2306 struct list_head *plh;
2307 struct qeth_buffer_pool_entry *entry;
2308 int i, free;
2309 struct page *page;
2310
2311 if (list_empty(&card->qdio.in_buf_pool.entry_list))
2312 return NULL;
2313
2314 list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
2315 entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
2316 free = 1;
2317 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2318 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2319 free = 0;
2320 break;
2321 }
2322 }
2323 if (free) {
2324 list_del_init(&entry->list);
2325 return entry;
2326 }
2327 }
2328
2329 /* no free buffer in pool so take first one and swap pages */
2330 entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2331 struct qeth_buffer_pool_entry, list);
2332 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2333 if (page_count(virt_to_page(entry->elements[i])) > 1) {
508b3c4f 2334 page = alloc_page(GFP_ATOMIC);
4a71df50
FB
2335 if (!page) {
2336 return NULL;
2337 } else {
2338 free_page((unsigned long)entry->elements[i]);
2339 entry->elements[i] = page_address(page);
2340 if (card->options.performance_stats)
2341 card->perf_stats.sg_alloc_page_rx++;
2342 }
2343 }
2344 }
2345 list_del_init(&entry->list);
2346 return entry;
2347}
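/*
 * Note on the page_count() checks above: a count greater than one
 * means the page is still referenced elsewhere (presumably by an skb
 * that has not been freed yet), so the pool entry cannot simply be
 * reused; instead a fresh page is swapped in and sg_alloc_page_rx is
 * bumped when performance statistics are enabled.
 */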
2348
2349static int qeth_init_input_buffer(struct qeth_card *card,
2350 struct qeth_qdio_buffer *buf)
2351{
2352 struct qeth_buffer_pool_entry *pool_entry;
2353 int i;
2354
2355 pool_entry = qeth_find_free_buffer_pool_entry(card);
2356 if (!pool_entry)
2357 return 1;
2358
2359 /*
2360 * since the buffer is accessed only from the input_tasklet
2361 * there shouldn't be a need to synchronize; also, since we use
2362	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2363 * buffers
2364 */
4a71df50
FB
2365
2366 buf->pool_entry = pool_entry;
2367 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2368 buf->buffer->element[i].length = PAGE_SIZE;
2369 buf->buffer->element[i].addr = pool_entry->elements[i];
2370 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2371 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
2372 else
2373 buf->buffer->element[i].flags = 0;
2374 }
2375 return 0;
2376}
2377
2378int qeth_init_qdio_queues(struct qeth_card *card)
2379{
2380 int i, j;
2381 int rc;
2382
d11ba0c4 2383 QETH_DBF_TEXT(SETUP, 2, "initqdqs");
4a71df50
FB
2384
2385 /* inbound queue */
2386 memset(card->qdio.in_q->qdio_bufs, 0,
2387 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
2388 qeth_initialize_working_pool_list(card);
2389	/* give only as many buffers to hardware as we have buffer pool entries */
2390 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
2391 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
2392 card->qdio.in_q->next_buf_to_init =
2393 card->qdio.in_buf_pool.buf_count - 1;
2394 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
779e6e1c 2395 card->qdio.in_buf_pool.buf_count - 1);
4a71df50 2396 if (rc) {
d11ba0c4 2397 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
4a71df50
FB
2398 return rc;
2399 }
4a71df50
FB
2400 /* outbound queue */
2401 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2402 memset(card->qdio.out_qs[i]->qdio_bufs, 0,
2403 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
2404 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2405 qeth_clear_output_buffer(card->qdio.out_qs[i],
2406 &card->qdio.out_qs[i]->bufs[j]);
2407 }
2408 card->qdio.out_qs[i]->card = card;
2409 card->qdio.out_qs[i]->next_buf_to_fill = 0;
2410 card->qdio.out_qs[i]->do_pack = 0;
2411 atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
2412 atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
2413 atomic_set(&card->qdio.out_qs[i]->state,
2414 QETH_OUT_Q_UNLOCKED);
2415 }
2416 return 0;
2417}
2418EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
2419
2420static inline __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
2421{
2422 switch (link_type) {
2423 case QETH_LINK_TYPE_HSTR:
2424 return 2;
2425 default:
2426 return 1;
2427 }
2428}
2429
2430static void qeth_fill_ipacmd_header(struct qeth_card *card,
2431 struct qeth_ipa_cmd *cmd, __u8 command,
2432 enum qeth_prot_versions prot)
2433{
2434 memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
2435 cmd->hdr.command = command;
2436 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
2437 cmd->hdr.seqno = card->seqno.ipa;
2438 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
2439 cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
2440 if (card->options.layer2)
2441 cmd->hdr.prim_version_no = 2;
2442 else
2443 cmd->hdr.prim_version_no = 1;
2444 cmd->hdr.param_count = 1;
2445 cmd->hdr.prot_version = prot;
2446 cmd->hdr.ipa_supported = 0;
2447 cmd->hdr.ipa_enabled = 0;
2448}
2449
2450struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
2451 enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
2452{
2453 struct qeth_cmd_buffer *iob;
2454 struct qeth_ipa_cmd *cmd;
2455
2456 iob = qeth_wait_for_buffer(&card->write);
2457 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2458 qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
2459
2460 return iob;
2461}
2462EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
2463
2464void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2465 char prot_type)
2466{
2467 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
2468 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
2469 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
2470 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2471}
2472EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
2473
2474int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2475 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
2476 unsigned long),
2477 void *reply_param)
2478{
2479 int rc;
2480 char prot_type;
4a71df50 2481
847a50fd 2482 QETH_CARD_TEXT(card, 4, "sendipa");
4a71df50
FB
2483
2484 if (card->options.layer2)
2485 if (card->info.type == QETH_CARD_TYPE_OSN)
2486 prot_type = QETH_PROT_OSN2;
2487 else
2488 prot_type = QETH_PROT_LAYER2;
2489 else
2490 prot_type = QETH_PROT_TCPIP;
2491 qeth_prepare_ipa_cmd(card, iob, prot_type);
d11ba0c4
PT
2492 rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
2493 iob, reply_cb, reply_param);
908abbb5
UB
2494 if (rc == -ETIME) {
2495 qeth_clear_ipacmd_list(card);
2496 qeth_schedule_recovery(card);
2497 }
4a71df50
FB
2498 return rc;
2499}
2500EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
2501
4a71df50
FB
2502int qeth_send_startlan(struct qeth_card *card)
2503{
2504 int rc;
70919e23 2505 struct qeth_cmd_buffer *iob;
4a71df50 2506
d11ba0c4 2507 QETH_DBF_TEXT(SETUP, 2, "strtlan");
4a71df50 2508
70919e23
UB
2509 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
2510 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
4a71df50
FB
2511 return rc;
2512}
2513EXPORT_SYMBOL_GPL(qeth_send_startlan);
2514
4a71df50
FB
2515int qeth_default_setadapterparms_cb(struct qeth_card *card,
2516 struct qeth_reply *reply, unsigned long data)
2517{
2518 struct qeth_ipa_cmd *cmd;
2519
847a50fd 2520 QETH_CARD_TEXT(card, 4, "defadpcb");
4a71df50
FB
2521
2522 cmd = (struct qeth_ipa_cmd *) data;
2523 if (cmd->hdr.return_code == 0)
2524 cmd->hdr.return_code =
2525 cmd->data.setadapterparms.hdr.return_code;
2526 return 0;
2527}
2528EXPORT_SYMBOL_GPL(qeth_default_setadapterparms_cb);
2529
2530static int qeth_query_setadapterparms_cb(struct qeth_card *card,
2531 struct qeth_reply *reply, unsigned long data)
2532{
2533 struct qeth_ipa_cmd *cmd;
2534
847a50fd 2535 QETH_CARD_TEXT(card, 3, "quyadpcb");
4a71df50
FB
2536
2537 cmd = (struct qeth_ipa_cmd *) data;
5113fec0 2538 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
4a71df50
FB
2539 card->info.link_type =
2540 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
5113fec0
UB
2541 QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type);
2542 }
4a71df50
FB
2543 card->options.adp.supported_funcs =
2544 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
2545 return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
2546}
2547
2548struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
2549 __u32 command, __u32 cmdlen)
2550{
2551 struct qeth_cmd_buffer *iob;
2552 struct qeth_ipa_cmd *cmd;
2553
2554 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
2555 QETH_PROT_IPV4);
2556 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2557 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
2558 cmd->data.setadapterparms.hdr.command_code = command;
2559 cmd->data.setadapterparms.hdr.used_total = 1;
2560 cmd->data.setadapterparms.hdr.seq_no = 1;
2561
2562 return iob;
2563}
2564EXPORT_SYMBOL_GPL(qeth_get_adapter_cmd);
2565
2566int qeth_query_setadapterparms(struct qeth_card *card)
2567{
2568 int rc;
2569 struct qeth_cmd_buffer *iob;
2570
847a50fd 2571 QETH_CARD_TEXT(card, 3, "queryadp");
4a71df50
FB
2572 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
2573 sizeof(struct qeth_ipacmd_setadpparms));
2574 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
2575 return rc;
2576}
2577EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
2578
1da74b1c
FB
2579static int qeth_query_ipassists_cb(struct qeth_card *card,
2580 struct qeth_reply *reply, unsigned long data)
2581{
2582 struct qeth_ipa_cmd *cmd;
2583
2584 QETH_DBF_TEXT(SETUP, 2, "qipasscb");
2585
2586 cmd = (struct qeth_ipa_cmd *) data;
2587 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
2588 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
2589 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
2590 } else {
2591 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
2592 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
2593 }
2594 QETH_DBF_TEXT(SETUP, 2, "suppenbl");
2595 QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_supported);
2596 QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_enabled);
2597 return 0;
2598}
2599
2600int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
2601{
2602 int rc;
2603 struct qeth_cmd_buffer *iob;
2604
2605 QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
2606 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
2607 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
2608 return rc;
2609}
2610EXPORT_SYMBOL_GPL(qeth_query_ipassists);
2611
2612static int qeth_query_setdiagass_cb(struct qeth_card *card,
2613 struct qeth_reply *reply, unsigned long data)
2614{
2615 struct qeth_ipa_cmd *cmd;
2616 __u16 rc;
2617
2618 cmd = (struct qeth_ipa_cmd *)data;
2619 rc = cmd->hdr.return_code;
2620 if (rc)
2621 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
2622 else
2623 card->info.diagass_support = cmd->data.diagass.ext;
2624 return 0;
2625}
2626
2627static int qeth_query_setdiagass(struct qeth_card *card)
2628{
2629 struct qeth_cmd_buffer *iob;
2630 struct qeth_ipa_cmd *cmd;
2631
2632 QETH_DBF_TEXT(SETUP, 2, "qdiagass");
2633 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
2634 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2635 cmd->data.diagass.subcmd_len = 16;
2636 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
2637 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
2638}
2639
2640static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
2641{
2642 unsigned long info = get_zeroed_page(GFP_KERNEL);
2643 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
2644 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
2645 struct ccw_dev_id ccwid;
2646 int level, rc;
2647
2648 tid->chpid = card->info.chpid;
2649 ccw_device_get_id(CARD_RDEV(card), &ccwid);
2650 tid->ssid = ccwid.ssid;
2651 tid->devno = ccwid.devno;
2652 if (!info)
2653 return;
2654
2655 rc = stsi(NULL, 0, 0, 0);
2656 if (rc == -ENOSYS)
2657 level = rc;
2658 else
2659 level = (((unsigned int) rc) >> 28);
2660
2661 if ((level >= 2) && (stsi(info222, 2, 2, 2) != -ENOSYS))
2662 tid->lparnr = info222->lpar_number;
2663
2664 if ((level >= 3) && (stsi(info322, 3, 2, 2) != -ENOSYS)) {
2665 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
2666 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
2667 }
2668 free_page(info);
2669 return;
2670}
2671
2672static int qeth_hw_trap_cb(struct qeth_card *card,
2673 struct qeth_reply *reply, unsigned long data)
2674{
2675 struct qeth_ipa_cmd *cmd;
2676 __u16 rc;
2677
2678 cmd = (struct qeth_ipa_cmd *)data;
2679 rc = cmd->hdr.return_code;
2680 if (rc)
2681 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
2682 return 0;
2683}
2684
2685int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
2686{
2687 struct qeth_cmd_buffer *iob;
2688 struct qeth_ipa_cmd *cmd;
2689
2690 QETH_DBF_TEXT(SETUP, 2, "diagtrap");
2691 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
2692 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2693 cmd->data.diagass.subcmd_len = 80;
2694 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
2695 cmd->data.diagass.type = 1;
2696 cmd->data.diagass.action = action;
2697 switch (action) {
2698 case QETH_DIAGS_TRAP_ARM:
2699 cmd->data.diagass.options = 0x0003;
2700 cmd->data.diagass.ext = 0x00010000 +
2701 sizeof(struct qeth_trap_id);
2702 qeth_get_trap_id(card,
2703 (struct qeth_trap_id *)cmd->data.diagass.cdata);
2704 break;
2705 case QETH_DIAGS_TRAP_DISARM:
2706 cmd->data.diagass.options = 0x0001;
2707 break;
2708 case QETH_DIAGS_TRAP_CAPTURE:
2709 break;
2710 }
2711 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
2712}
2713EXPORT_SYMBOL_GPL(qeth_hw_trap);
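/*
 * Usage sketch (illustrative; the actual call sites are expected to
 * live in the layer 2 / layer 3 discipline code, not in this file):
 * once the SET_DIAG_ASS query has reported trap support, the trap can
 * be armed after the device comes online and disarmed again before it
 * goes offline:
 *
 *	qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM);
 *	...
 *	qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
 *
 * Arming ships the trap id gathered by qeth_get_trap_id() (CHPID,
 * subchannel set and device number, LPAR number and VM guest name) to
 * the adapter; QETH_DIAGS_TRAP_CAPTURE issues the command without
 * extra options.
 */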
2714
76b11f8e
UB
2715int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
2716 unsigned int qdio_error, const char *dbftext)
4a71df50 2717{
779e6e1c 2718 if (qdio_error) {
847a50fd 2719 QETH_CARD_TEXT(card, 2, dbftext);
38593d01 2720 QETH_CARD_TEXT_(card, 2, " F15=%02X",
4a71df50 2721 buf->element[15].flags & 0xff);
38593d01 2722 QETH_CARD_TEXT_(card, 2, " F14=%02X",
4a71df50 2723 buf->element[14].flags & 0xff);
38593d01 2724 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
76b11f8e
UB
2725 if ((buf->element[15].flags & 0xff) == 0x12) {
2726 card->stats.rx_dropped++;
2727 return 0;
2728 } else
2729 return 1;
4a71df50
FB
2730 }
2731 return 0;
2732}
2733EXPORT_SYMBOL_GPL(qeth_check_qdio_errors);
2734
2735void qeth_queue_input_buffer(struct qeth_card *card, int index)
2736{
2737 struct qeth_qdio_q *queue = card->qdio.in_q;
2738 int count;
2739 int i;
2740 int rc;
2741 int newcount = 0;
2742
4a71df50
FB
2743 count = (index < queue->next_buf_to_init)?
2744 card->qdio.in_buf_pool.buf_count -
2745 (queue->next_buf_to_init - index) :
2746 card->qdio.in_buf_pool.buf_count -
2747 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
2748 /* only requeue at a certain threshold to avoid SIGAs */
2749 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
2750 for (i = queue->next_buf_to_init;
2751 i < queue->next_buf_to_init + count; ++i) {
2752 if (qeth_init_input_buffer(card,
2753 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
2754 break;
2755 } else {
2756 newcount++;
2757 }
2758 }
2759
2760 if (newcount < count) {
2761			/* we are in a memory shortage, so we switch back to
2762			   traditional skb allocation and drop packets */
4a71df50
FB
2763 atomic_set(&card->force_alloc_skb, 3);
2764 count = newcount;
2765 } else {
4a71df50
FB
2766 atomic_add_unless(&card->force_alloc_skb, -1, 0);
2767 }
2768
2769 /*
2770		 * according to the old code, requeueing all 128 buffers at
2771		 * once should be avoided in order to benefit from PCI avoidance.
2772 * this function keeps at least one buffer (the buffer at
2773 * 'index') un-requeued -> this buffer is the first buffer that
2774 * will be requeued the next time
2775 */
2776 if (card->options.performance_stats) {
2777 card->perf_stats.inbound_do_qdio_cnt++;
2778 card->perf_stats.inbound_do_qdio_start_time =
2779 qeth_get_micros();
2780 }
779e6e1c
JG
2781 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
2782 queue->next_buf_to_init, count);
4a71df50
FB
2783 if (card->options.performance_stats)
2784 card->perf_stats.inbound_do_qdio_time +=
2785 qeth_get_micros() -
2786 card->perf_stats.inbound_do_qdio_start_time;
2787 if (rc) {
74eacdb9
FB
2788 dev_warn(&card->gdev->dev,
2789 "QDIO reported an error, rc=%i\n", rc);
847a50fd 2790 QETH_CARD_TEXT(card, 2, "qinberr");
4a71df50
FB
2791 }
2792 queue->next_buf_to_init = (queue->next_buf_to_init + count) %
2793 QDIO_MAX_BUFFERS_PER_Q;
2794 }
2795}
2796EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
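/*
 * Worked example for the count computation above (values are purely
 * illustrative): with in_buf_pool.buf_count == 64, next_buf_to_init
 * == 100 and index == 40, the first branch applies and count becomes
 * 64 - (100 - 40) == 4; buffers are only handed back to the hardware
 * once count reaches QETH_IN_BUF_REQUEUE_THRESHOLD(card), which keeps
 * the number of SIGAs down.
 */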
2797
2798static int qeth_handle_send_error(struct qeth_card *card,
779e6e1c 2799 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
4a71df50
FB
2800{
2801 int sbalf15 = buffer->buffer->element[15].flags & 0xff;
4a71df50 2802
847a50fd 2803 QETH_CARD_TEXT(card, 6, "hdsnderr");
58490f18
KDW
2804 if (card->info.type == QETH_CARD_TYPE_IQD) {
2805 if (sbalf15 == 0) {
2806 qdio_err = 0;
2807 } else {
2808 qdio_err = 1;
2809 }
2810 }
76b11f8e 2811 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
d303b6fd
JG
2812
2813 if (!qdio_err)
4a71df50 2814 return QETH_SEND_ERROR_NONE;
d303b6fd
JG
2815
2816 if ((sbalf15 >= 15) && (sbalf15 <= 31))
2817 return QETH_SEND_ERROR_RETRY;
2818
847a50fd
CO
2819 QETH_CARD_TEXT(card, 1, "lnkfail");
2820 QETH_CARD_TEXT_(card, 1, "%04x %02x",
d303b6fd
JG
2821 (u16)qdio_err, (u8)sbalf15);
2822 return QETH_SEND_ERROR_LINK_FAILURE;
4a71df50
FB
2823}
2824
2825/*
2826 * Switches to packing state if the number of used buffers on a queue
2827 * reaches a certain limit.
2828 */
2829static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2830{
2831 if (!queue->do_pack) {
2832 if (atomic_read(&queue->used_buffers)
2833 >= QETH_HIGH_WATERMARK_PACK){
2834 /* switch non-PACKING -> PACKING */
847a50fd 2835 QETH_CARD_TEXT(queue->card, 6, "np->pack");
4a71df50
FB
2836 if (queue->card->options.performance_stats)
2837 queue->card->perf_stats.sc_dp_p++;
2838 queue->do_pack = 1;
2839 }
2840 }
2841}
2842
2843/*
2844 * Switches from packing to non-packing mode. If there is a packing
2845 * buffer on the queue this buffer will be prepared to be flushed.
2846 * In that case 1 is returned to inform the caller. If no buffer
2847 * has to be flushed, zero is returned.
2848 */
2849static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2850{
2851 struct qeth_qdio_out_buffer *buffer;
2852 int flush_count = 0;
2853
2854 if (queue->do_pack) {
2855 if (atomic_read(&queue->used_buffers)
2856 <= QETH_LOW_WATERMARK_PACK) {
2857 /* switch PACKING -> non-PACKING */
847a50fd 2858 QETH_CARD_TEXT(queue->card, 6, "pack->np");
4a71df50
FB
2859 if (queue->card->options.performance_stats)
2860 queue->card->perf_stats.sc_p_dp++;
2861 queue->do_pack = 0;
2862 /* flush packing buffers */
2863 buffer = &queue->bufs[queue->next_buf_to_fill];
2864 if ((atomic_read(&buffer->state) ==
2865 QETH_QDIO_BUF_EMPTY) &&
2866 (buffer->next_element_to_fill > 0)) {
2867 atomic_set(&buffer->state,
2868 QETH_QDIO_BUF_PRIMED);
2869 flush_count++;
2870 queue->next_buf_to_fill =
2871 (queue->next_buf_to_fill + 1) %
2872 QDIO_MAX_BUFFERS_PER_Q;
2873 }
2874 }
2875 }
2876 return flush_count;
2877}
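/*
 * The two helpers above form a simple hysteresis: packing is switched
 * on once used_buffers reaches QETH_HIGH_WATERMARK_PACK and switched
 * off again only after it has dropped to QETH_LOW_WATERMARK_PACK, so
 * the queue does not flip between the modes on every small change in
 * load.
 */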
2878
2879/*
2880 * Called to flush a packing buffer if no more pci flags are on the queue.
2881 * Checks if there is a packing buffer and prepares it to be flushed.
2882 * In that case returns 1, otherwise zero.
2883 */
2884static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2885{
2886 struct qeth_qdio_out_buffer *buffer;
2887
2888 buffer = &queue->bufs[queue->next_buf_to_fill];
2889 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
2890 (buffer->next_element_to_fill > 0)) {
2891 /* it's a packing buffer */
2892 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
2893 queue->next_buf_to_fill =
2894 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
2895 return 1;
2896 }
2897 return 0;
2898}
2899
779e6e1c
JG
2900static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2901 int count)
4a71df50
FB
2902{
2903 struct qeth_qdio_out_buffer *buf;
2904 int rc;
2905 int i;
2906 unsigned int qdio_flags;
2907
4a71df50
FB
2908 for (i = index; i < index + count; ++i) {
2909 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2910 buf->buffer->element[buf->next_element_to_fill - 1].flags |=
2911 SBAL_FLAGS_LAST_ENTRY;
2912
2913 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
2914 continue;
2915
2916 if (!queue->do_pack) {
2917 if ((atomic_read(&queue->used_buffers) >=
2918 (QETH_HIGH_WATERMARK_PACK -
2919 QETH_WATERMARK_PACK_FUZZ)) &&
2920 !atomic_read(&queue->set_pci_flags_count)) {
2921 /* it's likely that we'll go to packing
2922 * mode soon */
2923 atomic_inc(&queue->set_pci_flags_count);
2924 buf->buffer->element[0].flags |= 0x40;
2925 }
2926 } else {
2927 if (!atomic_read(&queue->set_pci_flags_count)) {
2928 /*
2929 * there's no outstanding PCI any more, so we
2930			 * have to request a PCI to be sure the PCI
2931			 * will wake at some time in the future; then we
2932 * can flush packed buffers that might still be
2933 * hanging around, which can happen if no
2934 * further send was requested by the stack
2935 */
2936 atomic_inc(&queue->set_pci_flags_count);
2937 buf->buffer->element[0].flags |= 0x40;
2938 }
2939 }
2940 }
2941
2942 queue->card->dev->trans_start = jiffies;
2943 if (queue->card->options.performance_stats) {
2944 queue->card->perf_stats.outbound_do_qdio_cnt++;
2945 queue->card->perf_stats.outbound_do_qdio_start_time =
2946 qeth_get_micros();
2947 }
2948 qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
4a71df50
FB
2949 if (atomic_read(&queue->set_pci_flags_count))
2950 qdio_flags |= QDIO_FLAG_PCI_OUT;
2951 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
779e6e1c 2952 queue->queue_no, index, count);
4a71df50
FB
2953 if (queue->card->options.performance_stats)
2954 queue->card->perf_stats.outbound_do_qdio_time +=
2955 qeth_get_micros() -
2956 queue->card->perf_stats.outbound_do_qdio_start_time;
aa3a41d0 2957 atomic_add(count, &queue->used_buffers);
4a71df50 2958 if (rc) {
d303b6fd
JG
2959 queue->card->stats.tx_errors += count;
2960 /* ignore temporary SIGA errors without busy condition */
2961 if (rc == QDIO_ERROR_SIGA_TARGET)
2962 return;
847a50fd
CO
2963 QETH_CARD_TEXT(queue->card, 2, "flushbuf");
2964 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
d303b6fd 2965
4a71df50
FB
2966		/* this must not happen under normal circumstances. if it
2967		 * happens, something is really wrong -> recover */
2968 qeth_schedule_recovery(queue->card);
2969 return;
2970 }
4a71df50
FB
2971 if (queue->card->options.performance_stats)
2972 queue->card->perf_stats.bufs_sent += count;
2973}
2974
2975static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2976{
2977 int index;
2978 int flush_cnt = 0;
2979 int q_was_packing = 0;
2980
2981 /*
2982	 * check if we have to switch to non-packing mode or if
2983 * we have to get a pci flag out on the queue
2984 */
2985 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
2986 !atomic_read(&queue->set_pci_flags_count)) {
2987 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
2988 QETH_OUT_Q_UNLOCKED) {
2989 /*
2990 * If we get in here, there was no action in
2991 * do_send_packet. So, we check if there is a
2992 * packing buffer to be flushed here.
2993 */
2994 netif_stop_queue(queue->card->dev);
2995 index = queue->next_buf_to_fill;
2996 q_was_packing = queue->do_pack;
2997 /* queue->do_pack may change */
2998 barrier();
2999 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
3000 if (!flush_cnt &&
3001 !atomic_read(&queue->set_pci_flags_count))
3002 flush_cnt +=
3003 qeth_flush_buffers_on_no_pci(queue);
3004 if (queue->card->options.performance_stats &&
3005 q_was_packing)
3006 queue->card->perf_stats.bufs_sent_pack +=
3007 flush_cnt;
3008 if (flush_cnt)
779e6e1c 3009 qeth_flush_buffers(queue, index, flush_cnt);
4a71df50
FB
3010 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3011 }
3012 }
3013}
3014
a1c3ed4c
FB
3015void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
3016 unsigned long card_ptr)
3017{
3018 struct qeth_card *card = (struct qeth_card *)card_ptr;
3019
0cffef48 3020 if (card->dev && (card->dev->flags & IFF_UP))
a1c3ed4c
FB
3021 napi_schedule(&card->napi);
3022}
3023EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
3024
3025void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err,
3026 unsigned int queue, int first_element, int count,
3027 unsigned long card_ptr)
3028{
3029 struct qeth_card *card = (struct qeth_card *)card_ptr;
3030
3031 if (qdio_err)
3032 qeth_schedule_recovery(card);
3033}
3034EXPORT_SYMBOL_GPL(qeth_qdio_input_handler);
3035
779e6e1c
JG
3036void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3037 unsigned int qdio_error, int __queue, int first_element,
3038 int count, unsigned long card_ptr)
4a71df50
FB
3039{
3040 struct qeth_card *card = (struct qeth_card *) card_ptr;
3041 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3042 struct qeth_qdio_out_buffer *buffer;
3043 int i;
3044
847a50fd 3045 QETH_CARD_TEXT(card, 6, "qdouhdl");
779e6e1c 3046 if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
847a50fd 3047 QETH_CARD_TEXT(card, 2, "achkcond");
779e6e1c
JG
3048 netif_stop_queue(card->dev);
3049 qeth_schedule_recovery(card);
3050 return;
4a71df50
FB
3051 }
3052 if (card->options.performance_stats) {
3053 card->perf_stats.outbound_handler_cnt++;
3054 card->perf_stats.outbound_handler_start_time =
3055 qeth_get_micros();
3056 }
3057 for (i = first_element; i < (first_element + count); ++i) {
3058 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
b67d801f
UB
3059 qeth_handle_send_error(card, buffer, qdio_error);
3060 qeth_clear_output_buffer(queue, buffer);
4a71df50
FB
3061 }
3062 atomic_sub(count, &queue->used_buffers);
3063 /* check if we need to do something on this outbound queue */
3064 if (card->info.type != QETH_CARD_TYPE_IQD)
3065 qeth_check_outbound_queue(queue);
3066
3067 netif_wake_queue(queue->card->dev);
3068 if (card->options.performance_stats)
3069 card->perf_stats.outbound_handler_time += qeth_get_micros() -
3070 card->perf_stats.outbound_handler_start_time;
3071}
3072EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
3073
4a71df50
FB
3074int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3075 int ipv, int cast_type)
3076{
5113fec0
UB
3077 if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD ||
3078 card->info.type == QETH_CARD_TYPE_OSX))
4a71df50
FB
3079 return card->qdio.default_out_queue;
3080 switch (card->qdio.no_out_queues) {
3081 case 4:
3082 if (cast_type && card->info.is_multicast_different)
3083 return card->info.is_multicast_different &
3084 (card->qdio.no_out_queues - 1);
3085 if (card->qdio.do_prio_queueing && (ipv == 4)) {
3086 const u8 tos = ip_hdr(skb)->tos;
3087
3088 if (card->qdio.do_prio_queueing ==
3089 QETH_PRIO_Q_ING_TOS) {
3090 if (tos & IP_TOS_NOTIMPORTANT)
3091 return 3;
3092 if (tos & IP_TOS_HIGHRELIABILITY)
3093 return 2;
3094 if (tos & IP_TOS_HIGHTHROUGHPUT)
3095 return 1;
3096 if (tos & IP_TOS_LOWDELAY)
3097 return 0;
3098 }
3099 if (card->qdio.do_prio_queueing ==
3100 QETH_PRIO_Q_ING_PREC)
3101 return 3 - (tos >> 6);
3102 } else if (card->qdio.do_prio_queueing && (ipv == 6)) {
3103 /* TODO: IPv6!!! */
3104 }
3105 return card->qdio.default_out_queue;
3106 case 1: /* fallthrough for single-out-queue 1920-device */
3107 default:
3108 return card->qdio.default_out_queue;
3109 }
3110}
3111EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
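/*
 * Example for the QETH_PRIO_Q_ING_PREC case above: 3 - (tos >> 6)
 * maps the two most significant TOS bits to an outbound queue, e.g.
 * tos 0xb8 (tos >> 6 == 2) selects queue 1, while tos 0x00 selects
 * queue 3.
 */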
3112
4a71df50
FB
3113int qeth_get_elements_no(struct qeth_card *card, void *hdr,
3114 struct sk_buff *skb, int elems)
3115{
51aa165c
FB
3116 int dlen = skb->len - skb->data_len;
3117 int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) -
3118 PFN_DOWN((unsigned long)skb->data);
4a71df50 3119
51aa165c 3120 elements_needed += skb_shinfo(skb)->nr_frags;
4a71df50 3121 if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
14cc21b6 3122 QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
4a71df50
FB
3123 "(Number=%d / Length=%d). Discarded.\n",
3124 (elements_needed+elems), skb->len);
3125 return 0;
3126 }
3127 return elements_needed;
3128}
3129EXPORT_SYMBOL_GPL(qeth_get_elements_no);
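/*
 * Worked example for the element count above (assuming the usual
 * 4 KiB page size): a linear part that starts at offset 0xff0 within
 * a page and is 0x40 bytes long ends in the following page, so
 * PFN_UP(data + dlen - 1) - PFN_DOWN(data) yields 2; each page
 * fragment of the skb then adds one further element.
 */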
3130
51aa165c
FB
3131int qeth_hdr_chk_and_bounce(struct sk_buff *skb, int len)
3132{
3133 int hroom, inpage, rest;
3134
3135 if (((unsigned long)skb->data & PAGE_MASK) !=
3136 (((unsigned long)skb->data + len - 1) & PAGE_MASK)) {
3137 hroom = skb_headroom(skb);
3138 inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE);
3139 rest = len - inpage;
3140 if (rest > hroom)
3141 return 1;
3142 memmove(skb->data - rest, skb->data, skb->len - skb->data_len);
3143 skb->data -= rest;
3144 QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest);
3145 }
3146 return 0;
3147}
3148EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
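/*
 * qeth_hdr_chk_and_bounce() above shifts the linear skb data into the
 * headroom when the first 'len' bytes would otherwise straddle a page
 * boundary, so that the hardware header ends up within a single page;
 * it returns 1 only when the available headroom is too small for the
 * move.
 */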
3149
f90b744e 3150static inline void __qeth_fill_buffer(struct sk_buff *skb,
683d718a
FB
3151 struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
3152 int offset)
4a71df50 3153{
51aa165c 3154 int length = skb->len - skb->data_len;
4a71df50
FB
3155 int length_here;
3156 int element;
3157 char *data;
51aa165c
FB
3158 int first_lap, cnt;
3159 struct skb_frag_struct *frag;
4a71df50
FB
3160
3161 element = *next_element_to_fill;
3162 data = skb->data;
3163 first_lap = (is_tso == 0 ? 1 : 0);
3164
683d718a
FB
3165 if (offset >= 0) {
3166 data = skb->data + offset;
e1f03ae8 3167 length -= offset;
683d718a
FB
3168 first_lap = 0;
3169 }
3170
4a71df50
FB
3171 while (length > 0) {
3172 /* length_here is the remaining amount of data in this page */
3173 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
3174 if (length < length_here)
3175 length_here = length;
3176
3177 buffer->element[element].addr = data;
3178 buffer->element[element].length = length_here;
3179 length -= length_here;
3180 if (!length) {
3181 if (first_lap)
51aa165c
FB
3182 if (skb_shinfo(skb)->nr_frags)
3183 buffer->element[element].flags =
3184 SBAL_FLAGS_FIRST_FRAG;
3185 else
3186 buffer->element[element].flags = 0;
4a71df50
FB
3187 else
3188 buffer->element[element].flags =
51aa165c 3189 SBAL_FLAGS_MIDDLE_FRAG;
4a71df50
FB
3190 } else {
3191 if (first_lap)
3192 buffer->element[element].flags =
3193 SBAL_FLAGS_FIRST_FRAG;
3194 else
3195 buffer->element[element].flags =
3196 SBAL_FLAGS_MIDDLE_FRAG;
3197 }
3198 data += length_here;
3199 element++;
3200 first_lap = 0;
3201 }
51aa165c
FB
3202
3203 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3204 frag = &skb_shinfo(skb)->frags[cnt];
3205 buffer->element[element].addr = (char *)page_to_phys(frag->page)
3206 + frag->page_offset;
3207 buffer->element[element].length = frag->size;
3208 buffer->element[element].flags = SBAL_FLAGS_MIDDLE_FRAG;
3209 element++;
3210 }
3211
3212 if (buffer->element[element - 1].flags)
3213 buffer->element[element - 1].flags = SBAL_FLAGS_LAST_FRAG;
4a71df50
FB
3214 *next_element_to_fill = element;
3215}
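/*
 * Element flag chaining used above: unless a separate header element
 * has already been tagged SBAL_FLAGS_FIRST_FRAG by the caller (the
 * TSO and offset cases), the first element of a multi-element skb
 * gets SBAL_FLAGS_FIRST_FRAG, every further piece including the page
 * fragments gets SBAL_FLAGS_MIDDLE_FRAG, and the last used element is
 * rewritten to SBAL_FLAGS_LAST_FRAG; a single-element skb without
 * fragments keeps flags == 0.
 */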
3216
f90b744e 3217static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
683d718a
FB
3218 struct qeth_qdio_out_buffer *buf, struct sk_buff *skb,
3219 struct qeth_hdr *hdr, int offset, int hd_len)
4a71df50
FB
3220{
3221 struct qdio_buffer *buffer;
4a71df50
FB
3222 int flush_cnt = 0, hdr_len, large_send = 0;
3223
4a71df50
FB
3224 buffer = buf->buffer;
3225 atomic_inc(&skb->users);
3226 skb_queue_tail(&buf->skb_list, skb);
3227
4a71df50 3228 /*check first on TSO ....*/
683d718a 3229 if (hdr->hdr.l3.id == QETH_HEADER_TYPE_TSO) {
4a71df50
FB
3230 int element = buf->next_element_to_fill;
3231
683d718a
FB
3232 hdr_len = sizeof(struct qeth_hdr_tso) +
3233 ((struct qeth_hdr_tso *)hdr)->ext.dg_hdr_len;
4a71df50
FB
3234		/* fill first buffer entry only with header information */
3235 buffer->element[element].addr = skb->data;
3236 buffer->element[element].length = hdr_len;
3237 buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
3238 buf->next_element_to_fill++;
3239 skb->data += hdr_len;
3240 skb->len -= hdr_len;
3241 large_send = 1;
3242 }
683d718a
FB
3243
3244 if (offset >= 0) {
3245 int element = buf->next_element_to_fill;
3246 buffer->element[element].addr = hdr;
3247 buffer->element[element].length = sizeof(struct qeth_hdr) +
3248 hd_len;
3249 buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
3250 buf->is_header[element] = 1;
3251 buf->next_element_to_fill++;
3252 }
3253
51aa165c
FB
3254 __qeth_fill_buffer(skb, buffer, large_send,
3255 (int *)&buf->next_element_to_fill, offset);
4a71df50
FB
3256
3257 if (!queue->do_pack) {
847a50fd 3258 QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
4a71df50
FB
3259 /* set state to PRIMED -> will be flushed */
3260 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
3261 flush_cnt = 1;
3262 } else {
847a50fd 3263 QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
4a71df50
FB
3264 if (queue->card->options.performance_stats)
3265 queue->card->perf_stats.skbs_sent_pack++;
3266 if (buf->next_element_to_fill >=
3267 QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
3268 /*
3269			 * packed buffer is full -> set state PRIMED
3270 * -> will be flushed
3271 */
3272 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
3273 flush_cnt = 1;
3274 }
3275 }
3276 return flush_cnt;
3277}
3278
3279int qeth_do_send_packet_fast(struct qeth_card *card,
3280 struct qeth_qdio_out_q *queue, struct sk_buff *skb,
3281 struct qeth_hdr *hdr, int elements_needed,
64ef8957 3282 int offset, int hd_len)
4a71df50
FB
3283{
3284 struct qeth_qdio_out_buffer *buffer;
4a71df50
FB
3285 int index;
3286
4a71df50
FB
3287 /* spin until we get the queue ... */
3288 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
3289 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
3290 /* ... now we've got the queue */
3291 index = queue->next_buf_to_fill;
3292 buffer = &queue->bufs[queue->next_buf_to_fill];
3293 /*
3294 * check if buffer is empty to make sure that we do not 'overtake'
3295 * ourselves and try to fill a buffer that is already primed
3296 */
3297 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
3298 goto out;
64ef8957 3299 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
4a71df50 3300 QDIO_MAX_BUFFERS_PER_Q;
4a71df50 3301 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
64ef8957
FB
3302 qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
3303 qeth_flush_buffers(queue, index, 1);
4a71df50
FB
3304 return 0;
3305out:
3306 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3307 return -EBUSY;
3308}
3309EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
3310
3311int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3312 struct sk_buff *skb, struct qeth_hdr *hdr,
64ef8957 3313 int elements_needed)
4a71df50
FB
3314{
3315 struct qeth_qdio_out_buffer *buffer;
3316 int start_index;
3317 int flush_count = 0;
3318 int do_pack = 0;
3319 int tmp;
3320 int rc = 0;
3321
4a71df50
FB
3322 /* spin until we get the queue ... */
3323 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
3324 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
3325 start_index = queue->next_buf_to_fill;
3326 buffer = &queue->bufs[queue->next_buf_to_fill];
3327 /*
3328 * check if buffer is empty to make sure that we do not 'overtake'
3329 * ourselves and try to fill a buffer that is already primed
3330 */
3331 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
3332 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3333 return -EBUSY;
3334 }
3335 /* check if we need to switch packing state of this queue */
3336 qeth_switch_to_packing_if_needed(queue);
3337 if (queue->do_pack) {
3338 do_pack = 1;
64ef8957
FB
3339 /* does packet fit in current buffer? */
3340 if ((QETH_MAX_BUFFER_ELEMENTS(card) -
3341 buffer->next_element_to_fill) < elements_needed) {
3342 /* ... no -> set state PRIMED */
3343 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3344 flush_count++;
3345 queue->next_buf_to_fill =
3346 (queue->next_buf_to_fill + 1) %
3347 QDIO_MAX_BUFFERS_PER_Q;
3348 buffer = &queue->bufs[queue->next_buf_to_fill];
3349 /* we did a step forward, so check buffer state
3350 * again */
3351 if (atomic_read(&buffer->state) !=
3352 QETH_QDIO_BUF_EMPTY) {
3353 qeth_flush_buffers(queue, start_index,
779e6e1c 3354 flush_count);
64ef8957 3355 atomic_set(&queue->state,
4a71df50 3356 QETH_OUT_Q_UNLOCKED);
64ef8957 3357 return -EBUSY;
4a71df50
FB
3358 }
3359 }
3360 }
64ef8957 3361 tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0);
4a71df50
FB
3362 queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
3363 QDIO_MAX_BUFFERS_PER_Q;
3364 flush_count += tmp;
4a71df50 3365 if (flush_count)
779e6e1c 3366 qeth_flush_buffers(queue, start_index, flush_count);
4a71df50
FB
3367 else if (!atomic_read(&queue->set_pci_flags_count))
3368 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
3369 /*
3370 * queue->state will go from LOCKED -> UNLOCKED or from
3371 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
3372 * (switch packing state or flush buffer to get another pci flag out).
3373 * In that case we will enter this loop
3374 */
3375 while (atomic_dec_return(&queue->state)) {
3376 flush_count = 0;
3377 start_index = queue->next_buf_to_fill;
3378 /* check if we can go back to non-packing state */
3379 flush_count += qeth_switch_to_nonpacking_if_needed(queue);
3380 /*
3381 * check if we need to flush a packing buffer to get a pci
3382 * flag out on the queue
3383 */
3384 if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
3385 flush_count += qeth_flush_buffers_on_no_pci(queue);
3386 if (flush_count)
779e6e1c 3387 qeth_flush_buffers(queue, start_index, flush_count);
4a71df50
FB
3388 }
3389 /* at this point the queue is UNLOCKED again */
3390 if (queue->card->options.performance_stats && do_pack)
3391 queue->card->perf_stats.bufs_sent_pack += flush_count;
3392
3393 return rc;
3394}
3395EXPORT_SYMBOL_GPL(qeth_do_send_packet);
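/*
 * Both send paths above serialize on queue->state: a sender spins
 * until it can move the queue from QETH_OUT_Q_UNLOCKED to
 * QETH_OUT_Q_LOCKED, fills (and possibly flushes) buffers, and then
 * releases the queue again.  The atomic_dec_return() loop at the end
 * of qeth_do_send_packet() picks up a LOCKED_FLUSH request that the
 * output handler may have posted in the meantime, so a pending
 * packing-state switch or flush is not lost when the handler could
 * not take the lock itself.
 */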
3396
3397static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
3398 struct qeth_reply *reply, unsigned long data)
3399{
3400 struct qeth_ipa_cmd *cmd;
3401 struct qeth_ipacmd_setadpparms *setparms;
3402
847a50fd 3403 QETH_CARD_TEXT(card, 4, "prmadpcb");
4a71df50
FB
3404
3405 cmd = (struct qeth_ipa_cmd *) data;
3406 setparms = &(cmd->data.setadapterparms);
3407
3408 qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
3409 if (cmd->hdr.return_code) {
847a50fd 3410 QETH_CARD_TEXT_(card, 4, "prmrc%2.2x", cmd->hdr.return_code);
4a71df50
FB
3411 setparms->data.mode = SET_PROMISC_MODE_OFF;
3412 }
3413 card->info.promisc_mode = setparms->data.mode;
3414 return 0;
3415}
3416
3417void qeth_setadp_promisc_mode(struct qeth_card *card)
3418{
3419 enum qeth_ipa_promisc_modes mode;
3420 struct net_device *dev = card->dev;
3421 struct qeth_cmd_buffer *iob;
3422 struct qeth_ipa_cmd *cmd;
3423
847a50fd 3424 QETH_CARD_TEXT(card, 4, "setprom");
4a71df50
FB
3425
3426 if (((dev->flags & IFF_PROMISC) &&
3427 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
3428 (!(dev->flags & IFF_PROMISC) &&
3429 (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
3430 return;
3431 mode = SET_PROMISC_MODE_OFF;
3432 if (dev->flags & IFF_PROMISC)
3433 mode = SET_PROMISC_MODE_ON;
3434 QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
3435
3436 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
3437 sizeof(struct qeth_ipacmd_setadpparms));
3438 cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
3439 cmd->data.setadapterparms.data.mode = mode;
3440 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
3441}
3442EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
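/*
 * Illustrative sketch only: a discipline would usually call this from its
 * ndo_set_rx_mode handler once dev->flags has been updated, e.g.:
 *
 *	static void qeth_xx_set_rx_mode(struct net_device *dev)
 *	{
 *		struct qeth_card *card = dev->ml_priv;
 *		...
 *		qeth_setadp_promisc_mode(card);
 *	}
 *
 * (qeth_xx_set_rx_mode is a placeholder name, not an exported symbol.)
 */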
3443
3444int qeth_change_mtu(struct net_device *dev, int new_mtu)
3445{
3446 struct qeth_card *card;
3447 char dbf_text[15];
3448
3449 card = dev->ml_priv;
3450
3451 QETH_CARD_TEXT(card, 4, "chgmtu");
3452 sprintf(dbf_text, "%8x", new_mtu);
3453 QETH_CARD_TEXT(card, 4, dbf_text);
3454
3455 if (new_mtu < 64)
3456 return -EINVAL;
3457 if (new_mtu > 65535)
3458 return -EINVAL;
3459 if ((!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) &&
3460 (!qeth_mtu_is_valid(card, new_mtu)))
3461 return -EINVAL;
3462 dev->mtu = new_mtu;
3463 return 0;
3464}
3465EXPORT_SYMBOL_GPL(qeth_change_mtu);
3466
3467struct net_device_stats *qeth_get_stats(struct net_device *dev)
3468{
3469 struct qeth_card *card;
3470
3471 card = dev->ml_priv;
3472
3473 QETH_CARD_TEXT(card, 5, "getstat");
3474
3475 return &card->stats;
3476}
3477EXPORT_SYMBOL_GPL(qeth_get_stats);
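/*
 * Illustrative sketch only: qeth_change_mtu and qeth_get_stats are meant to
 * be wired into a discipline's net_device_ops table, for example:
 *
 *	static const struct net_device_ops qeth_xx_netdev_ops = {
 *		.ndo_get_stats		= qeth_get_stats,
 *		.ndo_change_mtu		= qeth_change_mtu,
 *		...
 *	};
 *
 * (qeth_xx_netdev_ops is a placeholder; the real tables live in the
 * qeth_l2/qeth_l3 modules.)
 */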
3478
3479static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
3480 struct qeth_reply *reply, unsigned long data)
3481{
3482 struct qeth_ipa_cmd *cmd;
3483
3484 QETH_CARD_TEXT(card, 4, "chgmaccb");
3485
3486 cmd = (struct qeth_ipa_cmd *) data;
3487 if (!card->options.layer2 ||
3488 !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
3489 memcpy(card->dev->dev_addr,
3490 &cmd->data.setadapterparms.data.change_addr.addr,
3491 OSA_ADDR_LEN);
3492 card->info.mac_bits |= QETH_LAYER2_MAC_READ;
3493 }
3494 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
3495 return 0;
3496}
3497
3498int qeth_setadpparms_change_macaddr(struct qeth_card *card)
3499{
3500 int rc;
3501 struct qeth_cmd_buffer *iob;
3502 struct qeth_ipa_cmd *cmd;
3503
3504 QETH_CARD_TEXT(card, 4, "chgmac");
3505
3506 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
3507 sizeof(struct qeth_ipacmd_setadpparms));
3508 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3509 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
3510 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
3511 memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
3512 card->dev->dev_addr, OSA_ADDR_LEN);
3513 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
3514 NULL);
3515 return rc;
3516}
3517EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
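/*
 * Illustrative sketch only: with CHANGE_ADDR_READ_MAC the adapter reports
 * its own address, and the callback above copies it into dev->dev_addr.
 * A discipline therefore typically issues this once while bringing the
 * interface up, along the lines of:
 *
 *	rc = qeth_setadpparms_change_macaddr(card);
 *	if (rc)
 *		QETH_CARD_TEXT_(card, 2, "rdmac%d", rc);
 *
 * (The error-handling line is an assumption, not copied from a discipline.)
 */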
3518
3519static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
3520 struct qeth_reply *reply, unsigned long data)
3521{
3522 struct qeth_ipa_cmd *cmd;
3523 struct qeth_set_access_ctrl *access_ctrl_req;
3524
3525 QETH_CARD_TEXT(card, 4, "setaccb");
3526
3527 cmd = (struct qeth_ipa_cmd *) data;
3528 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
3529 QETH_DBF_TEXT_(SETUP, 2, "setaccb");
3530 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
3531 QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
3532 cmd->data.setadapterparms.hdr.return_code);
3533 switch (cmd->data.setadapterparms.hdr.return_code) {
3534 case SET_ACCESS_CTRL_RC_SUCCESS:
3535 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
3536 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
3537 {
3538 card->options.isolation = access_ctrl_req->subcmd_code;
3539 if (card->options.isolation == ISOLATION_MODE_NONE) {
3540 dev_info(&card->gdev->dev,
3541 "QDIO data connection isolation is deactivated\n");
3542 } else {
3543 dev_info(&card->gdev->dev,
3544 "QDIO data connection isolation is activated\n");
3545 }
3546 QETH_DBF_MESSAGE(3, "OK:SET_ACCESS_CTRL(%s, %d)==%d\n",
3547 card->gdev->dev.kobj.name,
3548 access_ctrl_req->subcmd_code,
3549 cmd->data.setadapterparms.hdr.return_code);
3550 break;
3551 }
3552 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
3553 {
3554 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
3555 card->gdev->dev.kobj.name,
3556 access_ctrl_req->subcmd_code,
3557 cmd->data.setadapterparms.hdr.return_code);
3558 dev_err(&card->gdev->dev, "Adapter does not "
3559 "support QDIO data connection isolation\n");
3560
3561 /* ensure isolation mode is "none" */
3562 card->options.isolation = ISOLATION_MODE_NONE;
3563 break;
3564 }
3565 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
3566 {
3567 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
3568 card->gdev->dev.kobj.name,
3569 access_ctrl_req->subcmd_code,
3570 cmd->data.setadapterparms.hdr.return_code);
3571 dev_err(&card->gdev->dev,
3572 "Adapter is dedicated. "
3573 "QDIO data connection isolation not supported\n");
3574
3575 /* ensure isolation mode is "none" */
3576 card->options.isolation = ISOLATION_MODE_NONE;
3577 break;
3578 }
3579 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
3580 {
3581 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
3582 card->gdev->dev.kobj.name,
3583 access_ctrl_req->subcmd_code,
3584 cmd->data.setadapterparms.hdr.return_code);
3585 dev_err(&card->gdev->dev,
3586 "TSO does not permit QDIO data connection isolation\n");
3587
3588 /* ensure isolation mode is "none" */
3589 card->options.isolation = ISOLATION_MODE_NONE;
3590 break;
3591 }
3592 default:
3593 {
3594 /* this should never happen */
3595 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d"
3596 "==UNKNOWN\n",
3597 card->gdev->dev.kobj.name,
3598 access_ctrl_req->subcmd_code,
3599 cmd->data.setadapterparms.hdr.return_code);
3600
3601 /* ensure isolation mode is "none" */
3602 card->options.isolation = ISOLATION_MODE_NONE;
3603 break;
3604 }
3605 }
3606 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
3607 return 0;
3608}
3609
3610static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
3611 enum qeth_ipa_isolation_modes isolation)
3612{
3613 int rc;
3614 struct qeth_cmd_buffer *iob;
3615 struct qeth_ipa_cmd *cmd;
3616 struct qeth_set_access_ctrl *access_ctrl_req;
3617
3618 QETH_CARD_TEXT(card, 4, "setacctl");
3619
3620 QETH_DBF_TEXT_(SETUP, 2, "setacctl");
3621 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
3622
3623 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
3624 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
3625 sizeof(struct qeth_set_access_ctrl));
3626 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3627 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
3628 access_ctrl_req->subcmd_code = isolation;
3629
3630 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
3631 NULL);
3632 QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
3633 return rc;
3634}
3635
3636int qeth_set_access_ctrl_online(struct qeth_card *card)
3637{
3638 int rc = 0;
3639
3640 QETH_CARD_TEXT(card, 4, "setactlo");
3641
3642 if ((card->info.type == QETH_CARD_TYPE_OSD ||
3643 card->info.type == QETH_CARD_TYPE_OSX) &&
3644 qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
3645 rc = qeth_setadpparms_set_access_ctrl(card,
3646 card->options.isolation);
3647 if (rc) {
3648 QETH_DBF_MESSAGE(3,
3649 "IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n",
3650 card->gdev->dev.kobj.name,
3651 rc);
3652 }
3653 } else if (card->options.isolation != ISOLATION_MODE_NONE) {
3654 card->options.isolation = ISOLATION_MODE_NONE;
3655
3656 dev_err(&card->gdev->dev, "Adapter does not "
3657 "support QDIO data connection isolation\n");
3658 rc = -EOPNOTSUPP;
3659 }
3660 return rc;
3661}
3662EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
3663
3664void qeth_tx_timeout(struct net_device *dev)
3665{
3666 struct qeth_card *card;
3667
3668 card = dev->ml_priv;
3669 QETH_CARD_TEXT(card, 4, "txtimeo");
3670 card->stats.tx_errors++;
3671 qeth_schedule_recovery(card);
3672}
3673EXPORT_SYMBOL_GPL(qeth_tx_timeout);
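/*
 * Illustrative sketch only: intended to be hooked up as the watchdog
 * callback of a discipline's net_device_ops, e.g.
 *
 *	.ndo_tx_timeout = qeth_tx_timeout,
 *
 * so that a stalled transmit queue bumps tx_errors and schedules recovery.
 */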
3674
3675int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
3676{
3677 struct qeth_card *card = dev->ml_priv;
3678 int rc = 0;
3679
3680 switch (regnum) {
3681 case MII_BMCR: /* Basic mode control register */
3682 rc = BMCR_FULLDPLX;
3683 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
3684 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
3685 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
3686 rc |= BMCR_SPEED100;
3687 break;
3688 case MII_BMSR: /* Basic mode status register */
3689 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
3690 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
3691 BMSR_100BASE4;
3692 break;
3693 case MII_PHYSID1: /* PHYS ID 1 */
3694 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
3695 dev->dev_addr[2];
3696 rc = (rc >> 5) & 0xFFFF;
3697 break;
3698 case MII_PHYSID2: /* PHYS ID 2 */
3699 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
3700 break;
3701 case MII_ADVERTISE: /* Advertisement control reg */
3702 rc = ADVERTISE_ALL;
3703 break;
3704 case MII_LPA: /* Link partner ability reg */
3705 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
3706 LPA_100BASE4 | LPA_LPACK;
3707 break;
3708 case MII_EXPANSION: /* Expansion register */
3709 break;
3710 case MII_DCOUNTER: /* disconnect counter */
3711 break;
3712 case MII_FCSCOUNTER: /* false carrier counter */
3713 break;
3714 case MII_NWAYTEST: /* N-way auto-neg test register */
3715 break;
3716 case MII_RERRCOUNTER: /* rx error counter */
3717 rc = card->stats.rx_errors;
3718 break;
3719 case MII_SREVISION: /* silicon revision */
3720 break;
3721 case MII_RESV1: /* reserved 1 */
3722 break;
3723 case MII_LBRERROR: /* loopback, rx, bypass error */
3724 break;
3725 case MII_PHYADDR: /* physical address */
3726 break;
3727 case MII_RESV2: /* reserved 2 */
3728 break;
3729 case MII_TPISTATUS: /* TPI status for 10mbps */
3730 break;
3731 case MII_NCONFIG: /* network interface config */
3732 break;
3733 default:
3734 break;
3735 }
3736 return rc;
3737}
3738EXPORT_SYMBOL_GPL(qeth_mdio_read);
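/*
 * Illustrative sketch only: the MII registers emulated above are meant for
 * an ioctl(SIOCGMIIREG)-style path; no real PHY is accessed, the values are
 * synthesized from card state. A caller would look roughly like:
 *
 *	struct mii_ioctl_data *mii = if_mii(rq);
 *	mii->val_out = qeth_mdio_read(dev, mii->phy_id, mii->reg_num);
 *
 * (The surrounding ioctl dispatch is assumed, not shown here.)
 */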
3739
3740static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
3741 struct qeth_cmd_buffer *iob, int len,
3742 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
3743 unsigned long),
3744 void *reply_param)
3745{
3746 u16 s1, s2;
3747
3748 QETH_CARD_TEXT(card, 4, "sendsnmp");
3749
3750 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
3751 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
3752 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3753 /* adjust PDU length fields in IPA_PDU_HEADER */
3754 s1 = (u32) IPA_PDU_HEADER_SIZE + len;
3755 s2 = (u32) len;
3756 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
3757 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
3758 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
3759 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
3760 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
3761 reply_cb, reply_param);
3762}
3763
3764static int qeth_snmp_command_cb(struct qeth_card *card,
3765 struct qeth_reply *reply, unsigned long sdata)
3766{
3767 struct qeth_ipa_cmd *cmd;
3768 struct qeth_arp_query_info *qinfo;
3769 struct qeth_snmp_cmd *snmp;
3770 unsigned char *data;
3771 __u16 data_len;
3772
3773 QETH_CARD_TEXT(card, 3, "snpcmdcb");
3774
3775 cmd = (struct qeth_ipa_cmd *) sdata;
3776 data = (unsigned char *)((char *)cmd - reply->offset);
3777 qinfo = (struct qeth_arp_query_info *) reply->param;
3778 snmp = &cmd->data.setadapterparms.data.snmp;
3779
3780 if (cmd->hdr.return_code) {
3781 QETH_CARD_TEXT_(card, 4, "scer1%i", cmd->hdr.return_code);
3782 return 0;
3783 }
3784 if (cmd->data.setadapterparms.hdr.return_code) {
3785 cmd->hdr.return_code =
3786 cmd->data.setadapterparms.hdr.return_code;
3787 QETH_CARD_TEXT_(card, 4, "scer2%i", cmd->hdr.return_code);
3788 return 0;
3789 }
3790 data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
3791 if (cmd->data.setadapterparms.hdr.seq_no == 1)
3792 data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
3793 else
3794 data_len -= (__u16)((char *)&snmp->request - (char *)cmd);
3795
3796 /* check if there is enough room in userspace */
3797 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
3798 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM);
3799 cmd->hdr.return_code = -ENOMEM;
3800 return 0;
3801 }
3802 QETH_CARD_TEXT_(card, 4, "snore%i",
3803 cmd->data.setadapterparms.hdr.used_total);
3804 QETH_CARD_TEXT_(card, 4, "sseqn%i",
3805 cmd->data.setadapterparms.hdr.seq_no);
3806 /* copy entries to user buffer */
3807 if (cmd->data.setadapterparms.hdr.seq_no == 1) {
3808 memcpy(qinfo->udata + qinfo->udata_offset,
3809 (char *)snmp,
3810 data_len + offsetof(struct qeth_snmp_cmd, data));
3811 qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
3812 } else {
3813 memcpy(qinfo->udata + qinfo->udata_offset,
3814 (char *)&snmp->request, data_len);
3815 }
3816 qinfo->udata_offset += data_len;
3817 /* check if all replies received ... */
3818 QETH_CARD_TEXT_(card, 4, "srtot%i",
3819 cmd->data.setadapterparms.hdr.used_total);
3820 QETH_CARD_TEXT_(card, 4, "srseq%i",
3821 cmd->data.setadapterparms.hdr.seq_no);
3822 if (cmd->data.setadapterparms.hdr.seq_no <
3823 cmd->data.setadapterparms.hdr.used_total)
3824 return 1;
3825 return 0;
3826}
3827
3828int qeth_snmp_command(struct qeth_card *card, char __user *udata)
3829{
3830 struct qeth_cmd_buffer *iob;
3831 struct qeth_ipa_cmd *cmd;
3832 struct qeth_snmp_ureq *ureq;
3833 int req_len;
3834 struct qeth_arp_query_info qinfo = {0, };
3835 int rc = 0;
3836
3837 QETH_CARD_TEXT(card, 3, "snmpcmd");
3838
3839 if (card->info.guestlan)
3840 return -EOPNOTSUPP;
3841
3842 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
3843 (!card->options.layer2)) {
3844 return -EOPNOTSUPP;
3845 }
3846 /* skip 4 bytes (data_len struct member) to get req_len */
3847 if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
3848 return -EFAULT;
3849 ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
3850 if (IS_ERR(ureq)) {
3851 QETH_CARD_TEXT(card, 2, "snmpnome");
3852 return PTR_ERR(ureq);
3853 }
3854 qinfo.udata_len = ureq->hdr.data_len;
3855 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
3856 if (!qinfo.udata) {
3857 kfree(ureq);
3858 return -ENOMEM;
3859 }
3860 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
3861
3862 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
3863 QETH_SNMP_SETADP_CMDLENGTH + req_len);
3864 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3865 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
3866 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
3867 qeth_snmp_command_cb, (void *)&qinfo);
3868 if (rc)
3869 QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n",
3870 QETH_CARD_IFNAME(card), rc);
3871 else {
3872 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
3873 rc = -EFAULT;
3874 }
3875
3876 kfree(ureq);
3877 kfree(qinfo.udata);
3878 return rc;
3879}
3880EXPORT_SYMBOL_GPL(qeth_snmp_command);
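/*
 * Illustrative sketch only: a discipline's ndo_do_ioctl handler would
 * normally forward the private SNMP ioctl here, along the lines of:
 *
 *	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
 *		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
 *		break;
 *
 * The surrounding switch statement and the exact ioctl constant used by the
 * discipline are assumed here, not taken from this file.
 */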
3881
3882static inline int qeth_get_qdio_q_format(struct qeth_card *card)
3883{
3884 switch (card->info.type) {
3885 case QETH_CARD_TYPE_IQD:
3886 return 2;
3887 default:
3888 return 0;
3889 }
3890}
3891
3892static void qeth_determine_capabilities(struct qeth_card *card)
3893{
3894 int rc;
3895 int length;
3896 char *prcd;
3897 struct ccw_device *ddev;
3898 int ddev_offline = 0;
3899
3900 QETH_DBF_TEXT(SETUP, 2, "detcapab");
3901 ddev = CARD_DDEV(card);
3902 if (!ddev->online) {
3903 ddev_offline = 1;
3904 rc = ccw_device_set_online(ddev);
3905 if (rc) {
3906 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
3907 goto out;
3908 }
3909 }
3910
3911 rc = qeth_read_conf_data(card, (void **) &prcd, &length);
3912 if (rc) {
3913 QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
3914 dev_name(&card->gdev->dev), rc);
3915 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
3916 goto out_offline;
3917 }
3918 qeth_configure_unitaddr(card, prcd);
3919 qeth_configure_blkt_default(card, prcd);
3920 kfree(prcd);
3921
3922 rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
3923 if (rc)
3924 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
3925
3926out_offline:
3927 if (ddev_offline == 1)
3928 ccw_device_set_offline(ddev);
3929out:
3930 return;
3931}
3932
3933static int qeth_qdio_establish(struct qeth_card *card)
3934{
3935 struct qdio_initialize init_data;
3936 char *qib_param_field;
3937 struct qdio_buffer **in_sbal_ptrs;
3938 struct qdio_buffer **out_sbal_ptrs;
3939 int i, j, k;
3940 int rc = 0;
3941
3942 QETH_DBF_TEXT(SETUP, 2, "qdioest");
3943
3944 qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
3945 GFP_KERNEL);
3946 if (!qib_param_field)
3947 return -ENOMEM;
3948
3949 qeth_create_qib_param_field(card, qib_param_field);
3950 qeth_create_qib_param_field_blkt(card, qib_param_field);
3951
3952 in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
3953 GFP_KERNEL);
3954 if (!in_sbal_ptrs) {
3955 kfree(qib_param_field);
3956 return -ENOMEM;
3957 }
3958 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3959 in_sbal_ptrs[i] = (struct qdio_buffer *)
3960 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
3961
3962 out_sbal_ptrs =
3963 kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
3964 sizeof(void *), GFP_KERNEL);
3965 if (!out_sbal_ptrs) {
3966 kfree(in_sbal_ptrs);
3967 kfree(qib_param_field);
3968 return -ENOMEM;
3969 }
3970 for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
3971 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
3972 out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
3973 card->qdio.out_qs[i]->bufs[j].buffer);
3974 }
3975
3976 memset(&init_data, 0, sizeof(struct qdio_initialize));
3977 init_data.cdev = CARD_DDEV(card);
3978 init_data.q_format = qeth_get_qdio_q_format(card);
3979 init_data.qib_param_field_format = 0;
3980 init_data.qib_param_field = qib_param_field;
3981 init_data.no_input_qs = 1;
3982 init_data.no_output_qs = card->qdio.no_out_queues;
3983 init_data.input_handler = card->discipline.input_handler;
3984 init_data.output_handler = card->discipline.output_handler;
3985 init_data.queue_start_poll = card->discipline.start_poll;
3986 init_data.int_parm = (unsigned long) card;
3987 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3988 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3989 init_data.scan_threshold =
3990 (card->info.type == QETH_CARD_TYPE_IQD) ? 8 : 32;
3991
3992 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
3993 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
3994 rc = qdio_allocate(&init_data);
3995 if (rc) {
3996 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
3997 goto out;
3998 }
3999 rc = qdio_establish(&init_data);
4000 if (rc) {
4001 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
4002 qdio_free(CARD_DDEV(card));
4003 }
4004 }
4005 out:
4006 kfree(out_sbal_ptrs);
4007 kfree(in_sbal_ptrs);
4008 kfree(qib_param_field);
4009 return rc;
4010}
4011
4012static void qeth_core_free_card(struct qeth_card *card)
4013{
4014
4015 QETH_DBF_TEXT(SETUP, 2, "freecrd");
4016 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
4017 qeth_clean_channel(&card->read);
4018 qeth_clean_channel(&card->write);
4019 if (card->dev)
4020 free_netdev(card->dev);
4021 kfree(card->ip_tbd_list);
4022 qeth_free_qdio_buffers(card);
4023 unregister_service_level(&card->qeth_service_level);
4024 kfree(card);
4025}
4026
4027static struct ccw_device_id qeth_ids[] = {
4028 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
4029 .driver_info = QETH_CARD_TYPE_OSD},
4030 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
4031 .driver_info = QETH_CARD_TYPE_IQD},
4032 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
4033 .driver_info = QETH_CARD_TYPE_OSN},
4034 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
4035 .driver_info = QETH_CARD_TYPE_OSM},
4036 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
4037 .driver_info = QETH_CARD_TYPE_OSX},
4038 {},
4039};
4040MODULE_DEVICE_TABLE(ccw, qeth_ids);
4041
4042static struct ccw_driver qeth_ccw_driver = {
4043 .driver = {
4044 .name = "qeth",
4045 },
4046 .ids = qeth_ids,
4047 .probe = ccwgroup_probe_ccwdev,
4048 .remove = ccwgroup_remove_ccwdev,
4049};
4050
4051static int qeth_core_driver_group(const char *buf, struct device *root_dev,
4052 unsigned long driver_id)
4053{
4054 return ccwgroup_create_from_string(root_dev, driver_id,
4055 &qeth_ccw_driver, 3, buf);
4056}
4057
4058int qeth_core_hardsetup_card(struct qeth_card *card)
4059{
4060 int retries = 0;
4061 int rc;
4062
4063 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
4064 atomic_set(&card->force_alloc_skb, 0);
4065 qeth_get_channel_path_desc(card);
4066 retry:
4067 if (retries)
4068 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
4069 dev_name(&card->gdev->dev));
4070 ccw_device_set_offline(CARD_DDEV(card));
4071 ccw_device_set_offline(CARD_WDEV(card));
4072 ccw_device_set_offline(CARD_RDEV(card));
4073 rc = ccw_device_set_online(CARD_RDEV(card));
4074 if (rc)
4075 goto retriable;
4076 rc = ccw_device_set_online(CARD_WDEV(card));
4077 if (rc)
4078 goto retriable;
4079 rc = ccw_device_set_online(CARD_DDEV(card));
4080 if (rc)
4081 goto retriable;
4082 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
4083 retriable:
4084 if (rc == -ERESTARTSYS) {
4085 QETH_DBF_TEXT(SETUP, 2, "break1");
4086 return rc;
4087 } else if (rc) {
4088 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
4089 if (++retries > 3)
4090 goto out;
4091 else
4092 goto retry;
4093 }
4094 qeth_determine_capabilities(card);
4095 qeth_init_tokens(card);
4096 qeth_init_func_level(card);
4097 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
4098 if (rc == -ERESTARTSYS) {
4099 QETH_DBF_TEXT(SETUP, 2, "break2");
4100 return rc;
4101 } else if (rc) {
4102 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
4103 if (--retries < 0)
4104 goto out;
4105 else
4106 goto retry;
4107 }
4108 rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
4109 if (rc == -ERESTARTSYS) {
4110 QETH_DBF_TEXT(SETUP, 2, "break3");
4111 return rc;
4112 } else if (rc) {
4113 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
4114 if (--retries < 0)
4115 goto out;
4116 else
4117 goto retry;
4118 }
4119 card->read_or_write_problem = 0;
4120 rc = qeth_mpc_initialize(card);
4121 if (rc) {
4122 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
4123 goto out;
4124 }
4125
4126 card->options.ipa4.supported_funcs = 0;
4127 card->options.adp.supported_funcs = 0;
4128 card->info.diagass_support = 0;
4129 qeth_query_ipassists(card, QETH_PROT_IPV4);
4130 if (qeth_is_supported(card, IPA_SETADAPTERPARMS))
4131 qeth_query_setadapterparms(card);
4132 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST))
4133 qeth_query_setdiagass(card);
4134 return 0;
4135out:
4136 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
4137 "an error on the device\n");
4138 QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n",
4139 dev_name(&card->gdev->dev), rc);
4140 return rc;
4141}
4142EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
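/*
 * Illustrative sketch only: a discipline's set_online callback is expected
 * to run this helper first and only continue with its own setup when it
 * succeeds, roughly:
 *
 *	rc = qeth_core_hardsetup_card(card);
 *	if (rc) {
 *		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
 *		goto out_remove;
 *	}
 *
 * (out_remove is a placeholder label in the discipline code.)
 */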
4143
4144static inline int qeth_create_skb_frag(struct qdio_buffer_element *element,
4145 struct sk_buff **pskb, int offset, int *pfrag, int data_len)
4146{
4147 struct page *page = virt_to_page(element->addr);
4148 if (*pskb == NULL) {
4149 /* the upper protocol layers assume that there is data in the
4150 * skb itself. Copy a small amount (64 bytes) to make them
4151 * happy. */
4152 *pskb = dev_alloc_skb(64 + ETH_HLEN);
4153 if (!(*pskb))
4154 return -ENOMEM;
4155 skb_reserve(*pskb, ETH_HLEN);
4156 if (data_len <= 64) {
4157 memcpy(skb_put(*pskb, data_len), element->addr + offset,
4158 data_len);
4159 } else {
4160 get_page(page);
4161 memcpy(skb_put(*pskb, 64), element->addr + offset, 64);
4162 skb_fill_page_desc(*pskb, *pfrag, page, offset + 64,
4163 data_len - 64);
4164 (*pskb)->data_len += data_len - 64;
4165 (*pskb)->len += data_len - 64;
4166 (*pskb)->truesize += data_len - 64;
4167 (*pfrag)++;
4168 }
4169 } else {
4170 get_page(page);
4171 skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len);
4172 (*pskb)->data_len += data_len;
4173 (*pskb)->len += data_len;
4174 (*pskb)->truesize += data_len;
4175 (*pfrag)++;
4176 }
4177 return 0;
4178}
4179
4180struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
4181 struct qdio_buffer *buffer,
4182 struct qdio_buffer_element **__element, int *__offset,
4183 struct qeth_hdr **hdr)
4184{
4185 struct qdio_buffer_element *element = *__element;
4186 int offset = *__offset;
4187 struct sk_buff *skb = NULL;
4188 int skb_len = 0;
4189 void *data_ptr;
4190 int data_len;
4191 int headroom = 0;
4192 int use_rx_sg = 0;
4193 int frag = 0;
4194
4195 /* qeth_hdr must not cross element boundaries */
4196 if (element->length < offset + sizeof(struct qeth_hdr)) {
4197 if (qeth_is_last_sbale(element))
4198 return NULL;
4199 element++;
4200 offset = 0;
4201 if (element->length < sizeof(struct qeth_hdr))
4202 return NULL;
4203 }
4204 *hdr = element->addr + offset;
4205
4206 offset += sizeof(struct qeth_hdr);
4207 switch ((*hdr)->hdr.l2.id) {
4208 case QETH_HEADER_TYPE_LAYER2:
4209 skb_len = (*hdr)->hdr.l2.pkt_length;
4210 break;
4211 case QETH_HEADER_TYPE_LAYER3:
4212 skb_len = (*hdr)->hdr.l3.length;
4213 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
4214 (card->info.link_type == QETH_LINK_TYPE_HSTR))
4215 headroom = TR_HLEN;
4216 else
4217 headroom = ETH_HLEN;
4218 break;
4219 case QETH_HEADER_TYPE_OSN:
4220 skb_len = (*hdr)->hdr.osn.pdu_length;
4221 headroom = sizeof(struct qeth_hdr);
4222 break;
4223 default:
4224 break;
4225 }
4226
4227 if (!skb_len)
4228 return NULL;
4229
4230 if ((skb_len >= card->options.rx_sg_cb) &&
4231 (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
4232 (!atomic_read(&card->force_alloc_skb))) {
4233 use_rx_sg = 1;
4234 } else {
4235 skb = dev_alloc_skb(skb_len + headroom);
4236 if (!skb)
4237 goto no_mem;
4238 if (headroom)
4239 skb_reserve(skb, headroom);
4240 }
4241
4242 data_ptr = element->addr + offset;
4243 while (skb_len) {
4244 data_len = min(skb_len, (int)(element->length - offset));
4245 if (data_len) {
4246 if (use_rx_sg) {
4247 if (qeth_create_skb_frag(element, &skb, offset,
4248 &frag, data_len))
4249 goto no_mem;
4250 } else {
4251 memcpy(skb_put(skb, data_len), data_ptr,
4252 data_len);
4253 }
4254 }
4255 skb_len -= data_len;
4256 if (skb_len) {
4257 if (qeth_is_last_sbale(element)) {
4258 QETH_CARD_TEXT(card, 4, "unexeob");
4259 QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
4260 dev_kfree_skb_any(skb);
4261 card->stats.rx_errors++;
4262 return NULL;
4263 }
4264 element++;
4265 offset = 0;
4266 data_ptr = element->addr;
4267 } else {
4268 offset += data_len;
4269 }
4270 }
4271 *__element = element;
4272 *__offset = offset;
4273 if (use_rx_sg && card->options.performance_stats) {
4274 card->perf_stats.sg_skbs_rx++;
4275 card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
4276 }
4277 return skb;
4278no_mem:
4279 if (net_ratelimit()) {
4280 QETH_CARD_TEXT(card, 2, "noskbmem");
4281 }
4282 card->stats.rx_dropped++;
4283 return NULL;
4284}
4285EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
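/*
 * Illustrative sketch only: a discipline's inbound handler walks one QDIO
 * buffer and keeps calling this helper until it returns NULL, roughly:
 *
 *	struct qdio_buffer_element *element = &buffer->element[0];
 *	int offset = 0;
 *	struct qeth_hdr *hdr;
 *	struct sk_buff *skb;
 *
 *	while ((skb = qeth_core_get_next_skb(card, buffer, &element,
 *					     &offset, &hdr))) {
 *		... inspect *hdr, set skb->protocol, hand skb to the stack
 *	}
 *
 * The per-packet processing is discipline-specific and only hinted at here.
 */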
4286
4287static void qeth_unregister_dbf_views(void)
4288{
4289 int x;
4290 for (x = 0; x < QETH_DBF_INFOS; x++) {
4291 debug_unregister(qeth_dbf[x].id);
4292 qeth_dbf[x].id = NULL;
4293 }
4294}
4295
4296 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
4297 {
4298 char dbf_txt_buf[32];
4299 va_list args;
4300
4301 if (level > id->level)
4302 return;
4303 va_start(args, fmt);
4304 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
4305 va_end(args);
4306 debug_text_event(id, level, dbf_txt_buf);
4307 }
4308EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
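/*
 * Illustrative sketch only: callers normally reach this helper through the
 * QETH_DBF_TEXT_()/QETH_CARD_TEXT_() style macros rather than directly; a
 * direct call would look like:
 *
 *	qeth_dbf_longtext(card->debug, 2, "rc:%d", rc);
 *
 * Messages longer than the 32-byte buffer above are truncated by vsnprintf.
 */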
4309
4310static int qeth_register_dbf_views(void)
4311{
4312 int ret;
4313 int x;
4314
4315 for (x = 0; x < QETH_DBF_INFOS; x++) {
4316 /* register the areas */
4317 qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
4318 qeth_dbf[x].pages,
4319 qeth_dbf[x].areas,
4320 qeth_dbf[x].len);
4321 if (qeth_dbf[x].id == NULL) {
4322 qeth_unregister_dbf_views();
4323 return -ENOMEM;
4324 }
4325
4326 /* register a view */
4327 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
4328 if (ret) {
4329 qeth_unregister_dbf_views();
4330 return ret;
4331 }
4332
4333 /* set a passing level */
4334 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
4335 }
4336
4337 return 0;
4338}
4339
4340int qeth_core_load_discipline(struct qeth_card *card,
4341 enum qeth_discipline_id discipline)
4342{
4343 int rc = 0;
4344 switch (discipline) {
4345 case QETH_DISCIPLINE_LAYER3:
4346 card->discipline.ccwgdriver = try_then_request_module(
4347 symbol_get(qeth_l3_ccwgroup_driver),
4348 "qeth_l3");
4349 break;
4350 case QETH_DISCIPLINE_LAYER2:
4351 card->discipline.ccwgdriver = try_then_request_module(
4352 symbol_get(qeth_l2_ccwgroup_driver),
4353 "qeth_l2");
4354 break;
4355 }
4356 if (!card->discipline.ccwgdriver) {
4357 dev_err(&card->gdev->dev, "There is no kernel module to "
4358 "support discipline %d\n", discipline);
4359 rc = -EINVAL;
4360 }
4361 return rc;
4362}
4363
4364void qeth_core_free_discipline(struct qeth_card *card)
4365{
4366 if (card->options.layer2)
4367 symbol_put(qeth_l2_ccwgroup_driver);
4368 else
4369 symbol_put(qeth_l3_ccwgroup_driver);
4370 card->discipline.ccwgdriver = NULL;
4371}
4372
4373static int qeth_core_probe_device(struct ccwgroup_device *gdev)
4374{
4375 struct qeth_card *card;
4376 struct device *dev;
4377 int rc;
4378 unsigned long flags;
4379 char dbf_name[20];
4380
4381 QETH_DBF_TEXT(SETUP, 2, "probedev");
4382
4383 dev = &gdev->dev;
4384 if (!get_device(dev))
4385 return -ENODEV;
4386
4387 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
4388
4389 card = qeth_alloc_card();
4390 if (!card) {
4391 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
4392 rc = -ENOMEM;
4393 goto err_dev;
4394 }
4395
4396 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
4397 dev_name(&gdev->dev));
4398 card->debug = debug_register(dbf_name, 2, 1, 8);
4399 if (!card->debug) {
4400 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
4401 rc = -ENOMEM;
4402 goto err_card;
4403 }
4404 debug_register_view(card->debug, &debug_hex_ascii_view);
4405
4406 card->read.ccwdev = gdev->cdev[0];
4407 card->write.ccwdev = gdev->cdev[1];
4408 card->data.ccwdev = gdev->cdev[2];
4409 dev_set_drvdata(&gdev->dev, card);
4410 card->gdev = gdev;
4411 gdev->cdev[0]->handler = qeth_irq;
4412 gdev->cdev[1]->handler = qeth_irq;
4413 gdev->cdev[2]->handler = qeth_irq;
4414
4415 rc = qeth_determine_card_type(card);
4416 if (rc) {
4417 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
4418 goto err_dbf;
4419 }
4420 rc = qeth_setup_card(card);
4421 if (rc) {
4422 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
4423 goto err_dbf;
4424 }
4425
4426 if (card->info.type == QETH_CARD_TYPE_OSN)
4427 rc = qeth_core_create_osn_attributes(dev);
4428 else
4429 rc = qeth_core_create_device_attributes(dev);
4430 if (rc)
4431 goto err_dbf;
4432 switch (card->info.type) {
4433 case QETH_CARD_TYPE_OSN:
4434 case QETH_CARD_TYPE_OSM:
4435 rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
4436 if (rc)
4437 goto err_attr;
4438 rc = card->discipline.ccwgdriver->probe(card->gdev);
4439 if (rc)
4440 goto err_disc;
4441 case QETH_CARD_TYPE_OSD:
4442 case QETH_CARD_TYPE_OSX:
4443 default:
4444 break;
4445 }
4446
4447 write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
4448 list_add_tail(&card->list, &qeth_core_card_list.list);
4449 write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
4450
4451 qeth_determine_capabilities(card);
4452 return 0;
4453
4454err_disc:
4455 qeth_core_free_discipline(card);
4456err_attr:
4457 if (card->info.type == QETH_CARD_TYPE_OSN)
4458 qeth_core_remove_osn_attributes(dev);
4459 else
4460 qeth_core_remove_device_attributes(dev);
4461err_dbf:
4462 debug_unregister(card->debug);
4463err_card:
4464 qeth_core_free_card(card);
4465err_dev:
4466 put_device(dev);
4467 return rc;
4468}
4469
4470static void qeth_core_remove_device(struct ccwgroup_device *gdev)
4471{
4472 unsigned long flags;
4473 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4474
4475 QETH_DBF_TEXT(SETUP, 2, "removedv");
4476
4477 if (card->info.type == QETH_CARD_TYPE_OSN) {
4478 qeth_core_remove_osn_attributes(&gdev->dev);
4479 } else {
4480 qeth_core_remove_device_attributes(&gdev->dev);
4481 }
4482
4483 if (card->discipline.ccwgdriver) {
4484 card->discipline.ccwgdriver->remove(gdev);
4485 qeth_core_free_discipline(card);
4486 }
4487
4488 debug_unregister(card->debug);
4489 write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
4490 list_del(&card->list);
4491 write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
4492 qeth_core_free_card(card);
4493 dev_set_drvdata(&gdev->dev, NULL);
4494 put_device(&gdev->dev);
4495 return;
4496}
4497
4498static int qeth_core_set_online(struct ccwgroup_device *gdev)
4499{
4500 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4501 int rc = 0;
4502 int def_discipline;
4503
4504 if (!card->discipline.ccwgdriver) {
4505 if (card->info.type == QETH_CARD_TYPE_IQD)
4506 def_discipline = QETH_DISCIPLINE_LAYER3;
4507 else
4508 def_discipline = QETH_DISCIPLINE_LAYER2;
4509 rc = qeth_core_load_discipline(card, def_discipline);
4510 if (rc)
4511 goto err;
4512 rc = card->discipline.ccwgdriver->probe(card->gdev);
4513 if (rc)
4514 goto err;
4515 }
4516 rc = card->discipline.ccwgdriver->set_online(gdev);
4517err:
4518 return rc;
4519}
4520
4521static int qeth_core_set_offline(struct ccwgroup_device *gdev)
4522{
4523 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4524 return card->discipline.ccwgdriver->set_offline(gdev);
4525}
4526
4527static void qeth_core_shutdown(struct ccwgroup_device *gdev)
4528{
4529 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4530 if (card->discipline.ccwgdriver &&
4531 card->discipline.ccwgdriver->shutdown)
4532 card->discipline.ccwgdriver->shutdown(gdev);
4533}
4534
4535static int qeth_core_prepare(struct ccwgroup_device *gdev)
4536{
4537 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4538 if (card->discipline.ccwgdriver &&
4539 card->discipline.ccwgdriver->prepare)
4540 return card->discipline.ccwgdriver->prepare(gdev);
4541 return 0;
4542}
4543
4544static void qeth_core_complete(struct ccwgroup_device *gdev)
4545{
4546 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4547 if (card->discipline.ccwgdriver &&
4548 card->discipline.ccwgdriver->complete)
4549 card->discipline.ccwgdriver->complete(gdev);
4550}
4551
4552static int qeth_core_freeze(struct ccwgroup_device *gdev)
4553{
4554 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4555 if (card->discipline.ccwgdriver &&
4556 card->discipline.ccwgdriver->freeze)
4557 return card->discipline.ccwgdriver->freeze(gdev);
4558 return 0;
4559}
4560
4561static int qeth_core_thaw(struct ccwgroup_device *gdev)
4562{
4563 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4564 if (card->discipline.ccwgdriver &&
4565 card->discipline.ccwgdriver->thaw)
4566 return card->discipline.ccwgdriver->thaw(gdev);
4567 return 0;
4568}
4569
4570static int qeth_core_restore(struct ccwgroup_device *gdev)
4571{
4572 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4573 if (card->discipline.ccwgdriver &&
4574 card->discipline.ccwgdriver->restore)
4575 return card->discipline.ccwgdriver->restore(gdev);
4576 return 0;
4577}
4578
4579 static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
4580 .driver = {
4581 .owner = THIS_MODULE,
4582 .name = "qeth",
4583 },
4584 .driver_id = 0xD8C5E3C8,
4585 .probe = qeth_core_probe_device,
4586 .remove = qeth_core_remove_device,
4587 .set_online = qeth_core_set_online,
4588 .set_offline = qeth_core_set_offline,
4589 .shutdown = qeth_core_shutdown,
4590 .prepare = qeth_core_prepare,
4591 .complete = qeth_core_complete,
4592 .freeze = qeth_core_freeze,
4593 .thaw = qeth_core_thaw,
4594 .restore = qeth_core_restore,
4595};
4596
4597static ssize_t
4598qeth_core_driver_group_store(struct device_driver *ddrv, const char *buf,
4599 size_t count)
4600{
4601 int err;
4602 err = qeth_core_driver_group(buf, qeth_core_root_dev,
4603 qeth_core_ccwgroup_driver.driver_id);
4604 if (err)
4605 return err;
4606 else
4607 return count;
4608}
4609
4610static DRIVER_ATTR(group, 0200, NULL, qeth_core_driver_group_store);
4611
4612static struct {
4613 const char str[ETH_GSTRING_LEN];
4614} qeth_ethtool_stats_keys[] = {
4615/* 0 */{"rx skbs"},
4616 {"rx buffers"},
4617 {"tx skbs"},
4618 {"tx buffers"},
4619 {"tx skbs no packing"},
4620 {"tx buffers no packing"},
4621 {"tx skbs packing"},
4622 {"tx buffers packing"},
4623 {"tx sg skbs"},
4624 {"tx sg frags"},
4625/* 10 */{"rx sg skbs"},
4626 {"rx sg frags"},
4627 {"rx sg page allocs"},
4628 {"tx large kbytes"},
4629 {"tx large count"},
4630 {"tx pk state ch n->p"},
4631 {"tx pk state ch p->n"},
4632 {"tx pk watermark low"},
4633 {"tx pk watermark high"},
4634 {"queue 0 buffer usage"},
4635/* 20 */{"queue 1 buffer usage"},
4636 {"queue 2 buffer usage"},
4637 {"queue 3 buffer usage"},
4638 {"rx poll time"},
4639 {"rx poll count"},
4640 {"rx do_QDIO time"},
4641 {"rx do_QDIO count"},
4642 {"tx handler time"},
4643 {"tx handler count"},
4644 {"tx time"},
4645/* 30 */{"tx count"},
4646 {"tx do_QDIO time"},
4647 {"tx do_QDIO count"},
4648 {"tx csum"},
4649 {"tx lin"},
4650};
4651
4652 int qeth_core_get_sset_count(struct net_device *dev, int stringset)
4653 {
4654 switch (stringset) {
4655 case ETH_SS_STATS:
4656 return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
4657 default:
4658 return -EINVAL;
4659 }
4660 }
4661 EXPORT_SYMBOL_GPL(qeth_core_get_sset_count);
4662
4663void qeth_core_get_ethtool_stats(struct net_device *dev,
4664 struct ethtool_stats *stats, u64 *data)
4665{
4666 struct qeth_card *card = dev->ml_priv;
4667 data[0] = card->stats.rx_packets -
4668 card->perf_stats.initial_rx_packets;
4669 data[1] = card->perf_stats.bufs_rec;
4670 data[2] = card->stats.tx_packets -
4671 card->perf_stats.initial_tx_packets;
4672 data[3] = card->perf_stats.bufs_sent;
4673 data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets
4674 - card->perf_stats.skbs_sent_pack;
4675 data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack;
4676 data[6] = card->perf_stats.skbs_sent_pack;
4677 data[7] = card->perf_stats.bufs_sent_pack;
4678 data[8] = card->perf_stats.sg_skbs_sent;
4679 data[9] = card->perf_stats.sg_frags_sent;
4680 data[10] = card->perf_stats.sg_skbs_rx;
4681 data[11] = card->perf_stats.sg_frags_rx;
4682 data[12] = card->perf_stats.sg_alloc_page_rx;
4683 data[13] = (card->perf_stats.large_send_bytes >> 10);
4684 data[14] = card->perf_stats.large_send_cnt;
4685 data[15] = card->perf_stats.sc_dp_p;
4686 data[16] = card->perf_stats.sc_p_dp;
4687 data[17] = QETH_LOW_WATERMARK_PACK;
4688 data[18] = QETH_HIGH_WATERMARK_PACK;
4689 data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers);
4690 data[20] = (card->qdio.no_out_queues > 1) ?
4691 atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0;
4692 data[21] = (card->qdio.no_out_queues > 2) ?
4693 atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0;
4694 data[22] = (card->qdio.no_out_queues > 3) ?
4695 atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0;
4696 data[23] = card->perf_stats.inbound_time;
4697 data[24] = card->perf_stats.inbound_cnt;
4698 data[25] = card->perf_stats.inbound_do_qdio_time;
4699 data[26] = card->perf_stats.inbound_do_qdio_cnt;
4700 data[27] = card->perf_stats.outbound_handler_time;
4701 data[28] = card->perf_stats.outbound_handler_cnt;
4702 data[29] = card->perf_stats.outbound_time;
4703 data[30] = card->perf_stats.outbound_cnt;
4704 data[31] = card->perf_stats.outbound_do_qdio_time;
4705 data[32] = card->perf_stats.outbound_do_qdio_cnt;
4706 data[33] = card->perf_stats.tx_csum;
4707 data[34] = card->perf_stats.tx_lin;
4708}
4709EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
4710
4711void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4712{
4713 switch (stringset) {
4714 case ETH_SS_STATS:
4715 memcpy(data, &qeth_ethtool_stats_keys,
4716 sizeof(qeth_ethtool_stats_keys));
4717 break;
4718 default:
4719 WARN_ON(1);
4720 break;
4721 }
4722}
4723EXPORT_SYMBOL_GPL(qeth_core_get_strings);
4724
4725void qeth_core_get_drvinfo(struct net_device *dev,
4726 struct ethtool_drvinfo *info)
4727{
4728 struct qeth_card *card = dev->ml_priv;
4729 if (card->options.layer2)
4730 strcpy(info->driver, "qeth_l2");
4731 else
4732 strcpy(info->driver, "qeth_l3");
4733
4734 strcpy(info->version, "1.0");
4735 strcpy(info->fw_version, card->info.mcl_level);
4736 sprintf(info->bus_info, "%s/%s/%s",
4737 CARD_RDEV_ID(card),
4738 CARD_WDEV_ID(card),
4739 CARD_DDEV_ID(card));
4740}
4741EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
4742
4743int qeth_core_ethtool_get_settings(struct net_device *netdev,
4744 struct ethtool_cmd *ecmd)
4745{
4746 struct qeth_card *card = netdev->ml_priv;
4747 enum qeth_link_types link_type;
4748
4749 if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
4750 link_type = QETH_LINK_TYPE_10GBIT_ETH;
4751 else
4752 link_type = card->info.link_type;
4753
4754 ecmd->transceiver = XCVR_INTERNAL;
4755 ecmd->supported = SUPPORTED_Autoneg;
4756 ecmd->advertising = ADVERTISED_Autoneg;
4757 ecmd->duplex = DUPLEX_FULL;
4758 ecmd->autoneg = AUTONEG_ENABLE;
4759
4760 switch (link_type) {
4761 case QETH_LINK_TYPE_FAST_ETH:
4762 case QETH_LINK_TYPE_LANE_ETH100:
4763 ecmd->supported |= SUPPORTED_10baseT_Half |
4764 SUPPORTED_10baseT_Full |
4765 SUPPORTED_100baseT_Half |
4766 SUPPORTED_100baseT_Full |
4767 SUPPORTED_TP;
4768 ecmd->advertising |= ADVERTISED_10baseT_Half |
4769 ADVERTISED_10baseT_Full |
4770 ADVERTISED_100baseT_Half |
4771 ADVERTISED_100baseT_Full |
4772 ADVERTISED_TP;
4773 ecmd->speed = SPEED_100;
4774 ecmd->port = PORT_TP;
4775 break;
4776
4777 case QETH_LINK_TYPE_GBIT_ETH:
4778 case QETH_LINK_TYPE_LANE_ETH1000:
4779 ecmd->supported |= SUPPORTED_10baseT_Half |
4780 SUPPORTED_10baseT_Full |
4781 SUPPORTED_100baseT_Half |
4782 SUPPORTED_100baseT_Full |
4783 SUPPORTED_1000baseT_Half |
4784 SUPPORTED_1000baseT_Full |
4785 SUPPORTED_FIBRE;
4786 ecmd->advertising |= ADVERTISED_10baseT_Half |
4787 ADVERTISED_10baseT_Full |
4788 ADVERTISED_100baseT_Half |
4789 ADVERTISED_100baseT_Full |
4790 ADVERTISED_1000baseT_Half |
4791 ADVERTISED_1000baseT_Full |
4792 ADVERTISED_FIBRE;
4793 ecmd->speed = SPEED_1000;
4794 ecmd->port = PORT_FIBRE;
4795 break;
4796
4797 case QETH_LINK_TYPE_10GBIT_ETH:
4798 ecmd->supported |= SUPPORTED_10baseT_Half |
4799 SUPPORTED_10baseT_Full |
4800 SUPPORTED_100baseT_Half |
4801 SUPPORTED_100baseT_Full |
4802 SUPPORTED_1000baseT_Half |
4803 SUPPORTED_1000baseT_Full |
4804 SUPPORTED_10000baseT_Full |
4805 SUPPORTED_FIBRE;
4806 ecmd->advertising |= ADVERTISED_10baseT_Half |
4807 ADVERTISED_10baseT_Full |
4808 ADVERTISED_100baseT_Half |
4809 ADVERTISED_100baseT_Full |
4810 ADVERTISED_1000baseT_Half |
4811 ADVERTISED_1000baseT_Full |
4812 ADVERTISED_10000baseT_Full |
4813 ADVERTISED_FIBRE;
4814 ecmd->speed = SPEED_10000;
4815 ecmd->port = PORT_FIBRE;
4816 break;
4817
4818 default:
4819 ecmd->supported |= SUPPORTED_10baseT_Half |
4820 SUPPORTED_10baseT_Full |
4821 SUPPORTED_TP;
4822 ecmd->advertising |= ADVERTISED_10baseT_Half |
4823 ADVERTISED_10baseT_Full |
4824 ADVERTISED_TP;
4825 ecmd->speed = SPEED_10;
4826 ecmd->port = PORT_TP;
4827 }
4828
4829 return 0;
4830}
4831EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
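/*
 * Illustrative sketch only: the ethtool helpers exported above are meant to
 * be assembled into a discipline's ethtool_ops table, for example:
 *
 *	static const struct ethtool_ops qeth_xx_ethtool_ops = {
 *		.get_settings		= qeth_core_ethtool_get_settings,
 *		.get_drvinfo		= qeth_core_get_drvinfo,
 *		.get_strings		= qeth_core_get_strings,
 *		.get_ethtool_stats	= qeth_core_get_ethtool_stats,
 *		.get_sset_count		= qeth_core_get_sset_count,
 *	};
 *
 * (qeth_xx_ethtool_ops is a placeholder name.)
 */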
4832
4833static int __init qeth_core_init(void)
4834{
4835 int rc;
4836
4837 pr_info("loading core functions\n");
4838 INIT_LIST_HEAD(&qeth_core_card_list.list);
4839 rwlock_init(&qeth_core_card_list.rwlock);
4840
4841 rc = qeth_register_dbf_views();
4842 if (rc)
4843 goto out_err;
4844 rc = ccw_driver_register(&qeth_ccw_driver);
4845 if (rc)
4846 goto ccw_err;
4847 rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
4848 if (rc)
4849 goto ccwgroup_err;
4850 rc = driver_create_file(&qeth_core_ccwgroup_driver.driver,
4851 &driver_attr_group);
4852 if (rc)
4853 goto driver_err;
4854 qeth_core_root_dev = root_device_register("qeth");
4855 rc = IS_ERR(qeth_core_root_dev) ? PTR_ERR(qeth_core_root_dev) : 0;
4856 if (rc)
4857 goto register_err;
4858
4859 qeth_core_header_cache = kmem_cache_create("qeth_hdr",
4860 sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL);
4861 if (!qeth_core_header_cache) {
4862 rc = -ENOMEM;
4863 goto slab_err;
4864 }
4865
4866 return 0;
4867slab_err:
4868 root_device_unregister(qeth_core_root_dev);
4869register_err:
4870 driver_remove_file(&qeth_core_ccwgroup_driver.driver,
4871 &driver_attr_group);
4872driver_err:
4873 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
4874ccwgroup_err:
4875 ccw_driver_unregister(&qeth_ccw_driver);
4876ccw_err:
4877 QETH_DBF_MESSAGE(2, "Initialization failed with code %d\n", rc);
4878 qeth_unregister_dbf_views();
4879out_err:
4880 pr_err("Initializing the qeth device driver failed\n");
4881 return rc;
4882}
4883
4884static void __exit qeth_core_exit(void)
4885{
4886 root_device_unregister(qeth_core_root_dev);
4887 driver_remove_file(&qeth_core_ccwgroup_driver.driver,
4888 &driver_attr_group);
4889 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
4890 ccw_driver_unregister(&qeth_ccw_driver);
4891 kmem_cache_destroy(qeth_core_header_cache);
4892 qeth_unregister_dbf_views();
4893 pr_info("core functions removed\n");
4894}
4895
4896module_init(qeth_core_init);
4897module_exit(qeth_core_exit);
4898MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
4899MODULE_DESCRIPTION("qeth core functions");
4900MODULE_LICENSE("GPL");