// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/io.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1, 8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

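/* Note (illustrative, not part of the upstream file): these are s390 debug
 * feature (s390dbf) areas; the QETH_CARD_TEXT()/QETH_DBF_MESSAGE() macros
 * used throughout this file log into them. On a debugfs-enabled system the
 * traces typically appear under /sys/kernel/debug/s390dbf/, e.g.:
 *
 *	cat /sys/kernel/debug/s390dbf/qeth_msg/sprintf
 *	echo 5 > /sys/kernel/debug/s390dbf/qeth_msg/level
 */
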
struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification);
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);

static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	ccwgroup_set_offline(card->gdev);
}

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return " OSN QDIO";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return "OSN";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			      int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

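/* Usage sketch (illustrative only): the three thread masks implement a small
 * handshake for the recovery thread. A typical sequence elsewhere in this
 * file looks like:
 *
 *	qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 0);
 *	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
 *		schedule_work(&card->kernel_thread_starter);
 *	// ... and in the spawned thread:
 *	if (qeth_do_run_thread(card, QETH_RECOVER_THREAD) > 0) {
 *		// do the recovery work
 *		qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
 *	}
 */
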
int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
			    &card->qdio.in_buf_pool.entry_list, list){
		list_del(&pool_entry->list);
	}
}
EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry;
	void *ptr;
	int i, j;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
		if (!pool_entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}
		for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
			ptr = (void *) __get_free_page(GFP_KERNEL);
			if (!ptr) {
				while (j > 0)
					free_page((unsigned long)
						  pool_entry->elements[--j]);
				kfree(pool_entry);
				qeth_free_buffer_pool(card);
				return -ENOMEM;
			}
			pool_entry->elements[j] = ptr;
		}
		list_add(&pool_entry->init_list,
			 &card->qdio.init_pool.entry_list);
	}
	return 0;
}

int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
	QETH_CARD_TEXT(card, 2, "realcbp");

	if (card->state != CARD_STATE_DOWN)
		return -EPERM;

	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
	qeth_clear_working_pool_list(card);
	qeth_free_buffer_pool(card);
	card->qdio.in_buf_pool.buf_count = bufcnt;
	card->qdio.init_pool.buf_count = bufcnt;
	return qeth_alloc_buffer_pool(card);
}
EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0,
			     127);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		int i;
		struct qdio_outbuf_state *outbuf_states;

		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			rc = -ENOMEM;
			goto kmsg_out;
		}
		card->qdio.no_in_queues = 2;
		card->qdio.out_bufstates =
			kcalloc(card->qdio.no_out_queues *
					QDIO_MAX_BUFFERS_PER_Q,
				sizeof(struct qdio_outbuf_state),
				GFP_KERNEL);
		outbuf_states = card->qdio.out_bufstates;
		if (outbuf_states == NULL) {
			rc = -ENOMEM;
			goto free_cq_out;
		}
		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			card->qdio.out_qs[i]->bufstates = outbuf_states;
			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
		}
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
	rc = 0;
out:
	return rc;
free_cq_out:
	qeth_free_qdio_queue(card->qdio.c_q);
	card->qdio.c_q = NULL;
kmsg_out:
	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
	goto out;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
	kfree(card->qdio.out_bufstates);
	card->qdio.out_bufstates = NULL;
}

static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
					 int forced_cleanup)
{
	if (q->card->options.cq != QETH_CQ_ENABLED)
		return;

	if (q->bufs[bidx]->next_pending != NULL) {
		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;

		while (c) {
			if (forced_cleanup ||
			    atomic_read(&c->state) ==
			      QETH_QDIO_BUF_HANDLED_DELAYED) {
				struct qeth_qdio_out_buffer *f = c;
				QETH_CARD_TEXT(f->q->card, 5, "fp");
				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
				/* release here to avoid interleaving between
				   outbound tasklet and inbound tasklet
				   regarding notifications and lifecycle */
				qeth_tx_complete_buf(c, forced_cleanup, 0);

				c = f->next_pending;
				WARN_ON_ONCE(head->next_pending != f);
				head->next_pending = c;
				kmem_cache_free(qeth_qdio_outbuf_cache, f);
			} else {
				head = c;
				c = c->next_pending;
			}
		}
	}
	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
					QETH_QDIO_BUF_HANDLED_DELAYED)) {
		/* for recovery situations */
		qeth_init_qdio_out_buf(q, bidx);
		QETH_CARD_TEXT(q->card, 2, "clprecov");
	}
}

static void qeth_qdio_handle_aob(struct qeth_card *card,
				 unsigned long phys_aob_addr)
{
	struct qaob *aob;
	struct qeth_qdio_out_buffer *buffer;
	enum iucv_tx_notify notification;
	unsigned int i;

	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
	QETH_CARD_TEXT(card, 5, "haob");
	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);

	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
		notification = TX_NOTIFY_OK;
	} else {
		WARN_ON_ONCE(atomic_read(&buffer->state) !=
							QETH_QDIO_BUF_PENDING);
		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
		notification = TX_NOTIFY_DELAYED_OK;
	}

	if (aob->aorc != 0) {
		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
		notification = qeth_compute_cq_notification(aob->aorc, 1);
	}
	qeth_notify_skbs(buffer->q, buffer, notification);

	/* Free dangling allocations. The attached skbs are handled by
	 * qeth_cleanup_handled_pending().
	 */
	for (i = 0;
	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
	     i++) {
		if (aob->sba[i] && buffer->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
					(void *) aob->sba[i]);
	}
	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);

	qdio_release_aob(aob);
}

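/* Buffer life cycle handled above (sketch, informative): a TX buffer whose
 * completion is deferred moves PRIMED -> PENDING -> IN_CQ ->
 * HANDLED_DELAYED. The AOB delivered via the completion queue carries the
 * final status; the attached skbs are notified here, and the shadow buffer
 * itself is reclaimed later by qeth_cleanup_handled_pending() on the
 * outbound path.
 */
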
static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
{
	return card->options.cq == QETH_CQ_ENABLED &&
		card->qdio.c_q != NULL &&
		queue != 0 &&
		queue == card->qdio.no_in_queues - 1;
}

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (rc) {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		atomic_set(&channel->irq_pending, 0);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
		wake_up(&card->wait_q);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list);
	spin_unlock_irq(&card->lock);
}

void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}
EXPORT_SYMBOL_GPL(qeth_notify_cmd);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
			       struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SETCCID &&
		    cmd->hdr.command != IPA_CMD_DELCCID &&
		    cmd->hdr.command != IPA_CMD_MODCCID &&
		    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
				QETH_CARD_IFNAME(card));
			schedule_work(&card->close_dev_work);
		} else {
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 QETH_CARD_IFNAME(card), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 QETH_CARD_IFNAME(card), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_MODCCID:
		return cmd;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list)
		qeth_notify_cmd(iob, -EIO);
	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);

static int qeth_check_idx_response(struct qeth_card *card,
				   unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & 0xc0) == 0xc0) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
		if (buffer[4] == 0xf6) {
			dev_err(&card->gdev->dev,
				"The qeth device is not configured "
				"for the OSI layer required by z/VM\n");
			return -EPERM;
		}
		return -EIO;
	}
	return 0;
}

void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
EXPORT_SYMBOL_GPL(qeth_put_cmd);

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
				       unsigned int length, unsigned int ccws,
				       long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	INIT_LIST_HEAD(&iob->list);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_alloc_cmd);

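/* Typical call pattern for qeth_alloc_cmd() in this file (sketch only; the
 * callback name is hypothetical):
 *
 *	iob = qeth_alloc_cmd(&card->write, len, 1, QETH_TIMEOUT);
 *	if (!iob)
 *		return -ENOMEM;
 *	memcpy(iob->data, template, len);
 *	iob->callback = some_completion_cb;
 *	rc = qeth_send_control_data(card, iob, NULL, NULL);
 *
 * The embedded refcount keeps the iob alive until both the requestor and
 * the IRQ-side callback have dropped their references via qeth_put_cmd().
 */
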
static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
		/* fall through */
	default:
		goto out;
	}

	if (IS_IPA(iob->data)) {
		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
		if (IS_OSN(card) && card->osn_info.assist_cb &&
		    cmd->hdr.command != IPA_CMD_STARTLAN) {
			card->osn_info.assist_cb(card->dev, cmd);
			goto out;
		}
	} else {
		/* non-IPA commands should only flow during initialization */
		if (card->state != CARD_STATE_DOWN)
			goto out;
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
		if (!IS_IPA(tmp->data) ||
		    __ipa_cmd(tmp)->hdr.seqno == cmd->hdr.seqno) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	qeth_put_cmd(iob);
	__qeth_issue_next_read(card);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
				     unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread) ||
	      (card->thread_start_mask & thread)) {
		spin_unlock_irqrestore(&card->thread_mask_lock, flags);
		return -EPERM;
	}
	card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return 0;
}

void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);

void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_run_thread);

void qeth_schedule_recovery(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "startrec");
	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
}
EXPORT_SYMBOL_GPL(qeth_schedule_recovery);

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
				16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, "  rc???");
		return PTR_ERR(irb);
	}
}

static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);
	if (!card)
		return;

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (qeth_intparm_is_iob(intparm))
		iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return;
	}

	atomic_set(&channel->irq_pending, 0);

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
		channel->state = CH_STATE_STOPPED;

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
		channel->state = CH_STATE_HALTED;

	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
		QETH_CARD_TEXT(card, 6, "clrchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}
	if (intparm == QETH_HALT_CHANNEL_PARM) {
		QETH_CARD_TEXT(card, 6, "hltchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			goto out;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			goto out;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}

out:
	wake_up(&card->wait_q);
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct qeth_qdio_out_q *queue = buf->q;
	struct sk_buff *skb;

	/* release may never happen from within CQ tasklet scope */
	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);

	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (error) {
			QETH_TXQ_STAT_ADD(queue, tx_errors, packets);
		} else {
			QETH_TXQ_STAT_ADD(queue, tx_packets, packets);
			QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
		atomic_dec(&queue->set_pci_flags_count);

	qeth_tx_complete_buf(buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		if (buf->buffer->element[i].addr && buf->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
				buf->buffer->element[i].addr);
		buf->is_header[i] = 0;
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;
		qeth_cleanup_handled_pending(q, j, 1);
		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}
EXPORT_SYMBOL_GPL(qeth_drain_output_queues);

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	int i = 0;

	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.init_pool.entry_list, init_list){
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
			free_page((unsigned long)pool_entry->elements[i]);
		list_del(&pool_entry->init_list);
		kfree(pool_entry);
	}
}

static void qeth_clean_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;

	QETH_DBF_TEXT(SETUP, 2, "freech");

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
}

static void qeth_setup_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;

	QETH_DBF_TEXT(SETUP, 2, "setupch");

	channel->state = CH_STATE_DOWN;
	atomic_set(&channel->irq_pending, 0);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));
}

static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int count = single ? 1 : card->dev->num_tx_queues;
	int rc;

	rtnl_lock();
	rc = netif_set_real_num_tx_queues(card->dev, count);
	rtnl_unlock();

	if (rc)
		return rc;

	if (card->qdio.no_out_queues == count)
		return 0;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (count == 1)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
	card->qdio.no_out_queues = count;
	return 0;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return rc;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.rx_sg_cb = QETH_RX_SG_CB;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(card->discipline->recover, (void *)card,
				"qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					      qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			   CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	qeth_setup_channel(&card->read);
	qeth_setup_channel(&card->write);
	qeth_setup_channel(&card->data);
	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	card->state = CARD_STATE_DOWN;
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);

static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card) || IS_OSN(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}

static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_init_tokens(struct qeth_card *card)
{
	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;
}

static void qeth_init_func_level(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSN:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

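/* Worked example (informative): for a local level of 0x4108 the low byte is
 * 8, so the expected peer level is 0x08 + 0x400 = 0x408. For 0x0101, bits
 * 8-9 equal 1, giving 0x01 + 0x200 = 0x201. Any other encoding is mirrored
 * back unchanged.
 */
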
static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  const void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	return iob;
}

/**
 * qeth_send_control_data() -	send control command to the card
 * @card:			qeth_card structure pointer
 * @iob:			qeth_cmd_buffer pointer
 * @reply_cb:			callback function pointer
 * @cb_card:			pointer to the qeth_card structure
 * @cb_reply:			pointer to the qeth_reply structure
 * @cb_cmd:			pointer to the original iob for non-IPA
 *				commands, or to the qeth_ipa_cmd structure
 *				for the IPA commands.
 * @reply_param:		private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */
static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}

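/* A minimal reply_cb sketch matching the contract documented above (names
 * are made up for illustration):
 *
 *	static int example_reply_cb(struct qeth_card *card,
 *				    struct qeth_reply *reply,
 *				    unsigned long data)
 *	{
 *		struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
 *
 *		*(u16 *)reply->param = cmd->hdr.return_code;
 *		return 0;	// 0: this was the last/only reply block
 *	}
 */
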
struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

	return qeth_send_control_data(card, iob, NULL, NULL);
}

static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}

static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxrdcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}

	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
	}

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	struct ccw_dev_id dev_id;

	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
	ccw_device_get_id(CARD_DDEV(card), &dev_id);
	iob->finalize = qeth_idx_finalize_cmd;

	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}

static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxread");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_read_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_idx_activate_write_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->write;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrite");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_write_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenblcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_enable(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenable");

	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
}

static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_setup(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetup");

	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
}

static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
{
	struct net_device *dev = card->dev;
	unsigned int new_mtu;

	if (!max_mtu) {
		/* IQD needs accurate max MTU to set up its RX buffers: */
		if (IS_IQD(card))
			return -EINVAL;
		/* tolerate quirky HW: */
		max_mtu = ETH_MAX_MTU;
	}

	rtnl_lock();
	if (IS_IQD(card)) {
		/* move any device with default MTU to new max MTU: */
		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;

		/* adjust RX buffer size to new max MTU: */
		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
		if (dev->max_mtu && dev->max_mtu != max_mtu)
			qeth_free_qdio_queues(card);
	} else {
		if (dev->mtu)
			new_mtu = dev->mtu;
		/* default MTUs for first setup: */
		else if (IS_LAYER2(card))
			new_mtu = ETH_DATA_LEN;
		else
			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
	}

	dev->max_mtu = max_mtu;
	dev->mtu = min(new_mtu, max_mtu);
	rtnl_unlock();
	return 0;
}

static int qeth_get_mtu_outof_framesize(int framesize)
{
	switch (framesize) {
	case 0x4000:
		return 8192;
	case 0x6000:
		return 16384;
	case 0xa000:
		return 32768;
	case 0xffff:
		return 57344;
	default:
		return 0;
	}
}

static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	__u16 mtu, framesize;
	__u16 len;
	__u8 link_type;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpenacb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (IS_IQD(card)) {
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
	} else {
		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
	}
	*(u16 *)reply->param = mtu;

	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
		card->info.link_type = link_type;
	} else
		card->info.link_type = 0;
	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
	return 0;
}

static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
	if (IS_OSN(card))
		return QETH_PROT_OSN2;
	return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
}

static int qeth_ulp_enable(struct qeth_card *card)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	struct qeth_cmd_buffer *iob;
	u16 max_mtu;
	int rc;

	QETH_CARD_TEXT(card, 2, "ulpenabl");

	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
	if (rc)
		return rc;
	return qeth_update_max_mtu(card, max_mtu);
}

static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpstpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
		     3)) {
		QETH_CARD_TEXT(card, 2, "olmlimit");
		dev_err(&card->gdev->dev, "A connection could not be "
			"established because of an OLM limit\n");
		return -EMLINK;
	}
	return 0;
}

static int qeth_ulp_setup(struct qeth_card *card)
{
	__u16 temp;
	struct qeth_cmd_buffer *iob;
	struct ccw_dev_id dev_id;

	QETH_CARD_TEXT(card, 2, "ulpsetup");

	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

	ccw_device_get_id(CARD_DDEV(card), &dev_id);
	memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
}

static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
{
	struct qeth_qdio_out_buffer *newbuf;

	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
	if (!newbuf)
		return -ENOMEM;

	newbuf->buffer = q->qdio_bufs[bidx];
	skb_queue_head_init(&newbuf->skb_list);
	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
	newbuf->q = q;
	newbuf->next_pending = q->bufs[bidx];
	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
	q->bufs[bidx] = newbuf;
	return 0;
}

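/* Sketch of the resulting shadow chain (informative): when a still-PENDING
 * buffer is replaced here, the fresh descriptor points at its predecessor
 * via ->next_pending. qeth_cleanup_handled_pending() later walks
 * q->bufs[bidx]->next_pending -> ... and frees each entry once it has
 * reached QETH_QDIO_BUF_HANDLED_DELAYED.
 */
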
static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
{
	if (!q)
		return;

	qeth_drain_output_queue(q, true);
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
{
	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}
	return q;
}

static void qeth_tx_completion_timer(struct timer_list *timer)
{
	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);

	napi_schedule(&queue->napi);
	QETH_TXQ_STAT_INC(queue, completion_timer);
}

static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
	int i, j;

	QETH_CARD_TEXT(card, 2, "allcqdbf");

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
		return 0;

	QETH_CARD_TEXT(card, 2, "inq");
	card->qdio.in_q = qeth_alloc_qdio_queue();
	if (!card->qdio.in_q)
		goto out_nomem;

	/* inbound buffer pool */
	if (qeth_alloc_buffer_pool(card))
		goto out_freeinq;

	/* outbound */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue;

		queue = qeth_alloc_output_queue();
		if (!queue)
			goto out_freeoutq;
		QETH_CARD_TEXT_(card, 2, "outq %i", i);
		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
		card->qdio.out_qs[i] = queue;
		queue->card = card;
		queue->queue_no = i;
		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);

		/* give outbound qeth_qdio_buffers their qdio_buffers */
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
			WARN_ON(queue->bufs[j]);
			if (qeth_init_qdio_out_buf(queue, j))
				goto out_freeoutqbufs;
		}
	}

	/* completion */
	if (qeth_alloc_cq(card))
		goto out_freeoutq;

	return 0;

out_freeoutqbufs:
	while (j > 0) {
		--j;
		kmem_cache_free(qeth_qdio_outbuf_cache,
				card->qdio.out_qs[i]->bufs[j]);
		card->qdio.out_qs[i]->bufs[j] = NULL;
	}
out_freeoutq:
	while (i > 0) {
		qeth_free_output_queue(card->qdio.out_qs[--i]);
		card->qdio.out_qs[i] = NULL;
	}
	qeth_free_buffer_pool(card);
out_freeinq:
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
out_nomem:
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	return -ENOMEM;
}

2366 static void qeth_free_qdio_queues(struct qeth_card *card)
2370 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2371 QETH_QDIO_UNINITIALIZED)
2375 cancel_delayed_work_sync(&card->buffer_reclaim_work);
2376 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2377 if (card->qdio.in_q->bufs[j].rx_skb)
2378 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
2380 qeth_free_qdio_queue(card->qdio.in_q);
2381 card->qdio.in_q = NULL;
2382 /* inbound buffer pool */
2383 qeth_free_buffer_pool(card);
2384 /* free outbound qdio_qs */
2385 for (i = 0; i < card->qdio.no_out_queues; i++) {
2386 qeth_free_output_queue(card->qdio.out_qs[i]);
2387 card->qdio.out_qs[i] = NULL;
2391 static void qeth_create_qib_param_field(struct qeth_card *card,
2395 param_field[0] = _ascebc['P'];
2396 param_field[1] = _ascebc['C'];
2397 param_field[2] = _ascebc['I'];
2398 param_field[3] = _ascebc['T'];
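/* the EBCDIC 'PCIT' tag is followed by three 32-bit PCI settings:
 * threshold A, threshold B and the PCI timer value */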
2399 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2400 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2401 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
2404 static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2407 param_field[16] = _ascebc['B'];
2408 param_field[17] = _ascebc['L'];
2409 param_field[18] = _ascebc['K'];
2410 param_field[19] = _ascebc['T'];
2411 *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
2412 *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
2413 *((unsigned int *) (&param_field[28])) =
2414 card->info.blkt.inter_packet_jumbo;
2417 static int qeth_qdio_activate(struct qeth_card *card)
2419 QETH_CARD_TEXT(card, 3, "qdioact");
2420 return qdio_activate(CARD_DDEV(card));
2423 static int qeth_dm_act(struct qeth_card *card)
2425 struct qeth_cmd_buffer *iob;
2427 QETH_CARD_TEXT(card, 2, "dmact");
2429 iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2433 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2434 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2435 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2436 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2437 return qeth_send_control_data(card, iob, NULL, NULL);
2440 static int qeth_mpc_initialize(struct qeth_card *card)
2444 QETH_CARD_TEXT(card, 2, "mpcinit");
2446 rc = qeth_issue_next_read(card);
2448 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2451 rc = qeth_cm_enable(card);
2453 QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2456 rc = qeth_cm_setup(card);
2458 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2461 rc = qeth_ulp_enable(card);
2463 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2466 rc = qeth_ulp_setup(card);
2468 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2471 rc = qeth_alloc_qdio_queues(card);
2473 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2476 rc = qeth_qdio_establish(card);
2478 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2479 qeth_free_qdio_queues(card);
2482 rc = qeth_qdio_activate(card);
2484 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2487 rc = qeth_dm_act(card);
2489 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2495 qeth_qdio_clear_card(card, !IS_IQD(card));
2496 qdio_free(CARD_DDEV(card));
2500 void qeth_print_status_message(struct qeth_card *card)
2502 switch (card->info.type) {
2503 case QETH_CARD_TYPE_OSD:
2504 case QETH_CARD_TYPE_OSM:
2505 case QETH_CARD_TYPE_OSX:
2506 /* VM will use a non-zero first character to indicate
2507 * a HiperSockets-like reporting of the level;
2508 * OSA sets the first character to zero.
2510 if (!card->info.mcl_level[0]) {
2511 sprintf(card->info.mcl_level, "%02x%02x",
2512 card->info.mcl_level[2],
2513 card->info.mcl_level[3]);
2517 case QETH_CARD_TYPE_IQD:
2518 if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
2519 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2520 card->info.mcl_level[0]];
2521 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2522 card->info.mcl_level[1]];
2523 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2524 card->info.mcl_level[2]];
2525 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2526 card->info.mcl_level[3]];
2527 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2531 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2533 dev_info(&card->gdev->dev,
2534 "Device is a%s card%s%s%s\nwith link type %s.\n",
2535 qeth_get_cardname(card),
2536 (card->info.mcl_level[0]) ? " (level: " : "",
2537 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2538 (card->info.mcl_level[0]) ? ")" : "",
2539 qeth_get_cardname_short(card));
2541 EXPORT_SYMBOL_GPL(qeth_print_status_message);
2543 static void qeth_initialize_working_pool_list(struct qeth_card *card)
2545 struct qeth_buffer_pool_entry *entry;
2547 QETH_CARD_TEXT(card, 5, "inwrklst");
2549 list_for_each_entry(entry,
2550 &card->qdio.init_pool.entry_list, init_list) {
2551 qeth_put_buffer_pool_entry(card, entry);
2555 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2556 struct qeth_card *card)
2558 struct list_head *plh;
2559 struct qeth_buffer_pool_entry *entry;
2563 if (list_empty(&card->qdio.in_buf_pool.entry_list))
2566 list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
2567 entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
2569 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2570 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2576 list_del_init(&entry->list);
2581 /* no free buffer in pool so take first one and swap pages */
2582 entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2583 struct qeth_buffer_pool_entry, list);
2584 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2585 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2586 page = alloc_page(GFP_ATOMIC);
2590 free_page((unsigned long)entry->elements[i]);
2591 entry->elements[i] = page_address(page);
2592 QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2596 list_del_init(&entry->list);
2600 static int qeth_init_input_buffer(struct qeth_card *card,
2601 struct qeth_qdio_buffer *buf)
2603 struct qeth_buffer_pool_entry *pool_entry;
2606 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2607 buf->rx_skb = netdev_alloc_skb(card->dev,
2608 QETH_RX_PULL_LEN + ETH_HLEN);
2613 pool_entry = qeth_find_free_buffer_pool_entry(card);
2618 * since the buffer is accessed only from the input_tasklet
2619 * there shouldn't be a need to synchronize; also, since we use
2620 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2624 buf->pool_entry = pool_entry;
2625 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2626 buf->buffer->element[i].length = PAGE_SIZE;
2627 buf->buffer->element[i].addr = pool_entry->elements[i];
2628 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2629 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2631 buf->buffer->element[i].eflags = 0;
2632 buf->buffer->element[i].sflags = 0;
2637 int qeth_init_qdio_queues(struct qeth_card *card)
2642 QETH_CARD_TEXT(card, 2, "initqdqs");
2645 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2646 memset(&card->rx, 0, sizeof(struct qeth_rx));
2647 qeth_initialize_working_pool_list(card);
2648 /* give only as many buffers to hardware as we have buffer pool entries */
2649 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
2650 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
2651 card->qdio.in_q->next_buf_to_init =
2652 card->qdio.in_buf_pool.buf_count - 1;
2653 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
2654 card->qdio.in_buf_pool.buf_count - 1);
2656 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2661 rc = qeth_cq_init(card);
2666 /* outbound queue */
2667 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2668 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
2670 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2671 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
2672 queue->next_buf_to_fill = 0;
2674 queue->prev_hdr = NULL;
2675 queue->bulk_start = 0;
2676 atomic_set(&queue->used_buffers, 0);
2677 atomic_set(&queue->set_pci_flags_count, 0);
2678 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
2679 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
2683 EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
2685 static void qeth_ipa_finalize_cmd(struct qeth_card *card,
2686 struct qeth_cmd_buffer *iob)
2688 qeth_mpc_finalize_cmd(card, iob);
2690 /* override with IPA-specific values: */
2691 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
2694 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2697 u8 prot_type = qeth_mpc_select_prot_type(card);
2698 u16 total_length = iob->length;
2700 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
2702 iob->finalize = qeth_ipa_finalize_cmd;
2704 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
2705 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
2706 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
2707 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
2708 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
2709 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
2710 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2711 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
2713 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
2715 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
2716 enum qeth_ipa_cmds cmd_code,
2717 enum qeth_prot_versions prot,
2718 unsigned int data_length)
2720 enum qeth_link_types link_type = card->info.link_type;
2721 struct qeth_cmd_buffer *iob;
2722 struct qeth_ipacmd_hdr *hdr;
2724 data_length += offsetof(struct qeth_ipa_cmd, data);
2725 iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
2730 qeth_prepare_ipa_cmd(card, iob, data_length);
2732 hdr = &__ipa_cmd(iob)->hdr;
2733 hdr->command = cmd_code;
2734 hdr->initiator = IPA_CMD_INITIATOR_HOST;
2735 /* hdr->seqno is set by qeth_send_control_data() */
2736 hdr->adapter_type = (link_type == QETH_LINK_TYPE_HSTR) ? 2 : 1;
2737 hdr->rel_adapter_no = (u8) card->dev->dev_port;
2738 hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
2739 hdr->param_count = 1;
2740 hdr->prot_version = prot;
2743 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
2745 static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
2746 struct qeth_reply *reply, unsigned long data)
2748 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2750 return (cmd->hdr.return_code) ? -EIO : 0;
2754 * qeth_send_ipa_cmd() - send an IPA command
2756 * See qeth_send_control_data() for explanation of the arguments.
2759 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2760 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
2766 QETH_CARD_TEXT(card, 4, "sendipa");
2768 if (card->read_or_write_problem) {
2773 if (reply_cb == NULL)
2774 reply_cb = qeth_send_ipa_cmd_cb;
2775 rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
2777 qeth_clear_ipacmd_list(card);
2778 qeth_schedule_recovery(card);
2782 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
2784 static int qeth_send_startlan_cb(struct qeth_card *card,
2785 struct qeth_reply *reply, unsigned long data)
2787 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2789 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
2792 return (cmd->hdr.return_code) ? -EIO : 0;
2795 static int qeth_send_startlan(struct qeth_card *card)
2797 struct qeth_cmd_buffer *iob;
2799 QETH_CARD_TEXT(card, 2, "strtlan");
2801 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
2804 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
2807 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
2809 if (!cmd->hdr.return_code)
2810 cmd->hdr.return_code =
2811 cmd->data.setadapterparms.hdr.return_code;
2812 return cmd->hdr.return_code;
2815 static int qeth_query_setadapterparms_cb(struct qeth_card *card,
2816 struct qeth_reply *reply, unsigned long data)
2818 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2820 QETH_CARD_TEXT(card, 3, "quyadpcb");
2821 if (qeth_setadpparms_inspect_rc(cmd))
2824 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
2825 card->info.link_type =
2826 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
2827 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
2829 card->options.adp.supported_funcs =
2830 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
2834 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
2835 enum qeth_ipa_setadp_cmd adp_cmd,
2836 unsigned int data_length)
2838 struct qeth_ipacmd_setadpparms_hdr *hdr;
2839 struct qeth_cmd_buffer *iob;
2841 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
2843 offsetof(struct qeth_ipacmd_setadpparms,
2848 hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
2849 hdr->cmdlength = sizeof(*hdr) + data_length;
2850 hdr->command_code = adp_cmd;
2851 hdr->used_total = 1;
2856 static int qeth_query_setadapterparms(struct qeth_card *card)
2859 struct qeth_cmd_buffer *iob;
2861 QETH_CARD_TEXT(card, 3, "queryadp");
2862 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
2863 SETADP_DATA_SIZEOF(query_cmds_supp));
2866 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
2870 static int qeth_query_ipassists_cb(struct qeth_card *card,
2871 struct qeth_reply *reply, unsigned long data)
2873 struct qeth_ipa_cmd *cmd;
2875 QETH_CARD_TEXT(card, 2, "qipasscb");
2877 cmd = (struct qeth_ipa_cmd *) data;
2879 switch (cmd->hdr.return_code) {
2880 case IPA_RC_SUCCESS:
2882 case IPA_RC_NOTSUPP:
2883 case IPA_RC_L2_UNSUPPORTED_CMD:
2884 QETH_CARD_TEXT(card, 2, "ipaunsup");
2885 card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
2886 card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
2889 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
2890 CARD_DEVID(card), cmd->hdr.return_code);
2894 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
2895 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
2896 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
2897 } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
2898 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
2899 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
2901 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
2906 static int qeth_query_ipassists(struct qeth_card *card,
2907 enum qeth_prot_versions prot)
2910 struct qeth_cmd_buffer *iob;
2912 QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
2913 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
2916 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
2920 static int qeth_query_switch_attributes_cb(struct qeth_card *card,
2921 struct qeth_reply *reply, unsigned long data)
2923 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2924 struct qeth_query_switch_attributes *attrs;
2925 struct qeth_switch_info *sw_info;
2927 QETH_CARD_TEXT(card, 2, "qswiatcb");
2928 if (qeth_setadpparms_inspect_rc(cmd))
2931 sw_info = (struct qeth_switch_info *)reply->param;
2932 attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
2933 sw_info->capabilities = attrs->capabilities;
2934 sw_info->settings = attrs->settings;
2935 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
2940 int qeth_query_switch_attributes(struct qeth_card *card,
2941 struct qeth_switch_info *sw_info)
2943 struct qeth_cmd_buffer *iob;
2945 QETH_CARD_TEXT(card, 2, "qswiattr");
2946 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
2948 if (!netif_carrier_ok(card->dev))
2950 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
2953 return qeth_send_ipa_cmd(card, iob,
2954 qeth_query_switch_attributes_cb, sw_info);
2957 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
2958 enum qeth_diags_cmds sub_cmd,
2959 unsigned int data_length)
2961 struct qeth_ipacmd_diagass *cmd;
2962 struct qeth_cmd_buffer *iob;
2964 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
2965 DIAG_HDR_LEN + data_length);
2969 cmd = &__ipa_cmd(iob)->data.diagass;
2970 cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
2971 cmd->subcmd = sub_cmd;
2974 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
2976 static int qeth_query_setdiagass_cb(struct qeth_card *card,
2977 struct qeth_reply *reply, unsigned long data)
2979 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2980 u16 rc = cmd->hdr.return_code;
2983 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
2987 card->info.diagass_support = cmd->data.diagass.ext;
2991 static int qeth_query_setdiagass(struct qeth_card *card)
2993 struct qeth_cmd_buffer *iob;
2995 QETH_CARD_TEXT(card, 2, "qdiagass");
2996 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
2999 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
3002 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3004 unsigned long info = get_zeroed_page(GFP_KERNEL);
3005 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3006 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3007 struct ccw_dev_id ccwid;
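/* identify this system to the trap consumer: CHPID plus subchannel
 * ssid/devno, and below the LPAR number and VM guest name via STSI */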
3010 tid->chpid = card->info.chpid;
3011 ccw_device_get_id(CARD_RDEV(card), &ccwid);
3012 tid->ssid = ccwid.ssid;
3013 tid->devno = ccwid.devno;
3016 level = stsi(NULL, 0, 0, 0);
3017 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3018 tid->lparnr = info222->lpar_number;
3019 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3020 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3021 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3027 static int qeth_hw_trap_cb(struct qeth_card *card,
3028 struct qeth_reply *reply, unsigned long data)
3030 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3031 u16 rc = cmd->hdr.return_code;
3034 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3040 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3042 struct qeth_cmd_buffer *iob;
3043 struct qeth_ipa_cmd *cmd;
3045 QETH_CARD_TEXT(card, 2, "diagtrap");
3046 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3049 cmd = __ipa_cmd(iob);
3050 cmd->data.diagass.type = 1;
3051 cmd->data.diagass.action = action;
3053 case QETH_DIAGS_TRAP_ARM:
3054 cmd->data.diagass.options = 0x0003;
3055 cmd->data.diagass.ext = 0x00010000 +
3056 sizeof(struct qeth_trap_id);
3057 qeth_get_trap_id(card,
3058 (struct qeth_trap_id *)cmd->data.diagass.cdata);
3060 case QETH_DIAGS_TRAP_DISARM:
3061 cmd->data.diagass.options = 0x0001;
3063 case QETH_DIAGS_TRAP_CAPTURE:
3066 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3068 EXPORT_SYMBOL_GPL(qeth_hw_trap);
3070 static int qeth_check_qdio_errors(struct qeth_card *card,
3071 struct qdio_buffer *buf,
3072 unsigned int qdio_error,
3073 const char *dbftext)
3076 QETH_CARD_TEXT(card, 2, dbftext);
3077 QETH_CARD_TEXT_(card, 2, " F15=%02X",
3078 buf->element[15].sflags);
3079 QETH_CARD_TEXT_(card, 2, " F14=%02X",
3080 buf->element[14].sflags);
3081 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3082 if ((buf->element[15].sflags) == 0x12) {
3083 QETH_CARD_STAT_INC(card, rx_dropped);
3091 static void qeth_queue_input_buffer(struct qeth_card *card, int index)
3093 struct qeth_qdio_q *queue = card->qdio.in_q;
3094 struct list_head *lh;
3100 count = (index < queue->next_buf_to_init)?
3101 card->qdio.in_buf_pool.buf_count -
3102 (queue->next_buf_to_init - index) :
3103 card->qdio.in_buf_pool.buf_count -
3104 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
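/* (next_buf_to_init - index) mod 128 buffers are still queued to the
 * hardware; 'count' is what it takes to top the queue back up to
 * buf_count buffers */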
3105 /* only requeue at a certain threshold to avoid SIGAs */
3106 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3107 for (i = queue->next_buf_to_init;
3108 i < queue->next_buf_to_init + count; ++i) {
3109 if (qeth_init_input_buffer(card,
3110 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
3117 if (newcount < count) {
3118 /* we are in a memory shortage, so we switch back to
3119 traditional skb allocation and drop packets */
3120 atomic_set(&card->force_alloc_skb, 3);
3123 atomic_add_unless(&card->force_alloc_skb, -1, 0);
3128 list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3130 if (i == card->qdio.in_buf_pool.buf_count) {
3131 QETH_CARD_TEXT(card, 2, "qsarbw");
3132 card->reclaim_index = index;
3133 schedule_delayed_work(
3134 &card->buffer_reclaim_work,
3135 QETH_RECLAIM_WORK_TIME);
3141 * following the old code, we avoid requeueing all
3142 * 128 buffers in order to benefit from PCI avoidance.
3143 * this function keeps at least one buffer (the buffer at
3144 * 'index') un-requeued -> this buffer is the first buffer that
3145 * will be requeued the next time
3147 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3148 queue->next_buf_to_init, count);
3150 QETH_CARD_TEXT(card, 2, "qinberr");
3152 queue->next_buf_to_init = (queue->next_buf_to_init + count) %
3153 QDIO_MAX_BUFFERS_PER_Q;
3157 static void qeth_buffer_reclaim_work(struct work_struct *work)
3159 struct qeth_card *card = container_of(work, struct qeth_card,
3160 buffer_reclaim_work.work);
3162 QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
3163 qeth_queue_input_buffer(card, card->reclaim_index);
3166 static void qeth_handle_send_error(struct qeth_card *card,
3167 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3169 int sbalf15 = buffer->buffer->element[15].sflags;
3171 QETH_CARD_TEXT(card, 6, "hdsnderr");
3172 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3177 if ((sbalf15 >= 15) && (sbalf15 <= 31))
3180 QETH_CARD_TEXT(card, 1, "lnkfail");
3181 QETH_CARD_TEXT_(card, 1, "%04x %02x",
3182 (u16)qdio_err, (u8)sbalf15);
3186 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3187 * @queue: queue to check for packing buffer
3189 * Returns number of buffers that were prepared for flush.
3191 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3193 struct qeth_qdio_out_buffer *buffer;
3195 buffer = queue->bufs[queue->next_buf_to_fill];
3196 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3197 (buffer->next_element_to_fill > 0)) {
3198 /* it's a packing buffer */
3199 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3200 queue->next_buf_to_fill =
3201 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
3208 * Switches to packing state if the number of used buffers on a queue
3209 * reaches a certain limit.
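* The high watermark (switch to packing) and the low watermark (switch
* back, see below) are kept apart on purpose, so that the queue does
* not flip-flop between the two modes under a steady load.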
3211 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3213 if (!queue->do_pack) {
3214 if (atomic_read(&queue->used_buffers)
3215 >= QETH_HIGH_WATERMARK_PACK){
3216 /* switch non-PACKING -> PACKING */
3217 QETH_CARD_TEXT(queue->card, 6, "np->pack");
3218 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3225 * Switches from packing to non-packing mode. If there is a packing
3226 * buffer on the queue this buffer will be prepared to be flushed.
3227 * In that case 1 is returned to inform the caller. If no buffer
3228 * has to be flushed, zero is returned.
3230 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3232 if (queue->do_pack) {
3233 if (atomic_read(&queue->used_buffers)
3234 <= QETH_LOW_WATERMARK_PACK) {
3235 /* switch PACKING -> non-PACKING */
3236 QETH_CARD_TEXT(queue->card, 6, "pack->np");
3237 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3239 return qeth_prep_flush_pack_buffer(queue);
3245 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3248 struct qeth_card *card = queue->card;
3249 struct qeth_qdio_out_buffer *buf;
3252 unsigned int qdio_flags;
3254 for (i = index; i < index + count; ++i) {
3255 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3256 buf = queue->bufs[bidx];
3257 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3258 SBAL_EFLAGS_LAST_ENTRY;
3260 if (queue->bufstates)
3261 queue->bufstates[bidx].user = buf;
3263 if (IS_IQD(queue->card))
3266 if (!queue->do_pack) {
3267 if ((atomic_read(&queue->used_buffers) >=
3268 (QETH_HIGH_WATERMARK_PACK -
3269 QETH_WATERMARK_PACK_FUZZ)) &&
3270 !atomic_read(&queue->set_pci_flags_count)) {
3271 /* it's likely that we'll go to packing mode soon */
3273 atomic_inc(&queue->set_pci_flags_count);
3274 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3277 if (!atomic_read(&queue->set_pci_flags_count)) {
3279 * there's no outstanding PCI any more, so we
3280 * have to request a PCI to be sure that the PCI
3281 * will wake at some time in the future; then we
3282 * can flush packed buffers that might still be
3283 * hanging around, which can happen if no
3284 * further send was requested by the stack
3286 atomic_inc(&queue->set_pci_flags_count);
3287 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3292 qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3293 if (atomic_read(&queue->set_pci_flags_count))
3294 qdio_flags |= QDIO_FLAG_PCI_OUT;
3295 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
3296 queue->queue_no, index, count);
3298 /* Fake the TX completion interrupt: */
3300 napi_schedule(&queue->napi);
3303 /* ignore temporary SIGA errors without busy condition */
3306 QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3307 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3308 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3309 QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3310 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3312 /* this must not happen under normal circumstances. If it
3313 * happens, something is really wrong -> recover */
3314 qeth_schedule_recovery(queue->card);
3319 static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
3321 qeth_flush_buffers(queue, queue->bulk_start, 1);
3323 queue->bulk_start = QDIO_BUFNR(queue->bulk_start + 1);
3324 queue->prev_hdr = NULL;
3327 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3331 int q_was_packing = 0;
3334 * check if we have to switch to non-packing mode or if
3335 * we have to get a pci flag out on the queue
3337 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3338 !atomic_read(&queue->set_pci_flags_count)) {
3339 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
3340 QETH_OUT_Q_UNLOCKED) {
3342 * If we get in here, there was no action in
3343 * do_send_packet. So, we check if there is a
3344 * packing buffer to be flushed here.
3346 index = queue->next_buf_to_fill;
3347 q_was_packing = queue->do_pack;
3348 /* queue->do_pack may change */
3350 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
3352 !atomic_read(&queue->set_pci_flags_count))
3353 flush_cnt += qeth_prep_flush_pack_buffer(queue);
3355 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3357 qeth_flush_buffers(queue, index, flush_cnt);
3358 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3363 static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
3364 unsigned long card_ptr)
3366 struct qeth_card *card = (struct qeth_card *)card_ptr;
3368 if (card->dev->flags & IFF_UP)
3369 napi_schedule(&card->napi);
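/* Toggling the completion queue changes the QDIO queue layout, so the
 * queues must be freed and re-allocated; this is only safe while the
 * card is offline (CARD_STATE_DOWN). */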
3372 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3376 if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
3380 if (card->options.cq == cq) {
3385 if (card->state != CARD_STATE_DOWN) {
3390 qeth_free_qdio_queues(card);
3391 card->options.cq = cq;
3398 EXPORT_SYMBOL_GPL(qeth_configure_cq);
3400 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3401 unsigned int queue, int first_element,
3404 struct qeth_qdio_q *cq = card->qdio.c_q;
3408 if (!qeth_is_cq(card, queue))
3411 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3412 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3413 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3416 netif_tx_stop_all_queues(card->dev);
3417 qeth_schedule_recovery(card);
3421 for (i = first_element; i < first_element + count; ++i) {
3422 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3423 struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
3426 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3427 buffer->element[e].addr) {
3428 unsigned long phys_aob_addr;
3430 phys_aob_addr = (unsigned long) buffer->element[e].addr;
3431 qeth_qdio_handle_aob(card, phys_aob_addr);
3434 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3436 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3437 card->qdio.c_q->next_buf_to_init,
3440 dev_warn(&card->gdev->dev,
3441 "QDIO reported an error, rc=%i\n", rc);
3442 QETH_CARD_TEXT(card, 2, "qcqherr");
3444 card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
3445 + count) % QDIO_MAX_BUFFERS_PER_Q;
3448 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3449 unsigned int qdio_err, int queue,
3450 int first_elem, int count,
3451 unsigned long card_ptr)
3453 struct qeth_card *card = (struct qeth_card *)card_ptr;
3455 QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3456 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3458 if (qeth_is_cq(card, queue))
3459 qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
3461 qeth_schedule_recovery(card);
3464 static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3465 unsigned int qdio_error, int __queue,
3466 int first_element, int count,
3467 unsigned long card_ptr)
3469 struct qeth_card *card = (struct qeth_card *) card_ptr;
3470 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3471 struct qeth_qdio_out_buffer *buffer;
3472 struct net_device *dev = card->dev;
3473 struct netdev_queue *txq;
3476 QETH_CARD_TEXT(card, 6, "qdouhdl");
3477 if (qdio_error & QDIO_ERROR_FATAL) {
3478 QETH_CARD_TEXT(card, 2, "achkcond");
3479 netif_tx_stop_all_queues(dev);
3480 qeth_schedule_recovery(card);
3484 for (i = first_element; i < (first_element + count); ++i) {
3485 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3486 buffer = queue->bufs[bidx];
3487 qeth_handle_send_error(card, buffer, qdio_error);
3488 qeth_clear_output_buffer(queue, buffer, qdio_error, 0);
3491 atomic_sub(count, &queue->used_buffers);
3492 qeth_check_outbound_queue(queue);
3494 txq = netdev_get_tx_queue(dev, __queue);
3495 /* xmit may have observed the full-condition, but not yet stopped the
3496 * txq. In that case the code below won't trigger. So before returning,
3497 * xmit will re-check the txq's fill level and wake it up if needed.
3499 if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
3500 netif_tx_wake_queue(txq);
3504 * Note: Function assumes that we have 4 outbound queues.
3506 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
3508 struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
3511 switch (card->qdio.do_prio_queueing) {
3512 case QETH_PRIO_Q_ING_TOS:
3513 case QETH_PRIO_Q_ING_PREC:
3514 switch (qeth_get_ip_version(skb)) {
3516 tos = ipv4_get_dsfield(ip_hdr(skb));
3519 tos = ipv6_get_dsfield(ipv6_hdr(skb));
3522 return card->qdio.default_out_queue;
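/* map the inverted precedence bits so that a higher IP precedence
 * selects a lower (= higher-priority) queue, e.g. TOS 0xc0 -> queue 0,
 * TOS 0x00 -> queue 3 */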
3524 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3525 return ~tos >> 6 & 3;
3526 if (tos & IPTOS_MINCOST)
3528 if (tos & IPTOS_RELIABILITY)
3530 if (tos & IPTOS_THROUGHPUT)
3532 if (tos & IPTOS_LOWDELAY)
3535 case QETH_PRIO_Q_ING_SKB:
3536 if (skb->priority > 5)
3538 return ~skb->priority >> 1 & 3;
3539 case QETH_PRIO_Q_ING_VLAN:
3540 if (veth->h_vlan_proto == htons(ETH_P_8021Q))
3541 return ~ntohs(veth->h_vlan_TCI) >>
3542 (VLAN_PRIO_SHIFT + 1) & 3;
3547 return card->qdio.default_out_queue;
3549 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3552 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
3555 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3556 * the fragmented part of the SKB. Returns zero for a linear SKB.
3558 static int qeth_get_elements_for_frags(struct sk_buff *skb)
3560 int cnt, elements = 0;
3562 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3563 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3565 elements += qeth_get_elements_for_range(
3566 (addr_t)skb_frag_address(frag),
3567 (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3573 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
3574 * to transmit an skb.
3575 * @skb: the skb to operate on.
3576 * @data_offset: skip this part of the skb's linear data
3578 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3579 * skb's data (both its linear part and paged fragments).
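* For example, 5000 bytes of linear data starting 200 bytes into a 4KB
* page stretch across two pages and thus need two buffer elements.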
3581 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
3583 unsigned int elements = qeth_get_elements_for_frags(skb);
3584 addr_t end = (addr_t)skb->data + skb_headlen(skb);
3585 addr_t start = (addr_t)skb->data + data_offset;
3588 elements += qeth_get_elements_for_range(start, end);
3591 EXPORT_SYMBOL_GPL(qeth_count_elements);
3593 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
3597 * qeth_add_hw_header() - add a HW header to an skb.
3598 * @skb: skb that the HW header should be added to.
3599 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3600 * it contains a valid pointer to a qeth_hdr.
3601 * @hdr_len: length of the HW header.
3602 * @proto_len: length of protocol headers that need to be in same page as the
3605 * Returns the pushed length. If the header can't be pushed on
3606 * (e.g. because it would cross a page boundary), it is allocated from
3607 * the cache instead and 0 is returned.
3608 * The number of needed buffer elements is returned in @elements.
3609 * A failure to create the hdr is indicated by a return value < 0.
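* In the common case skb_cow_head() has left room in front of skb->data
* and the pushed header lands in the same page as the protocol headers,
* so no cache allocation (and no dedicated header element) is needed.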
3611 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
3612 struct sk_buff *skb, struct qeth_hdr **hdr,
3613 unsigned int hdr_len, unsigned int proto_len,
3614 unsigned int *elements)
3616 const unsigned int contiguous = proto_len ? proto_len : 1;
3617 const unsigned int max_elements = queue->max_elements;
3618 unsigned int __elements;
3624 start = (addr_t)skb->data - hdr_len;
3625 end = (addr_t)skb->data;
3627 if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
3628 /* Push HW header into same page as first protocol header. */
3630 /* ... but TSO always needs a separate element for headers: */
3631 if (skb_is_gso(skb))
3632 __elements = 1 + qeth_count_elements(skb, proto_len);
3634 __elements = qeth_count_elements(skb, 0);
3635 } else if (!proto_len && PAGE_ALIGNED(skb->data)) {
3636 /* Push HW header into preceding page, flush with skb->data. */
3638 __elements = 1 + qeth_count_elements(skb, 0);
3640 /* Use header cache, copy protocol headers up. */
3642 __elements = 1 + qeth_count_elements(skb, proto_len);
3645 /* Compress skb to fit into one IO buffer: */
3646 if (__elements > max_elements) {
3647 if (!skb_is_nonlinear(skb)) {
3648 /* Drop it, no easy way of shrinking it further. */
3649 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
3650 max_elements, __elements, skb->len);
3654 rc = skb_linearize(skb);
3656 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
3660 QETH_TXQ_STAT_INC(queue, skbs_linearized);
3661 /* Linearization changed the layout, re-evaluate: */
3665 *elements = __elements;
3666 /* Add the header: */
3668 *hdr = skb_push(skb, hdr_len);
3672 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
3674 *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
3677 /* Copy protocol headers behind HW header: */
3678 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
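/* Decide whether a new skb may be packed into the same IQD buffer as
 * the previous one. All skbs in a buffer must address the same target:
 * same destination MAC and VLAN for layer 2, same next hop and VLAN
 * for layer 3. */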
3682 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
3683 struct qeth_qdio_out_buffer *buffer,
3684 struct sk_buff *curr_skb,
3685 struct qeth_hdr *curr_hdr)
3687 struct qeth_hdr *prev_hdr = queue->prev_hdr;
3692 /* All packets must have the same target: */
3693 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
3694 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
3696 return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
3697 eth_hdr(curr_skb)->h_dest) &&
3698 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
3701 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
3702 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
3705 static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
3706 struct qeth_qdio_out_buffer *buf,
3707 bool is_first_elem, unsigned int offset)
3709 struct qdio_buffer *buffer = buf->buffer;
3710 int element = buf->next_element_to_fill;
3711 int length = skb_headlen(skb) - offset;
3712 char *data = skb->data + offset;
3713 unsigned int elem_length, cnt;
3715 /* map linear part into buffer element(s) */
3716 while (length > 0) {
3717 elem_length = min_t(unsigned int, length,
3718 PAGE_SIZE - offset_in_page(data));
3720 buffer->element[element].addr = data;
3721 buffer->element[element].length = elem_length;
3722 length -= elem_length;
3723 if (is_first_elem) {
3724 is_first_elem = false;
3725 if (length || skb_is_nonlinear(skb))
3726 /* skb needs additional elements */
3727 buffer->element[element].eflags =
3728 SBAL_EFLAGS_FIRST_FRAG;
3730 buffer->element[element].eflags = 0;
3732 buffer->element[element].eflags =
3733 SBAL_EFLAGS_MIDDLE_FRAG;
3736 data += elem_length;
3740 /* map page frags into buffer element(s) */
3741 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3742 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3744 data = skb_frag_address(frag);
3745 length = skb_frag_size(frag);
3746 while (length > 0) {
3747 elem_length = min_t(unsigned int, length,
3748 PAGE_SIZE - offset_in_page(data));
3750 buffer->element[element].addr = data;
3751 buffer->element[element].length = elem_length;
3752 buffer->element[element].eflags =
3753 SBAL_EFLAGS_MIDDLE_FRAG;
3755 length -= elem_length;
3756 data += elem_length;
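/* if the skb occupied more than one element, terminate the
 * FIRST/MIDDLE fragment chain by marking the final element as
 * LAST_FRAG */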
3761 if (buffer->element[element - 1].eflags)
3762 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
3763 buf->next_element_to_fill = element;
3768 * qeth_fill_buffer() - map skb into an output buffer
3769 * @buf: buffer to transport the skb
3770 * @skb: skb to map into the buffer
3771 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
3772 * from qeth_core_header_cache.
3773 * @offset: when mapping the skb, start at skb->data + offset
3774 * @hd_len: if > 0, build a dedicated header element of this size
3776 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
3777 struct sk_buff *skb, struct qeth_hdr *hdr,
3778 unsigned int offset, unsigned int hd_len)
3780 struct qdio_buffer *buffer = buf->buffer;
3781 bool is_first_elem = true;
3783 __skb_queue_tail(&buf->skb_list, skb);
3785 /* build dedicated header element */
3787 int element = buf->next_element_to_fill;
3788 is_first_elem = false;
3790 buffer->element[element].addr = hdr;
3791 buffer->element[element].length = hd_len;
3792 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
3793 /* remember to free cache-allocated qeth_hdr: */
3794 buf->is_header[element] = ((void *)hdr != skb->data);
3795 buf->next_element_to_fill++;
3798 return __qeth_fill_buffer(skb, buf, is_first_elem, offset);
3801 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3802 struct sk_buff *skb, unsigned int elements,
3803 struct qeth_hdr *hdr, unsigned int offset,
3804 unsigned int hd_len)
3806 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
3807 unsigned int bytes = qdisc_pkt_len(skb);
3808 unsigned int next_element;
3809 struct netdev_queue *txq;
3810 bool stopped = false;
3813 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
3815 /* Just a sanity check, the wake/stop logic should ensure that we always
3816 * get a free buffer.
3818 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
3821 if ((buffer->next_element_to_fill + elements > queue->max_elements) ||
3822 !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) {
3823 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3824 qeth_flush_queue(queue);
3825 buffer = queue->bufs[queue->bulk_start];
3827 /* Sanity-check again: */
3828 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
3832 if (buffer->next_element_to_fill == 0 &&
3833 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
3834 /* If a TX completion happens right _here_ and fails to wake
3835 * the txq, then our re-check below will catch the race.
3837 QETH_TXQ_STAT_INC(queue, stopped);
3838 netif_tx_stop_queue(txq);
3842 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
3843 buffer->bytes += bytes;
3844 queue->prev_hdr = hdr;
3846 flush = __netdev_tx_sent_queue(txq, bytes,
3847 !stopped && netdev_xmit_more());
3849 if (flush || next_element >= queue->max_elements) {
3850 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3851 qeth_flush_queue(queue);
3854 if (stopped && !qeth_out_queue_is_full(queue))
3855 netif_tx_start_queue(txq);
3859 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3860 struct sk_buff *skb, struct qeth_hdr *hdr,
3861 unsigned int offset, unsigned int hd_len,
3862 int elements_needed)
3864 struct qeth_qdio_out_buffer *buffer;
3865 unsigned int next_element;
3866 struct netdev_queue *txq;
3867 bool stopped = false;
3869 int flush_count = 0;
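/* The queue is guarded by an atomic state machine rather than a lock:
 * xmit cmpxchg()s it from UNLOCKED to LOCKED, and the completion path
 * may raise it to LOCKED_FLUSH to ask the current owner for another
 * flush pass (see the loop near the end of this function). */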
3874 /* spin until we get the queue ... */
3875 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
3876 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
3877 start_index = queue->next_buf_to_fill;
3878 buffer = queue->bufs[queue->next_buf_to_fill];
3880 /* Just a sanity check, the wake/stop logic should ensure that we always
3881 * get a free buffer.
3883 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
3884 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3888 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
3890 /* check if we need to switch packing state of this queue */
3891 qeth_switch_to_packing_if_needed(queue);
3892 if (queue->do_pack) {
3894 /* does packet fit in current buffer? */
3895 if (buffer->next_element_to_fill + elements_needed >
3896 queue->max_elements) {
3897 /* ... no -> set state PRIMED */
3898 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3900 queue->next_buf_to_fill =
3901 (queue->next_buf_to_fill + 1) %
3902 QDIO_MAX_BUFFERS_PER_Q;
3903 buffer = queue->bufs[queue->next_buf_to_fill];
3905 /* We stepped forward, so sanity-check again: */
3906 if (atomic_read(&buffer->state) !=
3907 QETH_QDIO_BUF_EMPTY) {
3908 qeth_flush_buffers(queue, start_index,
3910 atomic_set(&queue->state,
3911 QETH_OUT_Q_UNLOCKED);
3918 if (buffer->next_element_to_fill == 0 &&
3919 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
3920 /* If a TX completion happens right _here_ and fails to wake
3921 * the txq, then our re-check below will catch the race.
3923 QETH_TXQ_STAT_INC(queue, stopped);
3924 netif_tx_stop_queue(txq);
3928 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
3931 QETH_TXQ_STAT_INC(queue, skbs_pack);
3932 if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
3934 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3935 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
3936 QDIO_MAX_BUFFERS_PER_Q;
3940 qeth_flush_buffers(queue, start_index, flush_count);
3941 else if (!atomic_read(&queue->set_pci_flags_count))
3942 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
3944 * queue->state will go from LOCKED -> UNLOCKED or from
3945 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
3946 * (switch packing state or flush buffer to get another pci flag out).
3947 * In that case we will enter this loop
3949 while (atomic_dec_return(&queue->state)) {
3950 start_index = queue->next_buf_to_fill;
3951 /* check if we can go back to non-packing state */
3952 tmp = qeth_switch_to_nonpacking_if_needed(queue);
3954 * check if we need to flush a packing buffer to get a pci
3955 * flag out on the queue
3957 if (!tmp && !atomic_read(&queue->set_pci_flags_count))
3958 tmp = qeth_prep_flush_pack_buffer(queue);
3960 qeth_flush_buffers(queue, start_index, tmp);
3965 /* at this point the queue is UNLOCKED again */
3967 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
3969 if (stopped && !qeth_out_queue_is_full(queue))
3970 netif_tx_start_queue(txq);
3973 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
3975 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
3976 unsigned int payload_len, struct sk_buff *skb,
3977 unsigned int proto_len)
3979 struct qeth_hdr_ext_tso *ext = &hdr->ext;
3981 ext->hdr_tot_len = sizeof(*ext);
3982 ext->imb_hdr_no = 1;
3984 ext->hdr_version = 1;
3986 ext->payload_len = payload_len;
3987 ext->mss = skb_shinfo(skb)->gso_size;
3988 ext->dg_hdr_len = proto_len;
3991 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
3992 struct qeth_qdio_out_q *queue, int ipv,
3993 void (*fill_header)(struct qeth_qdio_out_q *queue,
3994 struct qeth_hdr *hdr, struct sk_buff *skb,
3995 int ipv, unsigned int data_len))
3997 unsigned int proto_len, hw_hdr_len;
3998 unsigned int frame_len = skb->len;
3999 bool is_tso = skb_is_gso(skb);
4000 unsigned int data_offset = 0;
4001 struct qeth_hdr *hdr = NULL;
4002 unsigned int hd_len = 0;
4003 unsigned int elements;
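/* for TSO the device needs the full protocol headers (Ethernet through
 * TCP) contiguous with the TSO header; for non-TSO IQD layer-2 traffic
 * only the Ethernet header has to stay with the HW header */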
4007 hw_hdr_len = sizeof(struct qeth_hdr_tso);
4008 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4010 hw_hdr_len = sizeof(struct qeth_hdr);
4011 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4014 rc = skb_cow_head(skb, hw_hdr_len);
4018 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4022 if (is_tso || !push_len) {
4023 /* HW header needs its own buffer element. */
4024 hd_len = hw_hdr_len + proto_len;
4025 data_offset = push_len + proto_len;
4027 memset(hdr, 0, hw_hdr_len);
4028 fill_header(queue, hdr, skb, ipv, frame_len);
4030 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4031 frame_len - proto_len, skb, proto_len);
4034 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
4037 /* TODO: drop skb_orphan() once TX completion is fast enough */
4039 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4043 if (rc && !push_len)
4044 kmem_cache_free(qeth_core_header_cache, hdr);
4048 EXPORT_SYMBOL_GPL(qeth_xmit);
4050 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4051 struct qeth_reply *reply, unsigned long data)
4053 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4054 struct qeth_ipacmd_setadpparms *setparms;
4056 QETH_CARD_TEXT(card, 4, "prmadpcb");
4058 setparms = &(cmd->data.setadapterparms);
4059 if (qeth_setadpparms_inspect_rc(cmd)) {
4060 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4061 setparms->data.mode = SET_PROMISC_MODE_OFF;
4063 card->info.promisc_mode = setparms->data.mode;
4064 return (cmd->hdr.return_code) ? -EIO : 0;
4067 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
4069 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
4070 SET_PROMISC_MODE_OFF;
4071 struct qeth_cmd_buffer *iob;
4072 struct qeth_ipa_cmd *cmd;
4074 QETH_CARD_TEXT(card, 4, "setprom");
4075 QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4077 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4078 SETADP_DATA_SIZEOF(mode));
4081 cmd = __ipa_cmd(iob);
4082 cmd->data.setadapterparms.data.mode = mode;
4083 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4085 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4087 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4088 struct qeth_reply *reply, unsigned long data)
4090 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4091 struct qeth_ipacmd_setadpparms *adp_cmd;
4093 QETH_CARD_TEXT(card, 4, "chgmaccb");
4094 if (qeth_setadpparms_inspect_rc(cmd))
4097 adp_cmd = &cmd->data.setadapterparms;
4098 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4099 return -EADDRNOTAVAIL;
4101 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4102 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4103 return -EADDRNOTAVAIL;
4105 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4109 int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4112 struct qeth_cmd_buffer *iob;
4113 struct qeth_ipa_cmd *cmd;
4115 QETH_CARD_TEXT(card, 4, "chgmac");
4117 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4118 SETADP_DATA_SIZEOF(change_addr));
4121 cmd = __ipa_cmd(iob);
4122 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4123 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4124 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4125 card->dev->dev_addr);
4126 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4130 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4132 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4133 struct qeth_reply *reply, unsigned long data)
4135 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4136 struct qeth_set_access_ctrl *access_ctrl_req;
4137 int fallback = *(int *)reply->param;
4139 QETH_CARD_TEXT(card, 4, "setaccb");
4140 if (cmd->hdr.return_code)
4142 qeth_setadpparms_inspect_rc(cmd);
4144 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4145 QETH_CARD_TEXT_(card, 2, "rc=%d",
4146 cmd->data.setadapterparms.hdr.return_code);
4147 if (cmd->data.setadapterparms.hdr.return_code !=
4148 SET_ACCESS_CTRL_RC_SUCCESS)
4149 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4150 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4151 cmd->data.setadapterparms.hdr.return_code);
4152 switch (cmd->data.setadapterparms.hdr.return_code) {
4153 case SET_ACCESS_CTRL_RC_SUCCESS:
4154 if (card->options.isolation == ISOLATION_MODE_NONE) {
4155 dev_info(&card->gdev->dev,
4156 "QDIO data connection isolation is deactivated\n");
4158 dev_info(&card->gdev->dev,
4159 "QDIO data connection isolation is activated\n");
4162 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4163 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4166 card->options.isolation = card->options.prev_isolation;
4168 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4169 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4172 card->options.isolation = card->options.prev_isolation;
4174 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4175 dev_err(&card->gdev->dev, "Adapter does not "
4176 "support QDIO data connection isolation\n");
4178 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4179 dev_err(&card->gdev->dev,
4180 "Adapter is dedicated. "
4181 "QDIO data connection isolation not supported\n");
4183 card->options.isolation = card->options.prev_isolation;
4185 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4186 dev_err(&card->gdev->dev,
4187 "TSO does not permit QDIO data connection isolation\n");
4189 card->options.isolation = card->options.prev_isolation;
4191 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4192 dev_err(&card->gdev->dev, "The adjacent switch port does not "
4193 "support reflective relay mode\n");
4195 card->options.isolation = card->options.prev_isolation;
4197 case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4198 dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4199 "enabled at the adjacent switch port");
4201 card->options.isolation = card->options.prev_isolation;
4203 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4204 dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4205 "at the adjacent switch failed\n");
4208 /* this should never happen */
4210 card->options.isolation = card->options.prev_isolation;
4213 return (cmd->hdr.return_code) ? -EIO : 0;
4216 static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4217 enum qeth_ipa_isolation_modes isolation, int fallback)
4220 struct qeth_cmd_buffer *iob;
4221 struct qeth_ipa_cmd *cmd;
4222 struct qeth_set_access_ctrl *access_ctrl_req;
4224 QETH_CARD_TEXT(card, 4, "setacctl");
4226 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4227 SETADP_DATA_SIZEOF(set_access_ctrl));
4230 cmd = __ipa_cmd(iob);
4231 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4232 access_ctrl_req->subcmd_code = isolation;
4234 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4236 QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4240 int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
4244 QETH_CARD_TEXT(card, 4, "setactlo");
4246 if ((IS_OSD(card) || IS_OSX(card)) &&
4247 qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4248 rc = qeth_setadpparms_set_access_ctrl(card,
4249 card->options.isolation, fallback);
4251 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
4252 rc, CARD_DEVID(card));
4255 } else if (card->options.isolation != ISOLATION_MODE_NONE) {
4256 card->options.isolation = ISOLATION_MODE_NONE;
4258 dev_err(&card->gdev->dev, "Adapter does not "
4259 "support QDIO data connection isolation\n");
4264 EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
4266 void qeth_tx_timeout(struct net_device *dev)
4268 struct qeth_card *card;
4270 card = dev->ml_priv;
4271 QETH_CARD_TEXT(card, 4, "txtimeo");
4272 qeth_schedule_recovery(card);
4274 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
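/* qeth devices have no real PHY; synthesize a plausible MII register
 * set so that generic MII ioctl users get sensible answers */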
4276 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4278 struct qeth_card *card = dev->ml_priv;
4282 case MII_BMCR: /* Basic mode control register */
4284 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4285 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4286 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4287 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4288 rc |= BMCR_SPEED100;
4290 case MII_BMSR: /* Basic mode status register */
4291 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4292 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4295 case MII_PHYSID1: /* PHYS ID 1 */
4296 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4298 rc = (rc >> 5) & 0xFFFF;
4300 case MII_PHYSID2: /* PHYS ID 2 */
4301 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4303 case MII_ADVERTISE: /* Advertisement control reg */
4306 case MII_LPA: /* Link partner ability reg */
4307 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4308 LPA_100BASE4 | LPA_LPACK;
4310 case MII_EXPANSION: /* Expansion register */
4312 case MII_DCOUNTER: /* disconnect counter */
4314 case MII_FCSCOUNTER: /* false carrier counter */
4316 case MII_NWAYTEST: /* N-way auto-neg test register */
4318 case MII_RERRCOUNTER: /* rx error counter */
4319 rc = card->stats.rx_errors;
4321 case MII_SREVISION: /* silicon revision */
4323 case MII_RESV1: /* reserved 1 */
4325 case MII_LBRERROR: /* loopback, rx, bypass error */
4327 case MII_PHYADDR: /* physical address */
4329 case MII_RESV2: /* reserved 2 */
4331 case MII_TPISTATUS: /* TPI status for 10mbps */
4333 case MII_NCONFIG: /* network interface config */
4341 static int qeth_snmp_command_cb(struct qeth_card *card,
4342 struct qeth_reply *reply, unsigned long data)
4344 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4345 struct qeth_arp_query_info *qinfo = reply->param;
4346 struct qeth_ipacmd_setadpparms *adp_cmd;
4347 unsigned int data_len;
4350 QETH_CARD_TEXT(card, 3, "snpcmdcb");
4352 if (cmd->hdr.return_code) {
4353 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4356 if (cmd->data.setadapterparms.hdr.return_code) {
4357 cmd->hdr.return_code =
4358 cmd->data.setadapterparms.hdr.return_code;
4359 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4363 adp_cmd = &cmd->data.setadapterparms;
4364 data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
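/* the first fragment carries the full SNMP command including its
 * header, subsequent fragments only the request payload */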
4365 if (adp_cmd->hdr.seq_no == 1) {
4366 snmp_data = &adp_cmd->data.snmp;
4368 snmp_data = &adp_cmd->data.snmp.request;
4369 data_len -= offsetof(struct qeth_snmp_cmd, request);
4372 /* check if there is enough room in userspace */
4373 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4374 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
4377 QETH_CARD_TEXT_(card, 4, "snore%i",
4378 cmd->data.setadapterparms.hdr.used_total);
4379 QETH_CARD_TEXT_(card, 4, "sseqn%i",
4380 cmd->data.setadapterparms.hdr.seq_no);
4381 /* copy entries to user buffer */
4382 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4383 qinfo->udata_offset += data_len;
4385 if (cmd->data.setadapterparms.hdr.seq_no <
4386 cmd->data.setadapterparms.hdr.used_total)
4391 static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4392 {
4393 struct qeth_snmp_ureq __user *ureq;
4394 struct qeth_cmd_buffer *iob;
4395 unsigned int req_len;
4396 struct qeth_arp_query_info qinfo = {0, };
4397 int rc = 0;
4399 QETH_CARD_TEXT(card, 3, "snmpcmd");
4401 if (IS_VM_NIC(card))
4402 return -EOPNOTSUPP;
4404 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4405 IS_LAYER3(card))
4406 return -EOPNOTSUPP;
4408 ureq = (struct qeth_snmp_ureq __user *) udata;
4409 if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
4410 get_user(req_len, &ureq->hdr.req_len))
4411 return -EFAULT;
4413 /* Sanitize user input, to avoid overflows in iob size calculation: */
4414 if (req_len > QETH_BUFSIZE)
4415 return -EINVAL;
4417 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4418 if (!iob)
4419 return -ENOMEM;
4421 if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
4422 &ureq->cmd, req_len)) {
4423 qeth_put_cmd(iob);
4424 return -EFAULT;
4425 }
4427 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4428 if (!qinfo.udata) {
4429 qeth_put_cmd(iob);
4430 return -ENOMEM;
4431 }
4432 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4434 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
4435 if (rc)
4436 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4437 CARD_DEVID(card), rc);
4438 else {
4439 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4440 rc = -EFAULT;
4441 }
4443 kfree(qinfo.udata);
4444 return rc;
4445 }
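/* Added annotation: callback for QUERY_OAT. Each reply chunk is appended to
 * the vzalloc'ed bounce buffer; the response data is copied from a fixed
 * offset of 28 bytes into the raw reply, past the leading command headers.
 */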
4447 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4448 struct qeth_reply *reply, unsigned long data)
4449 {
4450 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4451 struct qeth_qoat_priv *priv;
4452 char *resdata;
4453 int resdatalen;
4455 QETH_CARD_TEXT(card, 3, "qoatcb");
4456 if (qeth_setadpparms_inspect_rc(cmd))
4457 return -EIO;
4459 priv = (struct qeth_qoat_priv *)reply->param;
4460 resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4461 resdata = (char *)data + 28;
4463 if (resdatalen > (priv->buffer_len - priv->response_len))
4464 return -ENOSPC;
4466 memcpy((priv->buffer + priv->response_len), resdata,
4467 resdatalen);
4468 priv->response_len += resdatalen;
4470 if (cmd->data.setadapterparms.hdr.seq_no <
4471 cmd->data.setadapterparms.hdr.used_total)
4472 return 1;
4473 return 0;
4474 }
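/* Added annotation: ioctl worker for SIOC_QETH_QUERY_OAT. It allocates a
 * kernel bounce buffer of the user-requested size, runs the QUERY_OAT
 * adapter command, and copies the accumulated response (plus its actual
 * length) back to user space. compat tasks pass a 32-bit pointer, hence the
 * compat_ptr() conversion.
 */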
4476 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4477 {
4478 int rc = 0;
4479 struct qeth_cmd_buffer *iob;
4480 struct qeth_ipa_cmd *cmd;
4481 struct qeth_query_oat *oat_req;
4482 struct qeth_query_oat_data oat_data;
4483 struct qeth_qoat_priv priv;
4484 void __user *tmp;
4486 QETH_CARD_TEXT(card, 3, "qoatcmd");
4488 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
4489 rc = -EOPNOTSUPP;
4490 goto out;
4491 }
4493 if (copy_from_user(&oat_data, udata,
4494 sizeof(struct qeth_query_oat_data))) {
4495 rc = -EFAULT;
4496 goto out;
4497 }
4499 priv.buffer_len = oat_data.buffer_len;
4500 priv.response_len = 0;
4501 priv.buffer = vzalloc(oat_data.buffer_len);
4502 if (!priv.buffer) {
4503 rc = -ENOMEM;
4504 goto out;
4505 }
4507 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4508 SETADP_DATA_SIZEOF(query_oat));
4509 if (!iob) {
4510 rc = -ENOMEM;
4511 goto out_free;
4512 }
4513 cmd = __ipa_cmd(iob);
4514 oat_req = &cmd->data.setadapterparms.data.query_oat;
4515 oat_req->subcmd_code = oat_data.command;
4517 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
4518 &priv);
4519 if (!rc) {
4520 if (is_compat_task())
4521 tmp = compat_ptr(oat_data.ptr);
4522 else
4523 tmp = (void __user *)(unsigned long)oat_data.ptr;
4525 if (copy_to_user(tmp, priv.buffer,
4526 priv.response_len)) {
4527 rc = -EFAULT;
4528 goto out_free;
4529 }
4531 oat_data.response_len = priv.response_len;
4533 if (copy_to_user(udata, &oat_data,
4534 sizeof(struct qeth_query_oat_data)))
4535 rc = -EFAULT;
4536 }
4538 out_free:
4539 vfree(priv.buffer);
4540 out:
4541 return rc;
4542 }
4544 static int qeth_query_card_info_cb(struct qeth_card *card,
4545 struct qeth_reply *reply, unsigned long data)
4546 {
4547 struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
4548 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4549 struct qeth_query_card_info *card_info;
4551 QETH_CARD_TEXT(card, 2, "qcrdincb");
4552 if (qeth_setadpparms_inspect_rc(cmd))
4553 return -EIO;
4555 card_info = &cmd->data.setadapterparms.data.card_info;
4556 carrier_info->card_type = card_info->card_type;
4557 carrier_info->port_mode = card_info->port_mode;
4558 carrier_info->port_speed = card_info->port_speed;
4559 return 0;
4560 }
4562 int qeth_query_card_info(struct qeth_card *card,
4563 struct carrier_info *carrier_info)
4564 {
4565 struct qeth_cmd_buffer *iob;
4567 QETH_CARD_TEXT(card, 2, "qcrdinfo");
4568 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
4569 return -EOPNOTSUPP;
4570 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
4571 if (!iob)
4572 return -ENOMEM;
4573 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
4574 (void *)carrier_info);
4575 }
4577 /**
4578 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
4579 * @card: pointer to a qeth_card
4580 *
4581 * Returns
4582 * 0, if a MAC address has been set for the card's netdevice
4583 * a return code, for various error conditions
4584 */
4585 int qeth_vm_request_mac(struct qeth_card *card)
4586 {
4587 struct diag26c_mac_resp *response;
4588 struct diag26c_mac_req *request;
4589 struct ccw_dev_id id;
4590 int rc;
4592 QETH_CARD_TEXT(card, 2, "vmreqmac");
4594 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
4595 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
4596 if (!request || !response) {
4597 rc = -ENOMEM;
4598 goto out;
4599 }
4601 ccw_device_get_id(CARD_DDEV(card), &id);
4602 request->resp_buf_len = sizeof(*response);
4603 request->resp_version = DIAG26C_VERSION2;
4604 request->op_code = DIAG26C_GET_MAC;
4605 request->devno = id.devno;
4607 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4608 rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
4609 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4610 if (rc)
4611 goto out;
4612 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
4614 if (request->resp_buf_len < sizeof(*response) ||
4615 response->version != request->resp_version) {
4616 rc = -EIO;
4617 QETH_CARD_TEXT(card, 2, "badresp");
4618 QETH_CARD_HEX(card, 2, &request->resp_buf_len,
4619 sizeof(request->resp_buf_len));
4620 } else if (!is_valid_ether_addr(response->mac)) {
4621 rc = -EINVAL;
4622 QETH_CARD_TEXT(card, 2, "badmac");
4623 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
4624 } else {
4625 ether_addr_copy(card->dev->dev_addr, response->mac);
4626 }
4628 out:
4629 kfree(response);
4630 kfree(request);
4631 return rc;
4632 }
4633 EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
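/* Added annotation: reads the device's configuration data and QDIO
 * characteristics (SSQD), bringing the data device online temporarily if
 * needed. Based on the SSQD bits, Completion Queue support is either
 * advertised or switched off for this card.
 */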
4635 static void qeth_determine_capabilities(struct qeth_card *card)
4636 {
4637 int rc;
4638 struct ccw_device *ddev;
4639 int ddev_offline = 0;
4641 QETH_CARD_TEXT(card, 2, "detcapab");
4642 ddev = CARD_DDEV(card);
4643 if (!ddev->online) {
4644 ddev_offline = 1;
4645 rc = ccw_device_set_online(ddev);
4646 if (rc) {
4647 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
4648 goto out;
4649 }
4650 }
4652 rc = qeth_read_conf_data(card);
4653 if (rc) {
4654 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
4655 CARD_DEVID(card), rc);
4656 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
4657 goto out_offline;
4658 }
4660 rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
4661 if (rc)
4662 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
4664 QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
4665 QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
4666 QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
4667 QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
4668 QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
4669 if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
4670 ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
4671 ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
4672 dev_info(&card->gdev->dev,
4673 "Completion Queueing supported\n");
4674 } else {
4675 card->options.cq = QETH_CQ_NOTAVAILABLE;
4676 }
4679 out_offline:
4680 if (ddev_offline == 1)
4681 ccw_device_set_offline(ddev);
4682 out:
4683 return;
4684 }
4686 static void qeth_qdio_establish_cq(struct qeth_card *card,
4687 struct qdio_buffer **in_sbal_ptrs,
4688 void (**queue_start_poll)
4689 (struct ccw_device *, int,
4690 unsigned long))
4691 {
4692 int i;
4694 if (card->options.cq == QETH_CQ_ENABLED) {
4695 int offset = QDIO_MAX_BUFFERS_PER_Q *
4696 (card->qdio.no_in_queues - 1);
4697 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4698 in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
4699 virt_to_phys(card->qdio.c_q->bufs[i].buffer);
4700 }
4702 queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
4703 }
4704 }
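/* Added annotation: builds the qdio_initialize descriptor (QIB parameter
 * field, one SBAL pointer array entry per buffer of each input/output queue,
 * interrupt handlers) and transitions the card from ALLOCATED to ESTABLISHED
 * via qdio_allocate() + qdio_establish(). The temporary arrays are freed
 * through the cascading labels at the end.
 */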
4706 static int qeth_qdio_establish(struct qeth_card *card)
4707 {
4708 struct qdio_initialize init_data;
4709 char *qib_param_field;
4710 struct qdio_buffer **in_sbal_ptrs;
4711 void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
4712 struct qdio_buffer **out_sbal_ptrs;
4713 int i, j, k;
4714 int rc = 0;
4716 QETH_CARD_TEXT(card, 2, "qdioest");
4718 qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q,
4719 GFP_KERNEL);
4720 if (!qib_param_field) {
4721 rc = -ENOMEM;
4722 goto out_free_nothing;
4723 }
4725 qeth_create_qib_param_field(card, qib_param_field);
4726 qeth_create_qib_param_field_blkt(card, qib_param_field);
4728 in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q,
4729 sizeof(void *),
4730 GFP_KERNEL);
4731 if (!in_sbal_ptrs) {
4732 rc = -ENOMEM;
4733 goto out_free_qib_param;
4734 }
4735 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4736 in_sbal_ptrs[i] = (struct qdio_buffer *)
4737 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
4738 }
4740 queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
4741 GFP_KERNEL);
4742 if (!queue_start_poll) {
4743 rc = -ENOMEM;
4744 goto out_free_in_sbals;
4745 }
4746 for (i = 0; i < card->qdio.no_in_queues; ++i)
4747 queue_start_poll[i] = qeth_qdio_start_poll;
4749 qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
4751 out_sbal_ptrs =
4752 kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
4753 sizeof(void *),
4754 GFP_KERNEL);
4755 if (!out_sbal_ptrs) {
4756 rc = -ENOMEM;
4757 goto out_free_queue_start_poll;
4758 }
4759 for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
4760 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
4761 out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
4762 card->qdio.out_qs[i]->bufs[j]->buffer);
4763 }
4765 memset(&init_data, 0, sizeof(struct qdio_initialize));
4766 init_data.cdev = CARD_DDEV(card);
4767 init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
4768 QDIO_QETH_QFMT;
4769 init_data.qib_param_field_format = 0;
4770 init_data.qib_param_field = qib_param_field;
4771 init_data.no_input_qs = card->qdio.no_in_queues;
4772 init_data.no_output_qs = card->qdio.no_out_queues;
4773 init_data.input_handler = qeth_qdio_input_handler;
4774 init_data.output_handler = qeth_qdio_output_handler;
4775 init_data.queue_start_poll_array = queue_start_poll;
4776 init_data.int_parm = (unsigned long) card;
4777 init_data.input_sbal_addr_array = in_sbal_ptrs;
4778 init_data.output_sbal_addr_array = out_sbal_ptrs;
4779 init_data.output_sbal_state_array = card->qdio.out_bufstates;
4780 init_data.scan_threshold = IS_IQD(card) ? 0 : 32;
4782 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
4783 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
4784 rc = qdio_allocate(&init_data);
4785 if (rc) {
4786 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
4787 goto out;
4788 }
4789 rc = qdio_establish(&init_data);
4790 if (rc) {
4791 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
4792 qdio_free(CARD_DDEV(card));
4793 }
4794 }
4796 switch (card->options.cq) {
4797 case QETH_CQ_ENABLED:
4798 dev_info(&card->gdev->dev, "Completion Queue support enabled");
4799 break;
4800 case QETH_CQ_DISABLED:
4801 dev_info(&card->gdev->dev, "Completion Queue support disabled");
4802 break;
4803 default:
4804 break;
4805 }
4806 out:
4807 kfree(out_sbal_ptrs);
4808 out_free_queue_start_poll:
4809 kfree(queue_start_poll);
4810 out_free_in_sbals:
4811 kfree(in_sbal_ptrs);
4812 out_free_qib_param:
4813 kfree(qib_param_field);
4814 out_free_nothing:
4815 return rc;
4816 }
4818 static void qeth_core_free_card(struct qeth_card *card)
4819 {
4820 QETH_CARD_TEXT(card, 2, "freecrd");
4821 qeth_clean_channel(&card->read);
4822 qeth_clean_channel(&card->write);
4823 qeth_clean_channel(&card->data);
4824 qeth_put_cmd(card->read_cmd);
4825 destroy_workqueue(card->event_wq);
4826 qeth_free_qdio_queues(card);
4827 unregister_service_level(&card->qeth_service_level);
4828 dev_set_drvdata(&card->gdev->dev, NULL);
4829 kfree(card);
4830 }
4832 void qeth_trace_features(struct qeth_card *card)
4833 {
4834 QETH_CARD_TEXT(card, 2, "features");
4835 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
4836 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
4837 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
4838 QETH_CARD_HEX(card, 2, &card->info.diagass_support,
4839 sizeof(card->info.diagass_support));
4840 }
4841 EXPORT_SYMBOL_GPL(qeth_trace_features);
4843 static struct ccw_device_id qeth_ids[] = {
4844 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
4845 .driver_info = QETH_CARD_TYPE_OSD},
4846 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
4847 .driver_info = QETH_CARD_TYPE_IQD},
4848 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
4849 .driver_info = QETH_CARD_TYPE_OSN},
4850 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
4851 .driver_info = QETH_CARD_TYPE_OSM},
4852 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
4853 .driver_info = QETH_CARD_TYPE_OSX},
4854 {},
4855 };
4856 MODULE_DEVICE_TABLE(ccw, qeth_ids);
4858 static struct ccw_driver qeth_ccw_driver = {
4859 .driver = {
4860 .owner = THIS_MODULE,
4861 .name = "qeth",
4862 },
4863 .ids = qeth_ids,
4864 .probe = ccwgroup_probe_ccwdev,
4865 .remove = ccwgroup_remove_ccwdev,
4866 };
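/* Added annotation: common hardware bring-up. Clears the QDIO queues, cycles
 * the read/write/data channels through offline/online, runs the IDX
 * activation handshake and the MPC/STARTLAN sequence, then (re-)queries IPA
 * assists and adapter parameters. Recoverable errors retry up to three times
 * via the retry label.
 */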
4868 int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
4869 {
4870 int retries = 3;
4871 int rc;
4873 QETH_CARD_TEXT(card, 2, "hrdsetup");
4874 atomic_set(&card->force_alloc_skb, 0);
4875 rc = qeth_update_from_chp_desc(card);
4876 if (rc)
4877 return rc;
4878 retry:
4879 if (retries < 3)
4880 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
4881 CARD_DEVID(card));
4882 rc = qeth_qdio_clear_card(card, !IS_IQD(card));
4883 ccw_device_set_offline(CARD_DDEV(card));
4884 ccw_device_set_offline(CARD_WDEV(card));
4885 ccw_device_set_offline(CARD_RDEV(card));
4886 qdio_free(CARD_DDEV(card));
4887 rc = ccw_device_set_online(CARD_RDEV(card));
4888 if (rc)
4889 goto retriable;
4890 rc = ccw_device_set_online(CARD_WDEV(card));
4891 if (rc)
4892 goto retriable;
4893 rc = ccw_device_set_online(CARD_DDEV(card));
4894 if (rc)
4895 goto retriable;
4896 retriable:
4897 if (rc == -ERESTARTSYS) {
4898 QETH_CARD_TEXT(card, 2, "break1");
4899 return rc;
4900 } else if (rc) {
4901 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
4902 if (--retries < 0)
4903 goto out;
4904 else
4905 goto retry;
4906 }
4907 qeth_determine_capabilities(card);
4908 qeth_init_tokens(card);
4909 qeth_init_func_level(card);
4911 rc = qeth_idx_activate_read_channel(card);
4912 if (rc == -EINTR) {
4913 QETH_CARD_TEXT(card, 2, "break2");
4914 return rc;
4915 } else if (rc) {
4916 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
4917 if (--retries < 0)
4918 goto out;
4919 else
4920 goto retry;
4921 }
4923 rc = qeth_idx_activate_write_channel(card);
4924 if (rc == -EINTR) {
4925 QETH_CARD_TEXT(card, 2, "break3");
4926 return rc;
4927 } else if (rc) {
4928 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
4929 if (--retries < 0)
4930 goto out;
4931 else
4932 goto retry;
4933 }
4934 card->read_or_write_problem = 0;
4935 rc = qeth_mpc_initialize(card);
4936 if (rc) {
4937 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
4938 goto out;
4939 }
4941 rc = qeth_send_startlan(card);
4942 if (rc) {
4943 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
4944 if (rc == -ENETDOWN) {
4945 dev_warn(&card->gdev->dev, "The LAN is offline\n");
4946 *carrier_ok = false;
4947 } else {
4948 goto out;
4949 }
4950 } else {
4951 *carrier_ok = true;
4952 }
4954 card->options.ipa4.supported_funcs = 0;
4955 card->options.ipa6.supported_funcs = 0;
4956 card->options.adp.supported_funcs = 0;
4957 card->options.sbp.supported_funcs = 0;
4958 card->info.diagass_support = 0;
4959 rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
4960 if (rc == -ENOMEM)
4961 goto out;
4962 if (qeth_is_supported(card, IPA_IPV6)) {
4963 rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
4964 if (rc == -ENOMEM)
4965 goto out;
4966 }
4967 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
4968 rc = qeth_query_setadapterparms(card);
4969 if (rc < 0) {
4970 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
4971 goto out;
4972 }
4973 }
4974 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
4975 rc = qeth_query_setdiagass(card);
4976 if (rc < 0) {
4977 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
4978 goto out;
4979 }
4980 }
4981 return 0;
4982 out:
4983 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
4984 "an error on the device\n");
4985 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
4986 CARD_DEVID(card), rc);
4987 return rc;
4988 }
4989 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
4991 static void qeth_create_skb_frag(struct qdio_buffer_element *element,
4992 struct sk_buff *skb, int offset, int data_len)
4993 {
4994 struct page *page = virt_to_page(element->addr);
4995 unsigned int next_frag;
4997 /* first fill the linear space */
4998 if (!skb->len) {
4999 unsigned int linear = min(data_len, skb_tailroom(skb));
5001 skb_put_data(skb, element->addr + offset, linear);
5002 data_len -= linear;
5003 if (!data_len)
5004 return;
5005 offset += linear;
5006 /* fall through to add page frag for remaining data */
5007 }
5009 next_frag = skb_shinfo(skb)->nr_frags;
5010 get_page(page);
5011 skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len);
5012 }
5014 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
5015 {
5016 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
5017 }
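/* Added annotation: extracts the next packet from an inbound QDIO buffer.
 * *__element and *__offset track the parse position across calls, so one
 * buffer can yield several packets. Large packets are mapped as page
 * fragments (rx_sg) instead of being copied, when the card settings allow.
 */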
5019 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
5020 struct qeth_qdio_buffer *qethbuffer,
5021 struct qdio_buffer_element **__element, int *__offset,
5022 struct qeth_hdr **hdr)
5023 {
5024 struct qdio_buffer_element *element = *__element;
5025 struct qdio_buffer *buffer = qethbuffer->buffer;
5026 int offset = *__offset;
5027 struct sk_buff *skb;
5028 int skb_len = 0;
5029 void *data_ptr;
5030 int data_len;
5031 int headroom = 0;
5032 int use_rx_sg = 0;
5034 /* qeth_hdr must not cross element boundaries */
5035 while (element->length < offset + sizeof(struct qeth_hdr)) {
5036 if (qeth_is_last_sbale(element))
5037 return NULL;
5038 element++;
5039 offset = 0;
5040 }
5041 *hdr = element->addr + offset;
5043 offset += sizeof(struct qeth_hdr);
5044 switch ((*hdr)->hdr.l2.id) {
5045 case QETH_HEADER_TYPE_LAYER2:
5046 skb_len = (*hdr)->hdr.l2.pkt_length;
5047 break;
5048 case QETH_HEADER_TYPE_LAYER3:
5049 skb_len = (*hdr)->hdr.l3.length;
5050 headroom = ETH_HLEN;
5051 break;
5052 case QETH_HEADER_TYPE_OSN:
5053 skb_len = (*hdr)->hdr.osn.pdu_length;
5054 headroom = sizeof(struct qeth_hdr);
5055 break;
5056 default:
5057 break;
5058 }
5060 if (!skb_len)
5061 return NULL;
5063 if (((skb_len >= card->options.rx_sg_cb) &&
5064 !IS_OSN(card) &&
5065 (!atomic_read(&card->force_alloc_skb))) ||
5066 (card->options.cq == QETH_CQ_ENABLED))
5067 use_rx_sg = 1;
5069 if (use_rx_sg && qethbuffer->rx_skb) {
5070 /* QETH_CQ_ENABLED only: */
5071 skb = qethbuffer->rx_skb;
5072 qethbuffer->rx_skb = NULL;
5073 } else {
5074 unsigned int linear = (use_rx_sg) ? QETH_RX_PULL_LEN : skb_len;
5076 skb = napi_alloc_skb(&card->napi, linear + headroom);
5077 }
5078 if (!skb)
5079 goto no_mem;
5080 if (headroom)
5081 skb_reserve(skb, headroom);
5083 data_ptr = element->addr + offset;
5084 while (skb_len) {
5085 data_len = min(skb_len, (int)(element->length - offset));
5086 if (data_len) {
5087 if (use_rx_sg)
5088 qeth_create_skb_frag(element, skb, offset,
5089 data_len);
5090 else
5091 skb_put_data(skb, data_ptr, data_len);
5092 }
5093 skb_len -= data_len;
5094 if (skb_len) {
5095 if (qeth_is_last_sbale(element)) {
5096 QETH_CARD_TEXT(card, 4, "unexeob");
5097 QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5098 dev_kfree_skb_any(skb);
5099 QETH_CARD_STAT_INC(card, rx_errors);
5100 return NULL;
5101 }
5102 element++;
5103 offset = 0;
5104 data_ptr = element->addr;
5105 } else {
5106 offset += data_len;
5107 }
5108 }
5109 *__element = element;
5110 *__offset = offset;
5111 if (use_rx_sg) {
5112 QETH_CARD_STAT_INC(card, rx_sg_skbs);
5113 QETH_CARD_STAT_ADD(card, rx_sg_frags,
5114 skb_shinfo(skb)->nr_frags);
5115 }
5116 return skb;
5117 no_mem:
5118 if (net_ratelimit()) {
5119 QETH_CARD_TEXT(card, 2, "noskbmem");
5120 }
5121 QETH_CARD_STAT_INC(card, rx_dropped);
5122 return NULL;
5123 }
5124 EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
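/* Added annotation: main RX NAPI poll routine. It fetches completed input
 * buffers from qdio, lets the discipline unpack them, and re-queues each
 * fully processed buffer. Once work fits within the budget, the IRQ is
 * re-armed via qdio_start_irq() and NAPI is rescheduled if needed.
 */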
5126 int qeth_poll(struct napi_struct *napi, int budget)
5127 {
5128 struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5129 int work_done = 0;
5130 struct qeth_qdio_buffer *buffer;
5131 int done;
5132 int new_budget = budget;
5134 while (1) {
5135 if (!card->rx.b_count) {
5136 card->rx.qdio_err = 0;
5137 card->rx.b_count = qdio_get_next_buffers(
5138 card->data.ccwdev, 0, &card->rx.b_index,
5139 &card->rx.qdio_err);
5140 if (card->rx.b_count <= 0) {
5141 card->rx.b_count = 0;
5142 break;
5143 }
5144 card->rx.b_element =
5145 &card->qdio.in_q->bufs[card->rx.b_index]
5146 .buffer->element[0];
5147 card->rx.e_offset = 0;
5148 }
5150 while (card->rx.b_count) {
5151 buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5152 if (!(card->rx.qdio_err &&
5153 qeth_check_qdio_errors(card, buffer->buffer,
5154 card->rx.qdio_err, "qinerr")))
5155 work_done +=
5156 card->discipline->process_rx_buffer(
5157 card, new_budget, &done);
5158 else
5159 done = 1;
5161 if (done) {
5162 QETH_CARD_STAT_INC(card, rx_bufs);
5163 qeth_put_buffer_pool_entry(card,
5164 buffer->pool_entry);
5165 qeth_queue_input_buffer(card, card->rx.b_index);
5166 card->rx.b_count--;
5167 if (card->rx.b_count) {
5168 card->rx.b_index =
5169 (card->rx.b_index + 1) %
5170 QDIO_MAX_BUFFERS_PER_Q;
5171 card->rx.b_element =
5172 &card->qdio.in_q
5173 ->bufs[card->rx.b_index]
5174 .buffer->element[0];
5175 card->rx.e_offset = 0;
5176 }
5177 }
5179 if (work_done >= budget)
5180 goto out;
5181 else
5182 new_budget = budget - work_done;
5183 }
5184 }
5186 napi_complete_done(napi, work_done);
5187 if (qdio_start_irq(card->data.ccwdev, 0))
5188 napi_schedule(&card->napi);
5189 out:
5190 return work_done;
5191 }
5192 EXPORT_SYMBOL_GPL(qeth_poll);
5194 static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
5195 unsigned int bidx, bool error, int budget)
5196 {
5197 struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
5198 u8 sflags = buffer->buffer->element[15].sflags;
5199 struct qeth_card *card = queue->card;
5201 if (queue->bufstates && (queue->bufstates[bidx].flags &
5202 QDIO_OUTBUF_STATE_FLAG_PENDING)) {
5203 WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
5205 if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
5206 QETH_QDIO_BUF_PENDING) ==
5207 QETH_QDIO_BUF_PRIMED)
5208 qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
5210 QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
5212 /* prepare the queue slot for re-use: */
5213 qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
5214 if (qeth_init_qdio_out_buf(queue, bidx)) {
5215 QETH_CARD_TEXT(card, 2, "outofbuf");
5216 qeth_schedule_recovery(card);
5217 }
5219 return;
5220 }
5222 if (card->options.cq == QETH_CQ_ENABLED)
5223 qeth_notify_skbs(queue, buffer,
5224 qeth_compute_cq_notification(sflags, 0));
5225 qeth_clear_output_buffer(queue, buffer, error, budget);
5226 }
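/* Added annotation: TX completion NAPI poll for IQD devices. It inspects the
 * output queue, accounts completed buffers to BQL via
 * netdev_tx_completed_queue(), and wakes the txq if xmit stopped it on a
 * full queue. A yield path caps the per-poll work at one full queue's worth.
 */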
5228 static int qeth_tx_poll(struct napi_struct *napi, int budget)
5229 {
5230 struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
5231 unsigned int queue_no = queue->queue_no;
5232 struct qeth_card *card = queue->card;
5233 struct net_device *dev = card->dev;
5234 unsigned int work_done = 0;
5235 struct netdev_queue *txq;
5237 txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
5239 while (1) {
5240 unsigned int start, error, i;
5241 unsigned int packets = 0;
5242 unsigned int bytes = 0;
5243 int completed;
5245 if (qeth_out_queue_is_empty(queue)) {
5246 napi_complete(napi);
5247 return 0;
5248 }
5250 /* Give the CPU a breather: */
5251 if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
5252 QETH_TXQ_STAT_INC(queue, completion_yield);
5253 if (napi_complete_done(napi, 0))
5254 napi_schedule(napi);
5255 return 0;
5256 }
5258 completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
5259 &start, &error);
5260 if (completed <= 0) {
5261 /* Ensure we see TX completion for pending work: */
5262 if (napi_complete_done(napi, 0))
5263 qeth_tx_arm_timer(queue);
5264 return 0;
5265 }
5267 for (i = start; i < start + completed; i++) {
5268 struct qeth_qdio_out_buffer *buffer;
5269 unsigned int bidx = QDIO_BUFNR(i);
5271 buffer = queue->bufs[bidx];
5272 packets += skb_queue_len(&buffer->skb_list);
5273 bytes += buffer->bytes;
5275 qeth_handle_send_error(card, buffer, error);
5276 qeth_iqd_tx_complete(queue, bidx, error, budget);
5277 qeth_cleanup_handled_pending(queue, bidx, false);
5278 }
5280 netdev_tx_completed_queue(txq, packets, bytes);
5281 atomic_sub(completed, &queue->used_buffers);
5282 work_done += completed;
5284 /* xmit may have observed the full-condition, but not yet
5285 * stopped the txq. In which case the code below won't trigger.
5286 * So before returning, xmit will re-check the txq's fill level
5287 * and wake it up if needed.
5288 */
5289 if (netif_tx_queue_stopped(txq) &&
5290 !qeth_out_queue_is_full(queue))
5291 netif_tx_wake_queue(txq);
5292 }
5293 }
5295 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
5296 {
5297 if (!cmd->hdr.return_code)
5298 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5299 return cmd->hdr.return_code;
5300 }
5302 static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
5303 struct qeth_reply *reply,
5304 unsigned long data)
5305 {
5306 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5307 struct qeth_ipa_caps *caps = reply->param;
5309 if (qeth_setassparms_inspect_rc(cmd))
5310 return -EIO;
5312 caps->supported = cmd->data.setassparms.data.caps.supported;
5313 caps->enabled = cmd->data.setassparms.data.caps.enabled;
5314 return 0;
5315 }
5317 int qeth_setassparms_cb(struct qeth_card *card,
5318 struct qeth_reply *reply, unsigned long data)
5319 {
5320 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5322 QETH_CARD_TEXT(card, 4, "defadpcb");
5324 if (cmd->hdr.return_code)
5325 return -EIO;
5327 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5328 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
5329 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
5330 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
5331 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
5332 return 0;
5333 }
5334 EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
5336 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
5337 enum qeth_ipa_funcs ipa_func,
5338 u16 cmd_code,
5339 unsigned int data_length,
5340 enum qeth_prot_versions prot)
5341 {
5342 struct qeth_ipacmd_setassparms *setassparms;
5343 struct qeth_ipacmd_setassparms_hdr *hdr;
5344 struct qeth_cmd_buffer *iob;
5346 QETH_CARD_TEXT(card, 4, "getasscm");
5347 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
5348 data_length +
5349 offsetof(struct qeth_ipacmd_setassparms,
5350 hdr.command_code));
5351 if (!iob)
5352 return NULL;
5354 setassparms = &__ipa_cmd(iob)->data.setassparms;
5355 setassparms->assist_no = ipa_func;
5357 hdr = &setassparms->hdr;
5358 hdr->length = sizeof(*hdr) + data_length;
5359 hdr->command_code = cmd_code;
5360 return iob;
5361 }
5362 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
5364 int qeth_send_simple_setassparms_prot(struct qeth_card *card,
5365 enum qeth_ipa_funcs ipa_func,
5366 u16 cmd_code, u32 *data,
5367 enum qeth_prot_versions prot)
5368 {
5369 unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
5370 struct qeth_cmd_buffer *iob;
5372 QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
5373 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
5374 if (!iob)
5375 return -ENOMEM;
5377 if (data)
5378 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
5379 return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
5380 }
5381 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
5383 static void qeth_unregister_dbf_views(void)
5384 {
5385 int x;
5386 for (x = 0; x < QETH_DBF_INFOS; x++) {
5387 debug_unregister(qeth_dbf[x].id);
5388 qeth_dbf[x].id = NULL;
5389 }
5390 }
5392 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
5393 {
5394 char dbf_txt_buf[32];
5395 va_list args;
5397 if (!debug_level_enabled(id, level))
5398 return;
5399 va_start(args, fmt);
5400 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
5401 va_end(args);
5402 debug_text_event(id, level, dbf_txt_buf);
5403 }
5404 EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
5406 static int qeth_register_dbf_views(void)
5407 {
5408 int ret;
5409 int x;
5411 for (x = 0; x < QETH_DBF_INFOS; x++) {
5412 /* register the areas */
5413 qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
5414 qeth_dbf[x].pages,
5415 qeth_dbf[x].areas,
5416 qeth_dbf[x].len);
5417 if (qeth_dbf[x].id == NULL) {
5418 qeth_unregister_dbf_views();
5419 return -ENOMEM;
5420 }
5422 /* register a view */
5423 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
5424 if (ret) {
5425 qeth_unregister_dbf_views();
5426 return ret;
5427 }
5429 /* set a passing level */
5430 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
5431 }
5433 return 0;
5434 }
5436 static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */
5438 int qeth_core_load_discipline(struct qeth_card *card,
5439 enum qeth_discipline_id discipline)
5440 {
5441 mutex_lock(&qeth_mod_mutex);
5442 switch (discipline) {
5443 case QETH_DISCIPLINE_LAYER3:
5444 card->discipline = try_then_request_module(
5445 symbol_get(qeth_l3_discipline), "qeth_l3");
5446 break;
5447 case QETH_DISCIPLINE_LAYER2:
5448 card->discipline = try_then_request_module(
5449 symbol_get(qeth_l2_discipline), "qeth_l2");
5450 break;
5451 default:
5452 break;
5453 }
5454 mutex_unlock(&qeth_mod_mutex);
5456 if (!card->discipline) {
5457 dev_err(&card->gdev->dev, "There is no kernel module to "
5458 "support discipline %d\n", discipline);
5459 return -EINVAL;
5460 }
5462 card->options.layer = discipline;
5463 return 0;
5464 }
5466 void qeth_core_free_discipline(struct qeth_card *card)
5467 {
5468 if (IS_LAYER2(card))
5469 symbol_put(qeth_l2_discipline);
5470 else
5471 symbol_put(qeth_l3_discipline);
5472 card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
5473 card->discipline = NULL;
5474 }
5476 const struct device_type qeth_generic_devtype = {
5477 .name = "qeth_generic",
5478 .groups = qeth_generic_attr_groups,
5479 };
5480 EXPORT_SYMBOL_GPL(qeth_generic_devtype);
5482 static const struct device_type qeth_osn_devtype = {
5483 .name = "qeth_osn",
5484 .groups = qeth_osn_attr_groups,
5485 };
5487 #define DBF_NAME_LEN 20
5489 struct qeth_dbf_entry {
5490 char dbf_name[DBF_NAME_LEN];
5491 debug_info_t *dbf_info;
5492 struct list_head dbf_list;
5493 };
5495 static LIST_HEAD(qeth_dbf_list);
5496 static DEFINE_MUTEX(qeth_dbf_list_mutex);
5498 static debug_info_t *qeth_get_dbf_entry(char *name)
5499 {
5500 struct qeth_dbf_entry *entry;
5501 debug_info_t *rc = NULL;
5503 mutex_lock(&qeth_dbf_list_mutex);
5504 list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
5505 if (strcmp(entry->dbf_name, name) == 0) {
5506 rc = entry->dbf_info;
5507 break;
5508 }
5509 }
5510 mutex_unlock(&qeth_dbf_list_mutex);
5511 return rc;
5512 }
5514 static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
5515 {
5516 struct qeth_dbf_entry *new_entry;
5518 card->debug = debug_register(name, 2, 1, 8);
5519 if (!card->debug) {
5520 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
5521 goto err;
5522 }
5523 if (debug_register_view(card->debug, &debug_hex_ascii_view))
5524 goto err_dbg;
5525 new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
5526 if (!new_entry)
5527 goto err_dbg;
5528 strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
5529 new_entry->dbf_info = card->debug;
5530 mutex_lock(&qeth_dbf_list_mutex);
5531 list_add(&new_entry->dbf_list, &qeth_dbf_list);
5532 mutex_unlock(&qeth_dbf_list_mutex);
5534 return 0;
5536 err_dbg:
5537 debug_unregister(card->debug);
5538 err:
5539 return -ENOMEM;
5540 }
5542 static void qeth_clear_dbf_list(void)
5543 {
5544 struct qeth_dbf_entry *entry, *tmp;
5546 mutex_lock(&qeth_dbf_list_mutex);
5547 list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
5548 list_del(&entry->dbf_list);
5549 debug_unregister(entry->dbf_info);
5550 kfree(entry);
5551 }
5552 mutex_unlock(&qeth_dbf_list_mutex);
5553 }
5555 static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
5556 {
5557 struct net_device *dev;
5559 switch (card->info.type) {
5560 case QETH_CARD_TYPE_IQD:
5561 dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN,
5562 ether_setup, QETH_MAX_QUEUES, 1);
5563 break;
5564 case QETH_CARD_TYPE_OSM:
5565 dev = alloc_etherdev(0);
5566 break;
5567 case QETH_CARD_TYPE_OSN:
5568 dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
5569 break;
5570 default:
5571 dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1);
5572 }
5574 if (!dev)
5575 return NULL;
5577 dev->ml_priv = card;
5578 dev->watchdog_timeo = QETH_TX_TIMEOUT;
5579 dev->min_mtu = IS_OSN(card) ? 64 : 576;
5580 /* initialized when device first goes online: */
5581 dev->max_mtu = 0;
5582 dev->mtu = 0;
5583 SET_NETDEV_DEV(dev, &card->gdev->dev);
5584 netif_carrier_off(dev);
5586 if (IS_OSN(card)) {
5587 dev->ethtool_ops = &qeth_osn_ethtool_ops;
5588 } else {
5589 dev->ethtool_ops = &qeth_ethtool_ops;
5590 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
5591 dev->hw_features |= NETIF_F_SG;
5592 dev->vlan_features |= NETIF_F_SG;
5593 if (IS_IQD(card)) {
5594 dev->features |= NETIF_F_SG;
5595 if (netif_set_real_num_tx_queues(dev,
5596 QETH_IQD_MIN_TXQ)) {
5597 free_netdev(dev);
5598 return NULL;
5599 }
5600 }
5601 }
5603 return dev;
5604 }
5606 struct net_device *qeth_clone_netdev(struct net_device *orig)
5607 {
5608 struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
5610 if (!clone)
5611 return NULL;
5613 clone->dev_port = orig->dev_port;
5614 return clone;
5615 }
5617 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
5618 {
5619 struct qeth_card *card;
5620 struct device *dev;
5621 int rc;
5622 enum qeth_discipline_id enforced_disc;
5623 char dbf_name[DBF_NAME_LEN];
5625 QETH_DBF_TEXT(SETUP, 2, "probedev");
5627 dev = &gdev->dev;
5628 if (!get_device(dev))
5629 return -ENODEV;
5631 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
5633 card = qeth_alloc_card(gdev);
5634 if (!card) {
5635 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
5636 rc = -ENOMEM;
5637 goto err_dev;
5638 }
5640 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
5641 dev_name(&gdev->dev));
5642 card->debug = qeth_get_dbf_entry(dbf_name);
5643 if (!card->debug) {
5644 rc = qeth_add_dbf_entry(card, dbf_name);
5645 if (rc)
5646 goto err_card;
5647 }
5649 qeth_setup_card(card);
5650 card->dev = qeth_alloc_netdev(card);
5651 if (!card->dev) {
5652 rc = -ENOMEM;
5653 goto err_card;
5654 }
5656 card->qdio.no_out_queues = card->dev->num_tx_queues;
5657 rc = qeth_update_from_chp_desc(card);
5658 if (rc)
5659 goto err_chp_desc;
5660 qeth_determine_capabilities(card);
5661 qeth_set_blkt_defaults(card);
5663 enforced_disc = qeth_enforce_discipline(card);
5664 switch (enforced_disc) {
5665 case QETH_DISCIPLINE_UNDETERMINED:
5666 gdev->dev.type = &qeth_generic_devtype;
5667 break;
5668 default:
5669 card->info.layer_enforced = true;
5670 rc = qeth_core_load_discipline(card, enforced_disc);
5671 if (rc)
5672 goto err_load;
5674 gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
5675 card->discipline->devtype;
5676 rc = card->discipline->setup(card->gdev);
5677 if (rc)
5678 goto err_disc;
5679 break;
5680 }
5682 return 0;
5684 err_disc:
5685 qeth_core_free_discipline(card);
5686 err_load:
5687 err_chp_desc:
5688 free_netdev(card->dev);
5689 err_card:
5690 qeth_core_free_card(card);
5691 err_dev:
5692 put_device(dev);
5693 return rc;
5694 }
5696 static void qeth_core_remove_device(struct ccwgroup_device *gdev)
5697 {
5698 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5700 QETH_CARD_TEXT(card, 2, "removedv");
5702 if (card->discipline) {
5703 card->discipline->remove(gdev);
5704 qeth_core_free_discipline(card);
5705 }
5707 free_netdev(card->dev);
5708 qeth_core_free_card(card);
5709 put_device(&gdev->dev);
5710 }
5712 static int qeth_core_set_online(struct ccwgroup_device *gdev)
5713 {
5714 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5715 int rc = 0;
5716 enum qeth_discipline_id def_discipline;
5718 if (!card->discipline) {
5719 def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
5720 QETH_DISCIPLINE_LAYER2;
5721 rc = qeth_core_load_discipline(card, def_discipline);
5722 if (rc)
5723 goto err;
5724 rc = card->discipline->setup(card->gdev);
5725 if (rc) {
5726 qeth_core_free_discipline(card);
5727 goto err;
5728 }
5729 }
5730 rc = card->discipline->set_online(gdev);
5731 err:
5732 return rc;
5733 }
5735 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
5736 {
5737 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5738 return card->discipline->set_offline(gdev);
5739 }
5741 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
5742 {
5743 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5744 qeth_set_allowed_threads(card, 0, 1);
5745 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
5746 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5747 qeth_qdio_clear_card(card, 0);
5748 qeth_drain_output_queues(card);
5749 qdio_free(CARD_DDEV(card));
5750 }
5752 static int qeth_suspend(struct ccwgroup_device *gdev)
5753 {
5754 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5756 qeth_set_allowed_threads(card, 0, 1);
5757 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
5758 if (gdev->state == CCWGROUP_OFFLINE)
5759 return 0;
5761 card->discipline->set_offline(gdev);
5762 return 0;
5763 }
5765 static int qeth_resume(struct ccwgroup_device *gdev)
5766 {
5767 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5768 int rc;
5770 rc = card->discipline->set_online(gdev);
5772 qeth_set_allowed_threads(card, 0xffffffff, 0);
5773 if (rc)
5774 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover an error on the device\n");
5775 return rc;
5776 }
5778 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
5779 size_t count)
5780 {
5781 int err;
5783 err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
5784 buf);
5786 return err ? err : count;
5787 }
5788 static DRIVER_ATTR_WO(group);
5790 static struct attribute *qeth_drv_attrs[] = {
5791 &driver_attr_group.attr,
5792 NULL,
5793 };
5794 static struct attribute_group qeth_drv_attr_group = {
5795 .attrs = qeth_drv_attrs,
5796 };
5797 static const struct attribute_group *qeth_drv_attr_groups[] = {
5798 &qeth_drv_attr_group,
5799 NULL,
5800 };
5802 static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
5803 .driver = {
5804 .groups = qeth_drv_attr_groups,
5805 .owner = THIS_MODULE,
5806 .name = "qeth",
5807 },
5808 .ccw_driver = &qeth_ccw_driver,
5809 .setup = qeth_core_probe_device,
5810 .remove = qeth_core_remove_device,
5811 .set_online = qeth_core_set_online,
5812 .set_offline = qeth_core_set_offline,
5813 .shutdown = qeth_core_shutdown,
5814 .prepare = NULL,
5815 .complete = NULL,
5816 .freeze = qeth_suspend,
5817 .thaw = qeth_resume,
5818 .restore = qeth_resume,
5819 };
5821 struct qeth_card *qeth_get_card_by_busid(char *bus_id)
5822 {
5823 struct ccwgroup_device *gdev;
5824 struct qeth_card *card;
5826 gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
5827 if (!gdev)
5828 return NULL;
5830 card = dev_get_drvdata(&gdev->dev);
5831 put_device(&gdev->dev);
5832 return card;
5833 }
5836 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5837 {
5838 struct qeth_card *card = dev->ml_priv;
5839 struct mii_ioctl_data *mii_data;
5840 int rc = 0;
5842 if (!card)
5843 return -ENODEV;
5845 switch (cmd) {
5846 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
5847 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
5848 break;
5849 case SIOC_QETH_GET_CARD_TYPE:
5850 if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
5851 !IS_VM_NIC(card))
5852 return 1;
5853 return 0;
5854 case SIOCGMIIPHY:
5855 mii_data = if_mii(rq);
5856 mii_data->phy_id = 0;
5857 break;
5858 case SIOCGMIIREG:
5859 mii_data = if_mii(rq);
5860 if (mii_data->phy_id != 0)
5861 rc = -EINVAL;
5862 else
5863 mii_data->val_out = qeth_mdio_read(dev,
5864 mii_data->phy_id, mii_data->reg_num);
5865 break;
5866 case SIOC_QETH_QUERY_OAT:
5867 rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
5868 break;
5869 default:
5870 if (card->discipline->do_ioctl)
5871 rc = card->discipline->do_ioctl(dev, rq, cmd);
5872 else
5873 rc = -EOPNOTSUPP;
5874 }
5875 if (rc)
5876 QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
5877 return rc;
5878 }
5879 EXPORT_SYMBOL_GPL(qeth_do_ioctl);
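/* Added annotation: checksum offload is negotiated in two IPA steps below.
 * ASS_START returns the supported feature bits (qeth_start_csum_cb), then
 * ASS_ENABLE activates the required set and the reply caps are verified.
 * Any failure rolls back via qeth_set_csum_off().
 */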
5881 static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
5882 unsigned long data)
5883 {
5884 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5885 u32 *features = reply->param;
5887 if (qeth_setassparms_inspect_rc(cmd))
5888 return -EIO;
5890 *features = cmd->data.setassparms.data.flags_32bit;
5891 return 0;
5892 }
5894 static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
5895 enum qeth_prot_versions prot)
5896 {
5897 return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
5898 NULL, prot);
5899 }
5901 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
5902 enum qeth_prot_versions prot)
5903 {
5904 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
5905 struct qeth_cmd_buffer *iob;
5906 struct qeth_ipa_caps caps;
5907 u32 features;
5908 int rc;
5910 /* some L3 HW requires combined L3+L4 csum offload: */
5911 if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
5912 cstype == IPA_OUTBOUND_CHECKSUM)
5913 required_features |= QETH_IPA_CHECKSUM_IP_HDR;
5915 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
5916 prot);
5917 if (!iob)
5918 return -ENOMEM;
5920 rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
5921 if (rc)
5922 return rc;
5924 if ((required_features & features) != required_features) {
5925 qeth_set_csum_off(card, cstype, prot);
5926 return -EOPNOTSUPP;
5927 }
5929 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
5930 SETASS_DATA_SIZEOF(flags_32bit),
5931 prot);
5932 if (!iob) {
5933 qeth_set_csum_off(card, cstype, prot);
5934 return -ENOMEM;
5935 }
5937 if (features & QETH_IPA_CHECKSUM_LP2LP)
5938 required_features |= QETH_IPA_CHECKSUM_LP2LP;
5939 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
5940 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
5941 if (rc) {
5942 qeth_set_csum_off(card, cstype, prot);
5943 return rc;
5944 }
5946 if (!qeth_ipa_caps_supported(&caps, required_features) ||
5947 !qeth_ipa_caps_enabled(&caps, required_features)) {
5948 qeth_set_csum_off(card, cstype, prot);
5949 return -EOPNOTSUPP;
5950 }
5952 dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
5953 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
5954 if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) &&
5955 cstype == IPA_OUTBOUND_CHECKSUM)
5956 dev_warn(&card->gdev->dev,
5957 "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
5958 QETH_CARD_IFNAME(card));
5959 return 0;
5960 }
5962 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
5963 enum qeth_prot_versions prot)
5964 {
5965 return on ? qeth_set_csum_on(card, cstype, prot) :
5966 qeth_set_csum_off(card, cstype, prot);
5967 }
5969 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
5970 unsigned long data)
5971 {
5972 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5973 struct qeth_tso_start_data *tso_data = reply->param;
5975 if (qeth_setassparms_inspect_rc(cmd))
5976 return -EIO;
5978 tso_data->mss = cmd->data.setassparms.data.tso.mss;
5979 tso_data->supported = cmd->data.setassparms.data.tso.supported;
5980 return 0;
5981 }
5983 static int qeth_set_tso_off(struct qeth_card *card,
5984 enum qeth_prot_versions prot)
5985 {
5986 return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
5987 IPA_CMD_ASS_STOP, NULL, prot);
5988 }
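/* Added annotation: TSO follows the same START/ENABLE pattern as checksum
 * offload; the START reply additionally carries the adapter's MSS limit,
 * and the setup is rejected if the MSS is zero or TCP large-send is not
 * advertised.
 */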
5990 static int qeth_set_tso_on(struct qeth_card *card,
5991 enum qeth_prot_versions prot)
5992 {
5993 struct qeth_tso_start_data tso_data;
5994 struct qeth_cmd_buffer *iob;
5995 struct qeth_ipa_caps caps;
5996 int rc;
5998 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
5999 IPA_CMD_ASS_START, 0, prot);
6000 if (!iob)
6001 return -ENOMEM;
6003 rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
6004 if (rc)
6005 return rc;
6007 if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
6008 qeth_set_tso_off(card, prot);
6009 return -EOPNOTSUPP;
6010 }
6012 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6013 IPA_CMD_ASS_ENABLE,
6014 SETASS_DATA_SIZEOF(caps), prot);
6015 if (!iob) {
6016 qeth_set_tso_off(card, prot);
6017 return -ENOMEM;
6018 }
6020 /* enable TSO capability */
6021 __ipa_cmd(iob)->data.setassparms.data.caps.enabled =
6022 QETH_IPA_LARGE_SEND_TCP;
6023 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6024 if (rc) {
6025 qeth_set_tso_off(card, prot);
6026 return rc;
6027 }
6029 if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
6030 !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
6031 qeth_set_tso_off(card, prot);
6032 return -EOPNOTSUPP;
6033 }
6035 dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
6036 tso_data.mss);
6037 return 0;
6038 }
6040 static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
6041 enum qeth_prot_versions prot)
6042 {
6043 return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
6044 }
6046 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
6047 {
6048 int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
6049 int rc_ipv6;
6051 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
6052 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6053 QETH_PROT_IPV4);
6054 if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6055 /* no/one Offload Assist available, so the rc is trivial */
6056 return rc_ipv4;
6058 rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6059 QETH_PROT_IPV6);
6061 if (on)
6062 /* enable: success if any Assist is active */
6063 return (rc_ipv6) ? rc_ipv4 : 0;
6065 /* disable: failure if any Assist is still active */
6066 return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
6067 }
6069 /**
6070 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
6071 * @dev: a net_device
6072 */
6073 void qeth_enable_hw_features(struct net_device *dev)
6074 {
6075 struct qeth_card *card = dev->ml_priv;
6076 netdev_features_t features;
6078 features = dev->features;
6079 /* force-off any feature that might need an IPA sequence.
6080 * netdev_update_features() will restart them.
6081 */
6082 dev->features &= ~dev->hw_features;
6083 /* toggle VLAN filter, so that VIDs are re-programmed: */
6084 if (IS_LAYER2(card) && IS_VM_NIC(card)) {
6085 dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
6086 dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6087 }
6088 netdev_update_features(dev);
6089 if (features != dev->features)
6090 dev_warn(&card->gdev->dev,
6091 "Device recovery failed to restore all offload features\n");
6092 }
6093 EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
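/* Added annotation: ndo_set_features handler. Each changed offload bit is
 * toggled via its IPA sequence; bits that failed to toggle are masked out of
 * 'changed'. If the remaining delta differs from what was requested,
 * dev->features is adjusted to what really took effect and -EIO is returned
 * so the stack sees the partial failure.
 */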
6095 int qeth_set_features(struct net_device *dev, netdev_features_t features)
6096 {
6097 struct qeth_card *card = dev->ml_priv;
6098 netdev_features_t changed = dev->features ^ features;
6099 int rc = 0;
6101 QETH_CARD_TEXT(card, 2, "setfeat");
6102 QETH_CARD_HEX(card, 2, &features, sizeof(features));
6104 if ((changed & NETIF_F_IP_CSUM)) {
6105 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
6106 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
6107 if (rc)
6108 changed ^= NETIF_F_IP_CSUM;
6109 }
6110 if (changed & NETIF_F_IPV6_CSUM) {
6111 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
6112 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
6113 if (rc)
6114 changed ^= NETIF_F_IPV6_CSUM;
6115 }
6116 if (changed & NETIF_F_RXCSUM) {
6117 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
6118 if (rc)
6119 changed ^= NETIF_F_RXCSUM;
6120 }
6121 if (changed & NETIF_F_TSO) {
6122 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
6123 QETH_PROT_IPV4);
6124 if (rc)
6125 changed ^= NETIF_F_TSO;
6126 }
6127 if (changed & NETIF_F_TSO6) {
6128 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
6129 QETH_PROT_IPV6);
6130 if (rc)
6131 changed ^= NETIF_F_TSO6;
6132 }
6134 /* everything changed successfully? */
6135 if ((dev->features ^ features) == changed)
6136 return rc;
6137 /* something went wrong. save changed features and return error */
6138 dev->features ^= changed;
6139 return -EIO;
6140 }
6141 EXPORT_SYMBOL_GPL(qeth_set_features);
6143 netdev_features_t qeth_fix_features(struct net_device *dev,
6144 netdev_features_t features)
6145 {
6146 struct qeth_card *card = dev->ml_priv;
6148 QETH_CARD_TEXT(card, 2, "fixfeat");
6149 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
6150 features &= ~NETIF_F_IP_CSUM;
6151 if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
6152 features &= ~NETIF_F_IPV6_CSUM;
6153 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
6154 !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6155 features &= ~NETIF_F_RXCSUM;
6156 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
6157 features &= ~NETIF_F_TSO;
6158 if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
6159 features &= ~NETIF_F_TSO6;
6161 QETH_CARD_HEX(card, 2, &features, sizeof(features));
6162 return features;
6163 }
6164 EXPORT_SYMBOL_GPL(qeth_fix_features);
6166 netdev_features_t qeth_features_check(struct sk_buff *skb,
6167 struct net_device *dev,
6168 netdev_features_t features)
6169 {
6170 /* GSO segmentation builds skbs with
6171 * a (small) linear part for the headers, and
6172 * page frags for the data.
6173 * Compared to a linear skb, the header-only part consumes an
6174 * additional buffer element. This reduces buffer utilization, and
6175 * hurts throughput. So compress small segments into one element.
6176 */
6177 if (netif_needs_gso(skb, features)) {
6178 /* match skb_segment(): */
6179 unsigned int doffset = skb->data - skb_mac_header(skb);
6180 unsigned int hsize = skb_shinfo(skb)->gso_size;
6181 unsigned int hroom = skb_headroom(skb);
6183 /* linearize only if resulting skb allocations are order-0: */
6184 if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
6185 features &= ~NETIF_F_SG;
6186 }
6188 return vlan_features_check(skb, features);
6189 }
6190 EXPORT_SYMBOL_GPL(qeth_features_check);
6192 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6193 {
6194 struct qeth_card *card = dev->ml_priv;
6195 struct qeth_qdio_out_q *queue;
6196 unsigned int i;
6198 QETH_CARD_TEXT(card, 5, "getstat");
6200 stats->rx_packets = card->stats.rx_packets;
6201 stats->rx_bytes = card->stats.rx_bytes;
6202 stats->rx_errors = card->stats.rx_errors;
6203 stats->rx_dropped = card->stats.rx_dropped;
6204 stats->multicast = card->stats.rx_multicast;
6206 for (i = 0; i < card->qdio.no_out_queues; i++) {
6207 queue = card->qdio.out_qs[i];
6209 stats->tx_packets += queue->stats.tx_packets;
6210 stats->tx_bytes += queue->stats.tx_bytes;
6211 stats->tx_errors += queue->stats.tx_errors;
6212 stats->tx_dropped += queue->stats.tx_dropped;
6213 }
6214 }
6217 u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
6218 u8 cast_type, struct net_device *sb_dev)
6219 {
6220 if (cast_type != RTN_UNICAST)
6221 return QETH_IQD_MCAST_TXQ;
6222 return QETH_IQD_MIN_UCAST_TXQ;
6223 }
6224 EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
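/* Added annotation: ndo_open handler. It re-arms the qdio IRQ, starts the
 * tx queues and enables the RX NAPI instance; on IQD devices one extra NAPI
 * instance per TX queue is registered for TX completion polling.
 */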
6226 int qeth_open(struct net_device *dev)
6227 {
6228 struct qeth_card *card = dev->ml_priv;
6230 QETH_CARD_TEXT(card, 4, "qethopen");
6232 if (qdio_stop_irq(CARD_DDEV(card), 0) < 0)
6233 return -EIO;
6235 card->data.state = CH_STATE_UP;
6236 netif_tx_start_all_queues(dev);
6238 napi_enable(&card->napi);
6239 local_bh_disable();
6240 napi_schedule(&card->napi);
6241 if (IS_IQD(card)) {
6242 struct qeth_qdio_out_q *queue;
6243 unsigned int i;
6245 qeth_for_each_output_queue(card, queue, i) {
6246 netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
6247 QETH_NAPI_WEIGHT);
6248 napi_enable(&queue->napi);
6249 napi_schedule(&queue->napi);
6250 }
6251 }
6252 /* kick-start the NAPI softirq: */
6253 local_bh_enable();
6254 return 0;
6255 }
6256 EXPORT_SYMBOL_GPL(qeth_open);
6258 int qeth_stop(struct net_device *dev)
6259 {
6260 struct qeth_card *card = dev->ml_priv;
6262 QETH_CARD_TEXT(card, 4, "qethstop");
6263 if (IS_IQD(card)) {
6264 struct qeth_qdio_out_q *queue;
6265 unsigned int i;
6267 /* Quiesce the NAPI instances: */
6268 qeth_for_each_output_queue(card, queue, i) {
6269 napi_disable(&queue->napi);
6270 del_timer_sync(&queue->timer);
6271 }
6273 /* Stop .ndo_start_xmit, might still access queue->napi. */
6274 netif_tx_disable(dev);
6276 /* Queues may get re-allocated, so remove the NAPIs here. */
6277 qeth_for_each_output_queue(card, queue, i)
6278 netif_napi_del(&queue->napi);
6279 } else {
6280 netif_tx_disable(dev);
6281 }
6283 napi_disable(&card->napi);
6284 return 0;
6285 }
6286 EXPORT_SYMBOL_GPL(qeth_stop);
6288 static int __init qeth_core_init(void)
6289 {
6290 int rc;
6292 pr_info("loading core functions\n");
6294 rc = qeth_register_dbf_views();
6295 if (rc)
6296 goto dbf_err;
6297 qeth_core_root_dev = root_device_register("qeth");
6298 rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
6299 if (rc)
6300 goto register_err;
6301 qeth_core_header_cache =
6302 kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
6303 roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
6304 0, NULL);
6305 if (!qeth_core_header_cache) {
6306 rc = -ENOMEM;
6307 goto slab_err;
6308 }
6309 qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
6310 sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
6311 if (!qeth_qdio_outbuf_cache) {
6312 rc = -ENOMEM;
6313 goto cqslab_err;
6314 }
6315 rc = ccw_driver_register(&qeth_ccw_driver);
6316 if (rc)
6317 goto ccw_err;
6318 rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
6319 if (rc)
6320 goto ccwgroup_err;
6322 return 0;
6324 ccwgroup_err:
6325 ccw_driver_unregister(&qeth_ccw_driver);
6326 ccw_err:
6327 kmem_cache_destroy(qeth_qdio_outbuf_cache);
6328 cqslab_err:
6329 kmem_cache_destroy(qeth_core_header_cache);
6330 slab_err:
6331 root_device_unregister(qeth_core_root_dev);
6332 register_err:
6333 qeth_unregister_dbf_views();
6334 dbf_err:
6335 pr_err("Initializing the qeth device driver failed\n");
6336 return rc;
6337 }
6339 static void __exit qeth_core_exit(void)
6340 {
6341 qeth_clear_dbf_list();
6342 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
6343 ccw_driver_unregister(&qeth_ccw_driver);
6344 kmem_cache_destroy(qeth_qdio_outbuf_cache);
6345 kmem_cache_destroy(qeth_core_header_cache);
6346 root_device_unregister(qeth_core_root_dev);
6347 qeth_unregister_dbf_views();
6348 pr_info("core functions removed\n");
6349 }
6351 module_init(qeth_core_init);
6352 module_exit(qeth_core_exit);
6353 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
6354 MODULE_DESCRIPTION("qeth core functions");
6355 MODULE_LICENSE("GPL");