// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *	      Frank Pavlic <fpavlic@de.ibm.com>,
 *	      Thomas Spatzier <tspat@de.ibm.com>,
 *	      Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/io.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	[QETH_DBF_SETUP] = {"qeth_setup",
			    8, 1, 8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG] = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			  &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL] = {"qeth_control",
			   8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification);
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);

static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	ccwgroup_set_offline(card->gdev);
}

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return " OSN QDIO";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return "OSN";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			      int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

static void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list) {
		list_del(&pool_entry->list);
	}

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
}

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}

	return 0;
}

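/* Note: the pool is resized in two phases. New entries are first
 * collected on a temporary list and only spliced into init_pool once
 * all allocations have succeeded, so a failed resize leaves the pool
 * unchanged.
 */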
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until queue is allocated: */
	if (!card->qdio.in_q)
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0, 127);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		int i;
		struct qdio_outbuf_state *outbuf_states;

		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			rc = -1;
			goto kmsg_out;
		}

		card->qdio.no_in_queues = 2;
		card->qdio.out_bufstates =
			kcalloc(card->qdio.no_out_queues *
					QDIO_MAX_BUFFERS_PER_Q,
				sizeof(struct qdio_outbuf_state),
				GFP_KERNEL);
		outbuf_states = card->qdio.out_bufstates;
		if (outbuf_states == NULL) {
			rc = -1;
			goto free_cq_out;
		}

		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			card->qdio.out_qs[i]->bufstates = outbuf_states;
			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
		}
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
	rc = 0;
out:
	return rc;
free_cq_out:
	qeth_free_qdio_queue(card->qdio.c_q);
	card->qdio.c_q = NULL;
kmsg_out:
	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
	goto out;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
	kfree(card->qdio.out_bufstates);
	card->qdio.out_bufstates = NULL;
}

static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
	}

	return n;
}

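/* TX buffers that completed with a pending QAOB are parked on a
 * singly-linked next_pending chain behind their queue slot. Entries
 * whose state has reached QETH_QDIO_BUF_HANDLED_DELAYED (or all of
 * them, on forced cleanup) are completed here, unlinked and returned
 * to qeth_qdio_outbuf_cache.
 */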
static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
					 int forced_cleanup)
{
	if (q->card->options.cq != QETH_CQ_ENABLED)
		return;

	if (q->bufs[bidx]->next_pending != NULL) {
		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;

		while (c) {
			if (forced_cleanup ||
			    atomic_read(&c->state) ==
			      QETH_QDIO_BUF_HANDLED_DELAYED) {
				struct qeth_qdio_out_buffer *f = c;
				QETH_CARD_TEXT(f->q->card, 5, "fp");
				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
				/* release here to avoid interleaving between
				   outbound tasklet and inbound tasklet
				   regarding notifications and lifecycle */
				qeth_tx_complete_buf(c, forced_cleanup, 0);

				c = f->next_pending;
				WARN_ON_ONCE(head->next_pending != f);
				head->next_pending = c;
				kmem_cache_free(qeth_qdio_outbuf_cache, f);
			} else {
				head = c;
				c = c->next_pending;
			}
		}
	}
	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
					QETH_QDIO_BUF_HANDLED_DELAYED)) {
		/* for recovery situations */
		qeth_init_qdio_out_buf(q, bidx);
		QETH_CARD_TEXT(q->card, 2, "clprecov");
	}
}

static void qeth_qdio_handle_aob(struct qeth_card *card,
				 unsigned long phys_aob_addr)
{
	struct qaob *aob;
	struct qeth_qdio_out_buffer *buffer;
	enum iucv_tx_notify notification;
	unsigned int i;

	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
	QETH_CARD_TEXT(card, 5, "haob");
	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);

	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
		notification = TX_NOTIFY_OK;
	} else {
		WARN_ON_ONCE(atomic_read(&buffer->state) !=
							QETH_QDIO_BUF_PENDING);
		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
		notification = TX_NOTIFY_DELAYED_OK;
	}

	if (aob->aorc != 0) {
		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
		notification = qeth_compute_cq_notification(aob->aorc, 1);
	}
	qeth_notify_skbs(buffer->q, buffer, notification);

	/* Free dangling allocations. The attached skbs are handled by
	 * qeth_cleanup_handled_pending().
	 */
	for (i = 0;
	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
	     i++) {
		void *data = phys_to_virt(aob->sba[i]);

		if (data && buffer->is_header[i])
			kmem_cache_free(qeth_core_header_cache, data);
	}
	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);

	qdio_release_aob(aob);
}

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

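/* The READ channel runs a single long-lived READ ccw: it is restarted
 * from its completion callback, so the device can deliver control data
 * at any time.
 */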
static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list);
	spin_unlock_irq(&card->lock);
}

void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}
EXPORT_SYMBOL_GPL(qeth_notify_cmd);

static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}

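/* card->local_addrs4/6 cache the addresses that the device reports as
 * local, so that the TX path can decide under RCU whether a packet's
 * next hop terminates on this adapter (see qeth_next_hop_is_local_*()).
 */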
static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
			       struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

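/* Sort an incoming IPA message into reply vs. unsolicited event.
 * Returns the cmd when the caller should match it against its pending
 * requests, or NULL when the message has been fully consumed here.
 */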
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SETCCID &&
		    cmd->hdr.command != IPA_CMD_DELCCID &&
		    cmd->hdr.command != IPA_CMD_MODCCID &&
		    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
				QETH_CARD_IFNAME(card));
			schedule_work(&card->close_dev_work);
		} else {
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 QETH_CARD_IFNAME(card), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 QETH_CARD_IFNAME(card), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_MODCCID:
		return cmd;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list)
		qeth_notify_cmd(iob, -ECANCELED);
	spin_unlock_irqrestore(&card->lock, flags);
}

static int qeth_check_idx_response(struct qeth_card *card,
				   unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
EXPORT_SYMBOL_GPL(qeth_put_cmd);

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

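/* A cmd buffer starts out with a single reference that is owned by the
 * issuer; qeth_get_cmd()/qeth_put_cmd() add paired references for the
 * duration of the channel IO, so that a late callback never touches a
 * freed iob.
 */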
struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
				       unsigned int length, unsigned int ccws,
				       long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	INIT_LIST_HEAD(&iob->list);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_alloc_cmd);

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		fallthrough;
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
		if (IS_OSN(card) && card->osn_info.assist_cb &&
		    cmd->hdr.command != IPA_CMD_STARTLAN) {
			card->osn_info.assist_cb(card->dev, cmd);
			goto out;
		}
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

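/* Worker threads (currently only recovery) are tracked with three bit
 * masks under thread_mask_lock: allowed, start and running. A thread
 * may only move from started to running while it is still allowed, and
 * every transition wakes wait_q so that waiters can re-check the masks.
 */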
static int qeth_set_thread_start_bit(struct qeth_card *card,
				     unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
			       16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, "  rc???");
		return PTR_ERR(irb);
	}
}

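/* Interrupt handler for all three CCW channels of a card. It maps the
 * cdev back to its channel, matches intparm against the channel's
 * active cmd, and then completes, cancels or hands the cmd to its
 * callback depending on the IRB status.
 */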
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	channel->active_cmd = NULL;
	qeth_unlock_channel(card, channel);

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct qeth_qdio_out_q *queue = buf->q;
	struct sk_buff *skb;

	/* release may never happen from within CQ tasklet scope */
	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);

	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
		atomic_dec(&queue->set_pci_flags_count);

	qeth_tx_complete_buf(buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = phys_to_virt(buf->buffer->element[i].addr);

		if (data && buf->is_header[i])
			kmem_cache_free(qeth_core_header_cache, data);
		buf->is_header[i] = 0;
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;
		qeth_cleanup_handled_pending(q, j, 1);
		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}

static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return 0;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					      kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
						      QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
	hash_init(card->rx_mode_addrs);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					      qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			   CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd) {
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);
		channel->active_cmd = NULL;
	}
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	atomic_set(&channel->irq_pending, 0);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}

static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card) || IS_OSN(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}

static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_idx_init(struct qeth_card *card)
{
	memset(&card->seqno, 0, sizeof(card->seqno));

	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSN:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

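/* Note: derives the function level that the peer is expected to report
 * back for a given local level; the IDX activate callbacks below
 * compare the received value against this.
 */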
static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  const void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	iob->match = qeth_mpc_match_reply;
	return iob;
}

/**
 * qeth_send_control_data() - send control command to the card
 * @card:			qeth_card structure pointer
 * @iob:			qeth_cmd_buffer pointer
 * @reply_cb:			callback function pointer
 *  @cb_card:			pointer to the qeth_card structure
 *  @cb_reply:			pointer to the qeth_reply structure
 *  @cb_cmd:			pointer to the original iob for non-IPA
 *				commands, or to the qeth_ipa_cmd structure
 *				for the IPA commands.
 * @reply_param:		private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */
static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	if (!rc)
		channel->active_cmd = iob;
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		qeth_unlock_channel(card, channel);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}

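/* Minimal usage sketch for qeth_send_control_data(), mirroring
 * qeth_cm_enable() further below: allocate an iob, fill in the request
 * data, then hand it off together with an optional reply callback:
 *
 *	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
 *	if (!iob)
 *		return -ENOMEM;
 *	rc = qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
 *
 * The iob reference is consumed in all cases.
 */
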
struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

	return qeth_send_control_data(card, iob, NULL, NULL);
}

static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}

static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxrdcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}

	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
	}

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
	struct ccw1 *ccw = __ccw_from_cmd(iob);

	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
	iob->finalize = qeth_idx_finalize_cmd;

	port |= QETH_IDX_ACT_INVAL_FRAME;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}

static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxread");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_read_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_idx_activate_write_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->write;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrite");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_write_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

2359 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2362 struct qeth_cmd_buffer *iob;
2364 QETH_CARD_TEXT(card, 2, "cmenblcb");
2366 iob = (struct qeth_cmd_buffer *) data;
2367 memcpy(&card->token.cm_filter_r,
2368 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2369 QETH_MPC_TOKEN_LENGTH);
2373 static int qeth_cm_enable(struct qeth_card *card)
2375 struct qeth_cmd_buffer *iob;
2377 QETH_CARD_TEXT(card, 2, "cmenable");
2379 iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
2383 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2384 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2385 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2386 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2388 return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
2391 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2394 struct qeth_cmd_buffer *iob;
2396 QETH_CARD_TEXT(card, 2, "cmsetpcb");
2398 iob = (struct qeth_cmd_buffer *) data;
2399 memcpy(&card->token.cm_connection_r,
2400 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2401 QETH_MPC_TOKEN_LENGTH);
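/* CM_SETUP: establish the control connection. The callback above saves
 * the connection token that subsequent ULP commands refer to.
 */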
2405 static int qeth_cm_setup(struct qeth_card *card)
2407 struct qeth_cmd_buffer *iob;
2409 QETH_CARD_TEXT(card, 2, "cmsetup");
2411 iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
2415 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2416 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2417 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2418 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2419 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2420 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2421 return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
2424 static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
2426 if (link_type == QETH_LINK_TYPE_LANE_TR ||
2427 link_type == QETH_LINK_TYPE_HSTR) {
2428 dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
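/* Apply a new max MTU reported by the card: keep a user-set MTU, move a
 * default MTU along to the new maximum, and resize the RX buffers (which
 * requires freeing the QDIO queues if the max MTU actually changed).
 */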
2435 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
2437 struct net_device *dev = card->dev;
2438 unsigned int new_mtu;
2441 /* IQD needs accurate max MTU to set up its RX buffers: */
2444 /* tolerate quirky HW: */
2445 max_mtu = ETH_MAX_MTU;
2450 /* move any device with default MTU to new max MTU: */
2451 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
2453 /* adjust RX buffer size to new max MTU: */
2454 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
2455 if (dev->max_mtu && dev->max_mtu != max_mtu)
2456 qeth_free_qdio_queues(card);
2460 /* default MTUs for first setup: */
2461 else if (IS_LAYER2(card))
2462 new_mtu = ETH_DATA_LEN;
2464 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
2467 dev->max_mtu = max_mtu;
2468 dev->mtu = min(new_mtu, max_mtu);
2473 static int qeth_get_mtu_outof_framesize(int framesize)
2475 switch (framesize) {
2489 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2492 __u16 mtu, framesize;
2494 struct qeth_cmd_buffer *iob;
2497 QETH_CARD_TEXT(card, 2, "ulpenacb");
2499 iob = (struct qeth_cmd_buffer *) data;
2500 memcpy(&card->token.ulp_filter_r,
2501 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2502 QETH_MPC_TOKEN_LENGTH);
2504 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2505 mtu = qeth_get_mtu_outof_framesize(framesize);
2507 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
2509 *(u16 *)reply->param = mtu;
2511 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2512 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2514 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2515 if (!qeth_is_supported_link_type(card, link_type))
2516 return -EPROTONOSUPPORT;
2519 card->info.link_type = link_type;
2520 QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
2524 static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
2527 return QETH_PROT_OSN2;
2528 return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
2531 static int qeth_ulp_enable(struct qeth_card *card)
2533 u8 prot_type = qeth_mpc_select_prot_type(card);
2534 struct qeth_cmd_buffer *iob;
2538 QETH_CARD_TEXT(card, 2, "ulpenabl");
2540 iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
2544 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
2545 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
2546 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2547 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2548 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2549 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2550 rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
2553 return qeth_update_max_mtu(card, max_mtu);
2556 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2559 struct qeth_cmd_buffer *iob;
2561 QETH_CARD_TEXT(card, 2, "ulpstpcb");
2563 iob = (struct qeth_cmd_buffer *) data;
2564 memcpy(&card->token.ulp_connection_r,
2565 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2566 QETH_MPC_TOKEN_LENGTH);
2567 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2569 QETH_CARD_TEXT(card, 2, "olmlimit");
2570 dev_err(&card->gdev->dev, "A connection could not be "
2571 "established because of an OLM limit\n");
2577 static int qeth_ulp_setup(struct qeth_card *card)
2580 struct qeth_cmd_buffer *iob;
2582 QETH_CARD_TEXT(card, 2, "ulpsetup");
2584 iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
2588 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2589 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2590 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2591 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2592 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2593 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2595 memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
2596 temp = (card->info.cula << 8) + card->info.unit_addr2;
2597 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2598 return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
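/* Allocate the tracking structure for one TX buffer slot and link it to
 * its qdio_buffer. Uses GFP_ATOMIC, presumably because replacement
 * buffers may also be created from atomic completion context.
 */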
2601 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
2603 struct qeth_qdio_out_buffer *newbuf;
2605 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
2609 newbuf->buffer = q->qdio_bufs[bidx];
2610 skb_queue_head_init(&newbuf->skb_list);
2611 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2613 newbuf->next_pending = q->bufs[bidx];
2614 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
2615 q->bufs[bidx] = newbuf;
2619 static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2624 qeth_drain_output_queue(q, true);
2625 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2629 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
2631 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2636 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
2643 static void qeth_tx_completion_timer(struct timer_list *timer)
2645 struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
2647 napi_schedule(&queue->napi);
2648 QETH_TXQ_STAT_INC(queue, completion_timer);
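/* Allocate the RX queue, the inbound buffer pool and all TX queues. On
 * any failure the partially built state is unwound in reverse order and
 * the qdio state is reset to QETH_QDIO_UNINITIALIZED.
 */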
2651 static int qeth_alloc_qdio_queues(struct qeth_card *card)
2655 QETH_CARD_TEXT(card, 2, "allcqdbf");
2657 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2658 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2661 QETH_CARD_TEXT(card, 2, "inq");
2662 card->qdio.in_q = qeth_alloc_qdio_queue();
2663 if (!card->qdio.in_q)
2666 /* inbound buffer pool */
2667 if (qeth_alloc_buffer_pool(card))
2671 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2672 struct qeth_qdio_out_q *queue;
2674 queue = qeth_alloc_output_queue();
2677 QETH_CARD_TEXT_(card, 2, "outq %i", i);
2678 QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
2679 card->qdio.out_qs[i] = queue;
2681 queue->queue_no = i;
2682 spin_lock_init(&queue->lock);
2683 timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
2684 queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
2685 queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
2687 /* give outbound qeth_qdio_buffers their qdio_buffers */
2688 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2689 WARN_ON(queue->bufs[j]);
2690 if (qeth_init_qdio_out_buf(queue, j))
2691 goto out_freeoutqbufs;
2696 if (qeth_alloc_cq(card))
2704 kmem_cache_free(qeth_qdio_outbuf_cache,
2705 card->qdio.out_qs[i]->bufs[j]);
2706 card->qdio.out_qs[i]->bufs[j] = NULL;
2710 qeth_free_output_queue(card->qdio.out_qs[--i]);
2711 card->qdio.out_qs[i] = NULL;
2713 qeth_free_buffer_pool(card);
2715 qeth_free_qdio_queue(card->qdio.in_q);
2716 card->qdio.in_q = NULL;
2718 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2722 static void qeth_free_qdio_queues(struct qeth_card *card)
2726 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2727 QETH_QDIO_UNINITIALIZED)
2731 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2732 if (card->qdio.in_q->bufs[j].rx_skb)
2733 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
2735 qeth_free_qdio_queue(card->qdio.in_q);
2736 card->qdio.in_q = NULL;
2737 /* inbound buffer pool */
2738 qeth_free_buffer_pool(card);
2739 /* free outbound qdio_qs */
2740 for (i = 0; i < card->qdio.no_out_queues; i++) {
2741 qeth_free_output_queue(card->qdio.out_qs[i]);
2742 card->qdio.out_qs[i] = NULL;
2746 static void qeth_fill_qib_parms(struct qeth_card *card,
2747 struct qeth_qib_parms *parms)
2749 parms->pcit_magic[0] = 'P';
2750 parms->pcit_magic[1] = 'C';
2751 parms->pcit_magic[2] = 'I';
2752 parms->pcit_magic[3] = 'T';
2753 ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
2754 parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
2755 parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
2756 parms->pcit_c = QETH_PCI_TIMER_VALUE(card);
2758 parms->blkt_magic[0] = 'B';
2759 parms->blkt_magic[1] = 'L';
2760 parms->blkt_magic[2] = 'K';
2761 parms->blkt_magic[3] = 'T';
2762 ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
2763 parms->blkt_total = card->info.blkt.time_total;
2764 parms->blkt_inter_packet = card->info.blkt.inter_packet;
2765 parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
2768 static int qeth_qdio_activate(struct qeth_card *card)
2770 QETH_CARD_TEXT(card, 3, "qdioact");
2771 return qdio_activate(CARD_DDEV(card));
2774 static int qeth_dm_act(struct qeth_card *card)
2776 struct qeth_cmd_buffer *iob;
2778 QETH_CARD_TEXT(card, 2, "dmact");
2780 iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2784 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2785 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2786 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2787 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2788 return qeth_send_control_data(card, iob, NULL, NULL);
2791 static int qeth_mpc_initialize(struct qeth_card *card)
2795 QETH_CARD_TEXT(card, 2, "mpcinit");
2797 rc = qeth_issue_next_read(card);
2799 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2802 rc = qeth_cm_enable(card);
2804 QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2807 rc = qeth_cm_setup(card);
2809 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2812 rc = qeth_ulp_enable(card);
2814 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2817 rc = qeth_ulp_setup(card);
2819 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2822 rc = qeth_alloc_qdio_queues(card);
2824 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2827 rc = qeth_qdio_establish(card);
2829 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2830 qeth_free_qdio_queues(card);
2833 rc = qeth_qdio_activate(card);
2835 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2838 rc = qeth_dm_act(card);
2840 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2847 static void qeth_print_status_message(struct qeth_card *card)
2849 switch (card->info.type) {
2850 case QETH_CARD_TYPE_OSD:
2851 case QETH_CARD_TYPE_OSM:
2852 case QETH_CARD_TYPE_OSX:
2853 /* VM uses a non-zero first character to indicate
2854 * HiperSockets-like reporting of the MCL level;
2855 * OSA sets the first character to zero.
2857 if (!card->info.mcl_level[0]) {
2858 sprintf(card->info.mcl_level, "%02x%02x",
2859 card->info.mcl_level[2],
2860 card->info.mcl_level[3]);
2864 case QETH_CARD_TYPE_IQD:
2865 if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
2866 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2867 card->info.mcl_level[0]];
2868 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2869 card->info.mcl_level[1]];
2870 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2871 card->info.mcl_level[2]];
2872 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2873 card->info.mcl_level[3]];
2874 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2878 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2880 dev_info(&card->gdev->dev,
2881 "Device is a%s card%s%s%s\nwith link type %s.\n",
2882 qeth_get_cardname(card),
2883 (card->info.mcl_level[0]) ? " (level: " : "",
2884 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2885 (card->info.mcl_level[0]) ? ")" : "",
2886 qeth_get_cardname_short(card));
2889 static void qeth_initialize_working_pool_list(struct qeth_card *card)
2891 struct qeth_buffer_pool_entry *entry;
2893 QETH_CARD_TEXT(card, 5, "inwrklst");
2895 list_for_each_entry(entry,
2896 &card->qdio.init_pool.entry_list, init_list) {
2897 qeth_put_buffer_pool_entry(card, entry);
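/* Find a pool entry whose pages are no longer referenced by any skb
 * (page_count() == 1). If no entry is free, recycle the first one by
 * replacing its still-referenced pages with freshly allocated pages.
 */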
2901 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2902 struct qeth_card *card)
2904 struct qeth_buffer_pool_entry *entry;
2907 if (list_empty(&card->qdio.in_buf_pool.entry_list))
2910 list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
2912 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2913 if (page_count(entry->elements[i]) > 1) {
2919 list_del_init(&entry->list);
2924 /* no free buffer in the pool, so take the first one and swap its pages */
2925 entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
2926 struct qeth_buffer_pool_entry, list);
2927 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2928 if (page_count(entry->elements[i]) > 1) {
2929 struct page *page = dev_alloc_page();
2934 __free_page(entry->elements[i]);
2935 entry->elements[i] = page;
2936 QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2939 list_del_init(&entry->list);
2943 static int qeth_init_input_buffer(struct qeth_card *card,
2944 struct qeth_qdio_buffer *buf)
2946 struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
2949 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2950 buf->rx_skb = netdev_alloc_skb(card->dev,
2952 sizeof(struct ipv6hdr));
2958 pool_entry = qeth_find_free_buffer_pool_entry(card);
2962 buf->pool_entry = pool_entry;
2966 * Since the buffer is accessed only from the input_tasklet,
2967 * there shouldn't be a need to synchronize; also, since we use
2968 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2971 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2972 buf->buffer->element[i].length = PAGE_SIZE;
2973 buf->buffer->element[i].addr =
2974 page_to_phys(pool_entry->elements[i]);
2975 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2976 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2978 buf->buffer->element[i].eflags = 0;
2979 buf->buffer->element[i].sflags = 0;
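/* How many TX buffers may be chained into a single doorbell: only plain
 * unicast IQD queues without a CQ may bulk, and the limit comes from the
 * mmwc field of the SSQD descriptor (with 1 as fallback).
 */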
2984 static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
2985 struct qeth_qdio_out_q *queue)
2987 if (!IS_IQD(card) ||
2988 qeth_iqd_is_mcast_queue(card, queue) ||
2989 card->options.cq == QETH_CQ_ENABLED ||
2990 qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
2993 return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
2996 static int qeth_init_qdio_queues(struct qeth_card *card)
2998 unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
3002 QETH_CARD_TEXT(card, 2, "initqdqs");
3005 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3006 memset(&card->rx, 0, sizeof(struct qeth_rx));
3008 qeth_initialize_working_pool_list(card);
3009 /* give only as many buffers to hardware as we have buffer pool entries */
3010 for (i = 0; i < rx_bufs; i++) {
3011 rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3016 card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
3017 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs);
3019 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
3024 rc = qeth_cq_init(card);
3029 /* outbound queue */
3030 for (i = 0; i < card->qdio.no_out_queues; ++i) {
3031 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
3033 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3034 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
3035 queue->next_buf_to_fill = 0;
3037 queue->prev_hdr = NULL;
3038 queue->coalesced_frames = 0;
3039 queue->bulk_start = 0;
3040 queue->bulk_count = 0;
3041 queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
3042 atomic_set(&queue->used_buffers, 0);
3043 atomic_set(&queue->set_pci_flags_count, 0);
3044 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
3049 static void qeth_ipa_finalize_cmd(struct qeth_card *card,
3050 struct qeth_cmd_buffer *iob)
3052 qeth_mpc_finalize_cmd(card, iob);
3054 /* override with IPA-specific values: */
3055 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
3058 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3060 bool (*match)(struct qeth_cmd_buffer *iob,
3061 struct qeth_cmd_buffer *reply))
3063 u8 prot_type = qeth_mpc_select_prot_type(card);
3064 u16 total_length = iob->length;
3066 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
3068 iob->finalize = qeth_ipa_finalize_cmd;
3071 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
3072 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
3073 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
3074 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
3075 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
3076 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
3077 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3078 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
3080 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
3082 static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
3083 struct qeth_cmd_buffer *reply)
3085 struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
3087 return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
3090 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
3091 enum qeth_ipa_cmds cmd_code,
3092 enum qeth_prot_versions prot,
3093 unsigned int data_length)
3095 struct qeth_cmd_buffer *iob;
3096 struct qeth_ipacmd_hdr *hdr;
3098 data_length += offsetof(struct qeth_ipa_cmd, data);
3099 iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
3104 qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);
3106 hdr = &__ipa_cmd(iob)->hdr;
3107 hdr->command = cmd_code;
3108 hdr->initiator = IPA_CMD_INITIATOR_HOST;
3109 /* hdr->seqno is set by qeth_send_control_data() */
3110 hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
3111 hdr->rel_adapter_no = (u8) card->dev->dev_port;
3112 hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
3113 hdr->param_count = 1;
3114 hdr->prot_version = prot;
3117 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
3119 static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
3120 struct qeth_reply *reply, unsigned long data)
3122 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3124 return (cmd->hdr.return_code) ? -EIO : 0;
3128 * qeth_send_ipa_cmd() - send an IPA command
3130 * See qeth_send_control_data() for explanation of the arguments.
3133 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3134 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
3140 QETH_CARD_TEXT(card, 4, "sendipa");
3142 if (card->read_or_write_problem) {
3147 if (reply_cb == NULL)
3148 reply_cb = qeth_send_ipa_cmd_cb;
3149 rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
3151 qeth_clear_ipacmd_list(card);
3152 qeth_schedule_recovery(card);
3156 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
3158 static int qeth_send_startlan_cb(struct qeth_card *card,
3159 struct qeth_reply *reply, unsigned long data)
3161 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3163 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
3166 return (cmd->hdr.return_code) ? -EIO : 0;
3169 static int qeth_send_startlan(struct qeth_card *card)
3171 struct qeth_cmd_buffer *iob;
3173 QETH_CARD_TEXT(card, 2, "strtlan");
3175 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
3178 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
3181 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
3183 if (!cmd->hdr.return_code)
3184 cmd->hdr.return_code =
3185 cmd->data.setadapterparms.hdr.return_code;
3186 return cmd->hdr.return_code;
3189 static int qeth_query_setadapterparms_cb(struct qeth_card *card,
3190 struct qeth_reply *reply, unsigned long data)
3192 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3193 struct qeth_query_cmds_supp *query_cmd;
3195 QETH_CARD_TEXT(card, 3, "quyadpcb");
3196 if (qeth_setadpparms_inspect_rc(cmd))
3199 query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
3200 if (query_cmd->lan_type & 0x7f) {
3201 if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
3202 return -EPROTONOSUPPORT;
3204 card->info.link_type = query_cmd->lan_type;
3205 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
3208 card->options.adp.supported = query_cmd->supported_cmds;
3212 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3213 enum qeth_ipa_setadp_cmd adp_cmd,
3214 unsigned int data_length)
3216 struct qeth_ipacmd_setadpparms_hdr *hdr;
3217 struct qeth_cmd_buffer *iob;
3219 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
3221 offsetof(struct qeth_ipacmd_setadpparms,
3226 hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
3227 hdr->cmdlength = sizeof(*hdr) + data_length;
3228 hdr->command_code = adp_cmd;
3229 hdr->used_total = 1;
3234 static int qeth_query_setadapterparms(struct qeth_card *card)
3237 struct qeth_cmd_buffer *iob;
3239 QETH_CARD_TEXT(card, 3, "queryadp");
3240 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3241 SETADP_DATA_SIZEOF(query_cmds_supp));
3244 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
3248 static int qeth_query_ipassists_cb(struct qeth_card *card,
3249 struct qeth_reply *reply, unsigned long data)
3251 struct qeth_ipa_cmd *cmd;
3253 QETH_CARD_TEXT(card, 2, "qipasscb");
3255 cmd = (struct qeth_ipa_cmd *) data;
3257 switch (cmd->hdr.return_code) {
3258 case IPA_RC_SUCCESS:
3260 case IPA_RC_NOTSUPP:
3261 case IPA_RC_L2_UNSUPPORTED_CMD:
3262 QETH_CARD_TEXT(card, 2, "ipaunsup");
3263 card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
3264 card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
3267 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
3268 CARD_DEVID(card), cmd->hdr.return_code);
3272 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
3273 card->options.ipa4 = cmd->hdr.assists;
3274 else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
3275 card->options.ipa6 = cmd->hdr.assists;
3277 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
3282 static int qeth_query_ipassists(struct qeth_card *card,
3283 enum qeth_prot_versions prot)
3286 struct qeth_cmd_buffer *iob;
3288 QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
3289 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
3292 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3296 static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3297 struct qeth_reply *reply, unsigned long data)
3299 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3300 struct qeth_query_switch_attributes *attrs;
3301 struct qeth_switch_info *sw_info;
3303 QETH_CARD_TEXT(card, 2, "qswiatcb");
3304 if (qeth_setadpparms_inspect_rc(cmd))
3307 sw_info = (struct qeth_switch_info *)reply->param;
3308 attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3309 sw_info->capabilities = attrs->capabilities;
3310 sw_info->settings = attrs->settings;
3311 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3316 int qeth_query_switch_attributes(struct qeth_card *card,
3317 struct qeth_switch_info *sw_info)
3319 struct qeth_cmd_buffer *iob;
3321 QETH_CARD_TEXT(card, 2, "qswiattr");
3322 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
3324 if (!netif_carrier_ok(card->dev))
3326 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
3329 return qeth_send_ipa_cmd(card, iob,
3330 qeth_query_switch_attributes_cb, sw_info);
3333 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
3334 enum qeth_diags_cmds sub_cmd,
3335 unsigned int data_length)
3337 struct qeth_ipacmd_diagass *cmd;
3338 struct qeth_cmd_buffer *iob;
3340 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
3341 DIAG_HDR_LEN + data_length);
3345 cmd = &__ipa_cmd(iob)->data.diagass;
3346 cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
3347 cmd->subcmd = sub_cmd;
3350 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
3352 static int qeth_query_setdiagass_cb(struct qeth_card *card,
3353 struct qeth_reply *reply, unsigned long data)
3355 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3356 u16 rc = cmd->hdr.return_code;
3359 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3363 card->info.diagass_support = cmd->data.diagass.ext;
3367 static int qeth_query_setdiagass(struct qeth_card *card)
3369 struct qeth_cmd_buffer *iob;
3371 QETH_CARD_TEXT(card, 2, "qdiagass");
3372 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
3375 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
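/* Collect the identifiers that describe this device in a hardware trap
 * report: CHPID, subchannel set, device number, plus the LPAR number and
 * z/VM guest name obtained via stsi().
 */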
3378 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3380 unsigned long info = get_zeroed_page(GFP_KERNEL);
3381 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3382 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3383 struct ccw_dev_id ccwid;
3386 tid->chpid = card->info.chpid;
3387 ccw_device_get_id(CARD_RDEV(card), &ccwid);
3388 tid->ssid = ccwid.ssid;
3389 tid->devno = ccwid.devno;
3392 level = stsi(NULL, 0, 0, 0);
3393 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3394 tid->lparnr = info222->lpar_number;
3395 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3396 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3397 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3403 static int qeth_hw_trap_cb(struct qeth_card *card,
3404 struct qeth_reply *reply, unsigned long data)
3406 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3407 u16 rc = cmd->hdr.return_code;
3410 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
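/* Arm, disarm or trigger a hardware trace capture via the diagnostics
 * assist. Arming attaches the trap ID gathered above so that the trace
 * can be correlated with this device.
 */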
3416 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3418 struct qeth_cmd_buffer *iob;
3419 struct qeth_ipa_cmd *cmd;
3421 QETH_CARD_TEXT(card, 2, "diagtrap");
3422 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3425 cmd = __ipa_cmd(iob);
3426 cmd->data.diagass.type = 1;
3427 cmd->data.diagass.action = action;
3429 case QETH_DIAGS_TRAP_ARM:
3430 cmd->data.diagass.options = 0x0003;
3431 cmd->data.diagass.ext = 0x00010000 +
3432 sizeof(struct qeth_trap_id);
3433 qeth_get_trap_id(card,
3434 (struct qeth_trap_id *)cmd->data.diagass.cdata);
3436 case QETH_DIAGS_TRAP_DISARM:
3437 cmd->data.diagass.options = 0x0001;
3439 case QETH_DIAGS_TRAP_CAPTURE:
3442 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3445 static int qeth_check_qdio_errors(struct qeth_card *card,
3446 struct qdio_buffer *buf,
3447 unsigned int qdio_error,
3448 const char *dbftext)
3451 QETH_CARD_TEXT(card, 2, dbftext);
3452 QETH_CARD_TEXT_(card, 2, " F15=%02X",
3453 buf->element[15].sflags);
3454 QETH_CARD_TEXT_(card, 2, " F14=%02X",
3455 buf->element[14].sflags);
3456 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3457 if ((buf->element[15].sflags) == 0x12) {
3458 QETH_CARD_STAT_INC(card, rx_fifo_errors);
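/* Re-arm emptied RX buffers. To save SIGA instructions this only kicks
 * the hardware once at least QETH_IN_BUF_REQUEUE_THRESHOLD buffers can
 * be returned; under memory pressure it falls back to plain skb
 * allocation and schedules the buffer-reclaim worker.
 */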
3466 static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
3469 struct qeth_qdio_q *queue = card->qdio.in_q;
3470 struct list_head *lh;
3475 /* only requeue at a certain threshold to avoid SIGAs */
3476 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3477 for (i = queue->next_buf_to_init;
3478 i < queue->next_buf_to_init + count; ++i) {
3479 if (qeth_init_input_buffer(card,
3480 &queue->bufs[QDIO_BUFNR(i)])) {
3487 if (newcount < count) {
3488 /* we are short on memory, so we switch back to
3489 * traditional skb allocation and drop packets */
3490 atomic_set(&card->force_alloc_skb, 3);
3493 atomic_add_unless(&card->force_alloc_skb, -1, 0);
3498 list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3500 if (i == card->qdio.in_buf_pool.buf_count) {
3501 QETH_CARD_TEXT(card, 2, "qsarbw");
3502 schedule_delayed_work(
3503 &card->buffer_reclaim_work,
3504 QETH_RECLAIM_WORK_TIME);
3509 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3510 queue->next_buf_to_init, count);
3512 QETH_CARD_TEXT(card, 2, "qinberr");
3514 queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
3522 static void qeth_buffer_reclaim_work(struct work_struct *work)
3524 struct qeth_card *card = container_of(to_delayed_work(work),
3526 buffer_reclaim_work);
3529 napi_schedule(&card->napi);
3530 /* kick-start the NAPI softirq: */
3534 static void qeth_handle_send_error(struct qeth_card *card,
3535 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3537 int sbalf15 = buffer->buffer->element[15].sflags;
3539 QETH_CARD_TEXT(card, 6, "hdsnderr");
3540 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3545 if ((sbalf15 >= 15) && (sbalf15 <= 31))
3548 QETH_CARD_TEXT(card, 1, "lnkfail");
3549 QETH_CARD_TEXT_(card, 1, "%04x %02x",
3550 (u16)qdio_err, (u8)sbalf15);
3554 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3555 * @queue: queue to check for packing buffer
3557 * Returns number of buffers that were prepared for flush.
3559 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3561 struct qeth_qdio_out_buffer *buffer;
3563 buffer = queue->bufs[queue->next_buf_to_fill];
3564 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3565 (buffer->next_element_to_fill > 0)) {
3566 /* it's a packing buffer */
3567 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3568 queue->next_buf_to_fill =
3569 QDIO_BUFNR(queue->next_buf_to_fill + 1);
3576 * Switches to packing state if the number of used buffers on a queue
3577 * reaches a certain limit.
3579 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3581 if (!queue->do_pack) {
3582 if (atomic_read(&queue->used_buffers)
3583 >= QETH_HIGH_WATERMARK_PACK) {
3584 /* switch non-PACKING -> PACKING */
3585 QETH_CARD_TEXT(queue->card, 6, "np->pack");
3586 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3593 * Switches from packing to non-packing mode. If there is a packing
3594 * buffer on the queue, this buffer is prepared to be flushed.
3595 * In that case 1 is returned to inform the caller; if no buffer
3596 * has to be flushed, zero is returned.
3598 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3600 if (queue->do_pack) {
3601 if (atomic_read(&queue->used_buffers)
3602 <= QETH_LOW_WATERMARK_PACK) {
3603 /* switch PACKING -> non-PACKING */
3604 QETH_CARD_TEXT(queue->card, 6, "pack->np");
3605 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3607 return qeth_prep_flush_pack_buffer(queue);
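/* Hand a range of primed buffers to the hardware. For non-IQD devices
 * this also manages the PCI request flags that guarantee a TX completion
 * interrupt; for IQD, completion is polled, so the coalescing counters
 * or the completion timer arm the TX NAPI instead.
 */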
3613 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3616 struct qeth_qdio_out_buffer *buf = queue->bufs[index];
3617 unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3618 struct qeth_card *card = queue->card;
3622 for (i = index; i < index + count; ++i) {
3623 unsigned int bidx = QDIO_BUFNR(i);
3624 struct sk_buff *skb;
3626 buf = queue->bufs[bidx];
3627 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3628 SBAL_EFLAGS_LAST_ENTRY;
3629 queue->coalesced_frames += buf->frames;
3631 if (queue->bufstates)
3632 queue->bufstates[bidx].user = buf;
3635 skb_queue_walk(&buf->skb_list, skb)
3636 skb_tx_timestamp(skb);
3640 if (!IS_IQD(card)) {
3641 if (!queue->do_pack) {
3642 if ((atomic_read(&queue->used_buffers) >=
3643 (QETH_HIGH_WATERMARK_PACK -
3644 QETH_WATERMARK_PACK_FUZZ)) &&
3645 !atomic_read(&queue->set_pci_flags_count)) {
3646 /* it's likely that we'll go to packing
3647 * mode soon */
3648 atomic_inc(&queue->set_pci_flags_count);
3649 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3652 if (!atomic_read(&queue->set_pci_flags_count)) {
3654 * there's no outstanding PCI any more, so we
3655 * have to request a PCI to be sure that the PCI
3656 * will wake at some time in the future; then we
3657 * can flush packed buffers that might still be
3658 * hanging around, which can happen if no
3659 * further send was requested by the stack
3661 atomic_inc(&queue->set_pci_flags_count);
3662 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3666 if (atomic_read(&queue->set_pci_flags_count))
3667 qdio_flags |= QDIO_FLAG_PCI_OUT;
3670 QETH_TXQ_STAT_INC(queue, doorbell);
3671 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
3672 queue->queue_no, index, count);
3674 /* Fake the TX completion interrupt: */
3676 unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
3677 unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
3679 if (frames && queue->coalesced_frames >= frames) {
3680 napi_schedule(&queue->napi);
3681 queue->coalesced_frames = 0;
3682 QETH_TXQ_STAT_INC(queue, coal_frames);
3684 qeth_tx_arm_timer(queue, usecs);
3689 /* ignore temporary SIGA errors without busy condition */
3692 QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3693 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3694 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3695 QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3696 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3698 /* This must not happen under normal circumstances. If it
3699 * does, something is really wrong -> recover. */
3700 qeth_schedule_recovery(queue->card);
3705 static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
3707 qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
3709 queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
3710 queue->prev_hdr = NULL;
3711 queue->bulk_count = 0;
3714 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3717 * Check if we have to switch to non-packing mode or if
3718 * we have to get a PCI flag out on the queue.
3720 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3721 !atomic_read(&queue->set_pci_flags_count)) {
3722 unsigned int index, flush_cnt;
3725 spin_lock(&queue->lock);
3727 index = queue->next_buf_to_fill;
3728 q_was_packing = queue->do_pack;
3730 flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
3731 if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
3732 flush_cnt = qeth_prep_flush_pack_buffer(queue);
3735 qeth_flush_buffers(queue, index, flush_cnt);
3737 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3740 spin_unlock(&queue->lock);
3744 static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
3746 struct qeth_card *card = (struct qeth_card *)card_ptr;
3748 napi_schedule_irqoff(&card->napi);
3751 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3755 if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
3759 if (card->options.cq == cq) {
3764 qeth_free_qdio_queues(card);
3765 card->options.cq = cq;
3772 EXPORT_SYMBOL_GPL(qeth_configure_cq);
3774 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3775 unsigned int queue, int first_element,
3778 struct qeth_qdio_q *cq = card->qdio.c_q;
3782 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3783 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3784 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3787 netif_tx_stop_all_queues(card->dev);
3788 qeth_schedule_recovery(card);
3792 for (i = first_element; i < first_element + count; ++i) {
3793 struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
3796 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3797 buffer->element[e].addr) {
3798 unsigned long phys_aob_addr = buffer->element[e].addr;
3800 qeth_qdio_handle_aob(card, phys_aob_addr);
3803 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3805 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3806 card->qdio.c_q->next_buf_to_init,
3809 dev_warn(&card->gdev->dev,
3810 "QDIO reported an error, rc=%i\n", rc);
3811 QETH_CARD_TEXT(card, 2, "qcqherr");
3814 cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
3817 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3818 unsigned int qdio_err, int queue,
3819 int first_elem, int count,
3820 unsigned long card_ptr)
3822 struct qeth_card *card = (struct qeth_card *)card_ptr;
3824 QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3825 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3828 qeth_schedule_recovery(card);
3831 static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3832 unsigned int qdio_error, int __queue,
3833 int first_element, int count,
3834 unsigned long card_ptr)
3836 struct qeth_card *card = (struct qeth_card *) card_ptr;
3837 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3838 struct net_device *dev = card->dev;
3839 struct netdev_queue *txq;
3842 QETH_CARD_TEXT(card, 6, "qdouhdl");
3843 if (qdio_error & QDIO_ERROR_FATAL) {
3844 QETH_CARD_TEXT(card, 2, "achkcond");
3845 netif_tx_stop_all_queues(dev);
3846 qeth_schedule_recovery(card);
3850 for (i = first_element; i < (first_element + count); ++i) {
3851 struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];
3853 qeth_handle_send_error(card, buf, qdio_error);
3854 qeth_clear_output_buffer(queue, buf, qdio_error, 0);
3857 atomic_sub(count, &queue->used_buffers);
3858 qeth_check_outbound_queue(queue);
3860 txq = netdev_get_tx_queue(dev, __queue);
3861 /* xmit may have observed the full-condition, but not yet stopped the
3862 * txq. In that case the code below won't trigger. So before returning,
3863 * xmit will re-check the txq's fill level and wake it up if needed.
3865 if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
3866 netif_tx_wake_queue(txq);
3870 * Note: Function assumes that we have 4 outbound queues.
3872 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
3874 struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
3877 switch (card->qdio.do_prio_queueing) {
3878 case QETH_PRIO_Q_ING_TOS:
3879 case QETH_PRIO_Q_ING_PREC:
3880 switch (qeth_get_ip_version(skb)) {
3882 tos = ipv4_get_dsfield(ip_hdr(skb));
3885 tos = ipv6_get_dsfield(ipv6_hdr(skb));
3888 return card->qdio.default_out_queue;
3890 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3891 return ~tos >> 6 & 3;
3892 if (tos & IPTOS_MINCOST)
3894 if (tos & IPTOS_RELIABILITY)
3896 if (tos & IPTOS_THROUGHPUT)
3898 if (tos & IPTOS_LOWDELAY)
3901 case QETH_PRIO_Q_ING_SKB:
3902 if (skb->priority > 5)
3904 return ~skb->priority >> 1 & 3;
3905 case QETH_PRIO_Q_ING_VLAN:
3906 if (veth->h_vlan_proto == htons(ETH_P_8021Q))
3907 return ~ntohs(veth->h_vlan_TCI) >>
3908 (VLAN_PRIO_SHIFT + 1) & 3;
3910 case QETH_PRIO_Q_ING_FIXED:
3911 return card->qdio.default_out_queue;
3915 return card->qdio.default_out_queue;
3917 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3920 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
3923 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3924 * the fragmented part of the SKB. Returns zero for a linear SKB.
3926 static int qeth_get_elements_for_frags(struct sk_buff *skb)
3928 int cnt, elements = 0;
3930 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3931 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3933 elements += qeth_get_elements_for_range(
3934 (addr_t)skb_frag_address(frag),
3935 (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3941 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
3942 * to transmit an skb.
3943 * @skb: the skb to operate on.
3944 * @data_offset: skip this part of the skb's linear data
3946 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3947 * skb's data (both its linear part and paged fragments).
3949 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
3951 unsigned int elements = qeth_get_elements_for_frags(skb);
3952 addr_t end = (addr_t)skb->data + skb_headlen(skb);
3953 addr_t start = (addr_t)skb->data + data_offset;
3956 elements += qeth_get_elements_for_range(start, end);
3959 EXPORT_SYMBOL_GPL(qeth_count_elements);
3961 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
3965 * qeth_add_hw_header() - add a HW header to an skb.
3966 * @skb: skb that the HW header should be added to.
3967 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3968 * it contains a valid pointer to a qeth_hdr.
3969 * @hdr_len: length of the HW header.
3970 * @proto_len: length of protocol headers that need to be in same page as the
3973 * Returns the pushed length. If the header can't be pushed on
3974 * (e.g. because it would cross a page boundary), it is allocated from
3975 * the cache instead and 0 is returned.
3976 * The number of needed buffer elements is returned in @elements.
3977 * Failure to create the header is indicated by a return value < 0.
3979 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
3980 struct sk_buff *skb, struct qeth_hdr **hdr,
3981 unsigned int hdr_len, unsigned int proto_len,
3982 unsigned int *elements)
3984 gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
3985 const unsigned int contiguous = proto_len ? proto_len : 1;
3986 const unsigned int max_elements = queue->max_elements;
3987 unsigned int __elements;
3993 start = (addr_t)skb->data - hdr_len;
3994 end = (addr_t)skb->data;
3996 if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
3997 /* Push HW header into same page as first protocol header. */
3999 /* ... but TSO always needs a separate element for headers: */
4000 if (skb_is_gso(skb))
4001 __elements = 1 + qeth_count_elements(skb, proto_len);
4003 __elements = qeth_count_elements(skb, 0);
4004 } else if (!proto_len && PAGE_ALIGNED(skb->data)) {
4005 /* Push HW header into preceding page, flush with skb->data. */
4007 __elements = 1 + qeth_count_elements(skb, 0);
4009 /* Use header cache, copy protocol headers up. */
4011 __elements = 1 + qeth_count_elements(skb, proto_len);
4014 /* Compress skb to fit into one IO buffer: */
4015 if (__elements > max_elements) {
4016 if (!skb_is_nonlinear(skb)) {
4017 /* Drop it, no easy way of shrinking it further. */
4018 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
4019 max_elements, __elements, skb->len);
4023 rc = skb_linearize(skb);
4025 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
4029 QETH_TXQ_STAT_INC(queue, skbs_linearized);
4030 /* Linearization changed the layout, re-evaluate: */
4034 *elements = __elements;
4035 /* Add the header: */
4037 *hdr = skb_push(skb, hdr_len);
4041 /* Fall back to cache element with known-good alignment: */
4042 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
4044 *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
4047 /* Copy protocol headers behind HW header: */
4048 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
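/* Decide whether a new skb may share the current bulk buffer: layer-2
 * frames must have the same destination MAC and VLAN as the previous
 * skb, layer-3 frames the same next hop and VLAN, so that all packets
 * in the buffer remain equivalent for the card.
 */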
4052 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
4053 struct sk_buff *curr_skb,
4054 struct qeth_hdr *curr_hdr)
4056 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
4057 struct qeth_hdr *prev_hdr = queue->prev_hdr;
4062 /* All packets must have the same target: */
4063 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
4064 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
4066 return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
4067 eth_hdr(curr_skb)->h_dest) &&
4068 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
4071 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
4072 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
4076 * qeth_fill_buffer() - map skb into an output buffer
4077 * @buf: buffer to transport the skb
4078 * @skb: skb to map into the buffer
4079 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
4080 * from qeth_core_header_cache.
4081 * @offset: when mapping the skb, start at skb->data + offset
4082 * @hd_len: if > 0, build a dedicated header element of this size
4084 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
4085 struct sk_buff *skb, struct qeth_hdr *hdr,
4086 unsigned int offset, unsigned int hd_len)
4088 struct qdio_buffer *buffer = buf->buffer;
4089 int element = buf->next_element_to_fill;
4090 int length = skb_headlen(skb) - offset;
4091 char *data = skb->data + offset;
4092 unsigned int elem_length, cnt;
4093 bool is_first_elem = true;
4095 __skb_queue_tail(&buf->skb_list, skb);
4097 /* build dedicated element for HW Header */
4099 is_first_elem = false;
4101 buffer->element[element].addr = virt_to_phys(hdr);
4102 buffer->element[element].length = hd_len;
4103 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
4105 /* HW header is allocated from cache: */
4106 if ((void *)hdr != skb->data)
4107 buf->is_header[element] = 1;
4108 /* HW header was pushed and is contiguous with linear part: */
4109 else if (length > 0 && !PAGE_ALIGNED(data) &&
4110 (data == (char *)hdr + hd_len))
4111 buffer->element[element].eflags |=
4112 SBAL_EFLAGS_CONTIGUOUS;
4117 /* map linear part into buffer element(s) */
4118 while (length > 0) {
4119 elem_length = min_t(unsigned int, length,
4120 PAGE_SIZE - offset_in_page(data));
4122 buffer->element[element].addr = virt_to_phys(data);
4123 buffer->element[element].length = elem_length;
4124 length -= elem_length;
4125 if (is_first_elem) {
4126 is_first_elem = false;
4127 if (length || skb_is_nonlinear(skb))
4128 /* skb needs additional elements */
4129 buffer->element[element].eflags =
4130 SBAL_EFLAGS_FIRST_FRAG;
4132 buffer->element[element].eflags = 0;
4134 buffer->element[element].eflags =
4135 SBAL_EFLAGS_MIDDLE_FRAG;
4138 data += elem_length;
4142 /* map page frags into buffer element(s) */
4143 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
4144 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
4146 data = skb_frag_address(frag);
4147 length = skb_frag_size(frag);
4148 while (length > 0) {
4149 elem_length = min_t(unsigned int, length,
4150 PAGE_SIZE - offset_in_page(data));
4152 buffer->element[element].addr = virt_to_phys(data);
4153 buffer->element[element].length = elem_length;
4154 buffer->element[element].eflags =
4155 SBAL_EFLAGS_MIDDLE_FRAG;
4157 length -= elem_length;
4158 data += elem_length;
4163 if (buffer->element[element - 1].eflags)
4164 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
4165 buf->next_element_to_fill = element;
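/* Fast TX path for IQD: fill the current bulk buffer, stop the txq when
 * the last free buffer is taken, and flush either when bulking is not
 * possible or when the bulk limit is reached.
 */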
4169 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4170 struct sk_buff *skb, unsigned int elements,
4171 struct qeth_hdr *hdr, unsigned int offset,
4172 unsigned int hd_len)
4174 unsigned int bytes = qdisc_pkt_len(skb);
4175 struct qeth_qdio_out_buffer *buffer;
4176 unsigned int next_element;
4177 struct netdev_queue *txq;
4178 bool stopped = false;
4181 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
4182 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4184 /* Just a sanity check, the wake/stop logic should ensure that we always
4185 * get a free buffer.
4187 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4190 flush = !qeth_iqd_may_bulk(queue, skb, hdr);
4193 (buffer->next_element_to_fill + elements > queue->max_elements)) {
4194 if (buffer->next_element_to_fill > 0) {
4195 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4196 queue->bulk_count++;
4199 if (queue->bulk_count >= queue->bulk_max)
4203 qeth_flush_queue(queue);
4205 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
4206 queue->bulk_count)];
4208 /* Sanity-check again: */
4209 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4213 if (buffer->next_element_to_fill == 0 &&
4214 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4215 /* If a TX completion happens right _here_ and misses to wake
4216 * the txq, then our re-check below will catch the race.
4218 QETH_TXQ_STAT_INC(queue, stopped);
4219 netif_tx_stop_queue(txq);
4223 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4224 buffer->bytes += bytes;
4225 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4226 queue->prev_hdr = hdr;
4228 flush = __netdev_tx_sent_queue(txq, bytes,
4229 !stopped && netdev_xmit_more());
4231 if (flush || next_element >= queue->max_elements) {
4232 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4233 queue->bulk_count++;
4235 if (queue->bulk_count >= queue->bulk_max)
4239 qeth_flush_queue(queue);
4242 if (stopped && !qeth_out_queue_is_full(queue))
4243 netif_tx_start_queue(txq);
4247 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4248 struct sk_buff *skb, struct qeth_hdr *hdr,
4249 unsigned int offset, unsigned int hd_len,
4250 int elements_needed)
4252 unsigned int start_index = queue->next_buf_to_fill;
4253 struct qeth_qdio_out_buffer *buffer;
4254 unsigned int next_element;
4255 struct netdev_queue *txq;
4256 bool stopped = false;
4257 int flush_count = 0;
4261 buffer = queue->bufs[queue->next_buf_to_fill];
4263 /* Just a sanity check, the wake/stop logic should ensure that we always
4264 * get a free buffer.
4266 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4269 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4271 /* check if we need to switch packing state of this queue */
4272 qeth_switch_to_packing_if_needed(queue);
4273 if (queue->do_pack) {
4275 /* does packet fit in current buffer? */
4276 if (buffer->next_element_to_fill + elements_needed >
4277 queue->max_elements) {
4278 /* ... no -> set state PRIMED */
4279 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4281 queue->next_buf_to_fill =
4282 QDIO_BUFNR(queue->next_buf_to_fill + 1);
4283 buffer = queue->bufs[queue->next_buf_to_fill];
4285 /* We stepped forward, so sanity-check again: */
4286 if (atomic_read(&buffer->state) !=
4287 QETH_QDIO_BUF_EMPTY) {
4288 qeth_flush_buffers(queue, start_index,
4296 if (buffer->next_element_to_fill == 0 &&
4297 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4298 /* If a TX completion happens right _here_ and misses to wake
4299 * the txq, then our re-check below will catch the race.
4301 QETH_TXQ_STAT_INC(queue, stopped);
4302 netif_tx_stop_queue(txq);
4306 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4307 buffer->bytes += qdisc_pkt_len(skb);
4308 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4311 QETH_TXQ_STAT_INC(queue, skbs_pack);
4312 if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
4314 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4315 queue->next_buf_to_fill =
4316 QDIO_BUFNR(queue->next_buf_to_fill + 1);
4320 qeth_flush_buffers(queue, start_index, flush_count);
4324 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
4326 if (stopped && !qeth_out_queue_is_full(queue))
4327 netif_tx_start_queue(txq);
4330 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
4332 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4333 unsigned int payload_len, struct sk_buff *skb,
4334 unsigned int proto_len)
4336 struct qeth_hdr_ext_tso *ext = &hdr->ext;
4338 ext->hdr_tot_len = sizeof(*ext);
4339 ext->imb_hdr_no = 1;
4341 ext->hdr_version = 1;
4343 ext->payload_len = payload_len;
4344 ext->mss = skb_shinfo(skb)->gso_size;
4345 ext->dg_hdr_len = proto_len;
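/* Common transmit entry point: reserve headroom, build the HW header
 * (pushed into the skb where possible, otherwise taken from the header
 * cache), then dispatch to the IQD fast path or to the packing-capable
 * qeth_do_send_packet().
 */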
4348 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4349 struct qeth_qdio_out_q *queue, int ipv,
4350 void (*fill_header)(struct qeth_qdio_out_q *queue,
4351 struct qeth_hdr *hdr, struct sk_buff *skb,
4352 int ipv, unsigned int data_len))
4354 unsigned int proto_len, hw_hdr_len;
4355 unsigned int frame_len = skb->len;
4356 bool is_tso = skb_is_gso(skb);
4357 unsigned int data_offset = 0;
4358 struct qeth_hdr *hdr = NULL;
4359 unsigned int hd_len = 0;
4360 unsigned int elements;
4364 hw_hdr_len = sizeof(struct qeth_hdr_tso);
4365 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4367 hw_hdr_len = sizeof(struct qeth_hdr);
4368 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4371 rc = skb_cow_head(skb, hw_hdr_len);
4375 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4379 if (is_tso || !push_len) {
4380 /* HW header needs its own buffer element. */
4381 hd_len = hw_hdr_len + proto_len;
4382 data_offset = push_len + proto_len;
4384 memset(hdr, 0, hw_hdr_len);
4385 fill_header(queue, hdr, skb, ipv, frame_len);
4387 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4388 frame_len - proto_len, skb, proto_len);
4391 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
4394 /* TODO: drop skb_orphan() once TX completion is fast enough */
4396 spin_lock(&queue->lock);
4397 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4399 spin_unlock(&queue->lock);
4402 if (rc && !push_len)
4403 kmem_cache_free(qeth_core_header_cache, hdr);
4407 EXPORT_SYMBOL_GPL(qeth_xmit);
4409 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4410 struct qeth_reply *reply, unsigned long data)
4412 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4413 struct qeth_ipacmd_setadpparms *setparms;
4415 QETH_CARD_TEXT(card, 4, "prmadpcb");
4417 setparms = &(cmd->data.setadapterparms);
4418 if (qeth_setadpparms_inspect_rc(cmd)) {
4419 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4420 setparms->data.mode = SET_PROMISC_MODE_OFF;
4422 card->info.promisc_mode = setparms->data.mode;
4423 return (cmd->hdr.return_code) ? -EIO : 0;
4426 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
4428 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
4429 SET_PROMISC_MODE_OFF;
4430 struct qeth_cmd_buffer *iob;
4431 struct qeth_ipa_cmd *cmd;
4433 QETH_CARD_TEXT(card, 4, "setprom");
4434 QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4436 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4437 SETADP_DATA_SIZEOF(mode));
4440 cmd = __ipa_cmd(iob);
4441 cmd->data.setadapterparms.data.mode = mode;
4442 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4444 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4446 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4447 struct qeth_reply *reply, unsigned long data)
4449 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4450 struct qeth_ipacmd_setadpparms *adp_cmd;
4452 QETH_CARD_TEXT(card, 4, "chgmaccb");
4453 if (qeth_setadpparms_inspect_rc(cmd))
4456 adp_cmd = &cmd->data.setadapterparms;
4457 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4458 return -EADDRNOTAVAIL;
4460 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4461 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4462 return -EADDRNOTAVAIL;
4464 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4468 int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4471 struct qeth_cmd_buffer *iob;
4472 struct qeth_ipa_cmd *cmd;
4474 QETH_CARD_TEXT(card, 4, "chgmac");
4476 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4477 SETADP_DATA_SIZEOF(change_addr));
4480 cmd = __ipa_cmd(iob);
4481 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4482 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4483 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4484 card->dev->dev_addr);
4485 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4489 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4491 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4492 struct qeth_reply *reply, unsigned long data)
4494 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4495 struct qeth_set_access_ctrl *access_ctrl_req;
4497 QETH_CARD_TEXT(card, 4, "setaccb");
4499 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4500 QETH_CARD_TEXT_(card, 2, "rc=%d",
4501 cmd->data.setadapterparms.hdr.return_code);
4502 if (cmd->data.setadapterparms.hdr.return_code !=
4503 SET_ACCESS_CTRL_RC_SUCCESS)
4504 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4505 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4506 cmd->data.setadapterparms.hdr.return_code);
4507 switch (qeth_setadpparms_inspect_rc(cmd)) {
4508 case SET_ACCESS_CTRL_RC_SUCCESS:
4509 if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
4510 dev_info(&card->gdev->dev,
4511 "QDIO data connection isolation is deactivated\n");
4513 dev_info(&card->gdev->dev,
4514 "QDIO data connection isolation is activated\n");
4516 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4517 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4520 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4521 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4524 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4525 dev_err(&card->gdev->dev, "Adapter does not "
4526 "support QDIO data connection isolation\n");
4528 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4529 dev_err(&card->gdev->dev,
4530 "Adapter is dedicated. QDIO data connection isolation not supported\n");
4533 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4534 dev_err(&card->gdev->dev,
4535 "TSO does not permit QDIO data connection isolation\n");
4537 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4538 dev_err(&card->gdev->dev, "The adjacent switch port does not "
4539 "support reflective relay mode\n");
4541 case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4542 dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4543 "enabled at the adjacent switch port");
4545 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4546 dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4547 "at the adjacent switch failed\n");
4548 /* benign error while disabling ISOLATION_MODE_FWD */
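
/* Program the QDIO data connection isolation mode on the adapter. The
 * callback above translates each SET_ACCESS_CTRL return code into a
 * user-visible message and a matching errno.
 */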
int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
				     enum qeth_ipa_isolation_modes mode)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_set_access_ctrl *access_ctrl_req;

	QETH_CARD_TEXT(card, 4, "setacctl");

	if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
		dev_err(&card->gdev->dev,
			"Adapter does not support QDIO data connection isolation\n");
		return -EOPNOTSUPP;
	}

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
				   SETADP_DATA_SIZEOF(set_access_ctrl));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	access_ctrl_req->subcmd_code = mode;

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
			       NULL);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
		QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
				 rc, CARD_DEVID(card));
	}
	return rc;
}
void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qeth_card *card;

	card = dev->ml_priv;
	QETH_CARD_TEXT(card, 4, "txtimeo");
	qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);
static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (regnum) {
	case MII_BMCR: /* Basic mode control register */
		rc = BMCR_FULLDPLX;
		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_OSN) &&
		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
			rc |= BMCR_SPEED100;
		break;
	case MII_BMSR: /* Basic mode status register */
		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
		     BMSR_100BASE4;
		break;
	case MII_PHYSID1: /* PHYS ID 1 */
		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
		     dev->dev_addr[2];
		rc = (rc >> 5) & 0xFFFF;
		break;
	case MII_PHYSID2: /* PHYS ID 2 */
		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
		break;
	case MII_ADVERTISE: /* Advertisement control reg */
		rc = ADVERTISE_ALL;
		break;
	case MII_LPA: /* Link partner ability reg */
		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
		     LPA_100BASE4 | LPA_LPACK;
		break;
	case MII_EXPANSION: /* Expansion register */
		break;
	case MII_DCOUNTER: /* disconnect counter */
		break;
	case MII_FCSCOUNTER: /* false carrier counter */
		break;
	case MII_NWAYTEST: /* N-way auto-neg test register */
		break;
	case MII_RERRCOUNTER: /* rx error counter */
		rc = card->stats.rx_length_errors +
		     card->stats.rx_frame_errors +
		     card->stats.rx_fifo_errors;
		break;
	case MII_SREVISION: /* silicon revision */
		break;
	case MII_RESV1: /* reserved 1 */
		break;
	case MII_LBRERROR: /* loopback, rx, bypass error */
		break;
	case MII_PHYADDR: /* physical address */
		break;
	case MII_RESV2: /* reserved 2 */
		break;
	case MII_TPISTATUS: /* TPI status for 10mbps */
		break;
	case MII_NCONFIG: /* network interface config */
		break;
	default:
		break;
	}
	return rc;
}
static int qeth_snmp_command_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_arp_query_info *qinfo = reply->param;
	struct qeth_ipacmd_setadpparms *adp_cmd;
	unsigned int data_len;
	void *snmp_data;

	QETH_CARD_TEXT(card, 3, "snpcmdcb");

	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
		return -EIO;
	}
	if (cmd->data.setadapterparms.hdr.return_code) {
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
		return -EIO;
	}

	adp_cmd = &cmd->data.setadapterparms;
	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
	if (adp_cmd->hdr.seq_no == 1) {
		snmp_data = &adp_cmd->data.snmp;
	} else {
		snmp_data = &adp_cmd->data.snmp.request;
		data_len -= offsetof(struct qeth_snmp_cmd, request);
	}

	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
		return -ENOSPC;
	}
	QETH_CARD_TEXT_(card, 4, "snore%i",
			cmd->data.setadapterparms.hdr.used_total);
	QETH_CARD_TEXT_(card, 4, "sseqn%i",
			cmd->data.setadapterparms.hdr.seq_no);
	/* copy entries to user buffer */
	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
	qinfo->udata_offset += data_len;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}
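
/* Handler for the SIOC_QETH_ADP_SET_SNMP_CONTROL ioctl: copies the SNMP
 * request from user space into an IPA command and streams the (possibly
 * multi-part) reply back through qeth_snmp_command_cb().
 */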
static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
{
	struct qeth_snmp_ureq __user *ureq;
	struct qeth_cmd_buffer *iob;
	unsigned int req_len;
	struct qeth_arp_query_info qinfo = {0, };
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "snmpcmd");

	if (IS_VM_NIC(card))
		return -EOPNOTSUPP;

	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
	    IS_LAYER3(card))
		return -EOPNOTSUPP;

	ureq = (struct qeth_snmp_ureq __user *) udata;
	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
	    get_user(req_len, &ureq->hdr.req_len))
		return -EFAULT;

	/* Sanitize user input, to avoid overflows in iob size calculation: */
	if (req_len > QETH_BUFSIZE)
		return -EINVAL;

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
	if (!iob)
		return -ENOMEM;

	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
			   &ureq->cmd, req_len)) {
		qeth_put_cmd(iob);
		return -EFAULT;
	}

	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
	if (!qinfo.udata) {
		qeth_put_cmd(iob);
		return -ENOMEM;
	}
	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);

	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
	if (rc)
		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
				 CARD_DEVID(card), rc);
	else {
		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
			rc = -EFAULT;
	}

	kfree(qinfo.udata);
	return rc;
}
static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
					 struct qeth_reply *reply,
					 unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_qoat_priv *priv = reply->param;
	int resdatalen;

	QETH_CARD_TEXT(card, 3, "qoatcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;

	if (resdatalen > (priv->buffer_len - priv->response_len))
		return -ENOSPC;

	memcpy(priv->buffer + priv->response_len,
	       &cmd->data.setadapterparms.hdr, resdatalen);
	priv->response_len += resdatalen;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}
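
/* Handler for the SIOC_QETH_QUERY_OAT ioctl: the adapter may return the
 * OSA Address Table in several replies, which the callback above
 * accumulates in priv.buffer before everything is copied to user space.
 */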
static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
{
	int rc = 0;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_query_oat *oat_req;
	struct qeth_query_oat_data oat_data;
	struct qeth_qoat_priv priv;
	void __user *tmp;

	QETH_CARD_TEXT(card, 3, "qoatcmd");

	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
		return -EOPNOTSUPP;

	if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
		return -EFAULT;

	priv.buffer_len = oat_data.buffer_len;
	priv.response_len = 0;
	priv.buffer = vzalloc(oat_data.buffer_len);
	if (!priv.buffer)
		return -ENOMEM;

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
				   SETADP_DATA_SIZEOF(query_oat));
	if (!iob) {
		rc = -ENOMEM;
		goto out_free;
	}
	cmd = __ipa_cmd(iob);
	oat_req = &cmd->data.setadapterparms.data.query_oat;
	oat_req->subcmd_code = oat_data.command;

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
	if (!rc) {
		tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
					 u64_to_user_ptr(oat_data.ptr);
		oat_data.response_len = priv.response_len;

		if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
		    copy_to_user(udata, &oat_data, sizeof(oat_data)))
			rc = -EFAULT;
	}

out_free:
	vfree(priv.buffer);
	return rc;
}
static int qeth_query_card_info_cb(struct qeth_card *card,
				   struct qeth_reply *reply, unsigned long data)
{
	struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_query_card_info *card_info;

	QETH_CARD_TEXT(card, 2, "qcrdincb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	card_info = &cmd->data.setadapterparms.data.card_info;
	carrier_info->card_type = card_info->card_type;
	carrier_info->port_mode = card_info->port_mode;
	carrier_info->port_speed = card_info->port_speed;
	return 0;
}

int qeth_query_card_info(struct qeth_card *card,
			 struct carrier_info *carrier_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qcrdinfo");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
		return -EOPNOTSUPP;
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
				 (void *)carrier_info);
}
/**
 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
 * @card: pointer to a qeth_card
 *
 * Returns
 *	0, if a MAC address has been set for the card's netdevice
 *	a return code, for various error conditions
 */
int qeth_vm_request_mac(struct qeth_card *card)
{
	struct diag26c_mac_resp *response;
	struct diag26c_mac_req *request;
	int rc;

	QETH_CARD_TEXT(card, 2, "vmreqmac");

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION2;
	request->op_code = DIAG26C_GET_MAC;
	request->devno = card->info.ddev_devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		QETH_CARD_TEXT(card, 2, "badresp");
		QETH_CARD_HEX(card, 2, &request->resp_buf_len,
			      sizeof(request->resp_buf_len));
	} else if (!is_valid_ether_addr(response->mac)) {
		rc = -EINVAL;
		QETH_CARD_TEXT(card, 2, "badmac");
		QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
	} else {
		ether_addr_copy(card->dev->dev_addr, response->mac);
	}

out:
	kfree(response);
	kfree(request);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
static void qeth_determine_capabilities(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct ccw_device *ddev = channel->ccwdev;
	int rc;
	int ddev_offline = 0;

	QETH_CARD_TEXT(card, 2, "detcapab");
	if (!ddev->online) {
		ddev_offline = 1;
		rc = qeth_start_channel(channel);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
			goto out;
		}
	}

	rc = qeth_read_conf_data(card);
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out_offline;
	}

	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);

	QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
	QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
	QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
	QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
	QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
	if (card->ssqd.qfmt == QDIO_IQDIO_QFMT &&
	    (card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) &&
	    (card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE)) {
		dev_info(&card->gdev->dev,
			 "Completion Queueing supported\n");
	} else {
		card->options.cq = QETH_CQ_NOTAVAILABLE;
	}

out_offline:
	if (ddev_offline == 1)
		qeth_stop_channel(channel);
out:
	return;
}
static void qeth_read_ccw_conf_data(struct qeth_card *card)
{
	struct qeth_card_info *info = &card->info;
	struct ccw_device *cdev = CARD_DDEV(card);
	struct ccw_dev_id dev_id;

	QETH_CARD_TEXT(card, 2, "ccwconfd");
	ccw_device_get_id(cdev, &dev_id);

	info->ddev_devno = dev_id.devno;
	info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
			  !ccw_device_get_iid(cdev, &info->iid) &&
			  !ccw_device_get_chid(cdev, 0, &info->chid);
	info->ssid = dev_id.ssid;

	dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
		 info->chid, info->chpid);

	QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
	QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
	QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
	QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
	QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
	QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
	QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
}
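
/* Hand the previously allocated queues over to the QDIO layer: collect
 * the SBAL pointers of each input/output queue, fill the initialization
 * descriptor and establish the QDIO subchannel.
 */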
static int qeth_qdio_establish(struct qeth_card *card)
{
	struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
	struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
	struct qeth_qib_parms *qib_parms = NULL;
	struct qdio_initialize init_data;
	unsigned int i;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "qdioest");

	if (!IS_IQD(card) && !IS_VM_NIC(card)) {
		qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
		if (!qib_parms)
			return -ENOMEM;

		qeth_fill_qib_parms(card, qib_parms);
	}

	in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
	if (card->options.cq == QETH_CQ_ENABLED)
		in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;

	for (i = 0; i < card->qdio.no_out_queues; i++)
		out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;

	memset(&init_data, 0, sizeof(struct qdio_initialize));
	init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
					    QDIO_QETH_QFMT;
	init_data.qib_param_field_format = 0;
	init_data.qib_param_field = (void *)qib_parms;
	init_data.no_input_qs = card->qdio.no_in_queues;
	init_data.no_output_qs = card->qdio.no_out_queues;
	init_data.input_handler = qeth_qdio_input_handler;
	init_data.output_handler = qeth_qdio_output_handler;
	init_data.irq_poll = qeth_qdio_poll;
	init_data.int_parm = (unsigned long) card;
	init_data.input_sbal_addr_array = in_sbal_ptrs;
	init_data.output_sbal_addr_array = out_sbal_ptrs;
	init_data.output_sbal_state_array = card->qdio.out_bufstates;
	init_data.scan_threshold = IS_IQD(card) ? 0 : 32;

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
		rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
				   init_data.no_output_qs);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			goto out;
		}
		rc = qdio_establish(CARD_DDEV(card), &init_data);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			qdio_free(CARD_DDEV(card));
			goto out;
		}
	}

	switch (card->options.cq) {
	case QETH_CQ_ENABLED:
		dev_info(&card->gdev->dev, "Completion Queue support enabled");
		break;
	case QETH_CQ_DISABLED:
		dev_info(&card->gdev->dev, "Completion Queue support disabled");
		break;
	default:
		break;
	}

out:
	kfree(qib_parms);
	return rc;
}
static void qeth_core_free_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "freecrd");

	unregister_service_level(&card->qeth_service_level);
	debugfs_remove_recursive(card->debugfs);
	qeth_put_cmd(card->read_cmd);
	destroy_workqueue(card->event_wq);
	dev_set_drvdata(&card->gdev->dev, NULL);
	kfree(card);
}

static void qeth_trace_features(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "features");
	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
		      sizeof(card->info.diagass_support));
}
static struct ccw_device_id qeth_ids[] = {
	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
					.driver_info = QETH_CARD_TYPE_OSD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
					.driver_info = QETH_CARD_TYPE_IQD},
#ifdef CONFIG_QETH_OSN
	{CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
					.driver_info = QETH_CARD_TYPE_OSN},
#endif
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
					.driver_info = QETH_CARD_TYPE_OSM},
#ifdef CONFIG_QETH_OSX
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
					.driver_info = QETH_CARD_TYPE_OSX},
#endif
	{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);

static struct ccw_driver qeth_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};
static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
	int retries = 3;
	int rc;

	QETH_CARD_TEXT(card, 2, "hrdsetup");
	atomic_set(&card->force_alloc_skb, 0);
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		return rc;
retry:
	if (retries < 3)
		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
				 CARD_DEVID(card));
	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	rc = qeth_start_channel(&card->read);
	if (rc)
		goto retriable;
	rc = qeth_start_channel(&card->write);
	if (rc)
		goto retriable;
	rc = qeth_start_channel(&card->data);
	if (rc)
		goto retriable;
retriable:
	if (rc == -ERESTARTSYS) {
		QETH_CARD_TEXT(card, 2, "break1");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	qeth_determine_capabilities(card);
	qeth_read_ccw_conf_data(card);
	qeth_idx_init(card);

	rc = qeth_idx_activate_read_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break2");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	rc = qeth_idx_activate_write_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break3");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
	card->read_or_write_problem = 0;
	rc = qeth_mpc_initialize(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out;
	}

	rc = qeth_send_startlan(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		if (rc == -ENETDOWN) {
			dev_warn(&card->gdev->dev, "The LAN is offline\n");
			*carrier_ok = false;
		} else {
			goto out;
		}
	} else {
		*carrier_ok = true;
	}

	card->options.ipa4.supported = 0;
	card->options.ipa6.supported = 0;
	card->options.adp.supported = 0;
	card->options.sbp.supported_funcs = 0;
	card->info.diagass_support = 0;
	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
	if (rc == -ENOMEM)
		goto out;
	if (qeth_is_supported(card, IPA_IPV6)) {
		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
		if (rc == -ENOMEM)
			goto out;
	}
	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
		rc = qeth_query_setadapterparms(card);
		if (rc < 0) {
			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
			goto out;
		}
	}
	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
		rc = qeth_query_setdiagass(card);
		if (rc)
			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
	}

	qeth_trace_features(card);

	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
		card->info.hwtrap = 0;

	if (card->options.isolation != ISOLATION_MODE_NONE) {
		rc = qeth_setadpparms_set_access_ctrl(card,
						      card->options.isolation);
		if (rc)
			goto out;
	}

	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
		goto out;
	}

	return 0;
out:
	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
		"an error on the device\n");
	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
			 CARD_DEVID(card), rc);
	return rc;
}
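
/* Bring the device online: run the common hardsetup above, size the TX
 * queues when the netdevice is registered for the first time, and then
 * let the discipline perform the layer-specific setup.
 */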
static int qeth_set_online(struct qeth_card *card)
{
	bool carrier_ok;
	int rc;

	mutex_lock(&card->discipline_mutex);
	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 2, "setonlin");

	rc = qeth_hardsetup_card(card, &carrier_ok);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
		rc = -ENODEV;
		goto err_hardsetup;
	}

	qeth_print_status_message(card);

	if (card->dev->reg_state != NETREG_REGISTERED) {
		struct qeth_priv *priv = netdev_priv(card->dev);

		if (IS_IQD(card))
			priv->tx_wanted_queues = QETH_IQD_MIN_TXQ;
		else if (IS_VM_NIC(card))
			priv->tx_wanted_queues = 1;
		else
			priv->tx_wanted_queues = card->dev->num_tx_queues;

		/* no need for locking / error handling at this early stage: */
		qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
	}

	rc = card->discipline->set_online(card, carrier_ok);
	if (rc)
		goto err_online;

	/* let user_space know that device is online */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return 0;

err_online:
err_hardsetup:
	qeth_qdio_clear_card(card, 0);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);

	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return rc;
}
int qeth_set_offline(struct qeth_card *card, bool resetting)
{
	int rc, rc2, rc3;

	mutex_lock(&card->discipline_mutex);
	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 3, "setoffl");

	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
		card->info.hwtrap = 1;
	}

	/* cancel any stalled cmd that might block the rtnl: */
	qeth_clear_ipacmd_list(card);

	rtnl_lock();
	card->info.open_when_online = card->dev->flags & IFF_UP;
	dev_close(card->dev);
	netif_device_detach(card->dev);
	netif_carrier_off(card->dev);
	rtnl_unlock();

	cancel_work_sync(&card->rx_mode_work);

	card->discipline->set_offline(card);

	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);
	card->info.promisc_mode = 0;

	rc  = qeth_stop_channel(&card->data);
	rc2 = qeth_stop_channel(&card->write);
	rc3 = qeth_stop_channel(&card->read);
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
	qdio_free(CARD_DDEV(card));

	/* let user_space know that device is offline */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);
static int qeth_do_reset(void *data)
{
	struct qeth_card *card = data;
	int rc;

	QETH_CARD_TEXT(card, 2, "recover1");
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_CARD_TEXT(card, 2, "recover2");
	dev_warn(&card->gdev->dev,
		 "A recovery process has been started for the device\n");

	qeth_set_offline(card, true);
	rc = qeth_set_online(card);
	if (!rc) {
		dev_info(&card->gdev->dev,
			 "Device successfully recovered!\n");
	} else {
		ccwgroup_set_offline(card->gdev);
		dev_warn(&card->gdev->dev,
			 "The qeth device driver failed to recover an error on the device\n");
	}
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	return 0;
}
#if IS_ENABLED(CONFIG_QETH_L3)
static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
				struct qeth_hdr *hdr)
{
	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
	struct net_device *dev = skb->dev;

	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
				"FAKELL", skb->len);
		return;
	}

	if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
		u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
							     ETH_P_IP;
		unsigned char tg_addr[ETH_ALEN];

		skb_reset_network_header(skb);
		switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
		case QETH_CAST_MULTICAST:
			if (prot == ETH_P_IP)
				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
			else
				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		case QETH_CAST_BROADCAST:
			ether_addr_copy(tg_addr, dev->broadcast);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		default:
			if (card->options.sniffer)
				skb->pkt_type = PACKET_OTHERHOST;
			ether_addr_copy(tg_addr, dev->dev_addr);
		}

		if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
			dev_hard_header(skb, dev, prot, tg_addr,
					&l3_hdr->next_hop.rx.src_mac, skb->len);
		else
			dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
					skb->len);
	}

	/* copy VLAN tag from hdr into skb */
	if (!card->options.sniffer &&
	    (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
				  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
		u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
				l3_hdr->vlan_id :
				l3_hdr->next_hop.rx.vlan_id;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
	}
}
#endif
static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
			     struct qeth_hdr *hdr, bool uses_frags)
{
	struct napi_struct *napi = &card->napi;
	bool is_cso;

	switch (hdr->hdr.l2.id) {
	case QETH_HEADER_TYPE_OSN:
		skb_push(skb, sizeof(*hdr));
		skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
		QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
		QETH_CARD_STAT_INC(card, rx_packets);

		card->osn_info.data_cb(skb);
		return;
#if IS_ENABLED(CONFIG_QETH_L3)
	case QETH_HEADER_TYPE_LAYER3:
		qeth_l3_rebuild_skb(card, skb, hdr);
		is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
		break;
#endif
	case QETH_HEADER_TYPE_LAYER2:
		is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
		break;
	default:
		/* never happens */
		if (uses_frags)
			napi_free_frags(napi);
		else
			dev_kfree_skb_any(skb);
		return;
	}

	if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		QETH_CARD_STAT_INC(card, rx_skb_csum);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
	QETH_CARD_STAT_INC(card, rx_packets);
	if (skb_is_nonlinear(skb)) {
		QETH_CARD_STAT_INC(card, rx_sg_skbs);
		QETH_CARD_STAT_ADD(card, rx_sg_frags,
				   skb_shinfo(skb)->nr_frags);
	}

	if (uses_frags) {
		napi_gro_frags(napi);
	} else {
		skb->protocol = eth_type_trans(skb, skb->dev);
		napi_gro_receive(napi, skb);
	}
}
static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
{
	struct page *page = virt_to_page(data);
	unsigned int next_frag;

	next_frag = skb_shinfo(skb)->nr_frags;
	get_page(page);
	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
			data_len);
}

static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}
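
/* Extract one packet from a completed RX buffer: locate the qeth_hdr,
 * choose between a linear skb and scatter-gather (frags) reception, and
 * then copy/map the payload from the buffer elements into the skb.
 */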
static int qeth_extract_skb(struct qeth_card *card,
			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
			    int *__offset)
{
	struct qeth_priv *priv = netdev_priv(card->dev);
	struct qdio_buffer *buffer = qethbuffer->buffer;
	struct napi_struct *napi = &card->napi;
	struct qdio_buffer_element *element;
	unsigned int linear_len = 0;
	bool uses_frags = false;
	int offset = *__offset;
	bool use_rx_sg = false;
	unsigned int headroom;
	struct qeth_hdr *hdr;
	struct sk_buff *skb;
	int skb_len = 0;

	element = &buffer->element[*element_no];

next_packet:
	/* qeth_hdr must not cross element boundaries */
	while (element->length < offset + sizeof(struct qeth_hdr)) {
		if (qeth_is_last_sbale(element))
			return -ENODATA;
		element++;
		offset = 0;
	}

	hdr = phys_to_virt(element->addr) + offset;
	offset += sizeof(*hdr);
	skb = NULL;

	switch (hdr->hdr.l2.id) {
	case QETH_HEADER_TYPE_LAYER2:
		skb_len = hdr->hdr.l2.pkt_length;
		linear_len = ETH_HLEN;
		headroom = 0;
		break;
	case QETH_HEADER_TYPE_LAYER3:
		skb_len = hdr->hdr.l3.length;
		if (!IS_LAYER3(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
			linear_len = ETH_HLEN;
			headroom = 0;
			break;
		}

		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
			linear_len = sizeof(struct ipv6hdr);
		else
			linear_len = sizeof(struct iphdr);
		headroom = ETH_HLEN;
		break;
	case QETH_HEADER_TYPE_OSN:
		skb_len = hdr->hdr.osn.pdu_length;
		if (!IS_OSN(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

		linear_len = skb_len;
		headroom = sizeof(struct qeth_hdr);
		break;
	default:
		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
			QETH_CARD_STAT_INC(card, rx_frame_errors);
		else
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);

		/* Can't determine packet length, drop the whole buffer. */
		return -EPROTONOSUPPORT;
	}

	if (skb_len < linear_len) {
		QETH_CARD_STAT_INC(card, rx_dropped_runt);
		goto walk_packet;
	}

	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
		     !atomic_read(&card->force_alloc_skb) &&
		     !IS_OSN(card));

	if (use_rx_sg) {
		/* QETH_CQ_ENABLED only: */
		if (qethbuffer->rx_skb &&
		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
			skb = qethbuffer->rx_skb;
			qethbuffer->rx_skb = NULL;
			goto use_skb;
		}

		skb = napi_get_frags(napi);
		if (!skb) {
			/* -ENOMEM, no point in falling back further. */
			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
			goto walk_packet;
		}

		if (skb_tailroom(skb) >= linear_len + headroom) {
			uses_frags = true;
			goto use_skb;
		}

		netdev_info_once(card->dev,
				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
				 linear_len + headroom, skb_tailroom(skb));
		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
	}

	linear_len = skb_len;
	skb = napi_alloc_skb(napi, linear_len + headroom);
	if (!skb) {
		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
		goto walk_packet;
	}

use_skb:
	if (headroom)
		skb_reserve(skb, headroom);
walk_packet:
	while (skb_len) {
		int data_len = min(skb_len, (int)(element->length - offset));
		char *data = phys_to_virt(element->addr) + offset;

		skb_len -= data_len;
		offset += data_len;

		/* Extract data from current element: */
		if (skb && data_len) {
			if (linear_len) {
				unsigned int copy_len;

				copy_len = min_t(unsigned int, linear_len,
						 data_len);

				skb_put_data(skb, data, copy_len);
				linear_len -= copy_len;
				data_len -= copy_len;
				data += copy_len;
			}

			if (data_len)
				qeth_create_skb_frag(skb, data, data_len);
		}

		/* Step forward to next element: */
		if (skb_len) {
			if (qeth_is_last_sbale(element)) {
				QETH_CARD_TEXT(card, 4, "unexeob");
				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
				if (skb) {
					if (uses_frags)
						napi_free_frags(napi);
					else
						dev_kfree_skb_any(skb);
					QETH_CARD_STAT_INC(card,
							   rx_length_errors);
				}
				return -EMSGSIZE;
			}
			element++;
			offset = 0;
		}
	}

	/* This packet was skipped, go get another one: */
	if (!skb)
		goto next_packet;

	*element_no = element - &buffer->element[0];
	*__offset = offset;

	qeth_receive_skb(card, skb, hdr, uses_frags);
	return 0;
}

static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
				      struct qeth_qdio_buffer *buf, bool *done)
{
	unsigned int work_done = 0;

	while (budget) {
		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
				     &card->rx.e_offset)) {
			*done = true;
			break;
		}

		work_done++;
		budget--;
	}

	return work_done;
}
static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
{
	struct qeth_rx *ctx = &card->rx;
	unsigned int work_done = 0;

	while (budget > 0) {
		struct qeth_qdio_buffer *buffer;
		unsigned int skbs_done = 0;
		bool done = false;

		/* Fetch completed RX buffers: */
		if (!card->rx.b_count) {
			card->rx.qdio_err = 0;
			card->rx.b_count = qdio_get_next_buffers(
				card->data.ccwdev, 0, &card->rx.b_index,
				&card->rx.qdio_err);
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
		}

		/* Process one completed RX buffer: */
		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
		if (!(card->rx.qdio_err &&
		      qeth_check_qdio_errors(card, buffer->buffer,
					     card->rx.qdio_err, "qinerr")))
			skbs_done = qeth_extract_skbs(card, budget, buffer,
						      &done);
		else
			done = true;

		work_done += skbs_done;
		budget -= skbs_done;

		if (done) {
			QETH_CARD_STAT_INC(card, rx_bufs);
			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
			buffer->pool_entry = NULL;
			card->rx.b_count--;
			ctx->bufs_refill++;
			ctx->bufs_refill -= qeth_rx_refill_queue(card,
								 ctx->bufs_refill);

			/* Step forward to next buffer: */
			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
			card->rx.buf_element = 0;
			card->rx.e_offset = 0;
		}
	}

	return work_done;
}
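
/* Drain the Completion Queue (input queue 1). Completion events for
 * TX buffers in PENDING state are forwarded to qeth_qdio_cq_handler().
 */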
static void qeth_cq_poll(struct qeth_card *card)
{
	unsigned int work_done = 0;

	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
		unsigned int start, error;
		int completed;

		completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
					       &error);
		if (completed <= 0)
			return;

		qeth_qdio_cq_handler(card, error, 1, start, completed);
		work_done += completed;
	}
}
int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	unsigned int work_done;

	work_done = qeth_rx_poll(card, budget);

	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_cq_poll(card);

	if (budget) {
		struct qeth_rx *ctx = &card->rx;

		/* Process any substantial refill backlog: */
		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);

		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
		if (work_done >= budget)
			return work_done;
	}

	if (napi_complete_done(napi, work_done) &&
	    qdio_start_irq(CARD_DDEV(card)))
		napi_schedule(napi);

	return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);
static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
				 unsigned int bidx, bool error, int budget)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
	u8 sflags = buffer->buffer->element[15].sflags;
	struct qeth_card *card = queue->card;

	if (queue->bufstates && (queue->bufstates[bidx].flags &
				 QDIO_OUTBUF_STATE_FLAG_PENDING)) {
		WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);

		if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
						   QETH_QDIO_BUF_PENDING) ==
		    QETH_QDIO_BUF_PRIMED)
			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);

		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);

		/* prepare the queue slot for re-use: */
		qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
		if (qeth_init_qdio_out_buf(queue, bidx)) {
			QETH_CARD_TEXT(card, 2, "outofbuf");
			qeth_schedule_recovery(card);
		}

		return;
	}

	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_notify_skbs(queue, buffer,
				 qeth_compute_cq_notification(sflags, 0));
	qeth_clear_output_buffer(queue, buffer, error, budget);
}
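
/* NAPI poll routine for IQD TX completion: reap finished buffers from
 * the device, update the BQL accounting, and wake the txq in case xmit
 * stopped it while the queue was full.
 */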
static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	unsigned int queue_no = queue->queue_no;
	struct qeth_card *card = queue->card;
	struct net_device *dev = card->dev;
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));

	while (1) {
		unsigned int start, error, i;
		unsigned int packets = 0;
		unsigned int bytes = 0;
		int completed;

		if (qeth_out_queue_is_empty(queue)) {
			napi_complete(napi);
			return 0;
		}

		/* Give the CPU a breather: */
		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
			QETH_TXQ_STAT_INC(queue, completion_yield);
			if (napi_complete_done(napi, 0))
				napi_schedule(napi);
			return 0;
		}

		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
					       &start, &error);
		if (completed <= 0) {
			/* Ensure we see TX completion for pending work: */
			if (napi_complete_done(napi, 0))
				qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS);
			return 0;
		}

		for (i = start; i < start + completed; i++) {
			struct qeth_qdio_out_buffer *buffer;
			unsigned int bidx = QDIO_BUFNR(i);

			buffer = queue->bufs[bidx];
			packets += buffer->frames;
			bytes += buffer->bytes;

			qeth_handle_send_error(card, buffer, error);
			qeth_iqd_tx_complete(queue, bidx, error, budget);
			qeth_cleanup_handled_pending(queue, bidx, false);
		}

		netdev_tx_completed_queue(txq, packets, bytes);
		atomic_sub(completed, &queue->used_buffers);
		work_done += completed;

		/* xmit may have observed the full-condition, but not yet
		 * stopped the txq. In which case the code below won't trigger.
		 * So before returning, xmit will re-check the txq's fill level
		 * and wake it up if needed.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    !qeth_out_queue_is_full(queue))
			netif_tx_wake_queue(txq);
	}
}
static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}

int qeth_setassparms_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	QETH_CARD_TEXT(card, 4, "defadpcb");

	if (cmd->hdr.return_code)
		return -EIO;

	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
						 enum qeth_ipa_funcs ipa_func,
						 u16 cmd_code,
						 unsigned int data_length,
						 enum qeth_prot_versions prot)
{
	struct qeth_ipacmd_setassparms *setassparms;
	struct qeth_ipacmd_setassparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 4, "getasscm");
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
				 data_length +
				 offsetof(struct qeth_ipacmd_setassparms,
					  data));
	if (!iob)
		return NULL;

	setassparms = &__ipa_cmd(iob)->data.setassparms;
	setassparms->assist_no = ipa_func;

	hdr = &setassparms->hdr;
	hdr->length = sizeof(*hdr) + data_length;
	hdr->command_code = cmd_code;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);

int qeth_send_simple_setassparms_prot(struct qeth_card *card,
				      enum qeth_ipa_funcs ipa_func,
				      u16 cmd_code, u32 *data,
				      enum qeth_prot_versions prot)
{
	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
	if (!iob)
		return -ENOMEM;

	if (data)
		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
static void qeth_unregister_dbf_views(void)
{
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		debug_unregister(qeth_dbf[x].id);
		qeth_dbf[x].id = NULL;
	}
}

void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
	char dbf_txt_buf[32];
	va_list args;

	if (!debug_level_enabled(id, level))
		return;
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
	debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);

static int qeth_register_dbf_views(void)
{
	int ret;
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* register the areas */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (qeth_dbf[x].id == NULL) {
			qeth_unregister_dbf_views();
			return -ENOMEM;
		}

		/* register a view */
		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		if (ret) {
			qeth_unregister_dbf_views();
			return ret;
		}

		/* set a passing level */
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}

	return 0;
}
static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */

int qeth_core_load_discipline(struct qeth_card *card,
			      enum qeth_discipline_id discipline)
{
	mutex_lock(&qeth_mod_mutex);
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l3_discipline), "qeth_l3");
		break;
	case QETH_DISCIPLINE_LAYER2:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l2_discipline), "qeth_l2");
		break;
	default:
		break;
	}
	mutex_unlock(&qeth_mod_mutex);

	if (!card->discipline) {
		dev_err(&card->gdev->dev, "There is no kernel module to "
			"support discipline %d\n", discipline);
		return -EINVAL;
	}

	card->options.layer = discipline;
	return 0;
}

void qeth_core_free_discipline(struct qeth_card *card)
{
	if (IS_LAYER2(card))
		symbol_put(qeth_l2_discipline);
	else
		symbol_put(qeth_l3_discipline);
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
	card->discipline = NULL;
}
const struct device_type qeth_generic_devtype = {
	.name = "qeth_generic",
	.groups = qeth_generic_attr_groups,
};
EXPORT_SYMBOL_GPL(qeth_generic_devtype);

static const struct device_type qeth_osn_devtype = {
	.name = "qeth_osn",
	.groups = qeth_osn_attr_groups,
};
#define DBF_NAME_LEN	20

struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];
	debug_info_t *dbf_info;
	struct list_head dbf_list;
};

static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);

static debug_info_t *qeth_get_dbf_entry(char *name)
{
	struct qeth_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qeth_dbf_list_mutex);
	return rc;
}

static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
{
	struct qeth_dbf_entry *new_entry;

	card->debug = debug_register(name, 2, 1, 8);
	if (!card->debug) {
		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
		goto err;
	}
	if (debug_register_view(card->debug, &debug_hex_ascii_view))
		goto err_dbg;
	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
	if (!new_entry)
		goto err_dbg;
	strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
	new_entry->dbf_info = card->debug;
	mutex_lock(&qeth_dbf_list_mutex);
	list_add(&new_entry->dbf_list, &qeth_dbf_list);
	mutex_unlock(&qeth_dbf_list_mutex);

	return 0;

err_dbg:
	debug_unregister(card->debug);
err:
	return -ENOMEM;
}

static void qeth_clear_dbf_list(void)
{
	struct qeth_dbf_entry *entry, *tmp;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qeth_dbf_list_mutex);
}
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;
	struct qeth_priv *priv;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
		break;
	case QETH_CARD_TYPE_OSM:
		dev = alloc_etherdev(sizeof(*priv));
		break;
	case QETH_CARD_TYPE_OSN:
		dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN,
				   ether_setup);
		break;
	default:
		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
	}

	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	priv->rx_copybreak = QETH_RX_COPYBREAK;

	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
	dev->min_mtu = IS_OSN(card) ? 64 : 576;
	/* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);

	if (IS_OSN(card)) {
		dev->ethtool_ops = &qeth_osn_ethtool_ops;
	} else {
		dev->ethtool_ops = &qeth_ethtool_ops;
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->hw_features |= NETIF_F_SG;
		dev->vlan_features |= NETIF_F_SG;
		if (IS_IQD(card))
			dev->features |= NETIF_F_SG;
	}

	return dev;
}

struct net_device *qeth_clone_netdev(struct net_device *orig)
{
	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);

	if (!clone)
		return NULL;

	clone->dev_port = orig->dev_port;
	return clone;
}
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
	enum qeth_discipline_id enforced_disc;
	char dbf_name[DBF_NAME_LEN];

	QETH_DBF_TEXT(SETUP, 2, "probedev");

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));

	card = qeth_alloc_card(gdev);
	if (!card) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
		rc = -ENOMEM;
		goto err_dev;
	}

	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		dev_name(&gdev->dev));
	card->debug = qeth_get_dbf_entry(dbf_name);
	if (!card->debug) {
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
	}

	qeth_setup_card(card);
	card->dev = qeth_alloc_netdev(card);
	if (!card->dev) {
		rc = -ENOMEM;
		goto err_card;
	}

	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;

	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
		rc = qeth_core_load_discipline(card, enforced_disc);
		if (rc)
			goto err_load;

		gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
						card->discipline->devtype;
		rc = card->discipline->setup(card->gdev);
		if (rc)
			goto err_disc;
		break;
	}

	return 0;

err_disc:
	qeth_core_free_discipline(card);
err_load:
err_chp_desc:
	free_netdev(card->dev);
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}
static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 2, "removedv");

	if (card->discipline) {
		card->discipline->remove(gdev);
		qeth_core_free_discipline(card);
	}

	qeth_free_qdio_queues(card);

	free_netdev(card->dev);
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}

static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_discipline_id def_discipline;

	if (!card->discipline) {
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
		rc = qeth_core_load_discipline(card, def_discipline);
		if (rc)
			goto err;
		rc = card->discipline->setup(card->gdev);
		if (rc) {
			qeth_core_free_discipline(card);
			goto err;
		}
	}

	rc = qeth_set_online(card);
err:
	return rc;
}

static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	return qeth_set_offline(card, false);
}

static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qdio_free(CARD_DDEV(card));
}
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
{
	int err;

	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
				  buf);

	return err ? err : count;
}
static DRIVER_ATTR_WO(group);

static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};

static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
};
struct qeth_card *qeth_get_card_by_busid(char *bus_id)
{
	struct ccwgroup_device *gdev;
	struct qeth_card *card;

	gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
	if (!gdev)
		return NULL;

	card = dev_get_drvdata(&gdev->dev);
	put_device(&gdev->dev);
	return card;
}
EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
			return 1;
		return 0;
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
		break;
	default:
		if (card->discipline->do_ioctl)
			rc = card->discipline->do_ioctl(dev, rq, cmd);
		else
			rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);
static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u32 *features = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	*features = cmd->data.setassparms.data.flags_32bit;
	return 0;
}

static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
}

static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			    enum qeth_prot_versions prot, u8 *lp2lp)
{
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
	int rc;

	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
		return rc;

	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
				       prot);
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
	}

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_csum_off(card, cstype, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);

	if (lp2lp)
		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);

	return 0;
}
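
/* Toggle HW checksum offload for one direction and IP version. When
 * enabling, lp2lp reports back whether the adapter also checksums
 * LPAR-to-LPAR (port-internal) traffic.
 */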
static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
			     enum qeth_prot_versions prot, u8 *lp2lp)
{
	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
		    qeth_set_csum_off(card, cstype, prot);
}
static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, NULL, prot);
}

static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}

static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4, NULL);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6, NULL);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}
/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
static void qeth_check_restricted_features(struct qeth_card *card,
					   netdev_features_t changed,
					   netdev_features_t actual)
{
	netdev_features_t ipv6_features = NETIF_F_TSO6;
	netdev_features_t ipv4_features = NETIF_F_TSO;

	if (!card->info.has_lp2lp_cso_v6)
		ipv6_features |= NETIF_F_IPV6_CSUM;
	if (!card->info.has_lp2lp_cso_v4)
		ipv4_features |= NETIF_F_IP_CSUM;

	if ((changed & ipv6_features) && !(actual & ipv6_features))
		qeth_flush_local_addrs6(card);
	if ((changed & ipv4_features) && !(actual & ipv4_features))
		qeth_flush_local_addrs4(card);
}
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
				       &card->info.has_lp2lp_cso_v4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
				       &card->info.has_lp2lp_cso_v6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return rc;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);
netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);
netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);
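
/* Aggregate the per-card RX counters and the per-queue TX counters into
 * the standard rtnl_link_stats64 view.
 */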
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);
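
/* RX counters live in the per-card qeth_card_stats, while TX counters
 * are kept per outbound queue and summed up here on demand.
 * .ndo_get_stats64 may run concurrently with the data path; the
 * counters are read without locking, so at worst this returns a
 * momentarily inconsistent snapshot.
 */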
#define TC_IQD_UCAST	0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}
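
/* Example of the resulting mapping (illustrative): for an IQD device
 * with 8 TX queues, qeth_set_real_num_tx_queues(card, 8) calls this
 * helper with ucast_txqs = 7. TC 0 then spans the 7 queues starting at
 * QETH_IQD_MIN_UCAST_TXQ (i.e. queues 1..7), and since every priority
 * maps to TC 0, the stack's queue selection never lands on the
 * reserved mcast queue QETH_IQD_MCAST_TXQ (queue 0).
 */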
int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);
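
/* As in netif_setup_tc(), the tc mapping is adjusted before the queue
 * count is changed, so there is no window where the prio map points at
 * a queue that no longer exists. If netif_set_real_num_tx_queues()
 * fails, the map is rebuilt from dev->real_num_tx_queues, which still
 * holds the old queue count.
 */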
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
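
/* Unicast walk-through: netdev_pick_tx() normally honours the
 * prio->tc map installed by qeth_iqd_set_prio_tc_map() and thus picks
 * from the ucast range. The final check catches picks of queue 0 that
 * bypass the map (presumably e.g. a queue id cached on the socket, or
 * an XPS match) and redirects them to QETH_IQD_MIN_UCAST_TXQ.
 */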
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	napi_enable(&card->napi);
	local_bh_disable();
	napi_schedule(&card->napi);
	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
					  QETH_NAPI_WEIGHT);
			napi_enable(&queue->napi);
			napi_schedule(&queue->napi);
		}
	}
	/* kick-start the NAPI softirq: */
	local_bh_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);
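
/* Why the local_bh_disable()/local_bh_enable() pair: napi_schedule()
 * merely raises NET_RX_SOFTIRQ. Re-enabling bottom halves runs the
 * pending softirq right here, so all NAPI instances start polling
 * immediately instead of waiting for the next softirq opportunity on
 * this CPU.
 */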
int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethstop");
	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		/* Quiesce the NAPI instances: */
		qeth_for_each_output_queue(card, queue, i)
			napi_disable(&queue->napi);

		/* Stop .ndo_start_xmit, might still access queue->napi. */
		netif_tx_disable(dev);

		qeth_for_each_output_queue(card, queue, i) {
			del_timer_sync(&queue->timer);
			/* Queues may get re-allocated, so remove the NAPIs. */
			netif_napi_del(&queue->napi);
		}
	} else {
		netif_tx_disable(dev);
	}

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);
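
/* Teardown order for the TX NAPIs: quiesce them first, then stop
 * .ndo_start_xmit (which may still reference queue->napi), and only
 * then delete the queue timers and unregister the NAPI instances.
 * The netif_napi_del() step is needed because the queues may be freed
 * and re-allocated before the next qeth_open().
 */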
static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}
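
/* The error path above is the usual unwind ladder: each label undoes
 * exactly the steps that succeeded before the failure, in reverse
 * order of setup. A failing ccwgroup_driver_register() for instance
 * jumps to ccwgroup_err, unregisters the ccw driver, and then falls
 * through the remaining labels to release both caches, the root
 * device, the dbf views and the debugfs directory.
 */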
static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");