ixgbe: add comment on SFP+ ID for Active DA
[linux-2.6-block.git] / drivers / net / ehea / ehea_main.c
CommitLineData
7a291083
JBT
1/*
2 * linux/drivers/net/ehea/ehea_main.c
3 *
4 * eHEA ethernet device driver for IBM eServer System p
5 *
6 * (C) Copyright IBM Corp. 2006
7 *
8 * Authors:
508d2b5d
DM
9 * Christoph Raisch <raisch@de.ibm.com>
10 * Jan-Bernd Themann <themann@de.ibm.com>
11 * Thomas Klein <tklein@de.ibm.com>
7a291083
JBT
12 *
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#include <linux/in.h>
30#include <linux/ip.h>
31#include <linux/tcp.h>
32#include <linux/udp.h>
33#include <linux/if.h>
34#include <linux/list.h>
5a0e3ad6 35#include <linux/slab.h>
7a291083 36#include <linux/if_ether.h>
2a6f4e49
JBT
37#include <linux/notifier.h>
38#include <linux/reboot.h>
48cfb14f 39#include <linux/memory.h>
21eee2dd 40#include <asm/kexec.h>
06f89edf 41#include <linux/mutex.h>
2a6f4e49 42
7a291083
JBT
43#include <net/ip.h>
44
45#include "ehea.h"
46#include "ehea_qmr.h"
47#include "ehea_phyp.h"
48
49
50MODULE_LICENSE("GPL");
51MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
52MODULE_DESCRIPTION("IBM eServer HEA Driver");
53MODULE_VERSION(DRV_VERSION);
54
55
56static int msg_level = -1;
57static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
58static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
59static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
60static int sq_entries = EHEA_DEF_ENTRIES_SQ;
508d2b5d
DM
61static int use_mcs;
62static int use_lro;
d4dc4ec9 63static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
18604c54 64static int num_tx_qps = EHEA_NUM_TX_QP;
508d2b5d 65static int prop_carrier_state;
7a291083
JBT
66
67module_param(msg_level, int, 0);
68module_param(rq1_entries, int, 0);
69module_param(rq2_entries, int, 0);
70module_param(rq3_entries, int, 0);
71module_param(sq_entries, int, 0);
8759cf76 72module_param(prop_carrier_state, int, 0);
18604c54 73module_param(use_mcs, int, 0);
d4dc4ec9
JBT
74module_param(use_lro, int, 0);
75module_param(lro_max_aggr, int, 0);
18604c54 76module_param(num_tx_qps, int, 0);
7a291083 77
18604c54 78MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
7a291083 79MODULE_PARM_DESC(msg_level, "msg_level");
8759cf76
JBT
80MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
81 "port to stack. 1:yes, 0:no. Default = 0 ");
7a291083
JBT
82MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
83 "[2^x - 1], x = [6..14]. Default = "
84 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
85MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
86 "[2^x - 1], x = [6..14]. Default = "
87 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
88MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
89 "[2^x - 1], x = [6..14]. Default = "
90 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
91MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
92 "[2^x - 1], x = [6..14]. Default = "
93 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
18072a5b 94MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");
7a291083 95
d4dc4ec9
JBT
96MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
97 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
98MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
99 "Default = 0");
100
508d2b5d 101static int port_name_cnt;
44c82152 102static LIST_HEAD(adapter_list);
48e4cc77 103static unsigned long ehea_driver_flags;
44c82152 104struct work_struct ehea_rereg_mr_task;
06f89edf 105static DEFINE_MUTEX(dlpar_mem_lock);
21eee2dd
TK
106struct ehea_fw_handle_array ehea_fw_handles;
107struct ehea_bcmc_reg_array ehea_bcmc_regs;
108
d1dea38d 109
6b08f3ae 110static int __devinit ehea_probe_adapter(struct of_device *dev,
d1d25aab 111 const struct of_device_id *id);
d1dea38d 112
6b08f3ae 113static int __devexit ehea_remove(struct of_device *dev);
d1dea38d
TK
114
115static struct of_device_id ehea_device_table[] = {
116 {
117 .name = "lhea",
118 .compatible = "IBM,lhea",
119 },
120 {},
121};
b0afffe8 122MODULE_DEVICE_TABLE(of, ehea_device_table);
d1dea38d 123
6b08f3ae 124static struct of_platform_driver ehea_driver = {
4018294b
GL
125 .driver = {
126 .name = "ehea",
127 .owner = THIS_MODULE,
128 .of_match_table = ehea_device_table,
129 },
d1dea38d
TK
130 .probe = ehea_probe_adapter,
131 .remove = ehea_remove,
132};
133
508d2b5d
DM
134void ehea_dump(void *adr, int len, char *msg)
135{
7a291083
JBT
136 int x;
137 unsigned char *deb = adr;
138 for (x = 0; x < len; x += 16) {
a1c5a893 139 printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg,
508d2b5d 140 deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
7a291083
JBT
141 deb += 16;
142 }
143}
144
2f69ae01
JBT
145void ehea_schedule_port_reset(struct ehea_port *port)
146{
147 if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
148 schedule_work(&port->reset_task);
149}
150
21eee2dd
TK
151static void ehea_update_firmware_handles(void)
152{
153 struct ehea_fw_handle_entry *arr = NULL;
154 struct ehea_adapter *adapter;
155 int num_adapters = 0;
156 int num_ports = 0;
157 int num_portres = 0;
158 int i = 0;
159 int num_fw_handles, k, l;
160
161 /* Determine number of handles */
52e21b1b
JBT
162 mutex_lock(&ehea_fw_handles.lock);
163
21eee2dd
TK
164 list_for_each_entry(adapter, &adapter_list, list) {
165 num_adapters++;
166
167 for (k = 0; k < EHEA_MAX_PORTS; k++) {
168 struct ehea_port *port = adapter->port[k];
169
170 if (!port || (port->state != EHEA_PORT_UP))
171 continue;
172
173 num_ports++;
174 num_portres += port->num_def_qps + port->num_add_tx_qps;
175 }
176 }
177
178 num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
179 num_ports * EHEA_NUM_PORT_FW_HANDLES +
180 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;
181
182 if (num_fw_handles) {
183 arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
184 if (!arr)
52e21b1b 185 goto out; /* Keep the existing array */
21eee2dd
TK
186 } else
187 goto out_update;
188
189 list_for_each_entry(adapter, &adapter_list, list) {
52e21b1b
JBT
190 if (num_adapters == 0)
191 break;
192
21eee2dd
TK
193 for (k = 0; k < EHEA_MAX_PORTS; k++) {
194 struct ehea_port *port = adapter->port[k];
195
8e95a202
JP
196 if (!port || (port->state != EHEA_PORT_UP) ||
197 (num_ports == 0))
21eee2dd
TK
198 continue;
199
200 for (l = 0;
201 l < port->num_def_qps + port->num_add_tx_qps;
202 l++) {
203 struct ehea_port_res *pr = &port->port_res[l];
204
205 arr[i].adh = adapter->handle;
206 arr[i++].fwh = pr->qp->fw_handle;
207 arr[i].adh = adapter->handle;
208 arr[i++].fwh = pr->send_cq->fw_handle;
209 arr[i].adh = adapter->handle;
210 arr[i++].fwh = pr->recv_cq->fw_handle;
211 arr[i].adh = adapter->handle;
212 arr[i++].fwh = pr->eq->fw_handle;
213 arr[i].adh = adapter->handle;
214 arr[i++].fwh = pr->send_mr.handle;
215 arr[i].adh = adapter->handle;
216 arr[i++].fwh = pr->recv_mr.handle;
217 }
218 arr[i].adh = adapter->handle;
219 arr[i++].fwh = port->qp_eq->fw_handle;
52e21b1b 220 num_ports--;
21eee2dd
TK
221 }
222
223 arr[i].adh = adapter->handle;
224 arr[i++].fwh = adapter->neq->fw_handle;
225
226 if (adapter->mr.handle) {
227 arr[i].adh = adapter->handle;
228 arr[i++].fwh = adapter->mr.handle;
229 }
52e21b1b 230 num_adapters--;
21eee2dd
TK
231 }
232
233out_update:
234 kfree(ehea_fw_handles.arr);
235 ehea_fw_handles.arr = arr;
236 ehea_fw_handles.num_entries = i;
52e21b1b
JBT
237out:
238 mutex_unlock(&ehea_fw_handles.lock);
21eee2dd
TK
239}
240
241static void ehea_update_bcmc_registrations(void)
242{
52e21b1b 243 unsigned long flags;
21eee2dd
TK
244 struct ehea_bcmc_reg_entry *arr = NULL;
245 struct ehea_adapter *adapter;
246 struct ehea_mc_list *mc_entry;
247 int num_registrations = 0;
248 int i = 0;
249 int k;
250
52e21b1b
JBT
251 spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);
252
21eee2dd
TK
253 /* Determine number of registrations */
254 list_for_each_entry(adapter, &adapter_list, list)
255 for (k = 0; k < EHEA_MAX_PORTS; k++) {
256 struct ehea_port *port = adapter->port[k];
257
258 if (!port || (port->state != EHEA_PORT_UP))
259 continue;
260
261 num_registrations += 2; /* Broadcast registrations */
262
263 list_for_each_entry(mc_entry, &port->mc_list->list,list)
264 num_registrations += 2;
265 }
266
267 if (num_registrations) {
5c2cec14 268 arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
21eee2dd 269 if (!arr)
52e21b1b 270 goto out; /* Keep the existing array */
21eee2dd
TK
271 } else
272 goto out_update;
273
274 list_for_each_entry(adapter, &adapter_list, list) {
275 for (k = 0; k < EHEA_MAX_PORTS; k++) {
276 struct ehea_port *port = adapter->port[k];
277
278 if (!port || (port->state != EHEA_PORT_UP))
279 continue;
280
52e21b1b
JBT
281 if (num_registrations == 0)
282 goto out_update;
283
21eee2dd
TK
284 arr[i].adh = adapter->handle;
285 arr[i].port_id = port->logical_port_id;
286 arr[i].reg_type = EHEA_BCMC_BROADCAST |
287 EHEA_BCMC_UNTAGGED;
288 arr[i++].macaddr = port->mac_addr;
289
290 arr[i].adh = adapter->handle;
291 arr[i].port_id = port->logical_port_id;
292 arr[i].reg_type = EHEA_BCMC_BROADCAST |
293 EHEA_BCMC_VLANID_ALL;
294 arr[i++].macaddr = port->mac_addr;
52e21b1b 295 num_registrations -= 2;
21eee2dd
TK
296
297 list_for_each_entry(mc_entry,
298 &port->mc_list->list, list) {
52e21b1b
JBT
299 if (num_registrations == 0)
300 goto out_update;
301
21eee2dd
TK
302 arr[i].adh = adapter->handle;
303 arr[i].port_id = port->logical_port_id;
304 arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
305 EHEA_BCMC_MULTICAST |
306 EHEA_BCMC_UNTAGGED;
307 arr[i++].macaddr = mc_entry->macaddr;
308
309 arr[i].adh = adapter->handle;
310 arr[i].port_id = port->logical_port_id;
311 arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
312 EHEA_BCMC_MULTICAST |
313 EHEA_BCMC_VLANID_ALL;
314 arr[i++].macaddr = mc_entry->macaddr;
52e21b1b 315 num_registrations -= 2;
21eee2dd
TK
316 }
317 }
318 }
319
320out_update:
321 kfree(ehea_bcmc_regs.arr);
322 ehea_bcmc_regs.arr = arr;
323 ehea_bcmc_regs.num_entries = i;
52e21b1b
JBT
324out:
325 spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
21eee2dd
TK
326}
327
7a291083
JBT
328static struct net_device_stats *ehea_get_stats(struct net_device *dev)
329{
330 struct ehea_port *port = netdev_priv(dev);
331 struct net_device_stats *stats = &port->stats;
332 struct hcp_ehea_port_cb2 *cb2;
7393b87c 333 u64 hret, rx_packets, tx_packets;
7a291083
JBT
334 int i;
335
336 memset(stats, 0, sizeof(*stats));
337
3faf2693 338 cb2 = (void *)get_zeroed_page(GFP_ATOMIC);
7a291083
JBT
339 if (!cb2) {
340 ehea_error("no mem for cb2");
341 goto out;
342 }
343
344 hret = ehea_h_query_ehea_port(port->adapter->handle,
345 port->logical_port_id,
346 H_PORT_CB2, H_PORT_CB2_ALL, cb2);
347 if (hret != H_SUCCESS) {
348 ehea_error("query_ehea_port failed");
349 goto out_herr;
350 }
351
352 if (netif_msg_hw(port))
353 ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
354
355 rx_packets = 0;
356 for (i = 0; i < port->num_def_qps; i++)
357 rx_packets += port->port_res[i].rx_packets;
358
7393b87c
TK
359 tx_packets = 0;
360 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
361 tx_packets += port->port_res[i].tx_packets;
362
363 stats->tx_packets = tx_packets;
7a291083
JBT
364 stats->multicast = cb2->rxmcp;
365 stats->rx_errors = cb2->rxuerr;
366 stats->rx_bytes = cb2->rxo;
367 stats->tx_bytes = cb2->txo;
368 stats->rx_packets = rx_packets;
369
370out_herr:
3faf2693 371 free_page((unsigned long)cb2);
7a291083
JBT
372out:
373 return stats;
374}
375
376static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
377{
378 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
379 struct net_device *dev = pr->port->netdev;
380 int max_index_mask = pr->rq1_skba.len - 1;
2c69448b
JBT
381 int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
382 int adder = 0;
7a291083
JBT
383 int i;
384
2c69448b
JBT
385 pr->rq1_skba.os_skbs = 0;
386
387 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
44fb3126
TK
388 if (nr_of_wqes > 0)
389 pr->rq1_skba.index = index;
2c69448b 390 pr->rq1_skba.os_skbs = fill_wqes;
7a291083 391 return;
2c69448b 392 }
7a291083 393
2c69448b 394 for (i = 0; i < fill_wqes; i++) {
7a291083
JBT
395 if (!skb_arr_rq1[index]) {
396 skb_arr_rq1[index] = netdev_alloc_skb(dev,
397 EHEA_L_PKT_SIZE);
398 if (!skb_arr_rq1[index]) {
2c69448b 399 pr->rq1_skba.os_skbs = fill_wqes - i;
7a291083
JBT
400 break;
401 }
402 }
403 index--;
404 index &= max_index_mask;
2c69448b 405 adder++;
7a291083 406 }
2c69448b
JBT
407
408 if (adder == 0)
409 return;
410
7a291083 411 /* Ring doorbell */
2c69448b 412 ehea_update_rq1a(pr->qp, adder);
7a291083
JBT
413}
414
e2878806 415static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
7a291083 416{
7a291083
JBT
417 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
418 struct net_device *dev = pr->port->netdev;
419 int i;
420
421 for (i = 0; i < pr->rq1_skba.len; i++) {
422 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
e2878806
TK
423 if (!skb_arr_rq1[i])
424 break;
7a291083
JBT
425 }
426 /* Ring doorbell */
427 ehea_update_rq1a(pr->qp, nr_rq1a);
7a291083
JBT
428}
429
430static int ehea_refill_rq_def(struct ehea_port_res *pr,
431 struct ehea_q_skb_arr *q_skba, int rq_nr,
432 int num_wqes, int wqe_type, int packet_size)
433{
434 struct net_device *dev = pr->port->netdev;
435 struct ehea_qp *qp = pr->qp;
436 struct sk_buff **skb_arr = q_skba->arr;
437 struct ehea_rwqe *rwqe;
438 int i, index, max_index_mask, fill_wqes;
2c69448b 439 int adder = 0;
7a291083
JBT
440 int ret = 0;
441
442 fill_wqes = q_skba->os_skbs + num_wqes;
2c69448b 443 q_skba->os_skbs = 0;
7a291083 444
2c69448b
JBT
445 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
446 q_skba->os_skbs = fill_wqes;
7a291083 447 return ret;
2c69448b 448 }
7a291083
JBT
449
450 index = q_skba->index;
451 max_index_mask = q_skba->len - 1;
452 for (i = 0; i < fill_wqes; i++) {
2c69448b 453 u64 tmp_addr;
89d71a66
ED
454 struct sk_buff *skb;
455
456 skb = netdev_alloc_skb_ip_align(dev, packet_size);
7a291083 457 if (!skb) {
7a291083 458 q_skba->os_skbs = fill_wqes - i;
e2878806
TK
459 if (q_skba->os_skbs == q_skba->len - 2) {
460 ehea_info("%s: rq%i ran dry - no mem for skb",
461 pr->port->netdev->name, rq_nr);
462 ret = -ENOMEM;
463 }
7a291083
JBT
464 break;
465 }
7a291083
JBT
466
467 skb_arr[index] = skb;
2c69448b
JBT
468 tmp_addr = ehea_map_vaddr(skb->data);
469 if (tmp_addr == -1) {
470 dev_kfree_skb(skb);
471 q_skba->os_skbs = fill_wqes - i;
472 ret = 0;
473 break;
474 }
7a291083
JBT
475
476 rwqe = ehea_get_next_rwqe(qp, rq_nr);
477 rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
d1d25aab 478 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
7a291083 479 rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
2c69448b 480 rwqe->sg_list[0].vaddr = tmp_addr;
7a291083
JBT
481 rwqe->sg_list[0].len = packet_size;
482 rwqe->data_segments = 1;
483
484 index++;
485 index &= max_index_mask;
2c69448b 486 adder++;
7a291083 487 }
44c82152 488
7a291083 489 q_skba->index = index;
2c69448b
JBT
490 if (adder == 0)
491 goto out;
7a291083
JBT
492
493 /* Ring doorbell */
494 iosync();
495 if (rq_nr == 2)
2c69448b 496 ehea_update_rq2a(pr->qp, adder);
7a291083 497 else
2c69448b 498 ehea_update_rq3a(pr->qp, adder);
44c82152 499out:
7a291083
JBT
500 return ret;
501}
502
503
504static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
505{
506 return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
507 nr_of_wqes, EHEA_RWQE2_TYPE,
89d71a66 508 EHEA_RQ2_PKT_SIZE);
7a291083
JBT
509}
510
511
512static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
513{
514 return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
515 nr_of_wqes, EHEA_RWQE3_TYPE,
89d71a66 516 EHEA_MAX_PACKET_SIZE);
7a291083
JBT
517}
518
519static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
520{
521 *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
522 if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
523 return 0;
524 if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
525 (cqe->header_length == 0))
526 return 0;
527 return -EINVAL;
528}
529
530static inline void ehea_fill_skb(struct net_device *dev,
531 struct sk_buff *skb, struct ehea_cqe *cqe)
532{
533 int length = cqe->num_bytes_transfered - 4; /*remove CRC */
534
535 skb_put(skb, length);
536 skb->ip_summed = CHECKSUM_UNNECESSARY;
537 skb->protocol = eth_type_trans(skb, dev);
538}
539
540static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
541 int arr_len,
542 struct ehea_cqe *cqe)
543{
544 int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
545 struct sk_buff *skb;
546 void *pref;
547 int x;
548
549 x = skb_index + 1;
550 x &= (arr_len - 1);
551
552 pref = skb_array[x];
0b2febf3
HH
553 if (pref) {
554 prefetchw(pref);
555 prefetchw(pref + EHEA_CACHE_LINE);
556
557 pref = (skb_array[x]->data);
558 prefetch(pref);
559 prefetch(pref + EHEA_CACHE_LINE);
560 prefetch(pref + EHEA_CACHE_LINE * 2);
561 prefetch(pref + EHEA_CACHE_LINE * 3);
562 }
563
7a291083
JBT
564 skb = skb_array[skb_index];
565 skb_array[skb_index] = NULL;
566 return skb;
567}
568
569static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
570 int arr_len, int wqe_index)
571{
572 struct sk_buff *skb;
573 void *pref;
574 int x;
575
576 x = wqe_index + 1;
577 x &= (arr_len - 1);
578
579 pref = skb_array[x];
0b2febf3
HH
580 if (pref) {
581 prefetchw(pref);
582 prefetchw(pref + EHEA_CACHE_LINE);
7a291083 583
0b2febf3
HH
584 pref = (skb_array[x]->data);
585 prefetchw(pref);
586 prefetchw(pref + EHEA_CACHE_LINE);
587 }
7a291083
JBT
588
589 skb = skb_array[wqe_index];
590 skb_array[wqe_index] = NULL;
591 return skb;
592}
593
594static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
595 struct ehea_cqe *cqe, int *processed_rq2,
596 int *processed_rq3)
597{
598 struct sk_buff *skb;
599
acbddb59
JBT
600 if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
601 pr->p_stats.err_tcp_cksum++;
602 if (cqe->status & EHEA_CQE_STAT_ERR_IP)
603 pr->p_stats.err_ip_cksum++;
604 if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
605 pr->p_stats.err_frame_crc++;
606
7a291083
JBT
607 if (rq == 2) {
608 *processed_rq2 += 1;
609 skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
610 dev_kfree_skb(skb);
611 } else if (rq == 3) {
612 *processed_rq3 += 1;
613 skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
614 dev_kfree_skb(skb);
615 }
616
617 if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
58dd8258
TK
618 if (netif_msg_rx_err(pr->port)) {
619 ehea_error("Critical receive error for QP %d. "
620 "Resetting port.", pr->qp->init_attr.qp_nr);
621 ehea_dump(cqe, sizeof(*cqe), "CQE");
622 }
2f69ae01 623 ehea_schedule_port_reset(pr->port);
7a291083
JBT
624 return 1;
625 }
626
627 return 0;
628}
629
d4dc4ec9
JBT
630static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
631 void **tcph, u64 *hdr_flags, void *priv)
632{
633 struct ehea_cqe *cqe = priv;
634 unsigned int ip_len;
635 struct iphdr *iph;
636
637 /* non tcp/udp packets */
638 if (!cqe->header_length)
639 return -1;
640
641 /* non tcp packet */
642 skb_reset_network_header(skb);
643 iph = ip_hdr(skb);
644 if (iph->protocol != IPPROTO_TCP)
645 return -1;
646
647 ip_len = ip_hdrlen(skb);
648 skb_set_transport_header(skb, ip_len);
649 *tcph = tcp_hdr(skb);
650
651 /* check if ip header and tcp header are complete */
3ff2cd23 652 if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
d4dc4ec9
JBT
653 return -1;
654
655 *hdr_flags = LRO_IPV4 | LRO_TCP;
656 *iphdr = iph;
657
658 return 0;
659}
660
661static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
662 struct sk_buff *skb)
663{
8e95a202
JP
664 int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
665 pr->port->vgrp);
d4dc4ec9
JBT
666
667 if (use_lro) {
668 if (vlan_extracted)
669 lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
670 pr->port->vgrp,
671 cqe->vlan_tag,
672 cqe);
673 else
674 lro_receive_skb(&pr->lro_mgr, skb, cqe);
675 } else {
676 if (vlan_extracted)
677 vlan_hwaccel_receive_skb(skb, pr->port->vgrp,
678 cqe->vlan_tag);
679 else
680 netif_receive_skb(skb);
681 }
682}
683
bea3348e
SH
684static int ehea_proc_rwqes(struct net_device *dev,
685 struct ehea_port_res *pr,
686 int budget)
7a291083 687{
18604c54 688 struct ehea_port *port = pr->port;
7a291083
JBT
689 struct ehea_qp *qp = pr->qp;
690 struct ehea_cqe *cqe;
691 struct sk_buff *skb;
692 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
693 struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
694 struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
695 int skb_arr_rq1_len = pr->rq1_skba.len;
696 int skb_arr_rq2_len = pr->rq2_skba.len;
697 int skb_arr_rq3_len = pr->rq3_skba.len;
698 int processed, processed_rq1, processed_rq2, processed_rq3;
bea3348e 699 int wqe_index, last_wqe_index, rq, port_reset;
7a291083
JBT
700
701 processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
702 last_wqe_index = 0;
7a291083 703
7a291083 704 cqe = ehea_poll_rq1(qp, &wqe_index);
bea3348e 705 while ((processed < budget) && cqe) {
7a291083
JBT
706 ehea_inc_rq1(qp);
707 processed_rq1++;
708 processed++;
7a291083
JBT
709 if (netif_msg_rx_status(port))
710 ehea_dump(cqe, sizeof(*cqe), "CQE");
711
712 last_wqe_index = wqe_index;
713 rmb();
714 if (!ehea_check_cqe(cqe, &rq)) {
508d2b5d
DM
715 if (rq == 1) {
716 /* LL RQ1 */
7a291083
JBT
717 skb = get_skb_by_index_ll(skb_arr_rq1,
718 skb_arr_rq1_len,
719 wqe_index);
720 if (unlikely(!skb)) {
721 if (netif_msg_rx_err(port))
722 ehea_error("LL rq1: skb=NULL");
18604c54 723
bea3348e 724 skb = netdev_alloc_skb(dev,
7a291083
JBT
725 EHEA_L_PKT_SIZE);
726 if (!skb)
727 break;
728 }
508d2b5d 729 skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
d1d25aab 730 cqe->num_bytes_transfered - 4);
bea3348e 731 ehea_fill_skb(dev, skb, cqe);
508d2b5d
DM
732 } else if (rq == 2) {
733 /* RQ2 */
7a291083
JBT
734 skb = get_skb_by_index(skb_arr_rq2,
735 skb_arr_rq2_len, cqe);
736 if (unlikely(!skb)) {
737 if (netif_msg_rx_err(port))
738 ehea_error("rq2: skb=NULL");
739 break;
740 }
bea3348e 741 ehea_fill_skb(dev, skb, cqe);
7a291083 742 processed_rq2++;
508d2b5d
DM
743 } else {
744 /* RQ3 */
7a291083
JBT
745 skb = get_skb_by_index(skb_arr_rq3,
746 skb_arr_rq3_len, cqe);
747 if (unlikely(!skb)) {
748 if (netif_msg_rx_err(port))
749 ehea_error("rq3: skb=NULL");
750 break;
751 }
bea3348e 752 ehea_fill_skb(dev, skb, cqe);
7a291083
JBT
753 processed_rq3++;
754 }
755
d4dc4ec9 756 ehea_proc_skb(pr, cqe, skb);
18604c54 757 } else {
acbddb59 758 pr->p_stats.poll_receive_errors++;
7a291083
JBT
759 port_reset = ehea_treat_poll_error(pr, rq, cqe,
760 &processed_rq2,
761 &processed_rq3);
762 if (port_reset)
763 break;
764 }
765 cqe = ehea_poll_rq1(qp, &wqe_index);
766 }
d4dc4ec9
JBT
767 if (use_lro)
768 lro_flush_all(&pr->lro_mgr);
7a291083 769
7a291083
JBT
770 pr->rx_packets += processed;
771
772 ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
773 ehea_refill_rq2(pr, processed_rq2);
774 ehea_refill_rq3(pr, processed_rq3);
775
bea3348e 776 return processed;
7a291083
JBT
777}
778
18604c54 779static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
7a291083 780{
acbddb59 781 struct sk_buff *skb;
7a291083
JBT
782 struct ehea_cq *send_cq = pr->send_cq;
783 struct ehea_cqe *cqe;
18604c54 784 int quota = my_quota;
7a291083
JBT
785 int cqe_counter = 0;
786 int swqe_av = 0;
acbddb59 787 int index;
7a291083
JBT
788 unsigned long flags;
789
18604c54 790 cqe = ehea_poll_cq(send_cq);
508d2b5d 791 while (cqe && (quota > 0)) {
18604c54
JBT
792 ehea_inc_cq(send_cq);
793
7a291083
JBT
794 cqe_counter++;
795 rmb();
796 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
ea96ceac
TK
797 ehea_error("Bad send completion status=0x%04X",
798 cqe->status);
799
7a291083
JBT
800 if (netif_msg_tx_err(pr->port))
801 ehea_dump(cqe, sizeof(*cqe), "Send CQE");
ea96ceac
TK
802
803 if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
804 ehea_error("Resetting port");
805 ehea_schedule_port_reset(pr->port);
806 break;
807 }
7a291083
JBT
808 }
809
810 if (netif_msg_tx_done(pr->port))
811 ehea_dump(cqe, sizeof(*cqe), "CQE");
812
813 if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
acbddb59
JBT
814 == EHEA_SWQE2_TYPE)) {
815
816 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
817 skb = pr->sq_skba.arr[index];
818 dev_kfree_skb(skb);
819 pr->sq_skba.arr[index] = NULL;
820 }
7a291083
JBT
821
822 swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
823 quota--;
18604c54
JBT
824
825 cqe = ehea_poll_cq(send_cq);
ee289b64 826 }
7a291083
JBT
827
828 ehea_update_feca(send_cq, cqe_counter);
829 atomic_add(swqe_av, &pr->swqe_avail);
830
831 spin_lock_irqsave(&pr->netif_queue, flags);
18604c54 832
7a291083
JBT
833 if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
834 >= pr->swqe_refill_th)) {
835 netif_wake_queue(pr->port->netdev);
836 pr->queue_stopped = 0;
837 }
838 spin_unlock_irqrestore(&pr->netif_queue, flags);
839
18604c54 840 return cqe;
7a291083
JBT
841}
842
18604c54 843#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
bea3348e 844#define EHEA_POLL_MAX_CQES 65535
18604c54 845
bea3348e 846static int ehea_poll(struct napi_struct *napi, int budget)
7a291083 847{
508d2b5d
DM
848 struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
849 napi);
bea3348e 850 struct net_device *dev = pr->port->netdev;
18604c54
JBT
851 struct ehea_cqe *cqe;
852 struct ehea_cqe *cqe_skb = NULL;
853 int force_irq, wqe_index;
bea3348e 854 int rx = 0;
18604c54
JBT
855
856 force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
bea3348e
SH
857 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
858
859 if (!force_irq)
860 rx += ehea_proc_rwqes(dev, pr, budget - rx);
18604c54 861
bea3348e 862 while ((rx != budget) || force_irq) {
18604c54 863 pr->poll_counter = 0;
bea3348e 864 force_irq = 0;
288379f0 865 napi_complete(napi);
18604c54
JBT
866 ehea_reset_cq_ep(pr->recv_cq);
867 ehea_reset_cq_ep(pr->send_cq);
868 ehea_reset_cq_n1(pr->recv_cq);
869 ehea_reset_cq_n1(pr->send_cq);
870 cqe = ehea_poll_rq1(pr->qp, &wqe_index);
871 cqe_skb = ehea_poll_cq(pr->send_cq);
872
e542aa6b 873 if (!cqe && !cqe_skb)
bea3348e 874 return rx;
18604c54 875
288379f0 876 if (!napi_reschedule(napi))
bea3348e 877 return rx;
18604c54 878
bea3348e
SH
879 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
880 rx += ehea_proc_rwqes(dev, pr, budget - rx);
881 }
e542aa6b 882
bea3348e
SH
883 pr->poll_counter++;
884 return rx;
7a291083
JBT
885}
886
8d22c971
JBT
887#ifdef CONFIG_NET_POLL_CONTROLLER
888static void ehea_netpoll(struct net_device *dev)
889{
890 struct ehea_port *port = netdev_priv(dev);
bea3348e 891 int i;
8d22c971 892
bea3348e 893 for (i = 0; i < port->num_def_qps; i++)
288379f0 894 napi_schedule(&port->port_res[i].napi);
8d22c971
JBT
895}
896#endif
897
7d12e780 898static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
7a291083
JBT
899{
900 struct ehea_port_res *pr = param;
18604c54 901
288379f0 902 napi_schedule(&pr->napi);
18604c54 903
7a291083
JBT
904 return IRQ_HANDLED;
905}
906
7d12e780 907static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
7a291083
JBT
908{
909 struct ehea_port *port = param;
910 struct ehea_eqe *eqe;
d2db9eea 911 struct ehea_qp *qp;
7a291083 912 u32 qp_token;
ea96ceac
TK
913 u64 resource_type, aer, aerr;
914 int reset_port = 0;
7a291083
JBT
915
916 eqe = ehea_poll_eq(port->qp_eq);
bb3a6449 917
7a291083 918 while (eqe) {
7a291083 919 qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
a1c5a893 920 ehea_error("QP aff_err: entry=0x%llx, token=0x%x",
bb3a6449 921 eqe->entry, qp_token);
d2db9eea
JBT
922
923 qp = port->port_res[qp_token].qp;
ea96ceac
TK
924
925 resource_type = ehea_error_data(port->adapter, qp->fw_handle,
926 &aer, &aerr);
927
928 if (resource_type == EHEA_AER_RESTYPE_QP) {
929 if ((aer & EHEA_AER_RESET_MASK) ||
930 (aerr & EHEA_AERR_RESET_MASK))
931 reset_port = 1;
932 } else
933 reset_port = 1; /* Reset in case of CQ or EQ error */
934
bb3a6449 935 eqe = ehea_poll_eq(port->qp_eq);
7a291083
JBT
936 }
937
ea96ceac
TK
938 if (reset_port) {
939 ehea_error("Resetting port");
940 ehea_schedule_port_reset(port);
941 }
d2db9eea 942
7a291083
JBT
943 return IRQ_HANDLED;
944}
945
946static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
947 int logical_port)
948{
949 int i;
950
1acf2318 951 for (i = 0; i < EHEA_MAX_PORTS; i++)
41b69c70 952 if (adapter->port[i])
d1d25aab 953 if (adapter->port[i]->logical_port_id == logical_port)
41b69c70 954 return adapter->port[i];
7a291083
JBT
955 return NULL;
956}
957
958int ehea_sense_port_attr(struct ehea_port *port)
959{
960 int ret;
961 u64 hret;
962 struct hcp_ehea_port_cb0 *cb0;
963
508d2b5d 964 /* may be called via ehea_neq_tasklet() */
3faf2693 965 cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
508d2b5d 966 if (!cb0) {
7a291083
JBT
967 ehea_error("no mem for cb0");
968 ret = -ENOMEM;
969 goto out;
970 }
971
972 hret = ehea_h_query_ehea_port(port->adapter->handle,
973 port->logical_port_id, H_PORT_CB0,
974 EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
975 cb0);
976 if (hret != H_SUCCESS) {
977 ret = -EIO;
978 goto out_free;
979 }
980
981 /* MAC address */
982 port->mac_addr = cb0->port_mac_addr << 16;
983
508d2b5d 984 if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
7a291083
JBT
985 ret = -EADDRNOTAVAIL;
986 goto out_free;
987 }
988
989 /* Port speed */
990 switch (cb0->port_speed) {
991 case H_SPEED_10M_H:
992 port->port_speed = EHEA_SPEED_10M;
993 port->full_duplex = 0;
994 break;
995 case H_SPEED_10M_F:
996 port->port_speed = EHEA_SPEED_10M;
997 port->full_duplex = 1;
998 break;
999 case H_SPEED_100M_H:
1000 port->port_speed = EHEA_SPEED_100M;
1001 port->full_duplex = 0;
1002 break;
1003 case H_SPEED_100M_F:
1004 port->port_speed = EHEA_SPEED_100M;
1005 port->full_duplex = 1;
1006 break;
1007 case H_SPEED_1G_F:
1008 port->port_speed = EHEA_SPEED_1G;
1009 port->full_duplex = 1;
1010 break;
1011 case H_SPEED_10G_F:
1012 port->port_speed = EHEA_SPEED_10G;
1013 port->full_duplex = 1;
1014 break;
1015 default:
1016 port->port_speed = 0;
1017 port->full_duplex = 0;
1018 break;
1019 }
1020
e919b593 1021 port->autoneg = 1;
18604c54 1022 port->num_mcs = cb0->num_default_qps;
e919b593 1023
7a291083 1024 /* Number of default QPs */
18604c54
JBT
1025 if (use_mcs)
1026 port->num_def_qps = cb0->num_default_qps;
1027 else
1028 port->num_def_qps = 1;
7a291083
JBT
1029
1030 if (!port->num_def_qps) {
1031 ret = -EINVAL;
1032 goto out_free;
1033 }
1034
18604c54
JBT
1035 port->num_tx_qps = num_tx_qps;
1036
1037 if (port->num_def_qps >= port->num_tx_qps)
7a291083
JBT
1038 port->num_add_tx_qps = 0;
1039 else
18604c54 1040 port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;
7a291083
JBT
1041
1042 ret = 0;
1043out_free:
1044 if (ret || netif_msg_probe(port))
1045 ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
3faf2693 1046 free_page((unsigned long)cb0);
7a291083
JBT
1047out:
1048 return ret;
1049}
1050
1051int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
1052{
1053 struct hcp_ehea_port_cb4 *cb4;
1054 u64 hret;
1055 int ret = 0;
1056
3faf2693 1057 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
7a291083
JBT
1058 if (!cb4) {
1059 ehea_error("no mem for cb4");
1060 ret = -ENOMEM;
1061 goto out;
1062 }
1063
1064 cb4->port_speed = port_speed;
1065
1066 netif_carrier_off(port->netdev);
1067
1068 hret = ehea_h_modify_ehea_port(port->adapter->handle,
1069 port->logical_port_id,
1070 H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
1071 if (hret == H_SUCCESS) {
1072 port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;
1073
1074 hret = ehea_h_query_ehea_port(port->adapter->handle,
1075 port->logical_port_id,
1076 H_PORT_CB4, H_PORT_CB4_SPEED,
1077 cb4);
1078 if (hret == H_SUCCESS) {
1079 switch (cb4->port_speed) {
1080 case H_SPEED_10M_H:
1081 port->port_speed = EHEA_SPEED_10M;
1082 port->full_duplex = 0;
1083 break;
1084 case H_SPEED_10M_F:
1085 port->port_speed = EHEA_SPEED_10M;
1086 port->full_duplex = 1;
1087 break;
1088 case H_SPEED_100M_H:
1089 port->port_speed = EHEA_SPEED_100M;
1090 port->full_duplex = 0;
1091 break;
1092 case H_SPEED_100M_F:
1093 port->port_speed = EHEA_SPEED_100M;
1094 port->full_duplex = 1;
1095 break;
1096 case H_SPEED_1G_F:
1097 port->port_speed = EHEA_SPEED_1G;
1098 port->full_duplex = 1;
1099 break;
1100 case H_SPEED_10G_F:
1101 port->port_speed = EHEA_SPEED_10G;
1102 port->full_duplex = 1;
1103 break;
1104 default:
1105 port->port_speed = 0;
1106 port->full_duplex = 0;
1107 break;
1108 }
1109 } else {
1110 ehea_error("Failed sensing port speed");
1111 ret = -EIO;
1112 }
1113 } else {
1114 if (hret == H_AUTHORITY) {
7674a588 1115 ehea_info("Hypervisor denied setting port speed");
7a291083
JBT
1116 ret = -EPERM;
1117 } else {
1118 ret = -EIO;
1119 ehea_error("Failed setting port speed");
1120 }
1121 }
8759cf76
JBT
1122 if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
1123 netif_carrier_on(port->netdev);
1124
3faf2693 1125 free_page((unsigned long)cb4);
7a291083
JBT
1126out:
1127 return ret;
1128}
1129
1130static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
1131{
1132 int ret;
1133 u8 ec;
1134 u8 portnum;
1135 struct ehea_port *port;
1136
1137 ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
1138 portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
1139 port = ehea_get_port(adapter, portnum);
1140
1141 switch (ec) {
1142 case EHEA_EC_PORTSTATE_CHG: /* port state change */
1143
1144 if (!port) {
1145 ehea_error("unknown portnum %x", portnum);
1146 break;
1147 }
1148
1149 if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
1150 if (!netif_carrier_ok(port->netdev)) {
1e1675cc 1151 ret = ehea_sense_port_attr(port);
7a291083
JBT
1152 if (ret) {
1153 ehea_error("failed resensing port "
1154 "attributes");
1155 break;
1156 }
1157
1158 if (netif_msg_link(port))
1159 ehea_info("%s: Logical port up: %dMbps "
1160 "%s Duplex",
1161 port->netdev->name,
1162 port->port_speed,
1163 port->full_duplex ==
1164 1 ? "Full" : "Half");
1165
1166 netif_carrier_on(port->netdev);
1167 netif_wake_queue(port->netdev);
1168 }
1169 } else
1170 if (netif_carrier_ok(port->netdev)) {
1171 if (netif_msg_link(port))
1172 ehea_info("%s: Logical port down",
1173 port->netdev->name);
1174 netif_carrier_off(port->netdev);
1175 netif_stop_queue(port->netdev);
1176 }
1177
1178 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
8759cf76 1179 port->phy_link = EHEA_PHY_LINK_UP;
7a291083
JBT
1180 if (netif_msg_link(port))
1181 ehea_info("%s: Physical port up",
1182 port->netdev->name);
8759cf76
JBT
1183 if (prop_carrier_state)
1184 netif_carrier_on(port->netdev);
7a291083 1185 } else {
8759cf76 1186 port->phy_link = EHEA_PHY_LINK_DOWN;
7a291083
JBT
1187 if (netif_msg_link(port))
1188 ehea_info("%s: Physical port down",
1189 port->netdev->name);
8759cf76
JBT
1190 if (prop_carrier_state)
1191 netif_carrier_off(port->netdev);
7a291083
JBT
1192 }
1193
1194 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
1195 ehea_info("External switch port is primary port");
1196 else
1197 ehea_info("External switch port is backup port");
1198
1199 break;
1200 case EHEA_EC_ADAPTER_MALFUNC:
1201 ehea_error("Adapter malfunction");
1202 break;
1203 case EHEA_EC_PORT_MALFUNC:
1204 ehea_info("Port malfunction: Device: %s", port->netdev->name);
1205 netif_carrier_off(port->netdev);
1206 netif_stop_queue(port->netdev);
1207 break;
1208 default:
a1c5a893 1209 ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe);
7a291083
JBT
1210 break;
1211 }
1212}
1213
1214static void ehea_neq_tasklet(unsigned long data)
1215{
508d2b5d 1216 struct ehea_adapter *adapter = (struct ehea_adapter *)data;
7a291083
JBT
1217 struct ehea_eqe *eqe;
1218 u64 event_mask;
1219
1220 eqe = ehea_poll_eq(adapter->neq);
1221 ehea_debug("eqe=%p", eqe);
1222
1223 while (eqe) {
1224 ehea_debug("*eqe=%lx", eqe->entry);
1225 ehea_parse_eqe(adapter, eqe->entry);
1226 eqe = ehea_poll_eq(adapter->neq);
1227 ehea_debug("next eqe=%p", eqe);
1228 }
1229
1230 event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
1231 | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
1232 | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);
1233
1234 ehea_h_reset_events(adapter->handle,
1235 adapter->neq->fw_handle, event_mask);
1236}
1237
7d12e780 1238static irqreturn_t ehea_interrupt_neq(int irq, void *param)
7a291083
JBT
1239{
1240 struct ehea_adapter *adapter = param;
1241 tasklet_hi_schedule(&adapter->neq_tasklet);
1242 return IRQ_HANDLED;
1243}
1244
1245
1246static int ehea_fill_port_res(struct ehea_port_res *pr)
1247{
1248 int ret;
1249 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
1250
e2878806
TK
1251 ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
1252 - init_attr->act_nr_rwqes_rq2
1253 - init_attr->act_nr_rwqes_rq3 - 1);
7a291083 1254
e2878806 1255 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
7a291083
JBT
1256
1257 ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);
1258
1259 return ret;
1260}
1261
1262static int ehea_reg_interrupts(struct net_device *dev)
1263{
1264 struct ehea_port *port = netdev_priv(dev);
1265 struct ehea_port_res *pr;
1266 int i, ret;
1267
7a291083
JBT
1268
1269 snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
1270 dev->name);
1271
6b08f3ae 1272 ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
7a291083 1273 ehea_qp_aff_irq_handler,
38515e90 1274 IRQF_DISABLED, port->int_aff_name, port);
7a291083
JBT
1275 if (ret) {
1276 ehea_error("failed registering irq for qp_aff_irq_handler:"
1277 "ist=%X", port->qp_eq->attr.ist1);
1278 goto out_free_qpeq;
1279 }
1280
1281 if (netif_msg_ifup(port))
1282 ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
1283 "registered", port->qp_eq->attr.ist1);
1284
18604c54 1285
7a291083
JBT
1286 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
1287 pr = &port->port_res[i];
1288 snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
18604c54 1289 "%s-queue%d", dev->name, i);
6b08f3ae 1290 ret = ibmebus_request_irq(pr->eq->attr.ist1,
18604c54 1291 ehea_recv_irq_handler,
38515e90 1292 IRQF_DISABLED, pr->int_send_name,
7a291083
JBT
1293 pr);
1294 if (ret) {
18604c54 1295 ehea_error("failed registering irq for ehea_queue "
7a291083 1296 "port_res_nr:%d, ist=%X", i,
18604c54 1297 pr->eq->attr.ist1);
7a291083
JBT
1298 goto out_free_req;
1299 }
1300 if (netif_msg_ifup(port))
18604c54
JBT
1301 ehea_info("irq_handle 0x%X for function ehea_queue_int "
1302 "%d registered", pr->eq->attr.ist1, i);
7a291083
JBT
1303 }
1304out:
1305 return ret;
1306
18604c54 1307
7a291083
JBT
1308out_free_req:
1309 while (--i >= 0) {
18604c54 1310 u32 ist = port->port_res[i].eq->attr.ist1;
6b08f3ae 1311 ibmebus_free_irq(ist, &port->port_res[i]);
7a291083 1312 }
18604c54 1313
7a291083 1314out_free_qpeq:
6b08f3ae 1315 ibmebus_free_irq(port->qp_eq->attr.ist1, port);
7a291083 1316 i = port->num_def_qps;
18604c54 1317
7a291083 1318 goto out;
18604c54 1319
7a291083
JBT
1320}
1321
1322static void ehea_free_interrupts(struct net_device *dev)
1323{
1324 struct ehea_port *port = netdev_priv(dev);
1325 struct ehea_port_res *pr;
1326 int i;
1327
1328 /* send */
18604c54 1329
7a291083
JBT
1330 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
1331 pr = &port->port_res[i];
6b08f3ae 1332 ibmebus_free_irq(pr->eq->attr.ist1, pr);
7a291083
JBT
1333 if (netif_msg_intr(port))
1334 ehea_info("free send irq for res %d with handle 0x%X",
18604c54 1335 i, pr->eq->attr.ist1);
7a291083
JBT
1336 }
1337
1338 /* associated events */
6b08f3ae 1339 ibmebus_free_irq(port->qp_eq->attr.ist1, port);
7a291083
JBT
1340 if (netif_msg_intr(port))
1341 ehea_info("associated event interrupt for handle 0x%X freed",
1342 port->qp_eq->attr.ist1);
1343}
1344
1345static int ehea_configure_port(struct ehea_port *port)
1346{
1347 int ret, i;
1348 u64 hret, mask;
1349 struct hcp_ehea_port_cb0 *cb0;
1350
1351 ret = -ENOMEM;
3faf2693 1352 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
7a291083
JBT
1353 if (!cb0)
1354 goto out;
1355
1356 cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
1357 | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
1358 | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
1359 | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
1360 | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
1361 PXLY_RC_VLAN_FILTER)
1362 | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);
1363
18604c54
JBT
1364 for (i = 0; i < port->num_mcs; i++)
1365 if (use_mcs)
1366 cb0->default_qpn_arr[i] =
1367 port->port_res[i].qp->init_attr.qp_nr;
1368 else
1369 cb0->default_qpn_arr[i] =
1370 port->port_res[0].qp->init_attr.qp_nr;
e542aa6b 1371
7a291083
JBT
1372 if (netif_msg_ifup(port))
1373 ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");
1374
1375 mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
1376 | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);
1377
1378 hret = ehea_h_modify_ehea_port(port->adapter->handle,
1379 port->logical_port_id,
1380 H_PORT_CB0, mask, cb0);
1381 ret = -EIO;
1382 if (hret != H_SUCCESS)
1383 goto out_free;
1384
1385 ret = 0;
1386
1387out_free:
3faf2693 1388 free_page((unsigned long)cb0);
7a291083
JBT
1389out:
1390 return ret;
1391}
1392
e542aa6b 1393int ehea_gen_smrs(struct ehea_port_res *pr)
7a291083 1394{
e542aa6b 1395 int ret;
7a291083
JBT
1396 struct ehea_adapter *adapter = pr->port->adapter;
1397
e542aa6b
JBT
1398 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
1399 if (ret)
7a291083
JBT
1400 goto out;
1401
e542aa6b
JBT
1402 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
1403 if (ret)
1404 goto out_free;
7a291083
JBT
1405
1406 return 0;
1407
e542aa6b
JBT
1408out_free:
1409 ehea_rem_mr(&pr->send_mr);
7a291083 1410out:
e542aa6b 1411 ehea_error("Generating SMRS failed\n");
7a291083
JBT
1412 return -EIO;
1413}
1414
e542aa6b 1415int ehea_rem_smrs(struct ehea_port_res *pr)
7a291083 1416{
8e95a202
JP
1417 if ((ehea_rem_mr(&pr->send_mr)) ||
1418 (ehea_rem_mr(&pr->recv_mr)))
e542aa6b
JBT
1419 return -EIO;
1420 else
1421 return 0;
7a291083
JBT
1422}
1423
1424static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
1425{
508d2b5d 1426 int arr_size = sizeof(void *) * max_q_entries;
7a291083
JBT
1427
1428 q_skba->arr = vmalloc(arr_size);
1429 if (!q_skba->arr)
1430 return -ENOMEM;
1431
1432 memset(q_skba->arr, 0, arr_size);
1433
1434 q_skba->len = max_q_entries;
1435 q_skba->index = 0;
1436 q_skba->os_skbs = 0;
1437
1438 return 0;
1439}
1440
1441static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1442 struct port_res_cfg *pr_cfg, int queue_token)
1443{
1444 struct ehea_adapter *adapter = port->adapter;
1445 enum ehea_eq_type eq_type = EHEA_EQ;
1446 struct ehea_qp_init_attr *init_attr = NULL;
1447 int ret = -EIO;
1448
1449 memset(pr, 0, sizeof(struct ehea_port_res));
1450
1451 pr->port = port;
7a291083
JBT
1452 spin_lock_init(&pr->xmit_lock);
1453 spin_lock_init(&pr->netif_queue);
1454
18604c54
JBT
1455 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
1456 if (!pr->eq) {
1457 ehea_error("create_eq failed (eq)");
7a291083
JBT
1458 goto out_free;
1459 }
1460
1461 pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
18604c54 1462 pr->eq->fw_handle,
7a291083
JBT
1463 port->logical_port_id);
1464 if (!pr->recv_cq) {
1465 ehea_error("create_cq failed (cq_recv)");
1466 goto out_free;
1467 }
1468
1469 pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
18604c54 1470 pr->eq->fw_handle,
7a291083
JBT
1471 port->logical_port_id);
1472 if (!pr->send_cq) {
1473 ehea_error("create_cq failed (cq_send)");
1474 goto out_free;
1475 }
1476
1477 if (netif_msg_ifup(port))
1478 ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
1479 pr->send_cq->attr.act_nr_of_cqes,
1480 pr->recv_cq->attr.act_nr_of_cqes);
1481
1482 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
1483 if (!init_attr) {
1484 ret = -ENOMEM;
1485 ehea_error("no mem for ehea_qp_init_attr");
1486 goto out_free;
1487 }
1488
1489 init_attr->low_lat_rq1 = 1;
1490 init_attr->signalingtype = 1; /* generate CQE if specified in WQE */
1491 init_attr->rq_count = 3;
1492 init_attr->qp_token = queue_token;
1493 init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
1494 init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
1495 init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
1496 init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
1497 init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
1498 init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
1499 init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
1500 init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
1501 init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
1502 init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
1503 init_attr->port_nr = port->logical_port_id;
1504 init_attr->send_cq_handle = pr->send_cq->fw_handle;
1505 init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
1506 init_attr->aff_eq_handle = port->qp_eq->fw_handle;
1507
1508 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
1509 if (!pr->qp) {
1510 ehea_error("create_qp failed");
1511 ret = -EIO;
1512 goto out_free;
1513 }
1514
1515 if (netif_msg_ifup(port))
1516 ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
1517 "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
1518 init_attr->act_nr_send_wqes,
1519 init_attr->act_nr_rwqes_rq1,
1520 init_attr->act_nr_rwqes_rq2,
1521 init_attr->act_nr_rwqes_rq3);
1522
44fb3126
TK
1523 pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
1524
1525 ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
7a291083
JBT
1526 ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
1527 ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
1528 ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
1529 if (ret)
1530 goto out_free;
1531
1532 pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
1533 if (ehea_gen_smrs(pr) != 0) {
1534 ret = -EIO;
1535 goto out_free;
1536 }
18604c54 1537
7a291083
JBT
1538 atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);
1539
1540 kfree(init_attr);
18604c54 1541
bea3348e 1542 netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
18604c54 1543
d4dc4ec9
JBT
1544 pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
1545 pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
1546 pr->lro_mgr.lro_arr = pr->lro_desc;
1547 pr->lro_mgr.get_skb_header = get_skb_hdr;
1548 pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
1549 pr->lro_mgr.dev = port->netdev;
1550 pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1551 pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1552
7a291083
JBT
1553 ret = 0;
1554 goto out;
1555
1556out_free:
1557 kfree(init_attr);
1558 vfree(pr->sq_skba.arr);
1559 vfree(pr->rq1_skba.arr);
1560 vfree(pr->rq2_skba.arr);
1561 vfree(pr->rq3_skba.arr);
1562 ehea_destroy_qp(pr->qp);
1563 ehea_destroy_cq(pr->send_cq);
1564 ehea_destroy_cq(pr->recv_cq);
18604c54 1565 ehea_destroy_eq(pr->eq);
7a291083
JBT
1566out:
1567 return ret;
1568}
1569
1570static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
1571{
1572 int ret, i;
1573
357eb46d
HH
1574 if (pr->qp)
1575 netif_napi_del(&pr->napi);
1576
7a291083
JBT
1577 ret = ehea_destroy_qp(pr->qp);
1578
1579 if (!ret) {
1580 ehea_destroy_cq(pr->send_cq);
1581 ehea_destroy_cq(pr->recv_cq);
18604c54 1582 ehea_destroy_eq(pr->eq);
7a291083
JBT
1583
1584 for (i = 0; i < pr->rq1_skba.len; i++)
1585 if (pr->rq1_skba.arr[i])
1586 dev_kfree_skb(pr->rq1_skba.arr[i]);
1587
1588 for (i = 0; i < pr->rq2_skba.len; i++)
1589 if (pr->rq2_skba.arr[i])
1590 dev_kfree_skb(pr->rq2_skba.arr[i]);
1591
1592 for (i = 0; i < pr->rq3_skba.len; i++)
1593 if (pr->rq3_skba.arr[i])
1594 dev_kfree_skb(pr->rq3_skba.arr[i]);
1595
1596 for (i = 0; i < pr->sq_skba.len; i++)
1597 if (pr->sq_skba.arr[i])
1598 dev_kfree_skb(pr->sq_skba.arr[i]);
1599
1600 vfree(pr->rq1_skba.arr);
1601 vfree(pr->rq2_skba.arr);
1602 vfree(pr->rq3_skba.arr);
1603 vfree(pr->sq_skba.arr);
1604 ret = ehea_rem_smrs(pr);
1605 }
1606 return ret;
1607}
1608
1609/*
1610 * The write_* functions store information in swqe which is used by
1611 * the hardware to calculate the ip/tcp/udp checksum
1612 */
1613
1614static inline void write_ip_start_end(struct ehea_swqe *swqe,
1615 const struct sk_buff *skb)
1616{
eddc9ec5 1617 swqe->ip_start = skb_network_offset(skb);
c9bdd4b5 1618 swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
7a291083
JBT
1619}
1620
1621static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
1622 const struct sk_buff *skb)
1623{
1624 swqe->tcp_offset =
1625 (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));
1626
1627 swqe->tcp_end = (u16)skb->len - 1;
1628}
1629
1630static inline void write_udp_offset_end(struct ehea_swqe *swqe,
1631 const struct sk_buff *skb)
1632{
1633 swqe->tcp_offset =
1634 (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));
1635
1636 swqe->tcp_end = (u16)skb->len - 1;
1637}
1638
1639
1640static void write_swqe2_TSO(struct sk_buff *skb,
1641 struct ehea_swqe *swqe, u32 lkey)
1642{
1643 struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
1644 u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
e743d313 1645 int skb_data_size = skb_headlen(skb);
7a291083 1646 int headersize;
7a291083
JBT
1647
1648 /* Packet is TCP with TSO enabled */
1649 swqe->tx_control |= EHEA_SWQE_TSO;
1650 swqe->mss = skb_shinfo(skb)->gso_size;
1651 /* copy only eth/ip/tcp headers to immediate data and
1652 * the rest of skb->data to sg1entry
1653 */
ab6a5bb6 1654 headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
7a291083 1655
e743d313 1656 skb_data_size = skb_headlen(skb);
7a291083
JBT
1657
1658 if (skb_data_size >= headersize) {
1659 /* copy immediate data */
d626f62b 1660 skb_copy_from_linear_data(skb, imm_data, headersize);
7a291083
JBT
1661 swqe->immediate_data_length = headersize;
1662
1663 if (skb_data_size > headersize) {
1664 /* set sg1entry data */
1665 sg1entry->l_key = lkey;
1666 sg1entry->len = skb_data_size - headersize;
44a5b3d5
TK
1667 sg1entry->vaddr =
1668 ehea_map_vaddr(skb->data + headersize);
7a291083
JBT
1669 swqe->descriptors++;
1670 }
1671 } else
1672 ehea_error("cannot handle fragmented headers");
1673}
1674
1675static void write_swqe2_nonTSO(struct sk_buff *skb,
1676 struct ehea_swqe *swqe, u32 lkey)
1677{
e743d313 1678 int skb_data_size = skb_headlen(skb);
7a291083
JBT
1679 u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
1680 struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
7a291083
JBT
1681
1682 /* Packet is any nonTSO type
1683 *
1684 * Copy as much as possible skb->data to immediate data and
1685 * the rest to sg1entry
1686 */
1687 if (skb_data_size >= SWQE2_MAX_IMM) {
1688 /* copy immediate data */
d626f62b 1689 skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);
7a291083
JBT
1690
1691 swqe->immediate_data_length = SWQE2_MAX_IMM;
1692
1693 if (skb_data_size > SWQE2_MAX_IMM) {
1694 /* copy sg1entry data */
1695 sg1entry->l_key = lkey;
1696 sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
44a5b3d5
TK
1697 sg1entry->vaddr =
1698 ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
7a291083
JBT
1699 swqe->descriptors++;
1700 }
1701 } else {
d626f62b 1702 skb_copy_from_linear_data(skb, imm_data, skb_data_size);
7a291083
JBT
1703 swqe->immediate_data_length = skb_data_size;
1704 }
1705}
1706
1707static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
1708 struct ehea_swqe *swqe, u32 lkey)
1709{
1710 struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
1711 skb_frag_t *frag;
1712 int nfrags, sg1entry_contains_frag_data, i;
7a291083
JBT
1713
1714 nfrags = skb_shinfo(skb)->nr_frags;
1715 sg1entry = &swqe->u.immdata_desc.sg_entry;
508d2b5d 1716 sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
7a291083
JBT
1717 swqe->descriptors = 0;
1718 sg1entry_contains_frag_data = 0;
1719
1720 if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
1721 write_swqe2_TSO(skb, swqe, lkey);
1722 else
1723 write_swqe2_nonTSO(skb, swqe, lkey);
1724
1725 /* write descriptors */
1726 if (nfrags > 0) {
1727 if (swqe->descriptors == 0) {
1728 /* sg1entry not yet used */
1729 frag = &skb_shinfo(skb)->frags[0];
1730
1731 /* copy sg1entry data */
1732 sg1entry->l_key = lkey;
1733 sg1entry->len = frag->size;
44a5b3d5
TK
1734 sg1entry->vaddr =
1735 ehea_map_vaddr(page_address(frag->page)
1736 + frag->page_offset);
7a291083
JBT
1737 swqe->descriptors++;
1738 sg1entry_contains_frag_data = 1;
1739 }
1740
1741 for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
1742
1743 frag = &skb_shinfo(skb)->frags[i];
1744 sgentry = &sg_list[i - sg1entry_contains_frag_data];
1745
1746 sgentry->l_key = lkey;
1747 sgentry->len = frag->size;
44a5b3d5
TK
1748 sgentry->vaddr =
1749 ehea_map_vaddr(page_address(frag->page)
1750 + frag->page_offset);
7a291083
JBT
1751 swqe->descriptors++;
1752 }
1753 }
1754}
1755
1756static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1757{
1758 int ret = 0;
1759 u64 hret;
1760 u8 reg_type;
1761
1762 /* De/Register untagged packets */
1763 reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
1764 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1765 port->logical_port_id,
1766 reg_type, port->mac_addr, 0, hcallid);
1767 if (hret != H_SUCCESS) {
f9e29228 1768 ehea_error("%sregistering bc address failed (tagged)",
508d2b5d 1769 hcallid == H_REG_BCMC ? "" : "de");
7a291083
JBT
1770 ret = -EIO;
1771 goto out_herr;
1772 }
1773
1774 /* De/Register VLAN packets */
1775 reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
1776 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1777 port->logical_port_id,
1778 reg_type, port->mac_addr, 0, hcallid);
1779 if (hret != H_SUCCESS) {
f9e29228
TK
1780 ehea_error("%sregistering bc address failed (vlan)",
1781 hcallid == H_REG_BCMC ? "" : "de");
7a291083
JBT
1782 ret = -EIO;
1783 }
1784out_herr:
1785 return ret;
1786}
1787
1788static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1789{
1790 struct ehea_port *port = netdev_priv(dev);
1791 struct sockaddr *mac_addr = sa;
1792 struct hcp_ehea_port_cb0 *cb0;
1793 int ret;
1794 u64 hret;
1795
1796 if (!is_valid_ether_addr(mac_addr->sa_data)) {
1797 ret = -EADDRNOTAVAIL;
1798 goto out;
1799 }
1800
3faf2693 1801 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
7a291083
JBT
1802 if (!cb0) {
1803 ehea_error("no mem for cb0");
1804 ret = -ENOMEM;
1805 goto out;
1806 }
1807
1808 memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);
1809
1810 cb0->port_mac_addr = cb0->port_mac_addr >> 16;
1811
1812 hret = ehea_h_modify_ehea_port(port->adapter->handle,
1813 port->logical_port_id, H_PORT_CB0,
1814 EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
1815 if (hret != H_SUCCESS) {
1816 ret = -EIO;
1817 goto out_free;
1818 }
1819
1820 memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
1821
1822 /* Deregister old MAC in pHYP */
00aaea2f
JBT
1823 if (port->state == EHEA_PORT_UP) {
1824 ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
1825 if (ret)
1826 goto out_upregs;
1827 }
7a291083
JBT
1828
1829 port->mac_addr = cb0->port_mac_addr << 16;
1830
1831 /* Register new MAC in pHYP */
00aaea2f
JBT
1832 if (port->state == EHEA_PORT_UP) {
1833 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
1834 if (ret)
1835 goto out_upregs;
1836 }
7a291083
JBT
1837
1838 ret = 0;
21eee2dd
TK
1839
1840out_upregs:
1841 ehea_update_bcmc_registrations();
7a291083 1842out_free:
3faf2693 1843 free_page((unsigned long)cb0);
7a291083
JBT
1844out:
1845 return ret;
1846}
1847
1848static void ehea_promiscuous_error(u64 hret, int enable)
1849{
7674a588
TK
1850 if (hret == H_AUTHORITY)
1851 ehea_info("Hypervisor denied %sabling promiscuous mode",
1852 enable == 1 ? "en" : "dis");
1853 else
1854 ehea_error("failed %sabling promiscuous mode",
1855 enable == 1 ? "en" : "dis");
7a291083
JBT
1856}
1857
1858static void ehea_promiscuous(struct net_device *dev, int enable)
1859{
1860 struct ehea_port *port = netdev_priv(dev);
1861 struct hcp_ehea_port_cb7 *cb7;
1862 u64 hret;
1863
1864 if ((enable && port->promisc) || (!enable && !port->promisc))
1865 return;
1866
3faf2693 1867 cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
7a291083
JBT
1868 if (!cb7) {
1869 ehea_error("no mem for cb7");
1870 goto out;
1871 }
1872
1873 /* Modify Pxs_DUCQPN in CB7 */
1874 cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;
1875
1876 hret = ehea_h_modify_ehea_port(port->adapter->handle,
1877 port->logical_port_id,
1878 H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
1879 if (hret) {
1880 ehea_promiscuous_error(hret, enable);
1881 goto out;
1882 }
1883
1884 port->promisc = enable;
1885out:
3faf2693 1886 free_page((unsigned long)cb7);
7a291083
JBT
1887}
1888
1889static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1890 u32 hcallid)
1891{
1892 u64 hret;
1893 u8 reg_type;
1894
1895 reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1896 | EHEA_BCMC_UNTAGGED;
1897
1898 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1899 port->logical_port_id,
1900 reg_type, mc_mac_addr, 0, hcallid);
1901 if (hret)
1902 goto out;
1903
1904 reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1905 | EHEA_BCMC_VLANID_ALL;
1906
1907 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1908 port->logical_port_id,
1909 reg_type, mc_mac_addr, 0, hcallid);
1910out:
1911 return hret;
1912}
1913
1914static int ehea_drop_multicast_list(struct net_device *dev)
1915{
1916 struct ehea_port *port = netdev_priv(dev);
1917 struct ehea_mc_list *mc_entry = port->mc_list;
1918 struct list_head *pos;
1919 struct list_head *temp;
1920 int ret = 0;
1921 u64 hret;
1922
1923 list_for_each_safe(pos, temp, &(port->mc_list->list)) {
1924 mc_entry = list_entry(pos, struct ehea_mc_list, list);
1925
1926 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
1927 H_DEREG_BCMC);
1928 if (hret) {
1929 ehea_error("failed deregistering mcast MAC");
1930 ret = -EIO;
1931 }
1932
1933 list_del(pos);
1934 kfree(mc_entry);
1935 }
1936 return ret;
1937}
1938
1939static void ehea_allmulti(struct net_device *dev, int enable)
1940{
1941 struct ehea_port *port = netdev_priv(dev);
1942 u64 hret;
1943
1944 if (!port->allmulti) {
1945 if (enable) {
1946 /* Enable ALLMULTI */
1947 ehea_drop_multicast_list(dev);
1948 hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
1949 if (!hret)
1950 port->allmulti = 1;
1951 else
1952 ehea_error("failed enabling IFF_ALLMULTI");
1953 }
1954 } else
1955 if (!enable) {
1956 /* Disable ALLMULTI */
1957 hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
1958 if (!hret)
1959 port->allmulti = 0;
1960 else
1961 ehea_error("failed disabling IFF_ALLMULTI");
1962 }
1963}
1964
508d2b5d 1965static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
7a291083
JBT
1966{
1967 struct ehea_mc_list *ehea_mcl_entry;
1968 u64 hret;
1969
1e1675cc 1970 ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
7a291083
JBT
1971 if (!ehea_mcl_entry) {
1972 ehea_error("no mem for mcl_entry");
1973 return;
1974 }
1975
1976 INIT_LIST_HEAD(&ehea_mcl_entry->list);
1977
1978 memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
1979
1980 hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
1981 H_REG_BCMC);
1982 if (!hret)
1983 list_add(&ehea_mcl_entry->list, &port->mc_list->list);
1984 else {
1985 ehea_error("failed registering mcast MAC");
1986 kfree(ehea_mcl_entry);
1987 }
1988}
1989
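/*
 * ndo_set_multicast_list handler. The rx mode is resolved in order:
 * IFF_PROMISC enables promiscuous mode, IFF_ALLMULTI enables the
 * catch-all multicast registration, otherwise the old multicast list
 * is dropped and each address is registered individually. If dropping
 * the old list fails, or the list exceeds the adapter's max_mc_mac
 * limit, the driver falls back to ALLMULTI.
 */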
1990static void ehea_set_multicast_list(struct net_device *dev)
1991{
1992 struct ehea_port *port = netdev_priv(dev);
22bedad3 1993 struct netdev_hw_addr *ha;
48e2f183 1994 int ret;
7a291083
JBT
1995
1996 if (dev->flags & IFF_PROMISC) {
1997 ehea_promiscuous(dev, 1);
1998 return;
1999 }
2000 ehea_promiscuous(dev, 0);
2001
2002 if (dev->flags & IFF_ALLMULTI) {
2003 ehea_allmulti(dev, 1);
21eee2dd 2004 goto out;
7a291083
JBT
2005 }
2006 ehea_allmulti(dev, 0);
2007
4cd24eaf 2008 if (!netdev_mc_empty(dev)) {
7a291083
JBT
2009 ret = ehea_drop_multicast_list(dev);
2010 if (ret) {
2011 /* Dropping the current multicast list failed.
2012 * Enabling ALL_MULTI is the best we can do.
2013 */
2014 ehea_allmulti(dev, 1);
2015 }
2016
4cd24eaf 2017 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
a1c5a893 2018 ehea_info("Mcast registration limit reached (0x%llx). "
7a291083
JBT
2019 "Use ALLMULTI!",
2020 port->adapter->max_mc_mac);
2021 goto out;
2022 }
2023
22bedad3
JP
2024 netdev_for_each_mc_addr(ha, dev)
2025 ehea_add_multicast_entry(port, ha->addr);
508d2b5d 2026
7a291083
JBT
2027 }
2028out:
21eee2dd 2029 ehea_update_bcmc_registrations();
7a291083
JBT
2030}
2031
2032static int ehea_change_mtu(struct net_device *dev, int new_mtu)
2033{
2034 if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
2035 return -EINVAL;
2036 dev->mtu = new_mtu;
2037 return 0;
2038}
2039
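/*
 * Build the two send WQE variants. ehea_xmit2() prepares an SWQE2 that
 * references the frame through descriptors (write_swqe2_data()) and
 * requests IP/TCP checksum insertion for IPv4, except for UDP fragments
 * whose checksum must be left untouched. ehea_xmit3() handles small
 * frames (up to SWQE3_MAX_IMM bytes) by copying the linear data and all
 * fragments into the WQE's immediate data area.
 */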
2040static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
2041 struct ehea_swqe *swqe, u32 lkey)
2042{
2043 if (skb->protocol == htons(ETH_P_IP)) {
eddc9ec5 2044 const struct iphdr *iph = ip_hdr(skb);
d1d25aab 2045
7a291083
JBT
2046 /* IPv4 */
2047 swqe->tx_control |= EHEA_SWQE_CRC
2048 | EHEA_SWQE_IP_CHECKSUM
2049 | EHEA_SWQE_TCP_CHECKSUM
2050 | EHEA_SWQE_IMM_DATA_PRESENT
2051 | EHEA_SWQE_DESCRIPTORS_PRESENT;
2052
2053 write_ip_start_end(swqe, skb);
2054
eddc9ec5 2055 if (iph->protocol == IPPROTO_UDP) {
8e95a202
JP
2056 if ((iph->frag_off & IP_MF) ||
2057 (iph->frag_off & IP_OFFSET))
7a291083
JBT
2058 /* IP fragment, so don't change cs */
2059 swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
2060 else
2061 write_udp_offset_end(swqe, skb);
eddc9ec5 2062 } else if (iph->protocol == IPPROTO_TCP) {
7a291083
JBT
2063 write_tcp_offset_end(swqe, skb);
2064 }
2065
2066 /* icmp (big data) and ip segmentation packets (all other ip
2067 packets) do not require any special handling */
2068
2069 } else {
2070 /* Other Ethernet Protocol */
2071 swqe->tx_control |= EHEA_SWQE_CRC
2072 | EHEA_SWQE_IMM_DATA_PRESENT
2073 | EHEA_SWQE_DESCRIPTORS_PRESENT;
2074 }
2075
2076 write_swqe2_data(skb, dev, swqe, lkey);
2077}
2078
2079static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
2080 struct ehea_swqe *swqe)
2081{
2082 int nfrags = skb_shinfo(skb)->nr_frags;
2083 u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
2084 skb_frag_t *frag;
2085 int i;
2086
2087 if (skb->protocol == htons(ETH_P_IP)) {
eddc9ec5 2088 const struct iphdr *iph = ip_hdr(skb);
d1d25aab 2089
7a291083
JBT
2090 /* IPv4 */
2091 write_ip_start_end(swqe, skb);
2092
eddc9ec5 2093 if (iph->protocol == IPPROTO_TCP) {
7a291083
JBT
2094 swqe->tx_control |= EHEA_SWQE_CRC
2095 | EHEA_SWQE_IP_CHECKSUM
2096 | EHEA_SWQE_TCP_CHECKSUM
2097 | EHEA_SWQE_IMM_DATA_PRESENT;
2098
2099 write_tcp_offset_end(swqe, skb);
2100
eddc9ec5 2101 } else if (iph->protocol == IPPROTO_UDP) {
8e95a202
JP
2102 if ((iph->frag_off & IP_MF) ||
2103 (iph->frag_off & IP_OFFSET))
7a291083
JBT
2104 /* IP fragment, so don't change cs */
2105 swqe->tx_control |= EHEA_SWQE_CRC
2106 | EHEA_SWQE_IMM_DATA_PRESENT;
2107 else {
2108 swqe->tx_control |= EHEA_SWQE_CRC
2109 | EHEA_SWQE_IP_CHECKSUM
2110 | EHEA_SWQE_TCP_CHECKSUM
2111 | EHEA_SWQE_IMM_DATA_PRESENT;
2112
2113 write_udp_offset_end(swqe, skb);
2114 }
2115 } else {
2116 /* icmp (big data) and
2117 ip segmentation packets (all other ip packets) */
2118 swqe->tx_control |= EHEA_SWQE_CRC
2119 | EHEA_SWQE_IP_CHECKSUM
2120 | EHEA_SWQE_IMM_DATA_PRESENT;
2121 }
2122 } else {
2123 /* Other Ethernet Protocol */
2124 swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
2125 }
2126 /* copy (immediate) data */
2127 if (nfrags == 0) {
2128 /* data is in a single piece */
d626f62b 2129 skb_copy_from_linear_data(skb, imm_data, skb->len);
7a291083
JBT
2130 } else {
2131 /* first copy data from the skb->data buffer ... */
d626f62b 2132 skb_copy_from_linear_data(skb, imm_data,
e743d313
ED
2133 skb_headlen(skb));
2134 imm_data += skb_headlen(skb);
7a291083
JBT
2135
2136 /* ... then copy data from the fragments */
2137 for (i = 0; i < nfrags; i++) {
2138 frag = &skb_shinfo(skb)->frags[i];
2139 memcpy(imm_data,
2140 page_address(frag->page) + frag->page_offset,
2141 frag->size);
2142 imm_data += frag->size;
2143 }
2144 }
2145 swqe->immediate_data_length = skb->len;
2146 dev_kfree_skb(skb);
2147}
2148
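/*
 * Spread TCP flows across the TX queue pairs. The hash mixes the TCP
 * source and destination ports with the destination IP address,
 * (source + (dest << 16)) % 31 + daddr % 31, taken modulo num_qps, so
 * all packets of one connection stay on the same send queue.
 * Non-TCP or non-IPv4 traffic always uses queue 0.
 */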
18604c54
JBT
2149static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
2150{
2151 struct tcphdr *tcp;
2152 u32 tmp;
2153
2154 if ((skb->protocol == htons(ETH_P_IP)) &&
88ca2d07 2155 (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
508d2b5d
DM
2156 tcp = (struct tcphdr *)(skb_network_header(skb) +
2157 (ip_hdr(skb)->ihl * 4));
18604c54 2158 tmp = (tcp->source + (tcp->dest << 16)) % 31;
88ca2d07 2159 tmp += ip_hdr(skb)->daddr % 31;
18604c54 2160 return tmp % num_qps;
508d2b5d 2161 } else
18604c54
JBT
2162 return 0;
2163}
2164
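/*
 * Main transmit handler: pick a port resource by flow hash, take its
 * xmit_lock (returning NETDEV_TX_BUSY when contended or stopped), use
 * the SWQE3 path for frames that fit into immediate data and the SWQE2
 * path otherwise, insert the VLAN tag when requested, and post the WQE.
 * Signalled completion is requested only every sig_comp_iv frames on
 * the SWQE3 path; the queue is stopped when fewer than two WQEs remain.
 */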
7a291083
JBT
2165static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2166{
2167 struct ehea_port *port = netdev_priv(dev);
2168 struct ehea_swqe *swqe;
2169 unsigned long flags;
2170 u32 lkey;
2171 int swqe_index;
18604c54
JBT
2172 struct ehea_port_res *pr;
2173
2174 pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
2175
18604c54
JBT
2176 if (!spin_trylock(&pr->xmit_lock))
2177 return NETDEV_TX_BUSY;
2178
2179 if (pr->queue_stopped) {
2180 spin_unlock(&pr->xmit_lock);
2181 return NETDEV_TX_BUSY;
2182 }
7a291083
JBT
2183
2184 swqe = ehea_get_swqe(pr->qp, &swqe_index);
2185 memset(swqe, 0, SWQE_HEADER_SIZE);
2186 atomic_dec(&pr->swqe_avail);
2187
2188 if (skb->len <= SWQE3_MAX_IMM) {
2189 u32 sig_iv = port->sig_comp_iv;
2190 u32 swqe_num = pr->swqe_id_counter;
2191 ehea_xmit3(skb, dev, swqe);
2192 swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
2193 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
2194 if (pr->swqe_ll_count >= (sig_iv - 1)) {
2195 swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
2196 sig_iv);
2197 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2198 pr->swqe_ll_count = 0;
2199 } else
2200 pr->swqe_ll_count += 1;
2201 } else {
2202 swqe->wr_id =
2203 EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
2204 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
acbddb59 2205 | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
7a291083
JBT
2206 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
2207 pr->sq_skba.arr[pr->sq_skba.index] = skb;
2208
2209 pr->sq_skba.index++;
2210 pr->sq_skba.index &= (pr->sq_skba.len - 1);
2211
2212 lkey = pr->send_mr.lkey;
2213 ehea_xmit2(skb, dev, swqe, lkey);
acbddb59 2214 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
7a291083
JBT
2215 }
2216 pr->swqe_id_counter += 1;
2217
2218 if (port->vgrp && vlan_tx_tag_present(skb)) {
2219 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2220 swqe->vlan_tag = vlan_tx_tag_get(skb);
2221 }
2222
2223 if (netif_msg_tx_queued(port)) {
2224 ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
bff0a55f 2225 ehea_dump(swqe, 512, "swqe");
7a291083
JBT
2226 }
2227
2c69448b
JBT
2228 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
2229 netif_stop_queue(dev);
2230 swqe->tx_control |= EHEA_SWQE_PURGE;
2231 }
44c82152 2232
7a291083 2233 ehea_post_swqe(pr->qp, swqe);
7393b87c 2234 pr->tx_packets++;
7a291083
JBT
2235
2236 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2237 spin_lock_irqsave(&pr->netif_queue, flags);
2238 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
acbddb59 2239 pr->p_stats.queue_stopped++;
7a291083
JBT
2240 netif_stop_queue(dev);
2241 pr->queue_stopped = 1;
2242 }
2243 spin_unlock_irqrestore(&pr->netif_queue, flags);
2244 }
1ae5dc34 2245 dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
7a291083 2246 spin_unlock(&pr->xmit_lock);
2c69448b 2247
7a291083
JBT
2248 return NETDEV_TX_OK;
2249}
2250
2251static void ehea_vlan_rx_register(struct net_device *dev,
2252 struct vlan_group *grp)
2253{
2254 struct ehea_port *port = netdev_priv(dev);
2255 struct ehea_adapter *adapter = port->adapter;
2256 struct hcp_ehea_port_cb1 *cb1;
2257 u64 hret;
2258
2259 port->vgrp = grp;
2260
3faf2693 2261 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
7a291083
JBT
2262 if (!cb1) {
2263 ehea_error("no mem for cb1");
2264 goto out;
2265 }
2266
7a291083
JBT
2267 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2268 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2269 if (hret != H_SUCCESS)
2270 ehea_error("modify_ehea_port failed");
2271
3faf2693 2272 free_page((unsigned long)cb1);
7a291083
JBT
2273out:
2274 return;
2275}
2276
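/*
 * Add or remove a VLAN ID in the port's hardware filter. The filter in
 * control block 1 is an array of 64-bit words indexed by vid / 64, with
 * the bit counted from the most significant end: vid 70, for example,
 * lands in vlan_filter[1] at bit 0x8000000000000000 >> 6.
 */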
2277static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2278{
2279 struct ehea_port *port = netdev_priv(dev);
2280 struct ehea_adapter *adapter = port->adapter;
2281 struct hcp_ehea_port_cb1 *cb1;
2282 int index;
2283 u64 hret;
2284
3faf2693 2285 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
7a291083
JBT
2286 if (!cb1) {
2287 ehea_error("no mem for cb1");
2288 goto out;
2289 }
2290
2291 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2292 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2293 if (hret != H_SUCCESS) {
2294 ehea_error("query_ehea_port failed");
2295 goto out;
2296 }
2297
2298 index = (vid / 64);
dec590c1 2299 cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));
7a291083
JBT
2300
2301 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2302 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2303 if (hret != H_SUCCESS)
2304 ehea_error("modify_ehea_port failed");
2305out:
3faf2693 2306 free_page((unsigned long)cb1);
7a291083
JBT
2307 return;
2308}
2309
2310static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2311{
2312 struct ehea_port *port = netdev_priv(dev);
2313 struct ehea_adapter *adapter = port->adapter;
2314 struct hcp_ehea_port_cb1 *cb1;
2315 int index;
2316 u64 hret;
2317
5c15bdec 2318 vlan_group_set_device(port->vgrp, vid, NULL);
7a291083 2319
3faf2693 2320 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
7a291083
JBT
2321 if (!cb1) {
2322 ehea_error("no mem for cb1");
2323 goto out;
2324 }
2325
2326 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2327 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2328 if (hret != H_SUCCESS) {
2329 ehea_error("query_ehea_port failed");
2330 goto out;
2331 }
2332
2333 index = (vid / 64);
dec590c1 2334 cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));
7a291083
JBT
2335
2336 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2337 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2338 if (hret != H_SUCCESS)
2339 ehea_error("modify_ehea_port failed");
2340out:
3faf2693 2341 free_page((unsigned long)cb1);
7a291083
JBT
2342}
2343
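/*
 * Walk a queue pair up to its operational state. Each step queries the
 * current control register via ehea_h_query_ehea_qp() and then sets the
 * next state with ehea_h_modify_ehea_qp(): INITIALIZED, then ENABLED
 * while still INITIALIZED, and finally ENABLED and ready to send
 * (RDY2SND).
 */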
2344int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2345{
2346 int ret = -EIO;
2347 u64 hret;
2348 u16 dummy16 = 0;
2349 u64 dummy64 = 0;
508d2b5d 2350 struct hcp_modify_qp_cb0 *cb0;
7a291083 2351
3faf2693 2352 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
7a291083
JBT
2353 if (!cb0) {
2354 ret = -ENOMEM;
2355 goto out;
2356 }
2357
2358 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2359 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2360 if (hret != H_SUCCESS) {
2361 ehea_error("query_ehea_qp failed (1)");
2362 goto out;
2363 }
2364
2365 cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
2366 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2367 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2368 &dummy64, &dummy64, &dummy16, &dummy16);
2369 if (hret != H_SUCCESS) {
2370 ehea_error("modify_ehea_qp failed (1)");
2371 goto out;
2372 }
2373
2374 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2375 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2376 if (hret != H_SUCCESS) {
2377 ehea_error("query_ehea_qp failed (2)");
2378 goto out;
2379 }
2380
2381 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
2382 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2383 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2384 &dummy64, &dummy64, &dummy16, &dummy16);
2385 if (hret != H_SUCCESS) {
2386 ehea_error("modify_ehea_qp failed (2)");
2387 goto out;
2388 }
2389
2390 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2391 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2392 if (hret != H_SUCCESS) {
2393 ehea_error("query_ehea_qp failed (3)");
2394 goto out;
2395 }
2396
2397 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
2398 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2399 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2400 &dummy64, &dummy64, &dummy16, &dummy16);
2401 if (hret != H_SUCCESS) {
2402 ehea_error("modify_ehea_qp failed (3)");
2403 goto out;
2404 }
2405
2406 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2407 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2408 if (hret != H_SUCCESS) {
2409 ehea_error("query_ehea_qp failed (4)");
2410 goto out;
2411 }
2412
2413 ret = 0;
2414out:
3faf2693 2415 free_page((unsigned long)cb0);
7a291083
JBT
2416 return ret;
2417}
2418
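/*
 * Allocate the per-port resources: one event queue plus def_qps fully
 * sized queue pairs (RQ1/RQ2/RQ3 sized by the module parameters) and
 * add_tx_qps additional transmit-only QPs that only get minimal,
 * one-entry receive queues.
 */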
2419static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
2420 int add_tx_qps)
2421{
2422 int ret, i;
2423 struct port_res_cfg pr_cfg, pr_cfg_small_rx;
2424 enum ehea_eq_type eq_type = EHEA_EQ;
2425
2426 port->qp_eq = ehea_create_eq(port->adapter, eq_type,
2427 EHEA_MAX_ENTRIES_EQ, 1);
2428 if (!port->qp_eq) {
2429 ret = -EINVAL;
2430 ehea_error("ehea_create_eq failed (qp_eq)");
2431 goto out_kill_eq;
2432 }
2433
2434 pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
18604c54 2435 pr_cfg.max_entries_scq = sq_entries * 2;
7a291083
JBT
2436 pr_cfg.max_entries_sq = sq_entries;
2437 pr_cfg.max_entries_rq1 = rq1_entries;
2438 pr_cfg.max_entries_rq2 = rq2_entries;
2439 pr_cfg.max_entries_rq3 = rq3_entries;
2440
2441 pr_cfg_small_rx.max_entries_rcq = 1;
2442 pr_cfg_small_rx.max_entries_scq = sq_entries;
2443 pr_cfg_small_rx.max_entries_sq = sq_entries;
2444 pr_cfg_small_rx.max_entries_rq1 = 1;
2445 pr_cfg_small_rx.max_entries_rq2 = 1;
2446 pr_cfg_small_rx.max_entries_rq3 = 1;
2447
2448 for (i = 0; i < def_qps; i++) {
2449 ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
2450 if (ret)
2451 goto out_clean_pr;
2452 }
2453 for (i = def_qps; i < def_qps + add_tx_qps; i++) {
2454 ret = ehea_init_port_res(port, &port->port_res[i],
2455 &pr_cfg_small_rx, i);
2456 if (ret)
2457 goto out_clean_pr;
2458 }
2459
2460 return 0;
2461
2462out_clean_pr:
2463 while (--i >= 0)
2464 ehea_clean_portres(port, &port->port_res[i]);
2465
2466out_kill_eq:
2467 ehea_destroy_eq(port->qp_eq);
2468 return ret;
2469}
2470
2471static int ehea_clean_all_portres(struct ehea_port *port)
2472{
2473 int ret = 0;
2474 int i;
2475
508d2b5d 2476 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
7a291083
JBT
2477 ret |= ehea_clean_portres(port, &port->port_res[i]);
2478
2479 ret |= ehea_destroy_eq(port->qp_eq);
2480
2481 return ret;
2482}
2483
35cf2e2e 2484static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
1211bb6d 2485{
35cf2e2e
TK
2486 if (adapter->active_ports)
2487 return;
1211bb6d
TK
2488
2489 ehea_rem_mr(&adapter->mr);
2490}
2491
35cf2e2e 2492static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
1211bb6d 2493{
35cf2e2e
TK
2494 if (adapter->active_ports)
2495 return 0;
1211bb6d
TK
2496
2497 return ehea_reg_kernel_mr(adapter, &adapter->mr);
2498}
2499
7a291083
JBT
2500static int ehea_up(struct net_device *dev)
2501{
2502 int ret, i;
2503 struct ehea_port *port = netdev_priv(dev);
7a291083
JBT
2504
2505 if (port->state == EHEA_PORT_UP)
2506 return 0;
2507
2508 ret = ehea_port_res_setup(port, port->num_def_qps,
2509 port->num_add_tx_qps);
2510 if (ret) {
2511 ehea_error("ehea_port_res_setup failed");
2512 goto out;
2513 }
2514
2515 /* Set default QP for this port */
2516 ret = ehea_configure_port(port);
2517 if (ret) {
2518 ehea_error("ehea_configure_port failed. ret:%d", ret);
2519 goto out_clean_pr;
2520 }
2521
7a291083
JBT
2522 ret = ehea_reg_interrupts(dev);
2523 if (ret) {
f9e29228
TK
2524 ehea_error("reg_interrupts failed. ret:%d", ret);
2525 goto out_clean_pr;
7a291083
JBT
2526 }
2527
508d2b5d 2528 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
7a291083
JBT
2529 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2530 if (ret) {
2531 ehea_error("activate_qp failed");
2532 goto out_free_irqs;
2533 }
2534 }
2535
508d2b5d 2536 for (i = 0; i < port->num_def_qps; i++) {
7a291083
JBT
2537 ret = ehea_fill_port_res(&port->port_res[i]);
2538 if (ret) {
2539 ehea_error("ehea_fill_port_res failed");
2540 goto out_free_irqs;
2541 }
2542 }
2543
21eee2dd
TK
2544 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2545 if (ret) {
2546 ret = -EIO;
2547 goto out_free_irqs;
2548 }
2549
7a291083 2550 port->state = EHEA_PORT_UP;
21eee2dd
TK
2551
2552 ret = 0;
7a291083
JBT
2553 goto out;
2554
2555out_free_irqs:
2556 ehea_free_interrupts(dev);
2557
7a291083
JBT
2558out_clean_pr:
2559 ehea_clean_all_portres(port);
2560out:
44c82152
TK
2561 if (ret)
2562 ehea_info("Failed starting %s. ret=%i", dev->name, ret);
2563
21eee2dd 2564 ehea_update_bcmc_registrations();
21eee2dd 2565 ehea_update_firmware_handles();
21eee2dd 2566
7a291083
JBT
2567 return ret;
2568}
2569
bea3348e
SH
2570static void port_napi_disable(struct ehea_port *port)
2571{
2572 int i;
2573
0173b793 2574 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
bea3348e
SH
2575 napi_disable(&port->port_res[i].napi);
2576}
2577
2578static void port_napi_enable(struct ehea_port *port)
2579{
2580 int i;
2581
0173b793 2582 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
bea3348e
SH
2583 napi_enable(&port->port_res[i].napi);
2584}
2585
7a291083
JBT
2586static int ehea_open(struct net_device *dev)
2587{
2588 int ret;
2589 struct ehea_port *port = netdev_priv(dev);
2590
a5af6ad3 2591 mutex_lock(&port->port_lock);
7a291083
JBT
2592
2593 if (netif_msg_ifup(port))
2594 ehea_info("enabling port %s", dev->name);
2595
2596 ret = ehea_up(dev);
bea3348e
SH
2597 if (!ret) {
2598 port_napi_enable(port);
7a291083 2599 netif_start_queue(dev);
bea3348e 2600 }
7a291083 2601
a5af6ad3 2602 mutex_unlock(&port->port_lock);
7a291083
JBT
2603
2604 return ret;
2605}
2606
2607static int ehea_down(struct net_device *dev)
2608{
bea3348e 2609 int ret;
7a291083
JBT
2610 struct ehea_port *port = netdev_priv(dev);
2611
2612 if (port->state == EHEA_PORT_DOWN)
2613 return 0;
2614
2615 ehea_drop_multicast_list(dev);
21eee2dd
TK
2616 ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2617
7a291083
JBT
2618 ehea_free_interrupts(dev);
2619
7a291083 2620 port->state = EHEA_PORT_DOWN;
44c82152 2621
21eee2dd 2622 ehea_update_bcmc_registrations();
21eee2dd 2623
44c82152
TK
2624 ret = ehea_clean_all_portres(port);
2625 if (ret)
2626 ehea_info("Failed freeing resources for %s. ret=%i",
2627 dev->name, ret);
2628
21eee2dd 2629 ehea_update_firmware_handles();
21eee2dd 2630
7a291083
JBT
2631 return ret;
2632}
2633
2634static int ehea_stop(struct net_device *dev)
2635{
2636 int ret;
2637 struct ehea_port *port = netdev_priv(dev);
2638
2639 if (netif_msg_ifdown(port))
2640 ehea_info("disabling port %s", dev->name);
2641
2f69ae01 2642 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
4bb073c0 2643 cancel_work_sync(&port->reset_task);
a5af6ad3 2644 mutex_lock(&port->port_lock);
7a291083 2645 netif_stop_queue(dev);
0173b793 2646 port_napi_disable(port);
7a291083 2647 ret = ehea_down(dev);
a5af6ad3 2648 mutex_unlock(&port->port_lock);
2f69ae01 2649 clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
7a291083
JBT
2650 return ret;
2651}
2652
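/*
 * Helpers for quiescing the send path while memory regions are
 * re-registered: ehea_purge_sq() flags every send WQE with
 * EHEA_SWQE_PURGE, and ehea_flush_sq() waits (up to roughly 100 ms per
 * queue, 20 x 5 ms) until the send queues have drained.
 */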
22559c5d 2653static void ehea_purge_sq(struct ehea_qp *orig_qp)
2c69448b
JBT
2654{
2655 struct ehea_qp qp = *orig_qp;
2656 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2657 struct ehea_swqe *swqe;
2658 int wqe_index;
2659 int i;
2660
2661 for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
2662 swqe = ehea_get_swqe(&qp, &wqe_index);
2663 swqe->tx_control |= EHEA_SWQE_PURGE;
2664 }
2665}
2666
22559c5d 2667static void ehea_flush_sq(struct ehea_port *port)
44fb3126
TK
2668{
2669 int i;
2670
2671 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2672 struct ehea_port_res *pr = &port->port_res[i];
2673 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
2674 int k = 0;
2675 while (atomic_read(&pr->swqe_avail) < swqe_max) {
2676 msleep(5);
2677 if (++k == 20)
2678 break;
2679 }
2680 }
2681}
2682
2c69448b
JBT
2683int ehea_stop_qps(struct net_device *dev)
2684{
2685 struct ehea_port *port = netdev_priv(dev);
2686 struct ehea_adapter *adapter = port->adapter;
508d2b5d 2687 struct hcp_modify_qp_cb0 *cb0;
2c69448b
JBT
2688 int ret = -EIO;
2689 int dret;
2690 int i;
2691 u64 hret;
2692 u64 dummy64 = 0;
2693 u16 dummy16 = 0;
2694
3faf2693 2695 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2c69448b
JBT
2696 if (!cb0) {
2697 ret = -ENOMEM;
2698 goto out;
2699 }
2700
2701 for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
2702 struct ehea_port_res *pr = &port->port_res[i];
2703 struct ehea_qp *qp = pr->qp;
2704
2705 /* Purge send queue */
2706 ehea_purge_sq(qp);
2707
2708 /* Disable queue pair */
2709 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2710 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2711 cb0);
2712 if (hret != H_SUCCESS) {
2713 ehea_error("query_ehea_qp failed (1)");
2714 goto out;
2715 }
2716
2717 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2718 cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
2719
2720 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2721 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2722 1), cb0, &dummy64,
2723 &dummy64, &dummy16, &dummy16);
2724 if (hret != H_SUCCESS) {
2725 ehea_error("modify_ehea_qp failed (1)");
2726 goto out;
2727 }
2728
2729 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2730 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2731 cb0);
2732 if (hret != H_SUCCESS) {
2733 ehea_error("query_ehea_qp failed (2)");
2734 goto out;
2735 }
2736
2737 /* deregister shared memory regions */
2738 dret = ehea_rem_smrs(pr);
2739 if (dret) {
2740 ehea_error("unreg shared memory region failed");
2741 goto out;
2742 }
2743 }
2744
2745 ret = 0;
2746out:
3faf2693 2747 free_page((unsigned long)cb0);
2c69448b
JBT
2748
2749 return ret;
2750}
2751
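/*
 * After the kernel memory region has been re-registered, the receive
 * WQEs still carry the old lkey and virtual addresses. Rewrite the
 * RQ2/RQ3 WQEs with the new recv_mr.lkey and remapped skb data
 * addresses before the queue pair is re-enabled in ehea_restart_qps().
 */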
508d2b5d 2752void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
2c69448b
JBT
2753{
2754 struct ehea_qp qp = *orig_qp;
2755 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2756 struct ehea_rwqe *rwqe;
2757 struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
2758 struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
2759 struct sk_buff *skb;
2760 u32 lkey = pr->recv_mr.lkey;
2761
2762
2763 int i;
2764 int index;
2765
2766 for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
2767 rwqe = ehea_get_next_rwqe(&qp, 2);
2768 rwqe->sg_list[0].l_key = lkey;
2769 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2770 skb = skba_rq2[index];
2771 if (skb)
2772 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2773 }
2774
2775 for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
2776 rwqe = ehea_get_next_rwqe(&qp, 3);
2777 rwqe->sg_list[0].l_key = lkey;
2778 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2779 skb = skba_rq3[index];
2780 if (skb)
2781 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2782 }
2783}
2784
2785int ehea_restart_qps(struct net_device *dev)
2786{
2787 struct ehea_port *port = netdev_priv(dev);
2788 struct ehea_adapter *adapter = port->adapter;
2789 int ret = 0;
2790 int i;
2791
508d2b5d 2792 struct hcp_modify_qp_cb0 *cb0;
2c69448b
JBT
2793 u64 hret;
2794 u64 dummy64 = 0;
2795 u16 dummy16 = 0;
2796
3faf2693 2797 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2c69448b
JBT
2798 if (!cb0) {
2799 ret = -ENOMEM;
2800 goto out;
2801 }
2802
2803 for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
2804 struct ehea_port_res *pr = &port->port_res[i];
2805 struct ehea_qp *qp = pr->qp;
2806
2807 ret = ehea_gen_smrs(pr);
2808 if (ret) {
2809 ehea_error("creation of shared memory regions failed");
2810 goto out;
2811 }
2812
2813 ehea_update_rqs(qp, pr);
2814
2815 /* Enable queue pair */
2816 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2817 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2818 cb0);
2819 if (hret != H_SUCCESS) {
2820 ehea_error("query_ehea_qp failed (1)");
2821 goto out;
2822 }
2823
2824 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2825 cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
2826
2827 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2828 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2829 1), cb0, &dummy64,
2830 &dummy64, &dummy16, &dummy16);
2831 if (hret != H_SUCCESS) {
2832 ehea_error("modify_ehea_qp failed (1)");
2833 goto out;
2834 }
2835
2836 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2837 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2838 cb0);
2839 if (hret != H_SUCCESS) {
2840 ehea_error("query_ehea_qp failed (2)");
2841 goto out;
2842 }
2843
2844 /* refill entire queue */
2845 ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
2846 ehea_refill_rq2(pr, 0);
2847 ehea_refill_rq3(pr, 0);
2848 }
2849out:
3faf2693 2850 free_page((unsigned long)cb0);
2c69448b
JBT
2851
2852 return ret;
2853}
2854
c4028958 2855static void ehea_reset_port(struct work_struct *work)
7a291083
JBT
2856{
2857 int ret;
c4028958
DH
2858 struct ehea_port *port =
2859 container_of(work, struct ehea_port, reset_task);
2860 struct net_device *dev = port->netdev;
7a291083
JBT
2861
2862 port->resets++;
a5af6ad3 2863 mutex_lock(&port->port_lock);
7a291083 2864 netif_stop_queue(dev);
bea3348e
SH
2865
2866 port_napi_disable(port);
7a291083 2867
44c82152 2868 ehea_down(dev);
7a291083
JBT
2869
2870 ret = ehea_up(dev);
44c82152 2871 if (ret)
7a291083 2872 goto out;
7a291083 2873
2c69448b
JBT
2874 ehea_set_multicast_list(dev);
2875
7a291083
JBT
2876 if (netif_msg_timer(port))
2877 ehea_info("Device %s reset successfully", dev->name);
2878
bea3348e
SH
2879 port_napi_enable(port);
2880
7a291083
JBT
2881 netif_wake_queue(dev);
2882out:
a5af6ad3 2883 mutex_unlock(&port->port_lock);
7a291083
JBT
2884}
2885
44c82152
TK
2886static void ehea_rereg_mrs(struct work_struct *work)
2887{
2888 int ret, i;
2889 struct ehea_adapter *adapter;
2890
d4f12daf 2891 ehea_info("LPAR memory changed - re-initializing driver");
44c82152
TK
2892
2893 list_for_each_entry(adapter, &adapter_list, list)
2894 if (adapter->active_ports) {
2895 /* Shutdown all ports */
2896 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2897 struct ehea_port *port = adapter->port[i];
a5af6ad3 2898 struct net_device *dev;
44c82152 2899
a5af6ad3
DW
2900 if (!port)
2901 continue;
44c82152 2902
a5af6ad3
DW
2903 dev = port->netdev;
2904
2905 if (dev->flags & IFF_UP) {
2906 mutex_lock(&port->port_lock);
2907 netif_stop_queue(dev);
df39e8ba 2908 ehea_flush_sq(port);
a5af6ad3
DW
2909 ret = ehea_stop_qps(dev);
2910 if (ret) {
2911 mutex_unlock(&port->port_lock);
2912 goto out;
44c82152 2913 }
a5af6ad3
DW
2914 port_napi_disable(port);
2915 mutex_unlock(&port->port_lock);
44c82152
TK
2916 }
2917 }
2918
2919 /* Unregister old memory region */
2920 ret = ehea_rem_mr(&adapter->mr);
2921 if (ret) {
2922 ehea_error("unregister MR failed - driver"
2923 " inoperable!");
2924 goto out;
2925 }
2926 }
2927
44c82152
TK
2928 clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2929
2930 list_for_each_entry(adapter, &adapter_list, list)
2931 if (adapter->active_ports) {
2932 /* Register new memory region */
2933 ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2934 if (ret) {
2935 ehea_error("register MR failed - driver"
2936 " inoperable!");
2937 goto out;
2938 }
2939
2940 /* Restart all ports */
2941 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2942 struct ehea_port *port = adapter->port[i];
2943
2944 if (port) {
2945 struct net_device *dev = port->netdev;
2946
2947 if (dev->flags & IFF_UP) {
a5af6ad3 2948 mutex_lock(&port->port_lock);
2c69448b
JBT
2949 port_napi_enable(port);
2950 ret = ehea_restart_qps(dev);
2951 if (!ret)
44c82152 2952 netif_wake_queue(dev);
a5af6ad3 2953 mutex_unlock(&port->port_lock);
44c82152
TK
2954 }
2955 }
2956 }
2957 }
68905eb4 2958 ehea_info("driver re-initialization complete");
44c82152
TK
2959out:
2960 return;
2961}
2962
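/*
 * TX timeout handler: schedule a port reset, but only while the carrier
 * is up and no memory re-registration (__EHEA_STOP_XFER) is in flight,
 * since transmit is expected to stall during that window.
 */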
7a291083
JBT
2963static void ehea_tx_watchdog(struct net_device *dev)
2964{
2965 struct ehea_port *port = netdev_priv(dev);
2966
2c69448b
JBT
2967 if (netif_carrier_ok(dev) &&
2968 !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
2f69ae01 2969 ehea_schedule_port_reset(port);
7a291083
JBT
2970}
2971
2972int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2973{
2974 struct hcp_query_ehea *cb;
2975 u64 hret;
2976 int ret;
2977
3faf2693 2978 cb = (void *)get_zeroed_page(GFP_KERNEL);
7a291083
JBT
2979 if (!cb) {
2980 ret = -ENOMEM;
2981 goto out;
2982 }
2983
2984 hret = ehea_h_query_ehea(adapter->handle, cb);
2985
2986 if (hret != H_SUCCESS) {
2987 ret = -EIO;
2988 goto out_herr;
2989 }
2990
7a291083
JBT
2991 adapter->max_mc_mac = cb->max_mc_mac - 1;
2992 ret = 0;
2993
2994out_herr:
3faf2693 2995 free_page((unsigned long)cb);
7a291083
JBT
2996out:
2997 return ret;
2998}
2999
1acf2318 3000int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
7a291083 3001{
7a291083 3002 struct hcp_ehea_port_cb4 *cb4;
1acf2318
JBT
3003 u64 hret;
3004 int ret = 0;
7a291083 3005
1acf2318 3006 *jumbo = 0;
7a291083 3007
1acf2318 3008 /* (Try to) enable *jumbo frames */
3faf2693 3009 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
7a291083
JBT
3010 if (!cb4) {
3011 ehea_error("no mem for cb4");
1acf2318
JBT
3012 ret = -ENOMEM;
3013 goto out;
7a291083 3014 } else {
1acf2318 3015 hret = ehea_h_query_ehea_port(port->adapter->handle,
9c750b7d
TK
3016 port->logical_port_id,
3017 H_PORT_CB4,
3018 H_PORT_CB4_JUMBO, cb4);
9c750b7d
TK
3019 if (hret == H_SUCCESS) {
3020 if (cb4->jumbo_frame)
1acf2318 3021 *jumbo = 1;
9c750b7d
TK
3022 else {
3023 cb4->jumbo_frame = 1;
1acf2318
JBT
3024 hret = ehea_h_modify_ehea_port(port->adapter->
3025 handle,
9c750b7d 3026 port->
1acf2318 3027 logical_port_id,
9c750b7d
TK
3028 H_PORT_CB4,
3029 H_PORT_CB4_JUMBO,
3030 cb4);
3031 if (hret == H_SUCCESS)
1acf2318 3032 *jumbo = 1;
9c750b7d 3033 }
1acf2318
JBT
3034 } else
3035 ret = -EINVAL;
3036
3faf2693 3037 free_page((unsigned long)cb4);
7a291083 3038 }
1acf2318
JBT
3039out:
3040 return ret;
3041}
3042
3043static ssize_t ehea_show_port_id(struct device *dev,
3044 struct device_attribute *attr, char *buf)
3045{
3046 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
a8e34fda 3047 return sprintf(buf, "%d", port->logical_port_id);
1acf2318
JBT
3048}
3049
3050static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
3051 NULL);
3052
3053static void __devinit logical_port_release(struct device *dev)
3054{
3055 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
61c7a080 3056 of_node_put(port->ofdev.dev.of_node);
1acf2318
JBT
3057}
3058
3059static struct device *ehea_register_port(struct ehea_port *port,
3060 struct device_node *dn)
3061{
3062 int ret;
3063
61c7a080 3064 port->ofdev.dev.of_node = of_node_get(dn);
6b08f3ae 3065 port->ofdev.dev.parent = &port->adapter->ofdev->dev;
d1dea38d 3066 port->ofdev.dev.bus = &ibmebus_bus_type;
1acf2318 3067
db1d7bf7 3068 dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
1acf2318
JBT
3069 port->ofdev.dev.release = logical_port_release;
3070
3071 ret = of_device_register(&port->ofdev);
3072 if (ret) {
3073 ehea_error("failed to register device. ret=%d", ret);
3074 goto out;
3075 }
3076
3077 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
d1d25aab 3078 if (ret) {
1acf2318
JBT
3079 ehea_error("failed to register attributes, ret=%d", ret);
3080 goto out_unreg_of_dev;
3081 }
e542aa6b 3082
1acf2318
JBT
3083 return &port->ofdev.dev;
3084
3085out_unreg_of_dev:
3086 of_device_unregister(&port->ofdev);
3087out:
3088 return NULL;
3089}
3090
3091static void ehea_unregister_port(struct ehea_port *port)
3092{
3093 device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
3094 of_device_unregister(&port->ofdev);
3095}
3096
086c1b2c
TK
3097static const struct net_device_ops ehea_netdev_ops = {
3098 .ndo_open = ehea_open,
3099 .ndo_stop = ehea_stop,
3100 .ndo_start_xmit = ehea_start_xmit,
3101#ifdef CONFIG_NET_POLL_CONTROLLER
3102 .ndo_poll_controller = ehea_netpoll,
3103#endif
3104 .ndo_get_stats = ehea_get_stats,
3105 .ndo_set_mac_address = ehea_set_mac_addr,
240c102d 3106 .ndo_validate_addr = eth_validate_addr,
086c1b2c
TK
3107 .ndo_set_multicast_list = ehea_set_multicast_list,
3108 .ndo_change_mtu = ehea_change_mtu,
3109 .ndo_vlan_rx_register = ehea_vlan_rx_register,
3110 .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
32e8f9a8
AB
3111 .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid,
3112 .ndo_tx_timeout = ehea_tx_watchdog,
086c1b2c
TK
3113};
3114
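/*
 * Create one logical port: allocate the net_device and ehea_port, sense
 * the port attributes, register the port device below the adapter's OF
 * device, hook up ehea_netdev_ops and the ethtool ops, advertise the
 * checksum/TSO/VLAN offload features, and finally register_netdev().
 */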
1acf2318
JBT
3115struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3116 u32 logical_port_id,
3117 struct device_node *dn)
3118{
3119 int ret;
3120 struct net_device *dev;
3121 struct ehea_port *port;
3122 struct device *port_dev;
3123 int jumbo;
3124
3125 /* allocate memory for the port structures */
3126 dev = alloc_etherdev(sizeof(struct ehea_port));
3127
3128 if (!dev) {
3129 ehea_error("no mem for net_device");
3130 ret = -ENOMEM;
3131 goto out_err;
3132 }
3133
3134 port = netdev_priv(dev);
3135
a5af6ad3 3136 mutex_init(&port->port_lock);
1acf2318
JBT
3137 port->state = EHEA_PORT_DOWN;
3138 port->sig_comp_iv = sq_entries / 10;
3139
3140 port->adapter = adapter;
3141 port->netdev = dev;
3142 port->logical_port_id = logical_port_id;
3143
3144 port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
3145
3146 port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
3147 if (!port->mc_list) {
3148 ret = -ENOMEM;
3149 goto out_free_ethdev;
3150 }
3151
3152 INIT_LIST_HEAD(&port->mc_list->list);
3153
3154 ret = ehea_sense_port_attr(port);
3155 if (ret)
3156 goto out_free_mc_list;
3157
3158 port_dev = ehea_register_port(port, dn);
3159 if (!port_dev)
3160 goto out_free_mc_list;
3161
3162 SET_NETDEV_DEV(dev, port_dev);
7a291083
JBT
3163
3164 /* initialize net_device structure */
7a291083
JBT
3165 memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
3166
086c1b2c
TK
3167 dev->netdev_ops = &ehea_netdev_ops;
3168 ehea_set_ethtool_ops(dev);
3169
7a291083 3170 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
dc01c447 3171 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
7a291083
JBT
3172 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
3173 | NETIF_F_LLTX;
7a291083
JBT
3174 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3175
c4028958 3176 INIT_WORK(&port->reset_task, ehea_reset_port);
7a291083
JBT
3177
3178 ret = register_netdev(dev);
3179 if (ret) {
3180 ehea_error("register_netdev failed. ret=%d", ret);
21eee2dd 3181 goto out_unreg_port;
7a291083
JBT
3182 }
3183
d4dc4ec9
JBT
3184 port->lro_max_aggr = lro_max_aggr;
3185
1acf2318 3186 ret = ehea_get_jumboframe_status(port, &jumbo);
e542aa6b 3187 if (ret)
1acf2318
JBT
3188 ehea_error("failed determining jumbo frame status for %s",
3189 port->netdev->name);
3190
9c750b7d
TK
3191 ehea_info("%s: Jumbo frames are %sabled", dev->name,
3192 jumbo == 1 ? "en" : "dis");
3193
44c82152
TK
3194 adapter->active_ports++;
3195
1acf2318 3196 return port;
7a291083 3197
1acf2318
JBT
3198out_unreg_port:
3199 ehea_unregister_port(port);
3200
3201out_free_mc_list:
7a291083 3202 kfree(port->mc_list);
1acf2318
JBT
3203
3204out_free_ethdev:
3205 free_netdev(dev);
3206
3207out_err:
3208 ehea_error("setting up logical port with id=%d failed, ret=%d",
3209 logical_port_id, ret);
3210 return NULL;
3211}
3212
3213static void ehea_shutdown_single_port(struct ehea_port *port)
3214{
7fb1c2ac 3215 struct ehea_adapter *adapter = port->adapter;
1acf2318
JBT
3216 unregister_netdev(port->netdev);
3217 ehea_unregister_port(port);
3218 kfree(port->mc_list);
3219 free_netdev(port->netdev);
7fb1c2ac 3220 adapter->active_ports--;
7a291083
JBT
3221}
3222
3223static int ehea_setup_ports(struct ehea_adapter *adapter)
3224{
1acf2318
JBT
3225 struct device_node *lhea_dn;
3226 struct device_node *eth_dn = NULL;
d1d25aab 3227
9f9a3b8a 3228 const u32 *dn_log_port_id;
1acf2318
JBT
3229 int i = 0;
3230
61c7a080 3231 lhea_dn = adapter->ofdev->dev.of_node;
1eef4e04 3232 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
e542aa6b 3233
40cd3a45 3234 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
d1d25aab 3235 NULL);
1acf2318
JBT
3236 if (!dn_log_port_id) {
3237 ehea_error("bad device node: eth_dn name=%s",
3238 eth_dn->full_name);
3239 continue;
3240 }
7a291083 3241
1211bb6d
TK
3242 if (ehea_add_adapter_mr(adapter)) {
3243 ehea_error("creating MR failed");
3244 of_node_put(eth_dn);
3245 return -EIO;
3246 }
3247
1acf2318
JBT
3248 adapter->port[i] = ehea_setup_single_port(adapter,
3249 *dn_log_port_id,
3250 eth_dn);
7a291083 3251 if (adapter->port[i])
1acf2318 3252 ehea_info("%s -> logical port id #%d",
e542aa6b 3253 adapter->port[i]->netdev->name,
1acf2318 3254 *dn_log_port_id);
1211bb6d
TK
3255 else
3256 ehea_remove_adapter_mr(adapter);
3257
1acf2318 3258 i++;
ee289b64 3259 }
1211bb6d 3260 return 0;
1acf2318
JBT
3261}
3262
e542aa6b
JBT
3263static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3264 u32 logical_port_id)
1acf2318
JBT
3265{
3266 struct device_node *lhea_dn;
3267 struct device_node *eth_dn = NULL;
9f9a3b8a 3268 const u32 *dn_log_port_id;
1acf2318 3269
61c7a080 3270 lhea_dn = adapter->ofdev->dev.of_node;
1eef4e04 3271 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
e542aa6b 3272
40cd3a45 3273 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
d1d25aab 3274 NULL);
1acf2318
JBT
3275 if (dn_log_port_id)
3276 if (*dn_log_port_id == logical_port_id)
3277 return eth_dn;
ee289b64 3278 }
1acf2318
JBT
3279
3280 return NULL;
3281}
3282
3283static ssize_t ehea_probe_port(struct device *dev,
3284 struct device_attribute *attr,
3285 const char *buf, size_t count)
3286{
c7ae011d 3287 struct ehea_adapter *adapter = dev_get_drvdata(dev);
1acf2318
JBT
3288 struct ehea_port *port;
3289 struct device_node *eth_dn = NULL;
3290 int i;
3291
3292 u32 logical_port_id;
3293
a8e34fda 3294 sscanf(buf, "%d", &logical_port_id);
1acf2318
JBT
3295
3296 port = ehea_get_port(adapter, logical_port_id);
3297
3298 if (port) {
3299 ehea_info("adding port with logical port id=%d failed. port "
3300 "already configured as %s.", logical_port_id,
3301 port->netdev->name);
3302 return -EINVAL;
7a291083 3303 }
e542aa6b 3304
1acf2318 3305 eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
7a291083 3306
1acf2318
JBT
3307 if (!eth_dn) {
3308 ehea_info("no logical port with id %d found", logical_port_id);
3309 return -EINVAL;
3310 }
e542aa6b 3311
1211bb6d
TK
3312 if (ehea_add_adapter_mr(adapter)) {
3313 ehea_error("creating MR failed");
3314 return -EIO;
3315 }
3316
1acf2318 3317 port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
7a291083 3318
1eef4e04
JBT
3319 of_node_put(eth_dn);
3320
1acf2318 3321 if (port) {
508d2b5d 3322 for (i = 0; i < EHEA_MAX_PORTS; i++)
1acf2318
JBT
3323 if (!adapter->port[i]) {
3324 adapter->port[i] = port;
3325 break;
3326 }
7a291083 3327
1acf2318
JBT
3328 ehea_info("added %s (logical port id=%d)", port->netdev->name,
3329 logical_port_id);
1211bb6d
TK
3330 } else {
3331 ehea_remove_adapter_mr(adapter);
e542aa6b 3332 return -EIO;
1211bb6d 3333 }
7a291083 3334
1acf2318
JBT
3335 return (ssize_t) count;
3336}
3337
3338static ssize_t ehea_remove_port(struct device *dev,
3339 struct device_attribute *attr,
3340 const char *buf, size_t count)
3341{
c7ae011d 3342 struct ehea_adapter *adapter = dev_get_drvdata(dev);
1acf2318
JBT
3343 struct ehea_port *port;
3344 int i;
3345 u32 logical_port_id;
3346
a8e34fda 3347 sscanf(buf, "%d", &logical_port_id);
1acf2318
JBT
3348
3349 port = ehea_get_port(adapter, logical_port_id);
3350
3351 if (port) {
3352 ehea_info("removed %s (logical port id=%d)", port->netdev->name,
3353 logical_port_id);
3354
3355 ehea_shutdown_single_port(port);
3356
508d2b5d 3357 for (i = 0; i < EHEA_MAX_PORTS; i++)
1acf2318
JBT
3358 if (adapter->port[i] == port) {
3359 adapter->port[i] = NULL;
3360 break;
3361 }
3362 } else {
3363 ehea_error("removing port with logical port id=%d failed. port "
3364 "not configured.", logical_port_id);
3365 return -EINVAL;
3366 }
3367
1211bb6d
TK
3368 ehea_remove_adapter_mr(adapter);
3369
1acf2318
JBT
3370 return (ssize_t) count;
3371}
3372
3373static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
3374static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
3375
6b08f3ae 3376int ehea_create_device_sysfs(struct of_device *dev)
1acf2318 3377{
6b08f3ae 3378 int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
1acf2318
JBT
3379 if (ret)
3380 goto out;
3381
6b08f3ae 3382 ret = device_create_file(&dev->dev, &dev_attr_remove_port);
1acf2318 3383out:
7a291083
JBT
3384 return ret;
3385}
3386
6b08f3ae 3387void ehea_remove_device_sysfs(struct of_device *dev)
1acf2318 3388{
6b08f3ae
JF
3389 device_remove_file(&dev->dev, &dev_attr_probe_port);
3390 device_remove_file(&dev->dev, &dev_attr_remove_port);
1acf2318
JBT
3391}
3392
6b08f3ae 3393static int __devinit ehea_probe_adapter(struct of_device *dev,
1acf2318 3394 const struct of_device_id *id)
7a291083
JBT
3395{
3396 struct ehea_adapter *adapter;
9f9a3b8a 3397 const u64 *adapter_handle;
7a291083
JBT
3398 int ret;
3399
61c7a080 3400 if (!dev || !dev->dev.of_node) {
1eef4e04
JBT
3401 ehea_error("Invalid ibmebus device probed");
3402 return -EINVAL;
3403 }
3404
7a291083
JBT
3405 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3406 if (!adapter) {
3407 ret = -ENOMEM;
6b08f3ae 3408 dev_err(&dev->dev, "no mem for ehea_adapter\n");
7a291083
JBT
3409 goto out;
3410 }
3411
44c82152
TK
3412 list_add(&adapter->list, &adapter_list);
3413
6b08f3ae 3414 adapter->ofdev = dev;
1acf2318 3415
61c7a080 3416 adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
d1d25aab 3417 NULL);
061bf3cd
TK
3418 if (adapter_handle)
3419 adapter->handle = *adapter_handle;
3420
3421 if (!adapter->handle) {
6b08f3ae 3422 dev_err(&dev->dev, "failed getting handle for adapter"
61c7a080 3423 " '%s'\n", dev->dev.of_node->full_name);
7a291083
JBT
3424 ret = -ENODEV;
3425 goto out_free_ad;
3426 }
3427
7a291083
JBT
3428 adapter->pd = EHEA_PD_ID;
3429
c7ae011d 3430 dev_set_drvdata(&dev->dev, adapter);
7a291083 3431
7a291083
JBT
3432
3433 /* initialize adapter and ports */
3434 /* get adapter properties */
3435 ret = ehea_sense_adapter_attr(adapter);
3436 if (ret) {
898eb71c 3437 dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
1211bb6d 3438 goto out_free_ad;
7a291083 3439 }
7a291083
JBT
3440
3441 adapter->neq = ehea_create_eq(adapter,
3442 EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
3443 if (!adapter->neq) {
1eef4e04 3444 ret = -EIO;
898eb71c 3445 dev_err(&dev->dev, "NEQ creation failed\n");
1211bb6d 3446 goto out_free_ad;
7a291083
JBT
3447 }
3448
3449 tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
3450 (unsigned long)adapter);
3451
6b08f3ae 3452 ret = ibmebus_request_irq(adapter->neq->attr.ist1,
38515e90 3453 ehea_interrupt_neq, IRQF_DISABLED,
7a291083
JBT
3454 "ehea_neq", adapter);
3455 if (ret) {
898eb71c 3456 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
7a291083
JBT
3457 goto out_kill_eq;
3458 }
3459
1eef4e04
JBT
3460 ret = ehea_create_device_sysfs(dev);
3461 if (ret)
3bf76b81 3462 goto out_free_irq;
1acf2318 3463
7a291083
JBT
3464 ret = ehea_setup_ports(adapter);
3465 if (ret) {
898eb71c 3466 dev_err(&dev->dev, "setup_ports failed\n");
1acf2318 3467 goto out_rem_dev_sysfs;
7a291083
JBT
3468 }
3469
3470 ret = 0;
3471 goto out;
3472
1acf2318
JBT
3473out_rem_dev_sysfs:
3474 ehea_remove_device_sysfs(dev);
3475
7a291083 3476out_free_irq:
6b08f3ae 3477 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
7a291083
JBT
3478
3479out_kill_eq:
3480 ehea_destroy_eq(adapter->neq);
3481
7a291083 3482out_free_ad:
51621fbd 3483 list_del(&adapter->list);
7a291083 3484 kfree(adapter);
21eee2dd 3485
7a291083 3486out:
21eee2dd 3487 ehea_update_firmware_handles();
52e21b1b 3488
7a291083
JBT
3489 return ret;
3490}
3491
6b08f3ae 3492static int __devexit ehea_remove(struct of_device *dev)
7a291083 3493{
c7ae011d 3494 struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
7a291083
JBT
3495 int i;
3496
1acf2318 3497 for (i = 0; i < EHEA_MAX_PORTS; i++)
7a291083
JBT
3498 if (adapter->port[i]) {
3499 ehea_shutdown_single_port(adapter->port[i]);
3500 adapter->port[i] = NULL;
3501 }
1acf2318
JBT
3502
3503 ehea_remove_device_sysfs(dev);
3504
3bf76b81 3505 flush_scheduled_work();
7a291083 3506
6b08f3ae 3507 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
d4150a27 3508 tasklet_kill(&adapter->neq_tasklet);
7a291083
JBT
3509
3510 ehea_destroy_eq(adapter->neq);
1211bb6d 3511 ehea_remove_adapter_mr(adapter);
44c82152 3512 list_del(&adapter->list);
7a291083 3513 kfree(adapter);
44c82152 3514
21eee2dd 3515 ehea_update_firmware_handles();
21eee2dd 3516
7a291083
JBT
3517 return 0;
3518}
3519
21eee2dd
TK
3520void ehea_crash_handler(void)
3521{
3522 int i;
3523
3524 if (ehea_fw_handles.arr)
3525 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3526 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3527 ehea_fw_handles.arr[i].fwh,
3528 FORCE_FREE);
3529
3530 if (ehea_bcmc_regs.arr)
3531 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3532 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3533 ehea_bcmc_regs.arr[i].port_id,
3534 ehea_bcmc_regs.arr[i].reg_type,
3535 ehea_bcmc_regs.arr[i].macaddr,
3536 0, H_DEREG_BCMC);
3537}
3538
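/*
 * Memory hotplug notifier: on online/offline events the driver's memory
 * section map (busmap) is updated and ehea_rereg_mrs() rebuilds the
 * memory regions. dlpar_mem_lock is only trylocked, so overlapping
 * notifications are rejected with NOTIFY_BAD.
 */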
48cfb14f
HH
3539static int ehea_mem_notifier(struct notifier_block *nb,
3540 unsigned long action, void *data)
3541{
a7c561f2 3542 int ret = NOTIFY_BAD;
d4f12daf 3543 struct memory_notify *arg = data;
a7c561f2
TK
3544
3545 if (!mutex_trylock(&dlpar_mem_lock)) {
3546 ehea_info("ehea_mem_notifier must not be called concurrently");
3547 goto out;
3548 }
3549
48cfb14f 3550 switch (action) {
d4f12daf
HH
3551 case MEM_CANCEL_OFFLINE:
3552 ehea_info("memory offlining canceled");
3553 /* Readd canceled memory block */
3554 case MEM_ONLINE:
3555 ehea_info("memory is going online");
3876732c 3556 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
d4f12daf 3557 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
a7c561f2 3558 goto out_unlock;
d4f12daf
HH
3559 ehea_rereg_mrs(NULL);
3560 break;
3561 case MEM_GOING_OFFLINE:
3562 ehea_info("memory is going offline");
3876732c 3563 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
d4f12daf 3564 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
a7c561f2 3565 goto out_unlock;
48cfb14f
HH
3566 ehea_rereg_mrs(NULL);
3567 break;
3568 default:
3569 break;
3570 }
52e21b1b
JBT
3571
3572 ehea_update_firmware_handles();
a7c561f2 3573 ret = NOTIFY_OK;
52e21b1b 3574
a7c561f2
TK
3575out_unlock:
3576 mutex_unlock(&dlpar_mem_lock);
3577out:
3578 return ret;
48cfb14f
HH
3579}
3580
3581static struct notifier_block ehea_mem_nb = {
3582 .notifier_call = ehea_mem_notifier,
3583};
3584
2a6f4e49
JBT
3585static int ehea_reboot_notifier(struct notifier_block *nb,
3586 unsigned long action, void *unused)
3587{
3588 if (action == SYS_RESTART) {
3589 ehea_info("Reboot: freeing all eHEA resources");
3590 ibmebus_unregister_driver(&ehea_driver);
3591 }
3592 return NOTIFY_DONE;
3593}
3594
3595static struct notifier_block ehea_reboot_nb = {
508d2b5d 3596 .notifier_call = ehea_reboot_notifier,
2a6f4e49
JBT
3597};
3598
7a291083
JBT
3599static int check_module_parm(void)
3600{
3601 int ret = 0;
3602
3603 if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3604 (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3605 ehea_info("Bad parameter: rq1_entries");
3606 ret = -EINVAL;
3607 }
3608 if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3609 (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3610 ehea_info("Bad parameter: rq2_entries");
3611 ret = -EINVAL;
3612 }
3613 if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3614 (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3615 ehea_info("Bad parameter: rq3_entries");
3616 ret = -EINVAL;
3617 }
3618 if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3619 (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3620 ehea_info("Bad parameter: sq_entries");
3621 ret = -EINVAL;
3622 }
3623
3624 return ret;
3625}
3626
4c3ca4da
JBT
3627static ssize_t ehea_show_capabilities(struct device_driver *drv,
3628 char *buf)
3629{
3630 return sprintf(buf, "%d", EHEA_CAPABILITIES);
3631}
3632
3633static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
3634 ehea_show_capabilities, NULL);
3635
7a291083
JBT
3636int __init ehea_module_init(void)
3637{
3638 int ret;
3639
3640 printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
3641 DRV_VERSION);
3642
44c82152
TK
3643
3644 INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
21eee2dd
TK
3645 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3646 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3647
9f71a568 3648 mutex_init(&ehea_fw_handles.lock);
5c2cec14 3649 spin_lock_init(&ehea_bcmc_regs.lock);
44c82152 3650
7a291083
JBT
3651 ret = check_module_parm();
3652 if (ret)
3653 goto out;
44c82152
TK
3654
3655 ret = ehea_create_busmap();
3656 if (ret)
3657 goto out;
3658
21eee2dd
TK
3659 ret = register_reboot_notifier(&ehea_reboot_nb);
3660 if (ret)
3661 ehea_info("failed registering reboot notifier");
3662
48cfb14f
HH
3663 ret = register_memory_notifier(&ehea_mem_nb);
3664 if (ret)
3665 ehea_info("failed registering memory remove notifier");
3666
21eee2dd
TK
3667 ret = crash_shutdown_register(&ehea_crash_handler);
3668 if (ret)
3669 ehea_info("failed registering crash handler");
2a6f4e49 3670
7a291083 3671 ret = ibmebus_register_driver(&ehea_driver);
4c3ca4da 3672 if (ret) {
7a291083 3673 ehea_error("failed registering eHEA device driver on ebus");
21eee2dd 3674 goto out2;
4c3ca4da
JBT
3675 }
3676
3677 ret = driver_create_file(&ehea_driver.driver,
3678 &driver_attr_capabilities);
3679 if (ret) {
3680 ehea_error("failed to register capabilities attribute, ret=%d",
3681 ret);
21eee2dd 3682 goto out3;
4c3ca4da 3683 }
7a291083 3684
21eee2dd
TK
3685 return ret;
3686
3687out3:
3688 ibmebus_unregister_driver(&ehea_driver);
3689out2:
48cfb14f 3690 unregister_memory_notifier(&ehea_mem_nb);
21eee2dd
TK
3691 unregister_reboot_notifier(&ehea_reboot_nb);
3692 crash_shutdown_unregister(&ehea_crash_handler);
7a291083
JBT
3693out:
3694 return ret;
3695}
3696
3697static void __exit ehea_module_exit(void)
3698{
21eee2dd
TK
3699 int ret;
3700
3bf76b81 3701 flush_scheduled_work();
4c3ca4da 3702 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
7a291083 3703 ibmebus_unregister_driver(&ehea_driver);
2a6f4e49 3704 unregister_reboot_notifier(&ehea_reboot_nb);
21eee2dd
TK
3705 ret = crash_shutdown_unregister(&ehea_crash_handler);
3706 if (ret)
3707 ehea_info("failed unregistering crash handler");
48cfb14f 3708 unregister_memory_notifier(&ehea_mem_nb);
21eee2dd
TK
3709 kfree(ehea_fw_handles.arr);
3710 kfree(ehea_bcmc_regs.arr);
44c82152 3711 ehea_destroy_busmap();
7a291083
JBT
3712}
3713
3714module_init(ehea_module_init);
3715module_exit(ehea_module_exit);