/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *	    Santiago Leon <santil@linux.vnet.ibm.com>
 *	    Brian King <brking@linux.vnet.ibm.com>
 *	    Robert Jennings <rcj@linux.vnet.ibm.com>
 *	    Anton Blanchard <anton@au.ibm.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;

static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.06"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");

static bool old_large_send __read_mostly;
module_param(old_large_send, bool, S_IRUGO);
MODULE_PARM_DESC(old_large_send,
	"Use old large send method on firmware that supports the new method");

struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))

struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
	{ "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
	{ "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
	{ "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
};

/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}

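/*
 * Ownership of rx queue entries is tracked with a toggle bit: the driver
 * keeps its own copy in adapter->rx_queue.toggle and flips it each time
 * the ring index wraps, so an entry whose toggle bit matches ours is one
 * produced since the last wrap (inferred from the accessors above and
 * their use in the poll path).
 */
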
/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}

/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}

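/*
 * Flush a buffer from the data cache with dcbfl ("data cache block
 * flush"), one cache line at a time.  Called before handing receive
 * buffers to the firmware when the rx_flush module parameter is set.
 */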
static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}

/* replenish the buffers for a pool.  note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
						adapter->netdev->mtu +
						IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
				 pool->dma_addr[index], pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}

/*
 * The final 8 bytes of the buffer list is a counter of frames dropped
 * because there was not a buffer in the buffer list capable of holding
 * the frame.
 */
static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
{
	__be64 *p = adapter->buffer_list_addr + 4096 - 8;

	adapter->rx_no_buffer = be64_to_cpup(p);
}

/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	ibmveth_update_rx_no_buffer(adapter);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					    u64 correlator)
{
	unsigned int pool  = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	BUG_ON(skb == NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue */
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	int ret = 1;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		goto out;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
		ret = 0;
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}

out:
	return ret;
}

static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

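/*
 * Undo the allocations made in ibmveth_open(): unmap and free the buffer
 * list, filter list, rx queue, any active buffer pools and the bounce
 * buffer.  Also used on the open error paths.
 */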
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;
	struct device *dev = &adapter->vdev->dev;

	if (adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if (adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if (adapter->rx_queue.queue_addr != NULL) {
		dma_free_coherent(dev, adapter->rx_queue.queue_len,
				  adapter->rx_queue.queue_addr,
				  adapter->rx_queue.queue_dma);
		adapter->rx_queue.queue_addr = NULL;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	if (adapter->bounce_buffer != NULL) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->bounce_buffer_dma,
					adapter->netdev->mtu + IBMVETH_BUFF_OH,
					DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
        union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}

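/*
 * Pack the six bytes of an Ethernet address into the low 48 bits of a
 * u64, most significant byte first, which is the form the hypervisor
 * calls expect.
 */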
static u64 ibmveth_encode_mac_addr(u8 *mac)
{
	int i;
	u64 encoded = 0;

	for (i = 0; i < ETH_ALEN; i++)
		encoded = (encoded << 8) | mac[i];

	return encoded;
}

static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	dev = &adapter->vdev->dev;

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr =
		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
	if (!adapter->rx_queue.queue_addr) {
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
		rc = -ENONET;
		goto err_out;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto err_out;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto err_out;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		rc = -ENOMEM;
		goto err_out_free_irq;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

err_out_free_irq:
	free_irq(netdev->irq, netdev);
err_out:
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return rc;
}

static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long lpar_rc;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	ibmveth_update_rx_no_buffer(adapter);

	ibmveth_cleanup(adapter);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}

static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
				SUPPORTED_FIBRE);
	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
				ADVERTISED_FIBRE);
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
	strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
}

static netdev_features_t ibmveth_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since the ibmveth firmware interface does not have the
	 * concept of separate tx/rx checksum offload enable, if rx
	 * checksum is disabled we also have to disable tx checksum
	 * offload. Once we disable rx checksum offload, we are no
	 * longer allowed to send tx buffers that are not properly
	 * checksummed.
	 */

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_CSUM_MASK;

	return features;
}

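/*
 * Checksum offload is negotiated with the firmware through the illan
 * attributes interface: try to set (or clear) the IPv4 and IPv6 TCP
 * checksum bits, and back the change out if the hypervisor rejects it.
 */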
static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret4, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret4 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret4);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IP_CSUM;

		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret6);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr6, clr_attr6, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IPV6_CSUM;

		} else
			adapter->fw_ipv6_csum_support = data;

		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
				" %d rc=%ld ret_attr=%lx\n", data, ret,
				ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_tso(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	long ret1, ret2;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;

	if (data)
		set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
	else
		clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;

	ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
	    !old_large_send) {
		ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret2 != H_SUCCESS) {
			netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
				   data, ret2);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
			rc1 = -EIO;

		} else {
			adapter->fw_large_send_support = data;
			adapter->large_send = data;
		}
	} else {
		/* Older firmware version of large send offload does not
		 * support tcp6/ipv6
		 */
		if (data == 1) {
			dev->features &= ~NETIF_F_TSO6;
			netdev_info(dev, "TSO feature requires all partitions to have updated driver");
		}
		adapter->large_send = data;
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rx_csum = !!(features & NETIF_F_RXCSUM);
	int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
	int rc1 = 0, rc2 = 0;

	if (rx_csum != adapter->rx_csum) {
		rc1 = ibmveth_set_csum_offload(dev, rx_csum);
		if (rc1 && !adapter->rx_csum)
			dev->features =
				features & ~(NETIF_F_CSUM_MASK |
					     NETIF_F_RXCSUM);
	}

	if (large_send != adapter->large_send) {
		rc2 = ibmveth_set_tso(dev, large_send);
		if (rc2 && !adapter->large_send)
			dev->features =
				features & ~(NETIF_F_TSO | NETIF_F_TSO6);
	}

	return rc1 ? rc1 : rc2;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
	.get_link_ksettings	= netdev_get_link_ksettings,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))

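/*
 * Hand one frame (up to six descriptors) to the hypervisor, retrying
 * while it reports H_BUSY.  Returns nonzero only on a failure other
 * than H_DROPPED.
 */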
static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs, unsigned long mss)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					 descs[0].desc, descs[1].desc,
					 descs[2].desc, descs[3].desc,
					 descs[4].desc, descs[5].desc,
					 correlator, &correlator, mss,
					 adapter->fw_large_send_support);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}

static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;
	dma_addr_t dma_addr;
	unsigned long mss = 0;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb_is_gso(skb) && adapter->fw_large_send_support)
		desc_flags |= IBMVETH_BUF_LRG_SND;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the rx threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs, 0)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
	descs[0].fields.address = dma_addr;

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
					    skb_frag_size(frag), DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
		descs[i+1].fields.address = dma_addr;
	}

	if (skb_is_gso(skb)) {
		if (adapter->fw_large_send_support) {
			mss = (unsigned long)skb_shinfo(skb)->gso_size;
			adapter->tx_large_packets++;
		} else if (!skb_is_gso_v6(skb)) {
			/* Put -1 in the IP checksum to tell phyp it
			 * is a largesend packet. Put the mss in
			 * the TCP checksum.
			 */
			ip_hdr(skb)->check = 0xffff;
			tcp_hdr(skb)->check =
				cpu_to_be16(skb_shinfo(skb)->gso_size);
			adapter->tx_large_packets++;
		}
	}

	if (ibmveth_send(adapter, descs, mss)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	/* descs[0] was mapped with dma_map_single(), so unmap it the same
	 * way; only the frag descriptors are page mappings.
	 */
	last = i+1;
	for (i = 1; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	if (skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}
	force_bounce = 1;
	goto retry_bounce;
}

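/*
 * Rebuild the GSO metadata (gso_type, gso_size, gso_segs) of a received
 * large packet so the stack can resegment it.  The mss comes either from
 * the rx descriptor (large packet bit set) or from the spot where the
 * firmware stashed it in the TCP checksum field.
 */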
static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
{
	struct tcphdr *tcph;
	int offset = 0;
	int hdr_len;

	/* only TCP packets will be aggregated */
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_TCP) {
			offset = iph->ihl * 4;
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		} else {
			return;
		}
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data;

		if (iph6->nexthdr == IPPROTO_TCP) {
			offset = sizeof(struct ipv6hdr);
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		} else {
			return;
		}
	} else {
		return;
	}
	/* if mss is not set through Large Packet bit/mss in rx buffer,
	 * expect that the mss will be written to the tcp header checksum.
	 */
	tcph = (struct tcphdr *)(skb->data + offset);
	if (lrg_pkt) {
		skb_shinfo(skb)->gso_size = mss;
	} else if (offset) {
		skb_shinfo(skb)->gso_size = ntohs(tcph->check);
		tcph->check = 0;
	}

	if (skb_shinfo(skb)->gso_size) {
		hdr_len = offset + tcph->doff * 4;
		skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdr_len,
					     skb_shinfo(skb)->gso_size);
	}
}

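/*
 * NAPI poll: consume up to @budget frames from the rx queue, replenish
 * the buffer pools, then re-enable the interrupt and check once more to
 * close the race with newly arriving frames.
 */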
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;
	struct iphdr *iph;
	u16 mss = 0;

restart_poll:
	while (frames_processed < budget) {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);
			int lrg_pkt = ibmveth_rxq_large_packet(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			/* if the large packet bit is set in the rx queue
			 * descriptor, the mss will be written by PHYP eight
			 * bytes from the start of the rx buffer, which is
			 * skb->data at this stage
			 */
			if (lrg_pkt) {
				__be64 *rxmss = (__be64 *)(skb->data + 8);

				mss = (u16)be64_to_cpu(*rxmss);
			}

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
					iph = (struct iphdr *)skb->data;

					/* If the IP checksum is not offloaded
					 * and if the packet is large send, the
					 * checksum must be rebuilt.
					 */
					if (iph->check == 0xffff) {
						iph->check = 0;
						iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
					}
				}
			}

			if (length > netdev->mtu + ETH_HLEN) {
				ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
				adapter->rx_large_packets++;
			}

			napi_gro_receive(napi, skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	}

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		napi_complete_done(napi, frames_processed);

		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}

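/* device interrupt: disable further interrupts and hand off to NAPI */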
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		BUG_ON(lpar_rc != H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "entering promisc mode\n", lpar_rc);
		}
	} else {
		struct netdev_hw_addr *ha;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "attempting to clear filter table\n",
				   lpar_rc);
		}
		/* add the addresses to the filter table */
		netdev_for_each_mc_addr(ha, netdev) {
			/* add the multicast address to the filter table */
			u64 mcast_addr;
			mcast_addr = ibmveth_encode_mac_addr(ha->addr);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if (lpar_rc != H_SUCCESS) {
				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
					   "when adding an entry to the filter "
					   "table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "enabling filtering\n", lpar_rc);
		}
	}
}

static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct vio_dev *viodev = adapter->vdev;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i, rc;
	int need_restart = 0;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IBMVETH_NUM_BUFF_POOLS)
		return -EINVAL;

	/* Deactivate all the buffer pools so that the next loop can activate
	   only the buffer pools necessary to hold the new MTU */
	if (netif_running(adapter->netdev)) {
		need_restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(adapter->netdev);
		adapter->pool_config = 0;
	}

	/* Look for an active buffer pool that can hold the new MTU */
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
						(viodev));
			if (need_restart) {
				return ibmveth_open(adapter->netdev);
			}
			return 0;
		}
	}

	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
		return rc;

	return -EINVAL;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(netdev_priv(dev));
	ibmveth_interrupt(dev->irq, dev);
}
#endif

/**
 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmveth_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret;
	int i;
	int rxqentries = 1;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (netdev == NULL)
		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
	ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		/* add the size of the active receive buffers */
		if (adapter->rx_buff_pool[i].active)
			ret +=
			    adapter->rx_buff_pool[i].size *
			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
					     buff_size, tbl);
		rxqentries += adapter->rx_buff_pool[i].size;
	}
	/* add the size of the receive queue entries */
	ret += IOMMU_PAGE_ALIGN(
	    rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);

	return ret;
}

static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct sockaddr *addr = p;
	u64 mac_address;
	int rc;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	mac_address = ibmveth_encode_mac_addr(addr->sa_data);
	rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
	if (rc) {
		netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
		return rc;
	}

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}

static const struct net_device_ops ibmveth_netdev_ops = {
	.ndo_open		= ibmveth_open,
	.ndo_stop		= ibmveth_close,
	.ndo_start_xmit		= ibmveth_start_xmit,
	.ndo_set_rx_mode	= ibmveth_set_multicast_list,
	.ndo_do_ioctl		= ibmveth_ioctl,
	.ndo_change_mtu		= ibmveth_change_mtu,
	.ndo_fix_features	= ibmveth_fix_features,
	.ndo_set_features	= ibmveth_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ibmveth_set_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmveth_poll_controller,
#endif
};

static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i, mac_len;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;
	long ret;
	unsigned long ret_attr;

	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
							&mac_len);
	if (!mac_addr_p) {
		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
		return -EINVAL;
	}
	/* Workaround for old/broken pHyp */
	if (mac_len == 8)
		mac_addr_p += 2;
	else if (mac_len != 6) {
		dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
			mac_len);
		return -EINVAL;
	}

	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if (!mcastFilterSize_p) {
		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
			"attribute\n");
		return -EINVAL;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmveth_netdev_ops;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);
	netdev->hw_features = NETIF_F_SG;
	if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				       NETIF_F_RXCSUM;
	}

	netdev->features |= netdev->hw_features;

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	/* If running older firmware, TSO should not be enabled by default */
	if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
	    !old_large_send) {
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		netdev->features |= netdev->hw_features;
	} else {
		netdev->hw_features |= NETIF_F_TSO;
	}

	netdev->min_mtu = IBMVETH_MIN_MTU;
	netdev->max_mtu = ETH_MAX_MTU;

	memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);

	if (firmware_has_feature(FW_FEATURE_CMO))
		memcpy(pool_count, pool_count_cmo, sizeof(pool_count));

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		int error;

		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		error = kobject_init_and_add(kobj, &ktype_veth_pool,
					     &dev->dev.kobj, "pool%d", i);
		if (!error)
			kobject_uevent(kobj, KOBJ_ADD);
	}

	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	netdev_dbg(netdev, "registering netdev...\n");

	ibmveth_set_features(netdev, netdev->features);

	rc = register_netdev(netdev);

	if (rc) {
		netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	netdev_dbg(netdev, "registered\n");

	return 0;
}

static int ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		kobject_put(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

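/*
 * sysfs interface to the receive buffer pools.  Each pool appears as a
 * pool%d kobject under the vio device, with "active", "num" and "size"
 * attributes, e.g. (device unit address hypothetical):
 *
 *   echo 1 > /sys/devices/vio/30000002/pool3/active
 */
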
static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}

static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev = dev_get_drvdata(
	    container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					netdev_err(netdev,
						   "unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->active = 1;
			}
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IBMVETH_NUM_BUFF_POOLS) {
				netdev_err(netdev, "no active pool >= MTU\n");
				return -EPERM;
			}

			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				pool->active = 0;
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->size = value;
			}
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->buff_size = value;
			}
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}

#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static const struct sysfs_ops veth_pool_ops = {
	.show   = veth_pool_show,
	.store  = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release        = NULL,
	.sysfs_ops      = &veth_pool_ops,
	.default_attrs  = veth_pool_attrs,
};

static int ibmveth_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	ibmveth_interrupt(netdev->irq, netdev);
	return 0;
}

static struct vio_device_id ibmveth_device_table[] = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct dev_pm_ops ibmveth_pm_ops = {
	.resume = ibmveth_resume
};

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.get_desired_dma = ibmveth_get_desired_dma,
	.name		= ibmveth_driver_name,
	.pm		= &ibmveth_pm_ops,
};

static int __init ibmveth_module_init(void)
{
	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
	       ibmveth_driver_string, ibmveth_driver_version);

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);