// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *          Santiago Leon <santil@linux.vnet.ibm.com>
 *          Brian King <brking@linux.vnet.ibm.com>
 *          Robert Jennings <rcj@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;

static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.06"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");

static bool old_large_send __read_mostly;
module_param(old_large_send, bool, 0444);
MODULE_PARM_DESC(old_large_send,
	"Use old large send method on firmware that supports the new method");

struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))

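/*
 * A hedged usage sketch (not from the original source): reading one counter
 * through the macros above,
 *
 *	u64 v = IBMVETH_GET_STAT(a, IBMVETH_STAT_OFF(replenish_no_mem));
 *
 * resolves to a->replenish_no_mem, fetched as a u64 via the byte offset
 * stored in the ibmveth_stats[] table below.
 */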
static struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
	{ "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
	{ "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
	{ "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
};

/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}

static unsigned int ibmveth_real_max_tx_queues(void)
{
	unsigned int n_cpu = num_online_cpus();

	return min(n_cpu, IBMVETH_MAX_QUEUES);
}

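/*
 * Note: IBMVETH_MAX_QUEUES netdev_queue structures are allocated up front;
 * the helper above only caps how many may be in use at once, and
 * ibmveth_set_channels() below resizes within that limit.
 */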
/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}

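/*
 * A pool's buffers are tracked by three parallel arrays, allocated below:
 * free_map holds the slot indices that may be handed to firmware, while
 * dma_addr and skbuff record the DMA mapping and skb for each slot. The
 * 7/8 threshold set above is the low-water mark at which
 * ibmveth_replenish_task() refills the pool.
 */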
/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc_array(pool->size, sizeof(u16), GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}

static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbf %0,%1,1" :: "b" (addr), "r" (offset));
}

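/*
 * Replenish overview: descriptors are posted to the hypervisor in batches
 * of adapter->rx_buffers_per_hcall (at most IBMVETH_MAX_RX_PER_HCALL).
 * Every buffer carries a correlator of the form (pool index << 32 | slot
 * index), also written into the buffer data itself, so a completion can be
 * traced back to its pool slot. Should firmware reject the multi-buffer
 * hcall with H_FUNCTION, the batch size drops to 1 and later calls use the
 * single-buffer hcall.
 */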
/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	union ibmveth_buf_desc descs[IBMVETH_MAX_RX_PER_HCALL] = {0};
	u32 remaining = pool->size - atomic_read(&pool->available);
	u64 correlators[IBMVETH_MAX_RX_PER_HCALL] = {0};
	unsigned long lpar_rc;
	u32 buffers_added = 0;
	u32 i, filled, batch;
	struct vio_dev *vdev;
	dma_addr_t dma_addr;
	struct device *dev;
	u32 index;

	vdev = adapter->vdev;
	dev = &vdev->dev;

	mb();

	batch = adapter->rx_buffers_per_hcall;

	while (remaining > 0) {
		unsigned int free_index = pool->consumer_index;

		/* Fill a batch of descriptors */
		for (filled = 0; filled < min(remaining, batch); filled++) {
			index = pool->free_map[free_index];
			if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
				adapter->replenish_add_buff_failure++;
				netdev_info(adapter->netdev,
					    "Invalid map index %u, reset\n",
					    index);
				schedule_work(&adapter->work);
				break;
			}

			if (!pool->skbuff[index]) {
				struct sk_buff *skb = NULL;

				skb = netdev_alloc_skb(adapter->netdev,
						       pool->buff_size);
				if (!skb) {
					adapter->replenish_no_mem++;
					adapter->replenish_add_buff_failure++;
					break;
				}

				dma_addr = dma_map_single(dev, skb->data,
							  pool->buff_size,
							  DMA_FROM_DEVICE);
				if (dma_mapping_error(dev, dma_addr)) {
					dev_kfree_skb_any(skb);
					adapter->replenish_add_buff_failure++;
					break;
				}

				pool->dma_addr[index] = dma_addr;
				pool->skbuff[index] = skb;
			} else {
				/* re-use case */
				dma_addr = pool->dma_addr[index];
			}

			if (rx_flush) {
				unsigned int len;

				len = adapter->netdev->mtu + IBMVETH_BUFF_OH;
				len = min(pool->buff_size, len);
				ibmveth_flush_buffer(pool->skbuff[index]->data,
						     len);
			}

			descs[filled].fields.flags_len = IBMVETH_BUF_VALID |
							 pool->buff_size;
			descs[filled].fields.address = dma_addr;

			correlators[filled] = ((u64)pool->index << 32) | index;
			*(u64 *)pool->skbuff[index]->data = correlators[filled];

			free_index++;
			if (free_index >= pool->size)
				free_index = 0;
		}

		if (!filled)
			break;

		/* single buffer case */
		if (filled == 1)
			lpar_rc = h_add_logical_lan_buffer(vdev->unit_address,
							   descs[0].desc);
		else
			/* Multi-buffer hcall */
			lpar_rc = h_add_logical_lan_buffers(vdev->unit_address,
							    descs[0].desc,
							    descs[1].desc,
							    descs[2].desc,
							    descs[3].desc,
							    descs[4].desc,
							    descs[5].desc,
							    descs[6].desc,
							    descs[7].desc);
		if (lpar_rc != H_SUCCESS) {
			dev_warn_ratelimited(dev,
					     "RX h_add_logical_lan failed: filled=%u, rc=%lu, batch=%u\n",
					     filled, lpar_rc, batch);
			goto hcall_failure;
		}

		/* Only update pool state after hcall succeeds */
		for (i = 0; i < filled; i++) {
			free_index = pool->consumer_index;
			pool->free_map[free_index] = IBM_VETH_INVALID_MAP;

			pool->consumer_index++;
			if (pool->consumer_index >= pool->size)
				pool->consumer_index = 0;
		}

		buffers_added += filled;
		adapter->replenish_add_buff_success += filled;
		remaining -= filled;

		memset(&descs, 0, sizeof(descs));
		memset(&correlators, 0, sizeof(correlators));
		continue;

hcall_failure:
		for (i = 0; i < filled; i++) {
			index = correlators[i] & 0xffffffffUL;
			dma_addr = pool->dma_addr[index];

			if (pool->skbuff[index]) {
				if (dma_addr &&
				    !dma_mapping_error(dev, dma_addr))
					dma_unmap_single(dev, dma_addr,
							 pool->buff_size,
							 DMA_FROM_DEVICE);

				dev_kfree_skb_any(pool->skbuff[index]);
				pool->skbuff[index] = NULL;
			}
		}
		adapter->replenish_add_buff_failure += filled;

		/*
		 * If the multi rx buffers hcall is no longer supported by
		 * firmware, e.g. in the case of Live Partition Migration
		 */
		if (batch > 1 && lpar_rc == H_FUNCTION) {
			/*
			 * Instead of retrying each buffer individually here,
			 * just set the max rx buffers per hcall to 1; the
			 * buffers will be replenished the next time
			 * ibmveth_replenish_buffer_pool() is called, using
			 * the single-buffer hcall.
			 */
			netdev_info(adapter->netdev,
				    "RX Multi buffers not supported by FW, rc=%lu\n",
				    lpar_rc);
			adapter->rx_buffers_per_hcall = 1;
			netdev_info(adapter->netdev,
				    "Next rx replenish will fall back to single-buffer hcall\n");
		}
		break;
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
}

/*
 * The final 8 bytes of the buffer list is a counter of frames dropped
 * because there was not a buffer in the buffer list capable of holding
 * the frame.
 */
static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
{
	__be64 *p = adapter->buffer_list_addr + 4096 - 8;

	adapter->rx_no_buffer = be64_to_cpup(p);
}

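/*
 * Pools are walked from the highest-numbered pool down to pool 0, and any
 * active pool whose available count has fallen below the 7/8 threshold set
 * in ibmveth_init_buffer_pool() is refilled.
 */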
/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	ibmveth_update_rx_no_buffer(adapter);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/**
 * ibmveth_remove_buffer_from_pool - remove a buffer from a pool
 * @adapter: adapter instance
 * @correlator: identifies pool and index
 * @reuse: whether to reuse buffer
 *
 * Return:
 * * %0       - success
 * * %-EINVAL - correlator maps to a pool or index out of range
 * * %-EFAULT - pool and index map to null skb
 */
static int ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					   u64 correlator, bool reuse)
{
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
	    WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
		schedule_work(&adapter->work);
		return -EINVAL;
	}

	skb = adapter->rx_buff_pool[pool].skbuff[index];
	if (WARN_ON(!skb)) {
		schedule_work(&adapter->work);
		return -EFAULT;
	}

	/* if we are going to reuse the buffer then keep the pointers around
	 * but mark index as available. replenish will see the skb pointer and
	 * assume it is to be recycled.
	 */
	if (!reuse) {
		/* remove the skb pointer to mark free. actual freeing is done
		 * by upper level networking after gro_receive
		 */
		adapter->rx_buff_pool[pool].skbuff[index] = NULL;

		dma_unmap_single(&adapter->vdev->dev,
				 adapter->rx_buff_pool[pool].dma_addr[index],
				 adapter->rx_buff_pool[pool].buff_size,
				 DMA_FROM_DEVICE);
	}

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));

	return 0;
}

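/*
 * free_map acts as a ring: ibmveth_remove_buffer_from_pool() returns a slot
 * index at producer_index, while replenish consumes entries at
 * consumer_index, marking each consumed entry IBM_VETH_INVALID_MAP until
 * the buffer is handed back by firmware.
 */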
/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
	    WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
		schedule_work(&adapter->work);
		return NULL;
	}

	return adapter->rx_buff_pool[pool].skbuff[index];
}

/**
 * ibmveth_rxq_harvest_buffer - Harvest buffer from pool
 *
 * @adapter: pointer to adapter
 * @reuse: whether to reuse buffer
 *
 * Context: called from ibmveth_poll
 *
 * Return:
 * * %0    - success
 * * other - non-zero return from ibmveth_remove_buffer_from_pool
 */
static int ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
				      bool reuse)
{
	u64 cor;
	int rc;

	cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	rc = ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
	if (unlikely(rc))
		return rc;

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}

	return 0;
}

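/*
 * The rx queue is a ring shared with firmware. Each time the index wraps,
 * the driver flips its toggle bit; ibmveth_rxq_pending_buffer() counts an
 * entry as new only while the toggle encoded in the entry's flags matches
 * the driver's, which separates fresh entries from stale ones left over
 * from the previous trip around the ring.
 */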
static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
{
	dma_unmap_single(&adapter->vdev->dev, adapter->tx_ltb_dma[idx],
			 adapter->tx_ltb_size, DMA_TO_DEVICE);
	kfree(adapter->tx_ltb_ptr[idx]);
	adapter->tx_ltb_ptr[idx] = NULL;
}

static int ibmveth_allocate_tx_ltb(struct ibmveth_adapter *adapter, int idx)
{
	adapter->tx_ltb_ptr[idx] = kzalloc(adapter->tx_ltb_size,
					   GFP_KERNEL);
	if (!adapter->tx_ltb_ptr[idx]) {
		netdev_err(adapter->netdev,
			   "unable to allocate tx long term buffer\n");
		return -ENOMEM;
	}
	adapter->tx_ltb_dma[idx] = dma_map_single(&adapter->vdev->dev,
						  adapter->tx_ltb_ptr[idx],
						  adapter->tx_ltb_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, adapter->tx_ltb_dma[idx])) {
		netdev_err(adapter->netdev,
			   "unable to DMA map tx long term buffer\n");
		kfree(adapter->tx_ltb_ptr[idx]);
		adapter->tx_ltb_ptr[idx] = NULL;
		return -ENOMEM;
	}

	return 0;
}

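/*
 * Each tx queue owns one long term buffer: a kzalloc'd bounce buffer that
 * stays DMA-mapped for the lifetime of the interface. ibmveth_start_xmit()
 * copies the skb head and frags into it and posts a single descriptor,
 * avoiding a map/unmap cycle on every packet.
 */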
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
	union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}

static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	rc = -ENOMEM;
	adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	if (!adapter->buffer_list_addr) {
		netdev_err(netdev, "unable to allocate list pages\n");
		goto out;
	}

	adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	if (!adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter pages\n");
		goto out_free_buffer_list;
	}

	dev = &adapter->vdev->dev;

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr =
		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
	if (!adapter->rx_queue.queue_addr)
		goto out_free_filter_list;

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->buffer_list_dma)) {
		netdev_err(netdev, "unable to map buffer list pages\n");
		goto out_free_queue_mem;
	}

	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->filter_list_dma)) {
		netdev_err(netdev, "unable to map filter list pages\n");
		goto out_unmap_buffer_list;
	}

	for (i = 0; i < netdev->real_num_tx_queues; i++) {
		if (ibmveth_allocate_tx_ltb(adapter, i))
			goto out_free_tx_ltb;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	mac_address = ether_addr_to_u64(netdev->dev_addr);

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
			   adapter->buffer_list_dma,
			   adapter->filter_list_dma,
			   rxq_desc.desc,
			   mac_address);
		rc = -ENONET;
		goto out_unmap_filter_list;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto out_free_buffer_pools;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto out_free_buffer_pools;
	}

	rc = -ENOMEM;

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_tx_start_all_queues(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

out_free_buffer_pools:
	while (--i >= 0) {
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);
	}
out_unmap_filter_list:
	dma_unmap_single(dev, adapter->filter_list_dma, 4096,
			 DMA_BIDIRECTIONAL);

out_free_tx_ltb:
	while (--i >= 0) {
		ibmveth_free_tx_ltb(adapter, i);
	}

out_unmap_buffer_list:
	dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
			 DMA_BIDIRECTIONAL);
out_free_queue_mem:
	dma_free_coherent(dev, adapter->rx_queue.queue_len,
			  adapter->rx_queue.queue_addr,
			  adapter->rx_queue.queue_dma);
out_free_filter_list:
	free_page((unsigned long)adapter->filter_list_addr);
out_free_buffer_list:
	free_page((unsigned long)adapter->buffer_list_addr);
out:
	napi_disable(&adapter->napi);
	return rc;
}

static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	long lpar_rc;
	int i;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	netif_tx_stop_all_queues(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	ibmveth_update_rx_no_buffer(adapter);

	dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)adapter->buffer_list_addr);

	dma_unmap_single(dev, adapter->filter_list_dma, 4096,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)adapter->filter_list_addr);

	dma_free_coherent(dev, adapter->rx_queue.queue_len,
			  adapter->rx_queue.queue_addr,
			  adapter->rx_queue.queue_dma);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	for (i = 0; i < netdev->real_num_tx_queues; i++)
		ibmveth_free_tx_ltb(adapter, i);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}

/**
 * ibmveth_reset - Handle scheduled reset work
 *
 * @w: pointer to work_struct embedded in adapter structure
 *
 * Context: This routine acquires rtnl_mutex and disables its NAPI through
 *          ibmveth_close. It can't be called directly in a context that has
 *          already acquired rtnl_mutex or disabled its NAPI, or directly from
 *          a poll routine.
 *
 * Return: void
 */
static void ibmveth_reset(struct work_struct *w)
{
	struct ibmveth_adapter *adapter = container_of(w, struct ibmveth_adapter, work);
	struct net_device *netdev = adapter->netdev;

	netdev_dbg(netdev, "reset starting\n");

	rtnl_lock();

	dev_close(adapter->netdev);
	dev_open(adapter->netdev, NULL);

	rtnl_unlock();

	netdev_dbg(netdev, "reset complete\n");
}

static int ibmveth_set_link_ksettings(struct net_device *dev,
				      const struct ethtool_link_ksettings *cmd)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	return ethtool_virtdev_set_link_ksettings(dev, cmd,
						  &adapter->speed,
						  &adapter->duplex);
}

static int ibmveth_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	cmd->base.speed = adapter->speed;
	cmd->base.duplex = adapter->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static void ibmveth_init_link_settings(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	adapter->speed = SPEED_1000;
	adapter->duplex = DUPLEX_FULL;
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strscpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
	strscpy(info->version, ibmveth_driver_version, sizeof(info->version));
}

static netdev_features_t ibmveth_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since the ibmveth firmware interface does not have the
	 * concept of separate tx/rx checksum offload enable, if rx
	 * checksum is disabled we also have to disable tx checksum
	 * offload. Once we disable rx checksum offload, we are no
	 * longer allowed to send tx buffers that are not properly
	 * checksummed.
	 */

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_CSUM_MASK;

	return features;
}

static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret4, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		ibmveth_close(dev);
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret4 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
				   "offload settings. %d rc=%ld\n",
				   data, ret4);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IP_CSUM;

		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
				   "offload settings. %d rc=%ld\n",
				   data, ret6);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr6, clr_attr6, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IPV6_CSUM;

		} else
			adapter->fw_ipv6_csum_support = data;

		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
			   " %d rc=%ld ret_attr=%lx\n", data, ret,
			   ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

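/*
 * Convention for the hcall used above and in ibmveth_set_tso() below:
 * h_illan_attributes() takes a mask of attribute bits to clear and a mask
 * to set; on failure the driver invokes it again with the two masks
 * swapped to roll the attribute back.
 */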
static int ibmveth_set_tso(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	long ret1, ret2;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		ibmveth_close(dev);
	}

	set_attr = 0;
	clr_attr = 0;

	if (data)
		set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
	else
		clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;

	ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
	    !old_large_send) {
		ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret2 != H_SUCCESS) {
			netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
				   data, ret2);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
			rc1 = -EIO;

		} else {
			adapter->fw_large_send_support = data;
			adapter->large_send = data;
		}
	} else {
		/* Older firmware version of large send offload does not
		 * support tcp6/ipv6
		 */
		if (data == 1) {
			dev->features &= ~NETIF_F_TSO6;
			netdev_info(dev, "TSO feature requires all partitions to have updated driver");
		}
		adapter->large_send = data;
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rx_csum = !!(features & NETIF_F_RXCSUM);
	int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
	int rc1 = 0, rc2 = 0;

	if (rx_csum != adapter->rx_csum) {
		rc1 = ibmveth_set_csum_offload(dev, rx_csum);
		if (rc1 && !adapter->rx_csum)
			dev->features =
				features & ~(NETIF_F_CSUM_MASK |
					     NETIF_F_RXCSUM);
	}

	if (large_send != adapter->large_send) {
		rc2 = ibmveth_set_tso(dev, large_send);
		if (rc2 && !adapter->large_send)
			dev->features =
				features & ~(NETIF_F_TSO | NETIF_F_TSO6);
	}

	return rc1 ? rc1 : rc2;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static void ibmveth_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channels)
{
	channels->max_tx = ibmveth_real_max_tx_queues();
	channels->tx_count = netdev->real_num_tx_queues;

	channels->max_rx = netdev->real_num_rx_queues;
	channels->rx_count = netdev->real_num_rx_queues;
}

static int ibmveth_set_channels(struct net_device *netdev,
				struct ethtool_channels *channels)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int old = netdev->real_num_tx_queues,
		     goal = channels->tx_count;
	int rc, i;

	/* If ndo_open has not been called yet then don't allocate, just set
	 * desired netdev_queue's and return
	 */
	if (!(netdev->flags & IFF_UP))
		return netif_set_real_num_tx_queues(netdev, goal);

	/* We have IBMVETH_MAX_QUEUES netdev_queue's allocated
	 * but we may need to alloc/free the ltb's.
	 */
	netif_tx_stop_all_queues(netdev);

	/* Allocate any queue that we need */
	for (i = old; i < goal; i++) {
		if (adapter->tx_ltb_ptr[i])
			continue;

		rc = ibmveth_allocate_tx_ltb(adapter, i);
		if (!rc)
			continue;

		/* if something goes wrong, free everything we just allocated */
		netdev_err(netdev, "Failed to allocate more tx queues, returning to %d queues\n",
			   old);
		goal = old;
		old = i;
		break;
	}
	rc = netif_set_real_num_tx_queues(netdev, goal);
	if (rc) {
		netdev_err(netdev, "Failed to set real tx queues, returning to %d queues\n",
			   old);
		goal = old;
		old = i;
	}
	/* Free any that are no longer needed */
	for (i = old; i > goal; i--) {
		if (adapter->tx_ltb_ptr[i - 1])
			ibmveth_free_tx_ltb(adapter, i - 1);
	}

	netif_tx_wake_all_queues(netdev);

	return rc;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
	.get_link_ksettings	= ibmveth_get_link_ksettings,
	.set_link_ksettings	= ibmveth_set_link_ksettings,
	.get_channels		= ibmveth_get_channels,
	.set_channels		= ibmveth_set_channels
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

static int ibmveth_send(struct ibmveth_adapter *adapter,
			unsigned long desc, unsigned long mss)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address, desc,
					 correlator, &correlator, mss,
					 adapter->fw_large_send_support);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}

static int ibmveth_is_packet_unsupported(struct sk_buff *skb,
					 struct net_device *netdev)
{
	struct ethhdr *ether_header;
	int ret = 0;

	ether_header = eth_hdr(skb);

	if (ether_addr_equal(ether_header->h_dest, netdev->dev_addr)) {
		netdev_dbg(netdev, "veth doesn't support loopback packets, dropping packet.\n");
		netdev->stats.tx_dropped++;
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags, total_bytes;
	union ibmveth_buf_desc desc;
	int i, queue_num = skb_get_queue_mapping(skb);
	unsigned long mss = 0;

	if (ibmveth_is_packet_unsupported(skb, netdev))
		goto out;
	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;

		if (skb_is_gso(skb) && adapter->fw_large_send_support)
			desc_flags |= IBMVETH_BUF_LRG_SND;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		if (adapter->fw_large_send_support) {
			mss = (unsigned long)skb_shinfo(skb)->gso_size;
			adapter->tx_large_packets++;
		} else if (!skb_is_gso_v6(skb)) {
			/* Put -1 in the IP checksum to tell phyp it
			 * is a largesend packet. Put the mss in
			 * the TCP checksum.
			 */
			ip_hdr(skb)->check = 0xffff;
			tcp_hdr(skb)->check =
				cpu_to_be16(skb_shinfo(skb)->gso_size);
			adapter->tx_large_packets++;
		}
	}

	/* Copy header into mapped buffer */
	if (unlikely(skb->len > adapter->tx_ltb_size)) {
		netdev_err(adapter->netdev, "tx: packet size (%u) exceeds ltb (%u)\n",
			   skb->len, adapter->tx_ltb_size);
		netdev->stats.tx_dropped++;
		goto out;
	}
	memcpy(adapter->tx_ltb_ptr[queue_num], skb->data, skb_headlen(skb));
	total_bytes = skb_headlen(skb);
	/* Copy frags into mapped buffers */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		memcpy(adapter->tx_ltb_ptr[queue_num] + total_bytes,
		       skb_frag_address_safe(frag), skb_frag_size(frag));
		total_bytes += skb_frag_size(frag);
	}

	if (unlikely(total_bytes != skb->len)) {
		netdev_err(adapter->netdev, "tx: incorrect packet len copied into ltb (%u != %u)\n",
			   skb->len, total_bytes);
		netdev->stats.tx_dropped++;
		goto out;
	}
	desc.fields.flags_len = desc_flags | skb->len;
	desc.fields.address = adapter->tx_ltb_dma[queue_num];
	/* finish writing to long_term_buff before VIOS accessing it */
	dma_wmb();

	if (ibmveth_send(adapter, desc.desc, mss)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

out:
	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;
}

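/*
 * Large packets aggregated by the hypervisor arrive without GSO metadata,
 * so the helper below reconstructs it: the MSS comes either from the rx
 * descriptor (large-packet bit set) or from the TCP checksum field where
 * the sender stashed it, and gso_segs is rederived from the payload length.
 */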
static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
{
	struct tcphdr *tcph;
	int offset = 0;
	int hdr_len;

	/* only TCP packets will be aggregated */
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_TCP) {
			offset = iph->ihl * 4;
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		} else {
			return;
		}
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data;

		if (iph6->nexthdr == IPPROTO_TCP) {
			offset = sizeof(struct ipv6hdr);
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		} else {
			return;
		}
	} else {
		return;
	}
	/* if mss is not set through Large Packet bit/mss in rx buffer,
	 * expect that the mss will be written to the tcp header checksum.
	 */
	tcph = (struct tcphdr *)(skb->data + offset);
	if (lrg_pkt) {
		skb_shinfo(skb)->gso_size = mss;
	} else if (offset) {
		skb_shinfo(skb)->gso_size = ntohs(tcph->check);
		tcph->check = 0;
	}

	if (skb_shinfo(skb)->gso_size) {
		hdr_len = offset + tcph->doff * 4;
		skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdr_len,
					     skb_shinfo(skb)->gso_size);
	}
}

static void ibmveth_rx_csum_helper(struct sk_buff *skb,
				   struct ibmveth_adapter *adapter)
{
	struct iphdr *iph = NULL;
	struct ipv6hdr *iph6 = NULL;
	__be16 skb_proto = 0;
	u16 iphlen = 0;
	u16 iph_proto = 0;
	u16 tcphdrlen = 0;

	skb_proto = be16_to_cpu(skb->protocol);

	if (skb_proto == ETH_P_IP) {
		iph = (struct iphdr *)skb->data;

		/* If the IP checksum is not offloaded and if the packet
		 * is large send, the checksum must be rebuilt.
		 */
		if (iph->check == 0xffff) {
			iph->check = 0;
			iph->check = ip_fast_csum((unsigned char *)iph,
						  iph->ihl);
		}

		iphlen = iph->ihl * 4;
		iph_proto = iph->protocol;
	} else if (skb_proto == ETH_P_IPV6) {
		iph6 = (struct ipv6hdr *)skb->data;
		iphlen = sizeof(struct ipv6hdr);
		iph_proto = iph6->nexthdr;
	}

	/* When CSO is enabled the TCP checksum may have been set to NULL by
	 * the sender, given that we zeroed out the TCP checksum field in the
	 * transmit path (refer ibmveth_start_xmit routine). In this case set
	 * up CHECKSUM_PARTIAL. If the packet is forwarded, the checksum will
	 * then be recalculated by the destination NIC (CSO must be enabled
	 * on the destination NIC).
	 *
	 * In an OVS environment, when a flow is not cached, specifically for a
	 * new TCP connection, the first packet information is passed up to
	 * the user space for finding a flow. During this process, OVS computes
	 * checksum on the first packet when CHECKSUM_PARTIAL flag is set.
	 *
	 * So, re-compute TCP pseudo header checksum.
	 */

	if (iph_proto == IPPROTO_TCP) {
		struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);

		if (tcph->check == 0x0000) {
			/* Recompute TCP pseudo header checksum */
			tcphdrlen = skb->len - iphlen;
			if (skb_proto == ETH_P_IP)
				tcph->check =
				 ~csum_tcpudp_magic(iph->saddr,
				iph->daddr, tcphdrlen, iph_proto, 0);
			else if (skb_proto == ETH_P_IPV6)
				tcph->check =
				 ~csum_ipv6_magic(&iph6->saddr,
				&iph6->daddr, tcphdrlen, iph_proto, 0);
			/* Setup SKB fields for checksum offload */
			skb_partial_csum_set(skb, iphlen,
					     offsetof(struct tcphdr, check));
			skb_reset_network_header(skb);
		}
	}
}

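/*
 * NAPI poll: drain up to `budget` entries from the rx queue. Frames shorter
 * than rx_copybreak are copied into a fresh skb so the original buffer can
 * be recycled to firmware immediately; larger frames are passed up and
 * their slots refilled by the replenish task. If the budget is not
 * exhausted, interrupts are re-enabled and the queue is checked once more
 * to close the race with newly arrived frames.
 */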
bea3348e | 1448 | static int ibmveth_poll(struct napi_struct *napi, int budget) |
1da177e4 | 1449 | { |
f148f61d SL |
1450 | struct ibmveth_adapter *adapter = |
1451 | container_of(napi, struct ibmveth_adapter, napi); | |
bea3348e | 1452 | struct net_device *netdev = adapter->netdev; |
1da177e4 | 1453 | int frames_processed = 0; |
1da177e4 | 1454 | unsigned long lpar_rc; |
7b596738 | 1455 | u16 mss = 0; |
1da177e4 | 1456 | |
f128c7cf | 1457 | restart_poll: |
cb013ea1 | 1458 | while (frames_processed < budget) { |
bea3348e SH |
1459 | if (!ibmveth_rxq_pending_buffer(adapter)) |
1460 | break; | |
1da177e4 | 1461 | |
f89e49e7 | 1462 | smp_rmb(); |
bea3348e SH |
1463 | if (!ibmveth_rxq_buffer_valid(adapter)) { |
1464 | wmb(); /* suggested by larson1 */ | |
1465 | adapter->rx_invalid_buffer++; | |
c43ced18 | 1466 | netdev_dbg(netdev, "recycling invalid buffer\n"); |
2c91e231 DM |
1467 | if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true))) |
1468 | break; | |
bea3348e | 1469 | } else { |
8d86c61a | 1470 | struct sk_buff *skb, *new_skb; |
bea3348e SH |
1471 | int length = ibmveth_rxq_frame_length(adapter); |
1472 | int offset = ibmveth_rxq_frame_offset(adapter); | |
f4ff2872 | 1473 | int csum_good = ibmveth_rxq_csum_good(adapter); |
7b596738 | 1474 | int lrg_pkt = ibmveth_rxq_large_packet(adapter); |
413f142c | 1475 | __sum16 iph_check = 0; |
f4ff2872 | 1476 | |
bea3348e | 1477 | skb = ibmveth_rxq_get_buffer(adapter); |
2c91e231 DM |
1478 | if (unlikely(!skb)) |
1479 | break; | |
1da177e4 | 1480 | |
7b596738 TF |
1481 | /* If the large packet bit is set in the rx queue | |
1482 | * descriptor, the MSS will be written by PHYP eight | |
1483 | * bytes from the start of the rx buffer, which is | |
1484 | * skb->data at this stage. | |
1485 | */ | |
1486 | if (lrg_pkt) { | |
1487 | __be64 *rxmss = (__be64 *)(skb->data + 8); | |
1488 | ||
1489 | mss = (u16)be64_to_cpu(*rxmss); | |
1490 | } | |
1491 | ||
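				/* Assumed layout, restating the code above:
				 * bytes 8..15 of the receive buffer hold the
				 * MSS as a big-endian 64-bit value, of which
				 * only the low 16 bits are kept in 'mss'.
				 */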
8d86c61a SL |
1492 | new_skb = NULL; |
1493 | if (length < rx_copybreak) | |
1494 | new_skb = netdev_alloc_skb(netdev, length); | |
1495 | ||
1496 | if (new_skb) { | |
1497 | skb_copy_to_linear_data(new_skb, | |
1498 | skb->data + offset, | |
1499 | length); | |
0c26b677 SL |
1500 | if (rx_flush) |
1501 | ibmveth_flush_buffer(skb->data, | |
1502 | length + offset); | |
2c91e231 DM |
1503 | if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true))) |
1504 | break; | |
8d86c61a | 1505 | skb = new_skb; |
8d86c61a | 1506 | } else { |
2c91e231 DM |
1507 | if (unlikely(ibmveth_rxq_harvest_buffer(adapter, false))) |
1508 | break; | |
8d86c61a SL |
1509 | skb_reserve(skb, offset); |
1510 | } | |
1da177e4 | 1511 | |
bea3348e SH |
1512 | skb_put(skb, length); |
1513 | skb->protocol = eth_type_trans(skb, netdev); | |
1da177e4 | 1514 | |
413f142c DW |
1515 | /* PHYP without PLSO support places a -1 in the IP | |
1516 | * checksum for large send frames. | |
1517 | */ | |
1518 | if (skb->protocol == cpu_to_be16(ETH_P_IP)) { | |
1519 | struct iphdr *iph = (struct iphdr *)skb->data; | |
1520 | ||
1521 | iph_check = iph->check; | |
1522 | } | |
1523 | ||
1524 | if ((length > netdev->mtu + ETH_HLEN) || | |
1525 | lrg_pkt || iph_check == 0xffff) { | |
7b596738 TF |
1526 | ibmveth_rx_mss_helper(skb, mss, lrg_pkt); |
1527 | adapter->rx_large_packets++; | |
1528 | } | |
1529 | ||
5ce9ad81 DW |
1530 | if (csum_good) { |
1531 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1532 | ibmveth_rx_csum_helper(skb, adapter); | |
1533 | } | |
1534 | ||
92ec8279 | 1535 | napi_gro_receive(napi, skb); /* send it up */ |
1da177e4 | 1536 | |
09f75cd7 JG |
1537 | netdev->stats.rx_packets++; |
1538 | netdev->stats.rx_bytes += length; | |
bea3348e | 1539 | frames_processed++; |
1da177e4 | 1540 | } |
cb013ea1 | 1541 | } |
1da177e4 | 1542 | |
e2adbcb4 | 1543 | ibmveth_replenish_task(adapter); |
1da177e4 | 1544 | |
f128c7cf NC |
1545 | if (frames_processed == budget) |
1546 | goto out; | |
4736edc7 | 1547 | |
f128c7cf NC |
1548 | if (!napi_complete_done(napi, frames_processed)) |
1549 | goto out; | |
1da177e4 | 1550 | |
f128c7cf NC |
1551 | /* We think we are done - re-enable interrupts, |
1552 | * then check once more to make sure we are done. | |
1553 | */ | |
1554 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE); | |
2c91e231 DM |
1555 | if (WARN_ON(lpar_rc != H_SUCCESS)) { |
1556 | schedule_work(&adapter->work); | |
1557 | goto out; | |
1558 | } | |
1da177e4 | 1559 | |
f128c7cf NC |
1560 | if (ibmveth_rxq_pending_buffer(adapter) && napi_schedule(napi)) { |
1561 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, | |
1562 | VIO_IRQ_DISABLE); | |
1563 | goto restart_poll; | |
1da177e4 LT |
1564 | } |
1565 | ||
f128c7cf | 1566 | out: |
bea3348e | 1567 | return frames_processed; |
1da177e4 LT |
1568 | } |
1569 | ||
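
The tail of ibmveth_poll() is an instance of the standard NAPI re-arm idiom: complete, re-enable the interrupt, then look at the ring once more so that a frame which slipped in during the window is not stranded waiting for an interrupt that may never come. A distilled sketch under stated assumptions (consume_ring(), ring_has_work(), device_irq_enable() and device_irq_disable() are hypothetical helpers standing in for the driver-specific pieces):

static int example_poll(struct napi_struct *napi, int budget)
{
	int done = consume_ring(napi, budget);	/* process up to 'budget' frames */

	if (done == budget)
		return done;			/* more work pending: stay scheduled */

	if (!napi_complete_done(napi, done))
		return done;			/* told to keep polling */

	device_irq_enable();			/* re-arm the interrupt */

	/* Close the race window between the last ring check and the
	 * re-arm: if work arrived meanwhile, poll again instead of
	 * waiting for the next interrupt.
	 */
	if (ring_has_work() && napi_schedule(napi))
		device_irq_disable();

	return done;
}
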
7d12e780 | 1570 | static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) |
d7fbeba6 | 1571 | { |
1da177e4 | 1572 | struct net_device *netdev = dev_instance; |
4cf1653a | 1573 | struct ibmveth_adapter *adapter = netdev_priv(netdev); |
1da177e4 LT |
1574 | unsigned long lpar_rc; |
1575 | ||
288379f0 | 1576 | if (napi_schedule_prep(&adapter->napi)) { |
bea3348e SH |
1577 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, |
1578 | VIO_IRQ_DISABLE); | |
2c91e231 | 1579 | WARN_ON(lpar_rc != H_SUCCESS); |
288379f0 | 1580 | __napi_schedule(&adapter->napi); |
1da177e4 LT |
1581 | } |
1582 | return IRQ_HANDLED; | |
1583 | } | |
1584 | ||
1da177e4 LT |
1585 | static void ibmveth_set_multicast_list(struct net_device *netdev) |
1586 | { | |
4cf1653a | 1587 | struct ibmveth_adapter *adapter = netdev_priv(netdev); |
1da177e4 LT |
1588 | unsigned long lpar_rc; |
1589 | ||
4cd24eaf JP |
1590 | if ((netdev->flags & IFF_PROMISC) || |
1591 | (netdev_mc_count(netdev) > adapter->mcastFilterSize)) { | |
1da177e4 LT |
1592 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, |
1593 | IbmVethMcastEnableRecv | | |
1594 | IbmVethMcastDisableFiltering, | |
1595 | 0); | |
f148f61d | 1596 | if (lpar_rc != H_SUCCESS) { |
21c2dece SL |
1597 | netdev_err(netdev, "h_multicast_ctrl rc=%ld when " |
1598 | "entering promisc mode\n", lpar_rc); | |
1da177e4 LT |
1599 | } |
1600 | } else { | |
22bedad3 | 1601 | struct netdev_hw_addr *ha; |
1da177e4 LT |
1602 | /* clear the filter table & disable filtering */ |
1603 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, | |
1604 | IbmVethMcastEnableRecv | | |
1605 | IbmVethMcastDisableFiltering | | |
1606 | IbmVethMcastClearFilterTable, | |
1607 | 0); | |
f148f61d | 1608 | if (lpar_rc != H_SUCCESS) { |
21c2dece SL |
1609 | netdev_err(netdev, "h_multicast_ctrl rc=%ld when " |
1610 | "attempting to clear filter table\n", | |
1611 | lpar_rc); | |
1da177e4 LT |
1612 | } |
1613 | /* add the addresses to the filter table */ | |
22bedad3 | 1614 | netdev_for_each_mc_addr(ha, netdev) { |
f148f61d | 1615 | /* add the multicast address to the filter table */ |
d746ca95 | 1616 | u64 mcast_addr; |
5c8b3485 | 1617 | mcast_addr = ether_addr_to_u64(ha->addr); |
1da177e4 LT |
1618 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, |
1619 | IbmVethMcastAddFilter, | |
1620 | mcast_addr); | |
f148f61d | 1621 | if (lpar_rc != H_SUCCESS) { |
21c2dece SL |
1622 | netdev_err(netdev, "h_multicast_ctrl rc=%ld " |
1623 | "when adding an entry to the filter " | |
1624 | "table\n", lpar_rc); | |
1da177e4 LT |
1625 | } |
1626 | } | |
d7fbeba6 | 1627 | |
1da177e4 LT |
1628 | /* re-enable filtering */ |
1629 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, | |
1630 | IbmVethMcastEnableFiltering, | |
1631 | 0); | |
f148f61d | 1632 | if (lpar_rc != H_SUCCESS) { |
21c2dece SL |
1633 | netdev_err(netdev, "h_multicast_ctrl rc=%ld when " |
1634 | "enabling filtering\n", lpar_rc); | |
1da177e4 LT |
1635 | } |
1636 | } | |
1637 | } | |
1638 | ||
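
For reference, ether_addr_to_u64() packs the six MAC bytes into the low 48 bits of a u64, most significant byte first (00:11:22:33:44:55 becomes 0x001122334455), which is the representation the h_multicast_ctrl hcall takes. Note the fallback above: once the group count exceeds mcastFilterSize, the driver leaves IbmVethMcastDisableFiltering in effect, so every multicast frame is accepted rather than filtered.
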
1639 | static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) | |
1640 | { | |
4cf1653a | 1641 | struct ibmveth_adapter *adapter = netdev_priv(dev); |
1096d63d | 1642 | struct vio_dev *viodev = adapter->vdev; |
860f242e | 1643 | int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; |
0645bab7 RJ |
1644 | int i, rc; |
1645 | int need_restart = 0; | |
b6d35182 | 1646 | |
517e80e6 | 1647 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) |
4fce1482 | 1648 | if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) |
ce6eea58 BK |
1649 | break; |
1650 | ||
517e80e6 | 1651 | if (i == IBMVETH_NUM_BUFF_POOLS) |
ce6eea58 BK |
1652 | return -EINVAL; |
1653 | ||
ea866e65 SL |
1654 | /* Deactivate all the buffer pools so that the next loop can activate | |
1655 | * only the buffer pools necessary to hold the new MTU */ | |
0645bab7 RJ |
1656 | if (netif_running(adapter->netdev)) { |
1657 | need_restart = 1; | |
0645bab7 | 1658 | ibmveth_close(adapter->netdev); |
0645bab7 | 1659 | } |
ea866e65 | 1660 | |
860f242e | 1661 | /* Look for an active buffer pool that can hold the new MTU */ |
f148f61d | 1662 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
ea866e65 | 1663 | adapter->rx_buff_pool[i].active = 1; |
ce6eea58 | 1664 | |
4fce1482 | 1665 | if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) { |
1eb2cded | 1666 | WRITE_ONCE(dev->mtu, new_mtu); |
1096d63d RJ |
1667 | vio_cmo_set_dev_desired(viodev, |
1668 | ibmveth_get_desired_dma | |
1669 | (viodev)); | |
0645bab7 RJ |
1670 | if (need_restart) { |
1671 | return ibmveth_open(adapter->netdev); | |
1672 | } | |
860f242e | 1673 | return 0; |
b6d35182 | 1674 | } |
b6d35182 | 1675 | } |
0645bab7 RJ |
1676 | |
1677 | if (need_restart && (rc = ibmveth_open(adapter->netdev))) | |
1678 | return rc; | |
1679 | ||
860f242e | 1680 | return -EINVAL; |
1da177e4 LT |
1681 | } |
1682 | ||
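
In practice this path is hit by a userspace MTU change such as `ip link set dev eth0 mtu 9000` (interface name and value illustrative). The request succeeds only if some receive buffer pool can hold new_mtu + IBMVETH_BUFF_OH; otherwise -EINVAL is returned. When a pool fits, the DMA entitlement is re-negotiated through vio_cmo_set_dev_desired(), and the device is closed and reopened around the change if it was running.
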
6b422374 SL |
1683 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1684 | static void ibmveth_poll_controller(struct net_device *dev) | |
1685 | { | |
4cf1653a | 1686 | ibmveth_replenish_task(netdev_priv(dev)); |
5f77113c | 1687 | ibmveth_interrupt(dev->irq, dev); |
6b422374 SL |
1688 | } |
1689 | #endif | |
1690 | ||
1096d63d RJ |
1691 | /** |
1692 | * ibmveth_get_desired_dma - Calculate IO memory desired by the driver | |
1693 | * | |
1694 | * @vdev: struct vio_dev for the device whose desired IO mem is to be returned | |
1695 | * | |
1696 | * Return value: | |
1697 | * Number of bytes of IO data the driver will need to perform well. | |
1698 | */ | |
1699 | static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev) | |
1700 | { | |
1701 | struct net_device *netdev = dev_get_drvdata(&vdev->dev); | |
1702 | struct ibmveth_adapter *adapter; | |
d0847757 | 1703 | struct iommu_table *tbl; |
1096d63d RJ |
1704 | unsigned long ret; |
1705 | int i; | |
1706 | int rxqentries = 1; | |
1707 | ||
d0847757 AP |
1708 | tbl = get_iommu_table_base(&vdev->dev); |
1709 | ||
1096d63d RJ |
1710 | /* netdev inits at probe time along with the structures we need below */ |
1711 | if (netdev == NULL) | |
d0847757 | 1712 | return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl); |
1096d63d RJ |
1713 | |
1714 | adapter = netdev_priv(netdev); | |
1715 | ||
1716 | ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; | |
d0847757 | 1717 | ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl); |
d6832ca4 NC |
1718 | /* add size of mapped tx buffers */ |
1719 | ret += IOMMU_PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE, tbl); | |
1096d63d | 1720 | |
517e80e6 | 1721 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
1096d63d RJ |
1722 | /* add the size of the active receive buffers */ |
1723 | if (adapter->rx_buff_pool[i].active) | |
1724 | ret += | |
1725 | adapter->rx_buff_pool[i].size * | |
1726 | IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i]. | |
d0847757 | 1727 | buff_size, tbl); |
1096d63d RJ |
1728 | rxqentries += adapter->rx_buff_pool[i].size; |
1729 | } | |
1730 | /* add the size of the receive queue entries */ | |
d0847757 AP |
1731 | ret += IOMMU_PAGE_ALIGN( |
1732 | rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl); | |
1096d63d RJ |
1733 | |
1734 | return ret; | |
1735 | } | |
1736 | ||
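
A back-of-envelope version of the computation above, under stated assumptions: 4 KiB IOMMU pages, 4 KiB buffer and filter lists, MTU 1500, a 64 KiB tx buffer area, one active pool of 512 x 2048-byte buffers, and 16-byte rx queue entries. All constants are illustrative, not values read from the driver headers:

#include <stdio.h>

#define ALIGN_4K(x)	(((x) + 4095UL) & ~4095UL)	/* stand-in for IOMMU_PAGE_ALIGN */

int main(void)
{
	unsigned long ret = 4096 + 4096;	/* buffer list + filter list (assumed) */

	ret += ALIGN_4K(1500);			/* one MTU-sized mapping */
	ret += ALIGN_4K(64 * 1024UL);		/* mapped tx buffers (assumed size) */
	ret += 512 * ALIGN_4K(2048);		/* the one active rx pool */
	ret += ALIGN_4K((1 + 512) * 16UL);	/* rx queue entries (assumed 16 B each) */

	printf("desired DMA: %lu bytes\n", ret);	/* ~2.2 MB with these numbers */
	return 0;
}
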
c77c761f TF |
1737 | static int ibmveth_set_mac_addr(struct net_device *dev, void *p) |
1738 | { | |
1739 | struct ibmveth_adapter *adapter = netdev_priv(dev); | |
1740 | struct sockaddr *addr = p; | |
1741 | u64 mac_address; | |
1742 | int rc; | |
1743 | ||
1744 | if (!is_valid_ether_addr(addr->sa_data)) | |
1745 | return -EADDRNOTAVAIL; | |
1746 | ||
5c8b3485 | 1747 | mac_address = ether_addr_to_u64(addr->sa_data); |
c77c761f TF |
1748 | rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address); |
1749 | if (rc) { | |
1750 | netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc); | |
1751 | return rc; | |
1752 | } | |
1753 | ||
f3956ebb | 1754 | eth_hw_addr_set(dev, addr->sa_data); |
c77c761f TF |
1755 | |
1756 | return 0; | |
1757 | } | |
1758 | ||
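
This handler backs e.g. `ip link set dev eth0 address 02:01:02:03:04:05` (values illustrative): the address is validated, proposed to the hypervisor through h_change_logical_lan_mac(), and committed to the netdev with eth_hw_addr_set() only if the hypervisor accepts it.
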
e186d174 AB |
1759 | static const struct net_device_ops ibmveth_netdev_ops = { |
1760 | .ndo_open = ibmveth_open, | |
1761 | .ndo_stop = ibmveth_close, | |
1762 | .ndo_start_xmit = ibmveth_start_xmit, | |
afc4b13d | 1763 | .ndo_set_rx_mode = ibmveth_set_multicast_list, |
a7605370 | 1764 | .ndo_eth_ioctl = ibmveth_ioctl, |
e186d174 | 1765 | .ndo_change_mtu = ibmveth_change_mtu, |
b9367bf3 MM |
1766 | .ndo_fix_features = ibmveth_fix_features, |
1767 | .ndo_set_features = ibmveth_set_features, | |
e186d174 | 1768 | .ndo_validate_addr = eth_validate_addr, |
c77c761f | 1769 | .ndo_set_mac_address = ibmveth_set_mac_addr, |
e186d174 AB |
1770 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1771 | .ndo_poll_controller = ibmveth_poll_controller, | |
1772 | #endif | |
1773 | }; | |
1774 | ||
1dd06ae8 | 1775 | static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) |
1da177e4 | 1776 | { |
13f85203 | 1777 | int rc, i, mac_len; |
1da177e4 | 1778 | struct net_device *netdev; |
9dc83afd | 1779 | struct ibmveth_adapter *adapter; |
1da177e4 | 1780 | unsigned char *mac_addr_p; |
66cf4710 | 1781 | __be32 *mcastFilterSize_p; |
07e6a97d TF |
1782 | long ret; |
1783 | unsigned long ret_attr; | |
1da177e4 | 1784 | |
c43ced18 SL |
1785 | dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n", |
1786 | dev->unit_address); | |
1da177e4 | 1787 | |
f148f61d | 1788 | mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR, |
13f85203 | 1789 | &mac_len); |
f148f61d | 1790 | if (!mac_addr_p) { |
21c2dece | 1791 | dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n"); |
be35ae9e | 1792 | return -EINVAL; |
1da177e4 | 1793 | } |
13f85203 BH |
1794 | /* Workaround for old/broken pHyp */ |
1795 | if (mac_len == 8) | |
1796 | mac_addr_p += 2; | |
1797 | else if (mac_len != 6) { | |
1798 | dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n", | |
1799 | mac_len); | |
1800 | return -EINVAL; | |
1801 | } | |
d7fbeba6 | 1802 | |
66cf4710 TF |
1803 | mcastFilterSize_p = (__be32 *)vio_get_attribute(dev, |
1804 | VETH_MCAST_FILTER_SIZE, | |
1805 | NULL); | |
f148f61d | 1806 | if (!mcastFilterSize_p) { |
21c2dece SL |
1807 | dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE " |
1808 | "attribute\n"); | |
be35ae9e | 1809 | return -EINVAL; |
1da177e4 | 1810 | } |
d7fbeba6 | 1811 | |
d926793c | 1812 | netdev = alloc_etherdev_mqs(sizeof(struct ibmveth_adapter), IBMVETH_MAX_QUEUES, 1); |
f148f61d | 1813 | if (!netdev) |
1da177e4 LT |
1814 | return -ENOMEM; |
1815 | ||
4cf1653a | 1816 | adapter = netdev_priv(netdev); |
c7ae011d | 1817 | dev_set_drvdata(&dev->dev, netdev); |
1da177e4 LT |
1818 | |
1819 | adapter->vdev = dev; | |
1820 | adapter->netdev = netdev; | |
2c91e231 | 1821 | INIT_WORK(&adapter->work, ibmveth_reset); |
66cf4710 | 1822 | adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p); |
9aedc6e2 | 1823 | ibmveth_init_link_settings(netdev); |
d7fbeba6 | 1824 | |
b707b89f | 1825 | netif_napi_add_weight(netdev, &adapter->napi, ibmveth_poll, 16); |
bea3348e | 1826 | |
1da177e4 | 1827 | netdev->irq = dev->irq; |
e186d174 AB |
1828 | netdev->netdev_ops = &ibmveth_netdev_ops; |
1829 | netdev->ethtool_ops = &netdev_ethtool_ops; | |
1da177e4 | 1830 | SET_NETDEV_DEV(netdev, &dev->dev); |
23d28a85 TH |
1831 | netdev->hw_features = NETIF_F_SG; |
1832 | if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) { | |
1833 | netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | |
1834 | NETIF_F_RXCSUM; | |
1835 | } | |
07e6a97d | 1836 | |
b9367bf3 | 1837 | netdev->features |= netdev->hw_features; |
1da177e4 | 1838 | |
07e6a97d TF |
1839 | ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); |
1840 | ||
1841 | /* If running older firmware, TSO should not be enabled by default */ | |
1842 | if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) && | |
1843 | !old_large_send) { | |
1844 | netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; | |
1845 | netdev->features |= netdev->hw_features; | |
1846 | } else { | |
1847 | netdev->hw_features |= NETIF_F_TSO; | |
1848 | } | |
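	/* Note: in the fallback branch TSO is added only to hw_features,
	 * not features, so on older firmware it stays off by default but
	 * can still be enabled explicitly, e.g. with ethtool -K.
	 */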
8641dd85 | 1849 | |
66aa0678 SK |
1850 | adapter->is_active_trunk = false; |
1851 | if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK)) { | |
1852 | adapter->is_active_trunk = true; | |
1853 | netdev->hw_features |= NETIF_F_FRAGLIST; | |
1854 | netdev->features |= NETIF_F_FRAGLIST; | |
1855 | } | |
1856 | ||
2094200b MC |
1857 | if (ret == H_SUCCESS && |
1858 | (ret_attr & IBMVETH_ILLAN_RX_MULTI_BUFF_SUPPORT)) { | |
1859 | adapter->rx_buffers_per_hcall = IBMVETH_MAX_RX_PER_HCALL; | |
1860 | netdev_dbg(netdev, | |
1861 | "RX Multi-buffer hcall supported by FW, batch set to %u\n", | |
1862 | adapter->rx_buffers_per_hcall); | |
1863 | } else { | |
1864 | adapter->rx_buffers_per_hcall = 1; | |
1865 | netdev_dbg(netdev, | |
1866 | "RX Single-buffer hcall mode, batch set to %u\n", | |
1867 | adapter->rx_buffers_per_hcall); | |
1868 | } | |
1869 | ||
d894be57 | 1870 | netdev->min_mtu = IBMVETH_MIN_MTU; |
5948378b | 1871 | netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH; |
d894be57 | 1872 | |
a96d317f | 1873 | eth_hw_addr_set(netdev, mac_addr_p); |
1da177e4 | 1874 | |
cd7c7ec3 TF |
1875 | if (firmware_has_feature(FW_FEATURE_CMO)) |
1876 | memcpy(pool_count, pool_count_cmo, sizeof(pool_count)); | |
1877 | ||
f148f61d | 1878 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
860f242e | 1879 | struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; |
8dde2a96 GKH |
1880 | int error; |
1881 | ||
d7fbeba6 JG |
1882 | ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, |
1883 | pool_count[i], pool_size[i], | |
860f242e | 1884 | pool_active[i]); |
8dde2a96 GKH |
1885 | error = kobject_init_and_add(kobj, &ktype_veth_pool, |
1886 | &dev->dev.kobj, "pool%d", i); | |
1887 | if (!error) | |
1888 | kobject_uevent(kobj, KOBJ_ADD); | |
860f242e | 1889 | } |
1da177e4 | 1890 | |
742c60e1 NC |
1891 | rc = netif_set_real_num_tx_queues(netdev, min(num_online_cpus(), |
1892 | IBMVETH_DEFAULT_QUEUES)); | |
10c2aba8 NC |
1893 | if (rc) { |
1894 | netdev_dbg(netdev, "failed to set number of tx queues rc=%d\n", | |
1895 | rc); | |
1896 | free_netdev(netdev); | |
1897 | return rc; | |
1898 | } | |
d926793c | 1899 | adapter->tx_ltb_size = PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE); |
10c2aba8 NC |
1900 | for (i = 0; i < IBMVETH_MAX_QUEUES; i++) |
1901 | adapter->tx_ltb_ptr[i] = NULL; | |
d926793c | 1902 | |
c43ced18 | 1903 | netdev_dbg(netdev, "adapter @ 0x%p\n", adapter); |
c43ced18 | 1904 | netdev_dbg(netdev, "registering netdev...\n"); |
1da177e4 | 1905 | |
b801a4e7 MM |
1906 | ibmveth_set_features(netdev, netdev->features); |
1907 | ||
1da177e4 LT |
1908 | rc = register_netdev(netdev); |
1909 | ||
f148f61d | 1910 | if (rc) { |
c43ced18 | 1911 | netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc); |
1da177e4 LT |
1912 | free_netdev(netdev); |
1913 | return rc; | |
1914 | } | |
1915 | ||
c43ced18 | 1916 | netdev_dbg(netdev, "registered\n"); |
1da177e4 | 1917 | |
1da177e4 LT |
1918 | return 0; |
1919 | } | |
1920 | ||
386a966f | 1921 | static void ibmveth_remove(struct vio_dev *dev) |
1da177e4 | 1922 | { |
c7ae011d | 1923 | struct net_device *netdev = dev_get_drvdata(&dev->dev); |
4cf1653a | 1924 | struct ibmveth_adapter *adapter = netdev_priv(netdev); |
860f242e SL |
1925 | int i; |
1926 | ||
2c91e231 DM |
1927 | cancel_work_sync(&adapter->work); |
1928 | ||
f148f61d | 1929 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) |
c10997f6 | 1930 | kobject_put(&adapter->rx_buff_pool[i].kobj); |
1da177e4 LT |
1931 | |
1932 | unregister_netdev(netdev); | |
1933 | ||
1da177e4 | 1934 | free_netdev(netdev); |
1096d63d | 1935 | dev_set_drvdata(&dev->dev, NULL); |
1da177e4 LT |
1936 | } |
1937 | ||
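
Teardown order matters here: cancel_work_sync() runs first so that a queued reset from the work item cannot race with unregister_netdev() and the frees that follow.
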
860f242e SL |
1938 | static struct attribute veth_active_attr; |
1939 | static struct attribute veth_num_attr; | |
1940 | static struct attribute veth_size_attr; | |
1941 | ||
f148f61d SL |
1942 | static ssize_t veth_pool_show(struct kobject *kobj, |
1943 | struct attribute *attr, char *buf) | |
860f242e | 1944 | { |
d7fbeba6 | 1945 | struct ibmveth_buff_pool *pool = container_of(kobj, |
860f242e SL |
1946 | struct ibmveth_buff_pool, |
1947 | kobj); | |
1948 | ||
1949 | if (attr == &veth_active_attr) | |
1950 | return sprintf(buf, "%d\n", pool->active); | |
1951 | else if (attr == &veth_num_attr) | |
1952 | return sprintf(buf, "%d\n", pool->size); | |
1953 | else if (attr == &veth_size_attr) | |
1954 | return sprintf(buf, "%d\n", pool->buff_size); | |
1955 | return 0; | |
1956 | } | |
1957 | ||
46431fd5 DM |
1958 | /** |
1959 | * veth_pool_store - sysfs store handler for pool attributes | |
1960 | * @kobj: kobject embedded in pool | |
1961 | * @attr: attribute being changed | |
1962 | * @buf: value being stored | |
1963 | * @count: length of @buf in bytes | |
1964 | * | |
1965 | * Stores the new value in the pool attribute. Verifies that the new value | |
1966 | * is in range for size and buff_size, and that at least one pool remains | |
1967 | * available to receive MTU-sized packets. | |
1968 | * | |
1969 | * Context: Process context. | |
1970 | * Takes and releases rtnl_mutex to ensure correct ordering of close | |
1971 | * and open calls. | |
1972 | * Return: | |
1973 | * * %-EPERM - Not allowed to disable all MTU-sized buffer pools | |
1974 | * * %-EINVAL - New pool size or buffer size is out of range | |
1975 | * * count - Return count for success | |
1976 | * * other - Return value from a failed ibmveth_open call | |
1977 | */ | |
f148f61d SL |
1978 | static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, |
1979 | const char *buf, size_t count) | |
860f242e | 1980 | { |
d7fbeba6 | 1981 | struct ibmveth_buff_pool *pool = container_of(kobj, |
860f242e SL |
1982 | struct ibmveth_buff_pool, |
1983 | kobj); | |
1756055d | 1984 | struct net_device *netdev = dev_get_drvdata(kobj_to_dev(kobj->parent)); |
4cf1653a | 1985 | struct ibmveth_adapter *adapter = netdev_priv(netdev); |
860f242e | 1986 | long value = simple_strtol(buf, NULL, 10); |
46431fd5 DM |
1987 | bool change = false; |
1988 | u32 newbuff_size; | |
1989 | u32 oldbuff_size; | |
1990 | int newactive; | |
1991 | int oldactive; | |
1992 | u32 newsize; | |
1993 | u32 oldsize; | |
860f242e SL |
1994 | long rc; |
1995 | ||
053f3ff6 DM |
1996 | rtnl_lock(); |
1997 | ||
46431fd5 DM |
1998 | oldbuff_size = pool->buff_size; |
1999 | oldactive = pool->active; | |
2000 | oldsize = pool->size; | |
2001 | ||
2002 | newbuff_size = oldbuff_size; | |
2003 | newactive = oldactive; | |
2004 | newsize = oldsize; | |
2005 | ||
860f242e | 2006 | if (attr == &veth_active_attr) { |
46431fd5 DM |
2007 | if (value && !oldactive) { |
2008 | newactive = 1; | |
2009 | change = true; | |
2010 | } else if (!value && oldactive) { | |
860f242e SL |
2011 | int mtu = netdev->mtu + IBMVETH_BUFF_OH; |
2012 | int i; | |
2013 | /* Make sure at least one other active buffer pool can | |
2014 | * hold an MTU-sized packet */ | |
517e80e6 | 2015 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
860f242e SL |
2016 | if (pool == &adapter->rx_buff_pool[i]) |
2017 | continue; | |
2018 | if (!adapter->rx_buff_pool[i].active) | |
2019 | continue; | |
76b9cfcc BK |
2020 | if (mtu <= adapter->rx_buff_pool[i].buff_size) |
2021 | break; | |
860f242e | 2022 | } |
76b9cfcc | 2023 | |
517e80e6 | 2024 | if (i == IBMVETH_NUM_BUFF_POOLS) { |
21c2dece | 2025 | netdev_err(netdev, "no active pool >= MTU\n"); |
053f3ff6 DM |
2026 | rc = -EPERM; |
2027 | goto unlock_err; | |
860f242e | 2028 | } |
76b9cfcc | 2029 | |
46431fd5 DM |
2030 | newactive = 0; |
2031 | change = true; | |
860f242e SL |
2032 | } |
2033 | } else if (attr == &veth_num_attr) { | |
f148f61d | 2034 | if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) { |
053f3ff6 DM |
2035 | rc = -EINVAL; |
2036 | goto unlock_err; | |
46431fd5 DM |
2037 | } |
2038 | if (value != oldsize) { | |
2039 | newsize = value; | |
2040 | change = true; | |
860f242e SL |
2041 | } |
2042 | } else if (attr == &veth_size_attr) { | |
f148f61d | 2043 | if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) { |
053f3ff6 DM |
2044 | rc = -EINVAL; |
2045 | goto unlock_err; | |
46431fd5 DM |
2046 | } |
2047 | if (value != oldbuff_size) { | |
2048 | newbuff_size = value; | |
2049 | change = true; | |
2050 | } | |
2051 | } | |
2052 | ||
2053 | if (change) { | |
2054 | if (netif_running(netdev)) | |
2055 | ibmveth_close(netdev); | |
2056 | ||
2057 | pool->active = newactive; | |
2058 | pool->buff_size = newbuff_size; | |
2059 | pool->size = newsize; | |
2060 | ||
2061 | if (netif_running(netdev)) { | |
2062 | rc = ibmveth_open(netdev); | |
2063 | if (rc) { | |
2064 | pool->active = oldactive; | |
2065 | pool->buff_size = oldbuff_size; | |
2066 | pool->size = oldsize; | |
2067 | goto unlock_err; | |
f148f61d | 2068 | } |
860f242e SL |
2069 | } |
2070 | } | |
053f3ff6 | 2071 | rtnl_unlock(); |
860f242e SL |
2072 | |
2073 | /* kick the interrupt handler to allocate/deallocate pools */ | |
7d12e780 | 2074 | ibmveth_interrupt(netdev->irq, netdev); |
860f242e | 2075 | return count; |
053f3ff6 DM |
2076 | |
2077 | unlock_err: | |
2078 | rtnl_unlock(); | |
2079 | return rc; | |
860f242e SL |
2080 | } |
2081 | ||
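
These two handlers implement the per-pool sysfs knobs created in ibmveth_probe() (pool0..poolN under the vio device, each exposing active, num and size). A hedged example with an illustrative unit address: `echo 768 > /sys/devices/vio/30000002/pool0/num` resizes pool 0, closing and reopening the device around the change if it is running; a write that would leave no active pool able to hold an MTU-sized packet is rejected with EPERM.
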
2082 | ||
f148f61d SL |
2083 | #define ATTR(_name, _mode) \ |
2084 | struct attribute veth_##_name##_attr = { \ | |
2085 | .name = __stringify(_name), .mode = _mode, \ | |
2086 | }; | |
860f242e SL |
2087 | |
2088 | static ATTR(active, 0644); | |
2089 | static ATTR(num, 0644); | |
2090 | static ATTR(size, 0644); | |
2091 | ||
f148f61d | 2092 | static struct attribute *veth_pool_attrs[] = { |
860f242e SL |
2093 | &veth_active_attr, |
2094 | &veth_num_attr, | |
2095 | &veth_size_attr, | |
2096 | NULL, | |
2097 | }; | |
c288bc0d | 2098 | ATTRIBUTE_GROUPS(veth_pool); |
860f242e | 2099 | |
52cf25d0 | 2100 | static const struct sysfs_ops veth_pool_ops = { |
860f242e SL |
2101 | .show = veth_pool_show, |
2102 | .store = veth_pool_store, | |
2103 | }; | |
2104 | ||
2105 | static struct kobj_type ktype_veth_pool = { | |
2106 | .release = NULL, | |
2107 | .sysfs_ops = &veth_pool_ops, | |
c288bc0d | 2108 | .default_groups = veth_pool_groups, |
860f242e SL |
2109 | }; |
2110 | ||
e7a3af5d BK |
2111 | static int ibmveth_resume(struct device *dev) |
2112 | { | |
2113 | struct net_device *netdev = dev_get_drvdata(dev); | |
2114 | ibmveth_interrupt(netdev->irq, netdev); | |
2115 | return 0; | |
2116 | } | |
860f242e | 2117 | |
71450804 | 2118 | static const struct vio_device_id ibmveth_device_table[] = { |
1da177e4 | 2119 | { "network", "IBM,l-lan"}, |
fb120da6 | 2120 | { "", "" } |
1da177e4 | 2121 | }; |
1da177e4 LT |
2122 | MODULE_DEVICE_TABLE(vio, ibmveth_device_table); |
2123 | ||
eb60a73d | 2124 | static const struct dev_pm_ops ibmveth_pm_ops = { |
e7a3af5d BK |
2125 | .resume = ibmveth_resume |
2126 | }; | |
2127 | ||
1da177e4 | 2128 | static struct vio_driver ibmveth_driver = { |
6fdf5392 SR |
2129 | .id_table = ibmveth_device_table, |
2130 | .probe = ibmveth_probe, | |
2131 | .remove = ibmveth_remove, | |
1096d63d | 2132 | .get_desired_dma = ibmveth_get_desired_dma, |
cb52d897 BH |
2133 | .name = ibmveth_driver_name, |
2134 | .pm = &ibmveth_pm_ops, | |
1da177e4 LT |
2135 | }; |
2136 | ||
2137 | static int __init ibmveth_module_init(void) | |
2138 | { | |
21c2dece SL |
2139 | printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name, |
2140 | ibmveth_driver_string, ibmveth_driver_version); | |
1da177e4 | 2141 | |
1da177e4 LT |
2142 | return vio_register_driver(&ibmveth_driver); |
2143 | } | |
2144 | ||
2145 | static void __exit ibmveth_module_exit(void) | |
2146 | { | |
2147 | vio_unregister_driver(&ibmveth_driver); | |
d7fbeba6 | 2148 | } |
1da177e4 LT |
2149 | |
2150 | module_init(ibmveth_module_init); | |
2151 | module_exit(ibmveth_module_exit); | |
8a97de24 DM |
2152 | |
2153 | #ifdef CONFIG_IBMVETH_KUNIT_TEST | |
2154 | #include <kunit/test.h> | |
2155 | ||
2156 | /** | |
2157 | * ibmveth_reset_kunit - reset routine for running in KUnit environment | |
2158 | * | |
2159 | * @w: pointer to work_struct embedded in adapter structure | |
2160 | * | |
2161 | * Context: Called in the KUnit environment. Does nothing. | |
2162 | * | |
2163 | * Return: void | |
2164 | */ | |
2165 | static void ibmveth_reset_kunit(struct work_struct *w) | |
2166 | { | |
2167 | netdev_dbg(NULL, "reset_kunit starting\n"); | |
2168 | netdev_dbg(NULL, "reset_kunit complete\n"); | |
2169 | } | |
2170 | ||
2171 | /** | |
2172 | * ibmveth_remove_buffer_from_pool_test - unit test for some of | |
2173 | * ibmveth_remove_buffer_from_pool | |
2174 | * @test: pointer to kunit structure | |
2175 | * | |
2176 | * Tests the error returns from ibmveth_remove_buffer_from_pool. | |
2177 | * ibmveth_remove_buffer_from_pool also calls WARN_ON, so dmesg should be | |
2178 | * checked to see that these warnings happened. | |
2179 | * | |
2180 | * Return: void | |
2181 | */ | |
2182 | static void ibmveth_remove_buffer_from_pool_test(struct kunit *test) | |
2183 | { | |
2184 | struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL); | |
2185 | struct ibmveth_buff_pool *pool; | |
2186 | u64 correlator; | |
2187 | ||
2188 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter); | |
2189 | ||
2190 | INIT_WORK(&adapter->work, ibmveth_reset_kunit); | |
2191 | ||
2192 | /* Set sane values for buffer pools */ | |
2193 | for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) | |
2194 | ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, | |
2195 | pool_count[i], pool_size[i], | |
2196 | pool_active[i]); | |
2197 | ||
2198 | pool = &adapter->rx_buff_pool[0]; | |
2199 | pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL); | |
2200 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff); | |
2201 | ||
2202 | correlator = ((u64)IBMVETH_NUM_BUFF_POOLS << 32) | 0; | |
2203 | KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false)); | |
2204 | KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true)); | |
2205 | ||
2206 | correlator = ((u64)0 << 32) | adapter->rx_buff_pool[0].size; | |
2207 | KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false)); | |
2208 | KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true)); | |
2209 | ||
2210 | correlator = (u64)0 | 0; | |
2211 | pool->skbuff[0] = NULL; | |
2212 | KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, false)); | |
2213 | KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, true)); | |
2214 | ||
2215 | flush_work(&adapter->work); | |
2216 | } | |
2217 | ||
2218 | /** | |
2219 | * ibmveth_rxq_get_buffer_test - unit test for ibmveth_rxq_get_buffer | |
2220 | * @test: pointer to kunit structure | |
2221 | * | |
2222 | * Tests ibmveth_rxq_get_buffer. ibmveth_rxq_get_buffer also calls WARN_ON for | |
2223 | * the NULL returns, so dmesg should be checked to see that these warnings | |
2224 | * happened. | |
2225 | * | |
2226 | * Return: void | |
2227 | */ | |
2228 | static void ibmveth_rxq_get_buffer_test(struct kunit *test) | |
2229 | { | |
2230 | struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL); | |
2231 | struct sk_buff *skb = kunit_kzalloc(test, sizeof(*skb), GFP_KERNEL); | |
2232 | struct ibmveth_buff_pool *pool; | |
2233 | ||
2234 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter); | |
2235 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb); | |
2236 | ||
2237 | INIT_WORK(&adapter->work, ibmveth_reset_kunit); | |
2238 | ||
2239 | adapter->rx_queue.queue_len = 1; | |
2240 | adapter->rx_queue.index = 0; | |
2241 | adapter->rx_queue.queue_addr = kunit_kzalloc(test, sizeof(struct ibmveth_rx_q_entry), | |
2242 | GFP_KERNEL); | |
2243 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter->rx_queue.queue_addr); | |
2244 | ||
2245 | /* Set sane values for buffer pools */ | |
2246 | for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) | |
2247 | ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, | |
2248 | pool_count[i], pool_size[i], | |
2249 | pool_active[i]); | |
2250 | ||
2251 | pool = &adapter->rx_buff_pool[0]; | |
2252 | pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL); | |
2253 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff); | |
2254 | ||
2255 | adapter->rx_queue.queue_addr[0].correlator = (u64)IBMVETH_NUM_BUFF_POOLS << 32 | 0; | |
2256 | KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter)); | |
2257 | ||
2258 | adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | adapter->rx_buff_pool[0].size; | |
2259 | KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter)); | |
2260 | ||
2261 | pool->skbuff[0] = skb; | |
2262 | adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | 0; | |
2263 | KUNIT_EXPECT_PTR_EQ(test, skb, ibmveth_rxq_get_buffer(adapter)); | |
2264 | ||
2265 | flush_work(&adapter->work); | |
2266 | } | |
2267 | ||
2268 | static struct kunit_case ibmveth_test_cases[] = { | |
2269 | KUNIT_CASE(ibmveth_remove_buffer_from_pool_test), | |
2270 | KUNIT_CASE(ibmveth_rxq_get_buffer_test), | |
2271 | {} | |
2272 | }; | |
2273 | ||
2274 | static struct kunit_suite ibmveth_test_suite = { | |
2275 | .name = "ibmveth-kunit-test", | |
2276 | .test_cases = ibmveth_test_cases, | |
2277 | }; | |
2278 | ||
2279 | kunit_test_suite(ibmveth_test_suite); | |
2280 | #endif |
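
The suite above is compiled in only when CONFIG_IBMVETH_KUNIT_TEST is enabled. One way to run it is through the KUnit wrapper, e.g. `./tools/testing/kunit/kunit.py run` with a .kunitconfig that selects this option and its dependencies (exact invocation varies by tree); the WARN_ON splats the test comments mention appear in the kernel log rather than in the KUnit report itself.
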