// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *          Santiago Leon <santil@linux.vnet.ibm.com>
 *          Brian King <brking@linux.vnet.ibm.com>
 *          Robert Jennings <rcj@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;

static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.06"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");

static bool old_large_send __read_mostly;
module_param(old_large_send, bool, 0444);
MODULE_PARM_DESC(old_large_send,
	"Use old large send method on firmware that supports the new method");

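/*
 * The copybreak thresholds trade a memcpy for buffer reuse: in the receive
 * path, frames shorter than rx_copybreak are copied into a freshly
 * allocated skb so the original pool buffer can be reposted to the
 * hypervisor immediately (see ibmveth_poll).
 */
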
struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))

static struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
	{ "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
	{ "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
	{ "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
};

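/*
 * Each ibmveth_stats entry pairs an ethtool string with the byte offset of
 * a u64 counter inside struct ibmveth_adapter; IBMVETH_GET_STAT() reads a
 * counter back through that offset, so ibmveth_get_ethtool_stats() can walk
 * the table without naming each field.
 */
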
/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

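/*
 * Ring occupancy is tracked with a toggle protocol: the driver keeps its
 * own toggle bit in adapter->rx_queue.toggle and flips it each time the
 * ring index wraps (see ibmveth_rxq_harvest_buffer). An entry whose toggle
 * bit matches the driver's copy belongs to the current pass over the ring,
 * so ibmveth_rxq_pending_buffer() above is true exactly for entries posted
 * by the hypervisor that we have not yet consumed.
 */
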
static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}

static unsigned int ibmveth_real_max_tx_queues(void)
{
	unsigned int n_cpu = num_online_cpus();

	return min(n_cpu, IBMVETH_MAX_QUEUES);
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}

/* allocate and set up a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc_array(pool->size, sizeof(u16), GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}

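/*
 * free_map is a ring of buffer indices: ibmveth_remove_buffer_from_pool()
 * returns an index at producer_index, and ibmveth_replenish_buffer_pool()
 * claims one at consumer_index, leaving IBM_VETH_INVALID_MAP in slots whose
 * buffer is currently posted to the hypervisor. pool->available counts the
 * buffers the hypervisor currently holds.
 */
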
static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbf %0,%1,1" :: "b" (addr), "r" (offset));
}

/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		free_index = pool->consumer_index;
		index = pool->free_map[free_index];
		skb = NULL;

		if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
			schedule_work(&adapter->work);
			goto bad_index_failure;
		}

		/* are we allocating a new buffer or recycling an old one */
		if (pool->skbuff[index])
			goto reuse;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
					  pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
					       adapter->netdev->mtu +
					       IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
reuse:
		dma_addr = pool->dma_addr[index];
		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)pool->skbuff[index]->data = correlator;

		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			netdev_warn(adapter->netdev,
				    "%sadd_logical_lan failed %lu\n",
				    skb ? "" : "When recycling: ", lpar_rc);
			goto failure;
		}

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;

		buffers_added++;
		adapter->replenish_add_buff_success++;
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:

	if (dma_addr && !dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
				 pool->dma_addr[index], pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(pool->skbuff[index]);
	pool->skbuff[index] = NULL;
bad_index_failure:
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}

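/*
 * The first 8 bytes of every posted rx buffer hold a correlator: the pool
 * index in the upper 32 bits and the buffer index in the lower 32. The
 * hypervisor hands it back in the rx queue entry, which is how
 * ibmveth_rxq_get_buffer() and ibmveth_remove_buffer_from_pool() locate
 * the skb and DMA mapping for a completed frame.
 */
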
/*
 * The final 8 bytes of the buffer list are a counter of frames dropped
 * because no buffer in the buffer list was capable of holding the frame.
 */
static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
{
	__be64 *p = adapter->buffer_list_addr + 4096 - 8;

	adapter->rx_no_buffer = be64_to_cpup(p);
}

/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	ibmveth_update_rx_no_buffer(adapter);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/**
 * ibmveth_remove_buffer_from_pool - remove a buffer from a pool
 * @adapter: adapter instance
 * @correlator: identifies pool and index
 * @reuse: whether to reuse buffer
 *
 * Return:
 * * %0 - success
 * * %-EINVAL - correlator maps to a pool or index out of range
 * * %-EFAULT - pool and index map to a NULL skb
 */
static int ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					   u64 correlator, bool reuse)
{
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
	    WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
		schedule_work(&adapter->work);
		return -EINVAL;
	}

	skb = adapter->rx_buff_pool[pool].skbuff[index];
	if (WARN_ON(!skb)) {
		schedule_work(&adapter->work);
		return -EFAULT;
	}

	/* if we are going to reuse the buffer then keep the pointers around
	 * but mark index as available. replenish will see the skb pointer and
	 * assume it is to be recycled.
	 */
	if (!reuse) {
		/* remove the skb pointer to mark free. actual freeing is done
		 * by upper level networking after gro_receive
		 */
		adapter->rx_buff_pool[pool].skbuff[index] = NULL;

		dma_unmap_single(&adapter->vdev->dev,
				 adapter->rx_buff_pool[pool].dma_addr[index],
				 adapter->rx_buff_pool[pool].buff_size,
				 DMA_FROM_DEVICE);
	}

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));

	return 0;
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
	    WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
		schedule_work(&adapter->work);
		return NULL;
	}

	return adapter->rx_buff_pool[pool].skbuff[index];
}

/**
 * ibmveth_rxq_harvest_buffer - Harvest buffer from pool
 *
 * @adapter: pointer to adapter
 * @reuse: whether to reuse buffer
 *
 * Context: called from ibmveth_poll
 *
 * Return:
 * * %0 - success
 * * other - non-zero return from ibmveth_remove_buffer_from_pool
 */
static int ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
				      bool reuse)
{
	u64 cor;
	int rc;

	cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	rc = ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
	if (unlikely(rc))
		return rc;

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}

	return 0;
}

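/*
 * Transmit uses one long-term mapped buffer (LTB) per tx queue: the buffer
 * is allocated and DMA-mapped once here, and ibmveth_start_xmit() copies
 * each outgoing frame (linear header plus frags) into it rather than
 * mapping skbs individually, so no per-frame unmap is needed on completion.
 */
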
static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
{
	dma_unmap_single(&adapter->vdev->dev, adapter->tx_ltb_dma[idx],
			 adapter->tx_ltb_size, DMA_TO_DEVICE);
	kfree(adapter->tx_ltb_ptr[idx]);
	adapter->tx_ltb_ptr[idx] = NULL;
}

static int ibmveth_allocate_tx_ltb(struct ibmveth_adapter *adapter, int idx)
{
	adapter->tx_ltb_ptr[idx] = kzalloc(adapter->tx_ltb_size,
					   GFP_KERNEL);
	if (!adapter->tx_ltb_ptr[idx]) {
		netdev_err(adapter->netdev,
			   "unable to allocate tx long term buffer\n");
		return -ENOMEM;
	}
	adapter->tx_ltb_dma[idx] = dma_map_single(&adapter->vdev->dev,
						  adapter->tx_ltb_ptr[idx],
						  adapter->tx_ltb_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, adapter->tx_ltb_dma[idx])) {
		netdev_err(adapter->netdev,
			   "unable to DMA map tx long term buffer\n");
		kfree(adapter->tx_ltb_ptr[idx]);
		adapter->tx_ltb_ptr[idx] = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
		union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}

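/*
 * Opening the device wires up three shared control areas: a 4K buffer list
 * page (whose final 8 bytes are the rx_no_buffer counter read above), a 4K
 * filter list page used for multicast filtering, and the rx queue ring,
 * sized at one entry more than the total of all buffer pool sizes -
 * presumably so a ring slot exists for every buffer that can be posted at
 * once.
 */
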
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	rc = -ENOMEM;
	adapter->buffer_list_addr = (void *)get_zeroed_page(GFP_KERNEL);
	if (!adapter->buffer_list_addr) {
		netdev_err(netdev, "unable to allocate list pages\n");
		goto out;
	}

	adapter->filter_list_addr = (void *)get_zeroed_page(GFP_KERNEL);
	if (!adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter pages\n");
		goto out_free_buffer_list;
	}

	dev = &adapter->vdev->dev;

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr =
		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
	if (!adapter->rx_queue.queue_addr)
		goto out_free_filter_list;

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->buffer_list_dma)) {
		netdev_err(netdev, "unable to map buffer list pages\n");
		goto out_free_queue_mem;
	}

	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->filter_list_dma)) {
		netdev_err(netdev, "unable to map filter list pages\n");
		goto out_unmap_buffer_list;
	}

	for (i = 0; i < netdev->real_num_tx_queues; i++) {
		if (ibmveth_allocate_tx_ltb(adapter, i))
			goto out_free_tx_ltb;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	mac_address = ether_addr_to_u64(netdev->dev_addr);

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
			   adapter->buffer_list_dma,
			   adapter->filter_list_dma,
			   rxq_desc.desc,
			   mac_address);
		rc = -ENONET;
		goto out_unmap_filter_list;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto out_free_buffer_pools;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto out_free_buffer_pools;
	}

	rc = -ENOMEM;

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_tx_start_all_queues(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

out_free_buffer_pools:
	while (--i >= 0) {
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);
	}
out_unmap_filter_list:
	dma_unmap_single(dev, adapter->filter_list_dma, 4096,
			 DMA_BIDIRECTIONAL);

out_free_tx_ltb:
	while (--i >= 0) {
		ibmveth_free_tx_ltb(adapter, i);
	}

out_unmap_buffer_list:
	dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
			 DMA_BIDIRECTIONAL);
out_free_queue_mem:
	dma_free_coherent(dev, adapter->rx_queue.queue_len,
			  adapter->rx_queue.queue_addr,
			  adapter->rx_queue.queue_dma);
out_free_filter_list:
	free_page((unsigned long)adapter->filter_list_addr);
out_free_buffer_list:
	free_page((unsigned long)adapter->buffer_list_addr);
out:
	napi_disable(&adapter->napi);
	return rc;
}

static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	long lpar_rc;
	int i;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	netif_tx_stop_all_queues(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	ibmveth_update_rx_no_buffer(adapter);

	dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)adapter->buffer_list_addr);

	dma_unmap_single(dev, adapter->filter_list_dma, 4096,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)adapter->filter_list_addr);

	dma_free_coherent(dev, adapter->rx_queue.queue_len,
			  adapter->rx_queue.queue_addr,
			  adapter->rx_queue.queue_dma);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	for (i = 0; i < netdev->real_num_tx_queues; i++)
		ibmveth_free_tx_ltb(adapter, i);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}

/**
 * ibmveth_reset - Handle scheduled reset work
 *
 * @w: pointer to work_struct embedded in adapter structure
 *
 * Context: This routine acquires rtnl_mutex and disables its NAPI through
 *          ibmveth_close. It can't be called directly in a context that has
 *          already acquired rtnl_mutex or disabled its NAPI, or directly
 *          from a poll routine.
 *
 * Return: void
 */
static void ibmveth_reset(struct work_struct *w)
{
	struct ibmveth_adapter *adapter = container_of(w, struct ibmveth_adapter, work);
	struct net_device *netdev = adapter->netdev;

	netdev_dbg(netdev, "reset starting\n");

	rtnl_lock();

	dev_close(adapter->netdev);
	dev_open(adapter->netdev, NULL);

	rtnl_unlock();

	netdev_dbg(netdev, "reset complete\n");
}

static int ibmveth_set_link_ksettings(struct net_device *dev,
				      const struct ethtool_link_ksettings *cmd)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	return ethtool_virtdev_set_link_ksettings(dev, cmd,
						  &adapter->speed,
						  &adapter->duplex);
}

static int ibmveth_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	cmd->base.speed = adapter->speed;
	cmd->base.duplex = adapter->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static void ibmveth_init_link_settings(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	adapter->speed = SPEED_1000;
	adapter->duplex = DUPLEX_FULL;
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strscpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
	strscpy(info->version, ibmveth_driver_version, sizeof(info->version));
}

static netdev_features_t ibmveth_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since the ibmveth firmware interface does not have the
	 * concept of separate tx/rx checksum offload enable, if rx
	 * checksum is disabled we also have to disable tx checksum
	 * offload. Once we disable rx checksum offload, we are no
	 * longer allowed to send tx buffers that are not properly
	 * checksummed.
	 */

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_CSUM_MASK;

	return features;
}

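/*
 * ibmveth_set_csum_offload() and ibmveth_set_tso() below share a pattern:
 * if the interface is running it is closed first, the firmware attribute
 * is flipped through h_illan_attributes() (and flipped back if the hcall
 * fails), and the interface is then reopened - presumably because the
 * attribute cannot change while the logical LAN is registered.
 */
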
static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret4, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		ibmveth_close(dev);
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret4 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret4);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IP_CSUM;

		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret6);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr6, clr_attr6, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IPV6_CSUM;

		} else
			adapter->fw_ipv6_csum_support = data;

		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
				" %d rc=%ld ret_attr=%lx\n", data, ret,
				ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_tso(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	long ret1, ret2;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		ibmveth_close(dev);
	}

	set_attr = 0;
	clr_attr = 0;

	if (data)
		set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
	else
		clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;

	ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
	    !old_large_send) {
		ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret2 != H_SUCCESS) {
			netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
				   data, ret2);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
			rc1 = -EIO;

		} else {
			adapter->fw_large_send_support = data;
			adapter->large_send = data;
		}
	} else {
		/* Older firmware version of large send offload does not
		 * support tcp6/ipv6
		 */
		if (data == 1) {
			dev->features &= ~NETIF_F_TSO6;
			netdev_info(dev, "TSO feature requires all partitions to have updated driver");
		}
		adapter->large_send = data;
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rx_csum = !!(features & NETIF_F_RXCSUM);
	int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
	int rc1 = 0, rc2 = 0;

	if (rx_csum != adapter->rx_csum) {
		rc1 = ibmveth_set_csum_offload(dev, rx_csum);
		if (rc1 && !adapter->rx_csum)
			dev->features =
				features & ~(NETIF_F_CSUM_MASK |
					     NETIF_F_RXCSUM);
	}

	if (large_send != adapter->large_send) {
		rc2 = ibmveth_set_tso(dev, large_send);
		if (rc2 && !adapter->large_send)
			dev->features =
				features & ~(NETIF_F_TSO | NETIF_F_TSO6);
	}

	return rc1 ? rc1 : rc2;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static void ibmveth_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channels)
{
	channels->max_tx = ibmveth_real_max_tx_queues();
	channels->tx_count = netdev->real_num_tx_queues;

	channels->max_rx = netdev->real_num_rx_queues;
	channels->rx_count = netdev->real_num_rx_queues;
}

static int ibmveth_set_channels(struct net_device *netdev,
				struct ethtool_channels *channels)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int old = netdev->real_num_tx_queues,
		     goal = channels->tx_count;
	int rc, i;

	/* If ndo_open has not been called yet then don't allocate, just set
	 * the desired number of netdev queues and return
	 */
	if (!(netdev->flags & IFF_UP))
		return netif_set_real_num_tx_queues(netdev, goal);

	/* We already have IBMVETH_MAX_QUEUES netdev queues allocated,
	 * but we may need to alloc/free the LTBs.
	 */
	netif_tx_stop_all_queues(netdev);

	/* Allocate any queue that we need */
	for (i = old; i < goal; i++) {
		if (adapter->tx_ltb_ptr[i])
			continue;

		rc = ibmveth_allocate_tx_ltb(adapter, i);
		if (!rc)
			continue;

		/* if something goes wrong, free everything we just allocated */
		netdev_err(netdev, "Failed to allocate more tx queues, returning to %d queues\n",
			   old);
		goal = old;
		old = i;
		break;
	}
	rc = netif_set_real_num_tx_queues(netdev, goal);
	if (rc) {
		netdev_err(netdev, "Failed to set real tx queues, returning to %d queues\n",
			   old);
		goal = old;
		old = i;
	}
	/* Free any that are no longer needed */
	for (i = old; i > goal; i--) {
		if (adapter->tx_ltb_ptr[i - 1])
			ibmveth_free_tx_ltb(adapter, i - 1);
	}

	netif_tx_wake_all_queues(netdev);

	return rc;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
	.get_link_ksettings	= ibmveth_get_link_ksettings,
	.set_link_ksettings	= ibmveth_set_link_ksettings,
	.get_channels		= ibmveth_get_channels,
	.set_channels		= ibmveth_set_channels
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

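/*
 * A transmit is a single h_send_logical_lan hypercall: desc packs the
 * frame's flags and length together with the DMA address of the per-queue
 * LTB, and mss is non-zero only when firmware supports the newer
 * large-send interface. H_BUSY returns are retried a bounded number of
 * times.
 */
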
static int ibmveth_send(struct ibmveth_adapter *adapter,
			unsigned long desc, unsigned long mss)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address, desc,
					 correlator, &correlator, mss,
					 adapter->fw_large_send_support);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}

static int ibmveth_is_packet_unsupported(struct sk_buff *skb,
					 struct net_device *netdev)
{
	struct ethhdr *ether_header;
	int ret = 0;

	ether_header = eth_hdr(skb);

	if (ether_addr_equal(ether_header->h_dest, netdev->dev_addr)) {
		netdev_dbg(netdev, "veth doesn't support loopback packets, dropping packet.\n");
		netdev->stats.tx_dropped++;
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags, total_bytes;
	union ibmveth_buf_desc desc;
	int i, queue_num = skb_get_queue_mapping(skb);
	unsigned long mss = 0;

	if (ibmveth_is_packet_unsupported(skb, netdev))
		goto out;
	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;

		if (skb_is_gso(skb) && adapter->fw_large_send_support)
			desc_flags |= IBMVETH_BUF_LRG_SND;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		if (adapter->fw_large_send_support) {
			mss = (unsigned long)skb_shinfo(skb)->gso_size;
			adapter->tx_large_packets++;
		} else if (!skb_is_gso_v6(skb)) {
			/* Put -1 in the IP checksum to tell phyp it
			 * is a largesend packet. Put the mss in
			 * the TCP checksum.
			 */
			ip_hdr(skb)->check = 0xffff;
			tcp_hdr(skb)->check =
				cpu_to_be16(skb_shinfo(skb)->gso_size);
			adapter->tx_large_packets++;
		}
	}

	/* Copy header into mapped buffer */
	if (unlikely(skb->len > adapter->tx_ltb_size)) {
		netdev_err(adapter->netdev, "tx: packet size (%u) exceeds ltb (%u)\n",
			   skb->len, adapter->tx_ltb_size);
		netdev->stats.tx_dropped++;
		goto out;
	}
	memcpy(adapter->tx_ltb_ptr[queue_num], skb->data, skb_headlen(skb));
	total_bytes = skb_headlen(skb);
	/* Copy frags into mapped buffers */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		memcpy(adapter->tx_ltb_ptr[queue_num] + total_bytes,
		       skb_frag_address_safe(frag), skb_frag_size(frag));
		total_bytes += skb_frag_size(frag);
	}

	if (unlikely(total_bytes != skb->len)) {
		netdev_err(adapter->netdev, "tx: incorrect packet len copied into ltb (%u != %u)\n",
			   skb->len, total_bytes);
		netdev->stats.tx_dropped++;
		goto out;
	}
	desc.fields.flags_len = desc_flags | skb->len;
	desc.fields.address = adapter->tx_ltb_dma[queue_num];
	/* finish writing to long_term_buff before VIOS accessing it */
	dma_wmb();

	if (ibmveth_send(adapter, desc.desc, mss)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

out:
	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;
}

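/*
 * Received large-send ("large packet") frames arrive bigger than the MTU
 * and must look like GSO/GRO output before going up the stack:
 * ibmveth_rx_mss_helper() below recovers the MSS (from the rx buffer for
 * large-packet frames, otherwise from the value stashed in the TCP
 * checksum field by the sender) and fills in gso_size, gso_type and
 * gso_segs.
 */
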
static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
{
	struct tcphdr *tcph;
	int offset = 0;
	int hdr_len;

	/* only TCP packets will be aggregated */
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_TCP) {
			offset = iph->ihl * 4;
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		} else {
			return;
		}
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data;

		if (iph6->nexthdr == IPPROTO_TCP) {
			offset = sizeof(struct ipv6hdr);
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		} else {
			return;
		}
	} else {
		return;
	}
	/* if mss is not set through Large Packet bit/mss in rx buffer,
	 * expect that the mss will be written to the tcp header checksum.
	 */
	tcph = (struct tcphdr *)(skb->data + offset);
	if (lrg_pkt) {
		skb_shinfo(skb)->gso_size = mss;
	} else if (offset) {
		skb_shinfo(skb)->gso_size = ntohs(tcph->check);
		tcph->check = 0;
	}

	if (skb_shinfo(skb)->gso_size) {
		hdr_len = offset + tcph->doff * 4;
		skb_shinfo(skb)->gso_segs =
			DIV_ROUND_UP(skb->len - hdr_len,
				     skb_shinfo(skb)->gso_size);
	}
}

static void ibmveth_rx_csum_helper(struct sk_buff *skb,
				   struct ibmveth_adapter *adapter)
{
	struct iphdr *iph = NULL;
	struct ipv6hdr *iph6 = NULL;
	__be16 skb_proto = 0;
	u16 iphlen = 0;
	u16 iph_proto = 0;
	u16 tcphdrlen = 0;

	skb_proto = be16_to_cpu(skb->protocol);

	if (skb_proto == ETH_P_IP) {
		iph = (struct iphdr *)skb->data;

		/* If the IP checksum is not offloaded and if the packet
		 * is large send, the checksum must be rebuilt.
		 */
		if (iph->check == 0xffff) {
			iph->check = 0;
			iph->check = ip_fast_csum((unsigned char *)iph,
						  iph->ihl);
		}

		iphlen = iph->ihl * 4;
		iph_proto = iph->protocol;
	} else if (skb_proto == ETH_P_IPV6) {
		iph6 = (struct ipv6hdr *)skb->data;
		iphlen = sizeof(struct ipv6hdr);
		iph_proto = iph6->nexthdr;
	}

	/* When CSO is enabled the TCP checksum may have been set to zero by
	 * the sender, given that we zeroed out the TCP checksum field in the
	 * transmit path (see the ibmveth_start_xmit routine). In this case,
	 * set up CHECKSUM_PARTIAL. If the packet is forwarded, the checksum
	 * will then be recalculated by the destination NIC (CSO must be
	 * enabled on the destination NIC).
	 *
	 * In an OVS environment, when a flow is not cached, specifically for a
	 * new TCP connection, the first packet information is passed up to
	 * the user space for finding a flow. During this process, OVS computes
	 * checksum on the first packet when CHECKSUM_PARTIAL flag is set.
	 *
	 * So, re-compute the TCP pseudo header checksum.
	 */

	if (iph_proto == IPPROTO_TCP) {
		struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);

		if (tcph->check == 0x0000) {
			/* Recompute TCP pseudo header checksum */
			tcphdrlen = skb->len - iphlen;
			if (skb_proto == ETH_P_IP)
				tcph->check =
					~csum_tcpudp_magic(iph->saddr,
						iph->daddr, tcphdrlen,
						iph_proto, 0);
			else if (skb_proto == ETH_P_IPV6)
				tcph->check =
					~csum_ipv6_magic(&iph6->saddr,
						&iph6->daddr, tcphdrlen,
						iph_proto, 0);
			/* Setup SKB fields for checksum offload */
			skb_partial_csum_set(skb, iphlen,
					     offsetof(struct tcphdr, check));
			skb_reset_network_header(skb);
		}
	}
}

bea3348e | 1377 | static int ibmveth_poll(struct napi_struct *napi, int budget) |
1da177e4 | 1378 | { |
f148f61d SL |
1379 | struct ibmveth_adapter *adapter = |
1380 | container_of(napi, struct ibmveth_adapter, napi); | |
bea3348e | 1381 | struct net_device *netdev = adapter->netdev; |
1da177e4 | 1382 | int frames_processed = 0; |
1da177e4 | 1383 | unsigned long lpar_rc; |
7b596738 | 1384 | u16 mss = 0; |
1da177e4 | 1385 | |
f128c7cf | 1386 | restart_poll: |
cb013ea1 | 1387 | while (frames_processed < budget) { |
bea3348e SH |
1388 | if (!ibmveth_rxq_pending_buffer(adapter)) |
1389 | break; | |
1da177e4 | 1390 | |
f89e49e7 | 1391 | smp_rmb(); |
bea3348e SH |
1392 | if (!ibmveth_rxq_buffer_valid(adapter)) { |
1393 | wmb(); /* suggested by larson1 */ | |
1394 | adapter->rx_invalid_buffer++; | |
c43ced18 | 1395 | netdev_dbg(netdev, "recycling invalid buffer\n"); |
2c91e231 DM |
1396 | if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true))) |
1397 | break; | |
bea3348e | 1398 | } else { |
8d86c61a | 1399 | struct sk_buff *skb, *new_skb; |
bea3348e SH |
1400 | int length = ibmveth_rxq_frame_length(adapter); |
1401 | int offset = ibmveth_rxq_frame_offset(adapter); | |
f4ff2872 | 1402 | int csum_good = ibmveth_rxq_csum_good(adapter); |
7b596738 | 1403 | int lrg_pkt = ibmveth_rxq_large_packet(adapter); |
413f142c | 1404 | __sum16 iph_check = 0; |
f4ff2872 | 1405 | |
bea3348e | 1406 | skb = ibmveth_rxq_get_buffer(adapter); |
2c91e231 DM |
1407 | if (unlikely(!skb)) |
1408 | break; | |
1da177e4 | 1409 | |
7b596738 TF |
1410 | /* if the large packet bit is set in the rx queue |
1411 | * descriptor, the mss will be written by PHYP eight | |
1412 | * bytes from the start of the rx buffer, which is | |
1413 | * skb->data at this stage | |
1414 | */ | |
1415 | if (lrg_pkt) { | |
1416 | __be64 *rxmss = (__be64 *)(skb->data + 8); | |
1417 | ||
1418 | mss = (u16)be64_to_cpu(*rxmss); | |
1419 | } | |
1420 | ||
8d86c61a SL |
1421 | new_skb = NULL; |
1422 | if (length < rx_copybreak) | |
1423 | new_skb = netdev_alloc_skb(netdev, length); | |
1424 | ||
1425 | if (new_skb) { | |
1426 | skb_copy_to_linear_data(new_skb, | |
1427 | skb->data + offset, | |
1428 | length); | |
0c26b677 SL |
1429 | if (rx_flush) |
1430 | ibmveth_flush_buffer(skb->data, | |
1431 | length + offset); | |
2c91e231 DM |
1432 | if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true))) |
1433 | break; | |
8d86c61a | 1434 | skb = new_skb; |
8d86c61a | 1435 | } else { |
2c91e231 DM |
1436 | if (unlikely(ibmveth_rxq_harvest_buffer(adapter, false))) |
1437 | break; | |
8d86c61a SL |
1438 | skb_reserve(skb, offset); |
1439 | } | |
1da177e4 | 1440 | |
bea3348e SH |
1441 | skb_put(skb, length); |
1442 | skb->protocol = eth_type_trans(skb, netdev); | |
1da177e4 | 1443 | |
413f142c DW |
1444 | /* PHYP without PLSO support places a -1 in the ip |
1445 | * checksum for large send frames. | |
1446 | */ | |
1447 | if (skb->protocol == cpu_to_be16(ETH_P_IP)) { | |
1448 | struct iphdr *iph = (struct iphdr *)skb->data; | |
1449 | ||
1450 | iph_check = iph->check; | |
1451 | } | |
1452 | ||
1453 | if ((length > netdev->mtu + ETH_HLEN) || | |
1454 | lrg_pkt || iph_check == 0xffff) { | |
7b596738 TF |
1455 | ibmveth_rx_mss_helper(skb, mss, lrg_pkt); |
1456 | adapter->rx_large_packets++; | |
1457 | } | |
1458 | ||
5ce9ad81 DW |
1459 | if (csum_good) { |
1460 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1461 | ibmveth_rx_csum_helper(skb, adapter); | |
1462 | } | |
1463 | ||
92ec8279 | 1464 | napi_gro_receive(napi, skb); /* send it up */ |
1da177e4 | 1465 | |
09f75cd7 JG |
1466 | netdev->stats.rx_packets++; |
1467 | netdev->stats.rx_bytes += length; | |
bea3348e | 1468 | frames_processed++; |
1da177e4 | 1469 | } |
cb013ea1 | 1470 | } |
1da177e4 | 1471 | |
e2adbcb4 | 1472 | ibmveth_replenish_task(adapter); |
1da177e4 | 1473 | |
f128c7cf NC |
1474 | if (frames_processed == budget) |
1475 | goto out; | |
4736edc7 | 1476 | |
f128c7cf NC |
1477 | if (!napi_complete_done(napi, frames_processed)) |
1478 | goto out; | |
1da177e4 | 1479 | |
f128c7cf NC |
1480 | /* We think we are done - reenable interrupts, |
1481 | * then check once more to make sure we are done. | |
1482 | */ | |
1483 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE); | |
2c91e231 DM |
1484 | if (WARN_ON(lpar_rc != H_SUCCESS)) { |
1485 | schedule_work(&adapter->work); | |
1486 | goto out; | |
1487 | } | |
1da177e4 | 1488 | |
f128c7cf NC |
1489 | if (ibmveth_rxq_pending_buffer(adapter) && napi_schedule(napi)) { |
1490 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, | |
1491 | VIO_IRQ_DISABLE); | |
1492 | goto restart_poll; | |
1da177e4 LT |
1493 | } |
1494 | ||
f128c7cf | 1495 | out: |
bea3348e | 1496 | return frames_processed; |
1da177e4 LT |
1497 | } |
1498 | ||
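The tail of ibmveth_poll() is the standard NAPI re-arm handshake: finish, re-enable the device interrupt via h_vio_signal(), then recheck the receive queue, because a frame can arrive between the last queue check and the enable. A condensed sketch of just that control flow; rxq_has_work(), process_one_frame() and the irq enable/disable helpers are hypothetical stand-ins for the ibmveth specifics:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int done = 0;

	restart:
		while (done < budget && rxq_has_work())	/* hypothetical */
			done += process_one_frame();	/* hypothetical */

		if (done == budget)
			return done;	/* budget exhausted: stay scheduled */

		if (!napi_complete_done(napi, done))
			return done;	/* someone already rescheduled us */

		device_irq_enable();			/* hypothetical */

		/* close the race: work may have landed before the enable */
		if (rxq_has_work() && napi_schedule(napi)) {
			device_irq_disable();		/* hypothetical */
			goto restart;
		}
		return done;
	}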
7d12e780 | 1499 | static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) |
d7fbeba6 | 1500 | { |
1da177e4 | 1501 | struct net_device *netdev = dev_instance; |
4cf1653a | 1502 | struct ibmveth_adapter *adapter = netdev_priv(netdev); |
1da177e4 LT |
1503 | unsigned long lpar_rc; |
1504 | ||
288379f0 | 1505 | if (napi_schedule_prep(&adapter->napi)) { |
bea3348e SH |
1506 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, |
1507 | VIO_IRQ_DISABLE); | |
2c91e231 | 1508 | WARN_ON(lpar_rc != H_SUCCESS); |
288379f0 | 1509 | __napi_schedule(&adapter->napi); |
1da177e4 LT |
1510 | } |
1511 | return IRQ_HANDLED; | |
1512 | } | |
1513 | ||
1da177e4 LT |
1514 | static void ibmveth_set_multicast_list(struct net_device *netdev) |
1515 | { | |
4cf1653a | 1516 | struct ibmveth_adapter *adapter = netdev_priv(netdev); |
1da177e4 LT |
1517 | unsigned long lpar_rc; |
1518 | ||
4cd24eaf JP |
1519 | if ((netdev->flags & IFF_PROMISC) || |
1520 | (netdev_mc_count(netdev) > adapter->mcastFilterSize)) { | |
1da177e4 LT |
1521 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, |
1522 | IbmVethMcastEnableRecv | | |
1523 | IbmVethMcastDisableFiltering, | |
1524 | 0); | |
f148f61d | 1525 | if (lpar_rc != H_SUCCESS) { |
21c2dece SL |
1526 | netdev_err(netdev, "h_multicast_ctrl rc=%ld when " |
1527 | "entering promisc mode\n", lpar_rc); | |
1da177e4 LT |
1528 | } |
1529 | } else { | |
22bedad3 | 1530 | struct netdev_hw_addr *ha; |
1da177e4 LT |
1531 | /* clear the filter table & disable filtering */ |
1532 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, | |
1533 | IbmVethMcastEnableRecv | | |
1534 | IbmVethMcastDisableFiltering | | |
1535 | IbmVethMcastClearFilterTable, | |
1536 | 0); | |
f148f61d | 1537 | if (lpar_rc != H_SUCCESS) { |
21c2dece SL |
1538 | netdev_err(netdev, "h_multicast_ctrl rc=%ld when " |
1539 | "attempting to clear filter table\n", | |
1540 | lpar_rc); | |
1da177e4 LT |
1541 | } |
1542 | /* add the addresses to the filter table */ | |
22bedad3 | 1543 | netdev_for_each_mc_addr(ha, netdev) { |
f148f61d | 1544 | /* add the multicast address to the filter table */ |
d746ca95 | 1545 | u64 mcast_addr; |
5c8b3485 | 1546 | mcast_addr = ether_addr_to_u64(ha->addr); |
1da177e4 LT |
1547 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, |
1548 | IbmVethMcastAddFilter, | |
1549 | mcast_addr); | |
f148f61d | 1550 | if (lpar_rc != H_SUCCESS) { |
21c2dece SL |
1551 | netdev_err(netdev, "h_multicast_ctrl rc=%ld " |
1552 | "when adding an entry to the filter " | |
1553 | "table\n", lpar_rc); | |
1da177e4 LT |
1554 | } |
1555 | } | |
d7fbeba6 | 1556 | |
1da177e4 LT |
1557 | /* re-enable filtering */ |
1558 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, | |
1559 | IbmVethMcastEnableFiltering, | |
1560 | 0); | |
f148f61d | 1561 | if (lpar_rc != H_SUCCESS) { |
21c2dece SL |
1562 | netdev_err(netdev, "h_multicast_ctrl rc=%ld when " |
1563 | "enabling filtering\n", lpar_rc); | |
1da177e4 LT |
1564 | } |
1565 | } | |
1566 | } | |
1567 | ||
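Each filter entry passed to h_multicast_ctrl() above is simply the six MAC bytes packed big-endian into the low 48 bits of a u64, which is what ether_addr_to_u64() produces. A self-contained sketch of the packing; mac_to_u64() is a hypothetical stand-in:

	#include <stdint.h>

	static uint64_t mac_to_u64(const uint8_t mac[6])
	{
		uint64_t v = 0;
		int i;

		for (i = 0; i < 6; i++)	/* most significant byte first */
			v = (v << 8) | mac[i];
		return v;	/* e.g. 01:00:5e:00:00:01 -> 0x01005e000001 */
	}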
1568 | static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) | |
1569 | { | |
4cf1653a | 1570 | struct ibmveth_adapter *adapter = netdev_priv(dev); |
1096d63d | 1571 | struct vio_dev *viodev = adapter->vdev; |
860f242e | 1572 | int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; |
0645bab7 RJ |
1573 | int i, rc; |
1574 | int need_restart = 0; | |
b6d35182 | 1575 | |
517e80e6 | 1576 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) |
4fce1482 | 1577 | if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) |
ce6eea58 BK |
1578 | break; |
1579 | ||
517e80e6 | 1580 | if (i == IBMVETH_NUM_BUFF_POOLS) |
ce6eea58 BK |
1581 | return -EINVAL; |
1582 | ||
ea866e65 SL |
1583 | /* Deactivate all the buffer pools so that the next loop can activate
1584 | * only the buffer pools necessary to hold the new MTU */ |
0645bab7 RJ |
1585 | if (netif_running(adapter->netdev)) { |
1586 | need_restart = 1; | |
0645bab7 | 1587 | ibmveth_close(adapter->netdev); |
0645bab7 | 1588 | } |
ea866e65 | 1589 | |
860f242e | 1590 | /* Look for an active buffer pool that can hold the new MTU */ |
f148f61d | 1591 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
ea866e65 | 1592 | adapter->rx_buff_pool[i].active = 1; |
ce6eea58 | 1593 | |
4fce1482 | 1594 | if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) { |
1eb2cded | 1595 | WRITE_ONCE(dev->mtu, new_mtu); |
1096d63d RJ |
1596 | vio_cmo_set_dev_desired(viodev, |
1597 | ibmveth_get_desired_dma | |
1598 | (viodev)); | |
0645bab7 RJ |
1599 | if (need_restart) { |
1600 | return ibmveth_open(adapter->netdev); | |
1601 | } | |
860f242e | 1602 | return 0; |
b6d35182 | 1603 | } |
b6d35182 | 1604 | } |
0645bab7 RJ |
1605 | |
1606 | if (need_restart && (rc = ibmveth_open(adapter->netdev))) | |
1607 | return rc; | |
1608 | ||
860f242e | 1609 | return -EINVAL; |
1da177e4 LT |
1610 | } |
1611 | ||
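The acceptance test in ibmveth_change_mtu() reduces to one rule: the new MTU is valid only if some receive buffer pool can hold new_mtu plus the IBMVETH_BUFF_OH per-buffer overhead. A minimal sketch of that rule, assuming pools are ordered by ascending buffer size as the driver's defaults are; first_pool_that_fits() is hypothetical:

	static int first_pool_that_fits(const unsigned int *buff_size, int npools,
					unsigned int new_mtu, unsigned int overhead)
	{
		int i;

		for (i = 0; i < npools; i++)
			if (new_mtu + overhead <= buff_size[i])
				return i;	/* smallest pool that fits */
		return -1;			/* nothing fits: reject the MTU */
	}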
6b422374 SL |
1612 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1613 | static void ibmveth_poll_controller(struct net_device *dev) | |
1614 | { | |
4cf1653a | 1615 | ibmveth_replenish_task(netdev_priv(dev)); |
5f77113c | 1616 | ibmveth_interrupt(dev->irq, dev); |
6b422374 SL |
1617 | } |
1618 | #endif | |
1619 | ||
1096d63d RJ |
1620 | /** |
1621 | * ibmveth_get_desired_dma - Calculate IO memory desired by the driver | |
1622 | * | |
1623 | * @vdev: struct vio_dev for the device whose desired IO mem is to be returned | |
1624 | * | |
1625 | * Return value: | |
1626 | * Number of bytes of IO data the driver will need to perform well. | |
1627 | */ | |
1628 | static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev) | |
1629 | { | |
1630 | struct net_device *netdev = dev_get_drvdata(&vdev->dev); | |
1631 | struct ibmveth_adapter *adapter; | |
d0847757 | 1632 | struct iommu_table *tbl; |
1096d63d RJ |
1633 | unsigned long ret; |
1634 | int i; | |
1635 | int rxqentries = 1; | |
1636 | ||
d0847757 AP |
1637 | tbl = get_iommu_table_base(&vdev->dev); |
1638 | ||
1096d63d RJ |
1639 | /* netdev inits at probe time along with the structures we need below */ | |
1640 | if (netdev == NULL) | |
d0847757 | 1641 | return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl); |
1096d63d RJ |
1642 | |
1643 | adapter = netdev_priv(netdev); | |
1644 | ||
1645 | ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; | |
d0847757 | 1646 | ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl); |
d6832ca4 NC |
1647 | /* add size of mapped tx buffers */ |
1648 | ret += IOMMU_PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE, tbl); | |
1096d63d | 1649 | |
517e80e6 | 1650 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
1096d63d RJ |
1651 | /* add the size of the active receive buffers */ |
1652 | if (adapter->rx_buff_pool[i].active) | |
1653 | ret += | |
1654 | adapter->rx_buff_pool[i].size * | |
1655 | IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i]. | |
d0847757 | 1656 | buff_size, tbl); |
1096d63d RJ |
1657 | rxqentries += adapter->rx_buff_pool[i].size; |
1658 | } | |
1659 | /* add the size of the receive queue entries */ | |
d0847757 AP |
1660 | ret += IOMMU_PAGE_ALIGN( |
1661 | rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl); | |
1096d63d RJ |
1662 | |
1663 | return ret; | |
1664 | } | |
1665 | ||
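Every term in the entitlement sum above is rounded up to a whole IOMMU page, since DMA mappings are granted in page units. A sketch of the rounding with an assumed fixed 4 KiB page; the real IOMMU_PAGE_ALIGN() takes the page size from the iommu_table:

	#include <stdint.h>

	#define EX_PAGE_SIZE 4096u	/* assumed page size, illustration only */

	static uint64_t ex_page_align(uint64_t len)
	{
		return (len + EX_PAGE_SIZE - 1) & ~(uint64_t)(EX_PAGE_SIZE - 1);
	}
	/* ex_page_align(1) == 4096, ex_page_align(4097) == 8192 */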
c77c761f TF |
1666 | static int ibmveth_set_mac_addr(struct net_device *dev, void *p) |
1667 | { | |
1668 | struct ibmveth_adapter *adapter = netdev_priv(dev); | |
1669 | struct sockaddr *addr = p; | |
1670 | u64 mac_address; | |
1671 | int rc; | |
1672 | ||
1673 | if (!is_valid_ether_addr(addr->sa_data)) | |
1674 | return -EADDRNOTAVAIL; | |
1675 | ||
5c8b3485 | 1676 | mac_address = ether_addr_to_u64(addr->sa_data); |
c77c761f TF |
1677 | rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address); |
1678 | if (rc) { | |
1679 | netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc); | |
1680 | return rc; | |
1681 | } | |
1682 | ||
f3956ebb | 1683 | eth_hw_addr_set(dev, addr->sa_data); |
c77c761f TF |
1684 | |
1685 | return 0; | |
1686 | } | |
1687 | ||
e186d174 AB |
1688 | static const struct net_device_ops ibmveth_netdev_ops = { |
1689 | .ndo_open = ibmveth_open, | |
1690 | .ndo_stop = ibmveth_close, | |
1691 | .ndo_start_xmit = ibmveth_start_xmit, | |
afc4b13d | 1692 | .ndo_set_rx_mode = ibmveth_set_multicast_list, |
a7605370 | 1693 | .ndo_eth_ioctl = ibmveth_ioctl, |
e186d174 | 1694 | .ndo_change_mtu = ibmveth_change_mtu, |
b9367bf3 MM |
1695 | .ndo_fix_features = ibmveth_fix_features, |
1696 | .ndo_set_features = ibmveth_set_features, | |
e186d174 | 1697 | .ndo_validate_addr = eth_validate_addr, |
c77c761f | 1698 | .ndo_set_mac_address = ibmveth_set_mac_addr, |
e186d174 AB |
1699 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1700 | .ndo_poll_controller = ibmveth_poll_controller, | |
1701 | #endif | |
1702 | }; | |
1703 | ||
1dd06ae8 | 1704 | static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) |
1da177e4 | 1705 | { |
13f85203 | 1706 | int rc, i, mac_len; |
1da177e4 | 1707 | struct net_device *netdev; |
9dc83afd | 1708 | struct ibmveth_adapter *adapter; |
1da177e4 | 1709 | unsigned char *mac_addr_p; |
66cf4710 | 1710 | __be32 *mcastFilterSize_p; |
07e6a97d TF |
1711 | long ret; |
1712 | unsigned long ret_attr; | |
1da177e4 | 1713 | |
c43ced18 SL |
1714 | dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n", |
1715 | dev->unit_address); | |
1da177e4 | 1716 | |
f148f61d | 1717 | mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR, |
13f85203 | 1718 | &mac_len); |
f148f61d | 1719 | if (!mac_addr_p) { |
21c2dece | 1720 | dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n"); |
be35ae9e | 1721 | return -EINVAL; |
1da177e4 | 1722 | } |
13f85203 BH |
1723 | /* Workaround for old/broken pHyp */ |
1724 | if (mac_len == 8) | |
1725 | mac_addr_p += 2; | |
1726 | else if (mac_len != 6) { | |
1727 | dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n", | |
1728 | mac_len); | |
1729 | return -EINVAL; | |
1730 | } | |
d7fbeba6 | 1731 | |
66cf4710 TF |
1732 | mcastFilterSize_p = (__be32 *)vio_get_attribute(dev, |
1733 | VETH_MCAST_FILTER_SIZE, | |
1734 | NULL); | |
f148f61d | 1735 | if (!mcastFilterSize_p) { |
21c2dece SL |
1736 | dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE " |
1737 | "attribute\n"); | |
be35ae9e | 1738 | return -EINVAL; |
1da177e4 | 1739 | } |
d7fbeba6 | 1740 | |
d926793c | 1741 | netdev = alloc_etherdev_mqs(sizeof(struct ibmveth_adapter), IBMVETH_MAX_QUEUES, 1); |
f148f61d | 1742 | if (!netdev) |
1da177e4 LT |
1743 | return -ENOMEM; |
1744 | ||
4cf1653a | 1745 | adapter = netdev_priv(netdev); |
c7ae011d | 1746 | dev_set_drvdata(&dev->dev, netdev); |
1da177e4 LT |
1747 | |
1748 | adapter->vdev = dev; | |
1749 | adapter->netdev = netdev; | |
2c91e231 | 1750 | INIT_WORK(&adapter->work, ibmveth_reset); |
66cf4710 | 1751 | adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p); |
9aedc6e2 | 1752 | ibmveth_init_link_settings(netdev); |
d7fbeba6 | 1753 | |
b707b89f | 1754 | netif_napi_add_weight(netdev, &adapter->napi, ibmveth_poll, 16); |
bea3348e | 1755 | |
1da177e4 | 1756 | netdev->irq = dev->irq; |
e186d174 AB |
1757 | netdev->netdev_ops = &ibmveth_netdev_ops; |
1758 | netdev->ethtool_ops = &netdev_ethtool_ops; | |
1da177e4 | 1759 | SET_NETDEV_DEV(netdev, &dev->dev); |
23d28a85 TH |
1760 | netdev->hw_features = NETIF_F_SG; |
1761 | if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) { | |
1762 | netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | |
1763 | NETIF_F_RXCSUM; | |
1764 | } | |
07e6a97d | 1765 | |
b9367bf3 | 1766 | netdev->features |= netdev->hw_features; |
1da177e4 | 1767 | |
07e6a97d TF |
1768 | ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); |
1769 | ||
1770 | /* If running older firmware, TSO should not be enabled by default */ | |
1771 | if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) && | |
1772 | !old_large_send) { | |
1773 | netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; | |
1774 | netdev->features |= netdev->hw_features; | |
1775 | } else { | |
1776 | netdev->hw_features |= NETIF_F_TSO; | |
1777 | } | |
8641dd85 | 1778 | |
66aa0678 SK |
1779 | adapter->is_active_trunk = false; |
1780 | if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK)) { | |
1781 | adapter->is_active_trunk = true; | |
1782 | netdev->hw_features |= NETIF_F_FRAGLIST; | |
1783 | netdev->features |= NETIF_F_FRAGLIST; | |
1784 | } | |
1785 | ||
d894be57 | 1786 | netdev->min_mtu = IBMVETH_MIN_MTU; |
5948378b | 1787 | netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH; |
d894be57 | 1788 | |
a96d317f | 1789 | eth_hw_addr_set(netdev, mac_addr_p); |
1da177e4 | 1790 | |
cd7c7ec3 TF |
1791 | if (firmware_has_feature(FW_FEATURE_CMO)) |
1792 | memcpy(pool_count, pool_count_cmo, sizeof(pool_count)); | |
1793 | ||
f148f61d | 1794 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
860f242e | 1795 | struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; |
8dde2a96 GKH |
1796 | int error; |
1797 | ||
d7fbeba6 JG |
1798 | ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, |
1799 | pool_count[i], pool_size[i], | |
860f242e | 1800 | pool_active[i]); |
8dde2a96 GKH |
1801 | error = kobject_init_and_add(kobj, &ktype_veth_pool, |
1802 | &dev->dev.kobj, "pool%d", i); | |
1803 | if (!error) | |
1804 | kobject_uevent(kobj, KOBJ_ADD); | |
860f242e | 1805 | } |
1da177e4 | 1806 | |
742c60e1 NC |
1807 | rc = netif_set_real_num_tx_queues(netdev, min(num_online_cpus(), |
1808 | IBMVETH_DEFAULT_QUEUES)); | |
10c2aba8 NC |
1809 | if (rc) { |
1810 | netdev_dbg(netdev, "failed to set number of tx queues rc=%d\n", | |
1811 | rc); | |
1812 | free_netdev(netdev); | |
1813 | return rc; | |
1814 | } | |
d926793c | 1815 | adapter->tx_ltb_size = PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE); |
10c2aba8 NC |
1816 | for (i = 0; i < IBMVETH_MAX_QUEUES; i++) |
1817 | adapter->tx_ltb_ptr[i] = NULL; | |
d926793c | 1818 | |
c43ced18 | 1819 | netdev_dbg(netdev, "adapter @ 0x%p\n", adapter); |
c43ced18 | 1820 | netdev_dbg(netdev, "registering netdev...\n"); |
1da177e4 | 1821 | |
b801a4e7 MM |
1822 | ibmveth_set_features(netdev, netdev->features); |
1823 | ||
1da177e4 LT |
1824 | rc = register_netdev(netdev); |
1825 | ||
f148f61d | 1826 | if (rc) { |
c43ced18 | 1827 | netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc); |
1da177e4 LT |
1828 | free_netdev(netdev); |
1829 | return rc; | |
1830 | } | |
1831 | ||
c43ced18 | 1832 | netdev_dbg(netdev, "registered\n"); |
1da177e4 | 1833 | |
1da177e4 LT |
1834 | return 0; |
1835 | } | |
1836 | ||
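The mac_len == 8 branch in ibmveth_probe() exists because old or broken pHyp releases publish VETH_MAC_ADDR as an 8-byte property with two bytes of padding in front of the 6-byte address. A sketch of that adjustment; extract_mac() is a hypothetical helper:

	#include <stddef.h>

	static const unsigned char *extract_mac(const unsigned char *prop, int len)
	{
		if (len == 8)
			return prop + 2;	/* skip two leading pad bytes */
		if (len == 6)
			return prop;		/* already a bare MAC address */
		return NULL;			/* malformed property */
	}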
386a966f | 1837 | static void ibmveth_remove(struct vio_dev *dev) |
1da177e4 | 1838 | { |
c7ae011d | 1839 | struct net_device *netdev = dev_get_drvdata(&dev->dev); |
4cf1653a | 1840 | struct ibmveth_adapter *adapter = netdev_priv(netdev); |
860f242e SL |
1841 | int i; |
1842 | ||
2c91e231 DM |
1843 | cancel_work_sync(&adapter->work); |
1844 | ||
f148f61d | 1845 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) |
c10997f6 | 1846 | kobject_put(&adapter->rx_buff_pool[i].kobj); |
1da177e4 LT |
1847 | |
1848 | unregister_netdev(netdev); | |
1849 | ||
1da177e4 | 1850 | free_netdev(netdev); |
1096d63d | 1851 | dev_set_drvdata(&dev->dev, NULL); |
1da177e4 LT |
1852 | } |
1853 | ||
860f242e SL |
1854 | static struct attribute veth_active_attr; |
1855 | static struct attribute veth_num_attr; | |
1856 | static struct attribute veth_size_attr; | |
1857 | ||
f148f61d SL |
1858 | static ssize_t veth_pool_show(struct kobject *kobj, |
1859 | struct attribute *attr, char *buf) | |
860f242e | 1860 | { |
d7fbeba6 | 1861 | struct ibmveth_buff_pool *pool = container_of(kobj, |
860f242e SL |
1862 | struct ibmveth_buff_pool, |
1863 | kobj); | |
1864 | ||
1865 | if (attr == &veth_active_attr) | |
1866 | return sprintf(buf, "%d\n", pool->active); | |
1867 | else if (attr == &veth_num_attr) | |
1868 | return sprintf(buf, "%d\n", pool->size); | |
1869 | else if (attr == &veth_size_attr) | |
1870 | return sprintf(buf, "%d\n", pool->buff_size); | |
1871 | return 0; | |
1872 | } | |
1873 | ||
46431fd5 DM |
1874 | /** |
1875 | * veth_pool_store - sysfs store handler for pool attributes | |
1876 | * @kobj: kobject embedded in pool | |
1877 | * @attr: attribute being changed | |
1878 | * @buf: value being stored | |
1879 | * @count: length of @buf in bytes | |
1880 | * | |
1881 | * Stores the new value in the pool attribute. Verifies the range of the new | |
1882 | * value for size and buff_size. Verifies that at least one pool remains | |
1883 | * available to receive MTU-sized packets. | |
1884 | * | |
1885 | * Context: Process context. | |
1886 | * Takes and releases rtnl_mutex to ensure correct ordering of close | |
1887 | * and open calls. | |
1888 | * Return: | |
1889 | * %-EPERM - Not allowed to disable all MTU-sized buffer pools | |
1890 | * * %-EINVAL - New pool size or buffer size is out of range | |
1891 | * * count - Return count for success | |
1892 | * * other - Return value from a failed ibmveth_open call | |
1893 | */ | |
f148f61d SL |
1894 | static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, |
1895 | const char *buf, size_t count) | |
860f242e | 1896 | { |
d7fbeba6 | 1897 | struct ibmveth_buff_pool *pool = container_of(kobj, |
860f242e SL |
1898 | struct ibmveth_buff_pool, |
1899 | kobj); | |
1756055d | 1900 | struct net_device *netdev = dev_get_drvdata(kobj_to_dev(kobj->parent)); |
4cf1653a | 1901 | struct ibmveth_adapter *adapter = netdev_priv(netdev); |
860f242e | 1902 | long value = simple_strtol(buf, NULL, 10); |
46431fd5 DM |
1903 | bool change = false; |
1904 | u32 newbuff_size; | |
1905 | u32 oldbuff_size; | |
1906 | int newactive; | |
1907 | int oldactive; | |
1908 | u32 newsize; | |
1909 | u32 oldsize; | |
860f242e SL |
1910 | long rc; |
1911 | ||
053f3ff6 DM |
1912 | rtnl_lock(); |
1913 | ||
46431fd5 DM |
1914 | oldbuff_size = pool->buff_size; |
1915 | oldactive = pool->active; | |
1916 | oldsize = pool->size; | |
1917 | ||
1918 | newbuff_size = oldbuff_size; | |
1919 | newactive = oldactive; | |
1920 | newsize = oldsize; | |
1921 | ||
860f242e | 1922 | if (attr == &veth_active_attr) { |
46431fd5 DM |
1923 | if (value && !oldactive) { |
1924 | newactive = 1; | |
1925 | change = true; | |
1926 | } else if (!value && oldactive) { | |
860f242e SL |
1927 | int mtu = netdev->mtu + IBMVETH_BUFF_OH; |
1928 | int i; | |
1929 | /* Make sure another active buffer pool can still | |
1930 | * hold an MTU-sized packet */ | |
517e80e6 | 1931 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
860f242e SL |
1932 | if (pool == &adapter->rx_buff_pool[i]) |
1933 | continue; | |
1934 | if (!adapter->rx_buff_pool[i].active) | |
1935 | continue; | |
76b9cfcc BK |
1936 | if (mtu <= adapter->rx_buff_pool[i].buff_size) |
1937 | break; | |
860f242e | 1938 | } |
76b9cfcc | 1939 | |
517e80e6 | 1940 | if (i == IBMVETH_NUM_BUFF_POOLS) { |
21c2dece | 1941 | netdev_err(netdev, "no active pool >= MTU\n"); |
053f3ff6 DM |
1942 | rc = -EPERM; |
1943 | goto unlock_err; | |
860f242e | 1944 | } |
76b9cfcc | 1945 | |
46431fd5 DM |
1946 | newactive = 0; |
1947 | change = true; | |
860f242e SL |
1948 | } |
1949 | } else if (attr == &veth_num_attr) { | |
f148f61d | 1950 | if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) { |
053f3ff6 DM |
1951 | rc = -EINVAL; |
1952 | goto unlock_err; | |
46431fd5 DM |
1953 | } |
1954 | if (value != oldsize) { | |
1955 | newsize = value; | |
1956 | change = true; | |
860f242e SL |
1957 | } |
1958 | } else if (attr == &veth_size_attr) { | |
f148f61d | 1959 | if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) { |
053f3ff6 DM |
1960 | rc = -EINVAL; |
1961 | goto unlock_err; | |
46431fd5 DM |
1962 | } |
1963 | if (value != oldbuff_size) { | |
1964 | newbuff_size = value; | |
1965 | change = true; | |
1966 | } | |
1967 | } | |
1968 | ||
1969 | if (change) { | |
1970 | if (netif_running(netdev)) | |
1971 | ibmveth_close(netdev); | |
1972 | ||
1973 | pool->active = newactive; | |
1974 | pool->buff_size = newbuff_size; | |
1975 | pool->size = newsize; | |
1976 | ||
1977 | if (netif_running(netdev)) { | |
1978 | rc = ibmveth_open(netdev); | |
1979 | if (rc) { | |
1980 | pool->active = oldactive; | |
1981 | pool->buff_size = oldbuff_size; | |
1982 | pool->size = oldsize; | |
1983 | goto unlock_err; | |
f148f61d | 1984 | } |
860f242e SL |
1985 | } |
1986 | } | |
053f3ff6 | 1987 | rtnl_unlock(); |
860f242e SL |
1988 | |
1989 | /* kick the interrupt handler to allocate/deallocate pools */ | |
7d12e780 | 1990 | ibmveth_interrupt(netdev->irq, netdev); |
860f242e | 1991 | return count; |
053f3ff6 DM |
1992 | |
1993 | unlock_err: | |
1994 | rtnl_unlock(); | |
1995 | return rc; | |
860f242e SL |
1996 | } |
1997 | ||
1998 | ||
f148f61d SL |
1999 | #define ATTR(_name, _mode) \ |
2000 | struct attribute veth_##_name##_attr = { \ | |
2001 | .name = __stringify(_name), .mode = _mode, \ | |
2002 | }; | |
860f242e SL |
2003 | |
2004 | static ATTR(active, 0644); | |
2005 | static ATTR(num, 0644); | |
2006 | static ATTR(size, 0644); | |
2007 | ||
f148f61d | 2008 | static struct attribute *veth_pool_attrs[] = { |
860f242e SL |
2009 | &veth_active_attr, |
2010 | &veth_num_attr, | |
2011 | &veth_size_attr, | |
2012 | NULL, | |
2013 | }; | |
c288bc0d | 2014 | ATTRIBUTE_GROUPS(veth_pool); |
860f242e | 2015 | |
52cf25d0 | 2016 | static const struct sysfs_ops veth_pool_ops = { |
860f242e SL |
2017 | .show = veth_pool_show, |
2018 | .store = veth_pool_store, | |
2019 | }; | |
2020 | ||
2021 | static struct kobj_type ktype_veth_pool = { | |
2022 | .release = NULL, | |
2023 | .sysfs_ops = &veth_pool_ops, | |
c288bc0d | 2024 | .default_groups = veth_pool_groups, |
860f242e SL |
2025 | }; |
2026 | ||
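The kobjects set up in ibmveth_probe() expose these attributes as pool<N>/active, pool<N>/num and pool<N>/size files under the vio device's sysfs directory, and writes to them arrive in veth_pool_store() above. That handler parses with simple_strtol(), which silently ignores trailing garbage; a sketch of the stricter kstrtol() parse that newer kernel code generally prefers, shown as an illustration rather than a change to the driver:

	long value;

	/* kstrtol() accepts only an optionally signed decimal integer with
	 * at most one trailing newline, and reports overflow, unlike
	 * simple_strtol().
	 */
	if (kstrtol(buf, 10, &value))
		return -EINVAL;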
e7a3af5d BK |
2027 | static int ibmveth_resume(struct device *dev) |
2028 | { | |
2029 | struct net_device *netdev = dev_get_drvdata(dev); | |
2030 | ibmveth_interrupt(netdev->irq, netdev); | |
2031 | return 0; | |
2032 | } | |
860f242e | 2033 | |
71450804 | 2034 | static const struct vio_device_id ibmveth_device_table[] = { |
1da177e4 | 2035 | { "network", "IBM,l-lan"}, |
fb120da6 | 2036 | { "", "" } |
1da177e4 | 2037 | }; |
1da177e4 LT |
2038 | MODULE_DEVICE_TABLE(vio, ibmveth_device_table); |
2039 | ||
eb60a73d | 2040 | static const struct dev_pm_ops ibmveth_pm_ops = { |
e7a3af5d BK |
2041 | .resume = ibmveth_resume |
2042 | }; | |
2043 | ||
1da177e4 | 2044 | static struct vio_driver ibmveth_driver = { |
6fdf5392 SR |
2045 | .id_table = ibmveth_device_table, |
2046 | .probe = ibmveth_probe, | |
2047 | .remove = ibmveth_remove, | |
1096d63d | 2048 | .get_desired_dma = ibmveth_get_desired_dma, |
cb52d897 BH |
2049 | .name = ibmveth_driver_name, |
2050 | .pm = &ibmveth_pm_ops, | |
1da177e4 LT |
2051 | }; |
2052 | ||
2053 | static int __init ibmveth_module_init(void) | |
2054 | { | |
21c2dece SL |
2055 | printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name, |
2056 | ibmveth_driver_string, ibmveth_driver_version); | |
1da177e4 | 2057 | |
1da177e4 LT |
2058 | return vio_register_driver(&ibmveth_driver); |
2059 | } | |
2060 | ||
2061 | static void __exit ibmveth_module_exit(void) | |
2062 | { | |
2063 | vio_unregister_driver(&ibmveth_driver); | |
d7fbeba6 | 2064 | } |
1da177e4 LT |
2065 | |
2066 | module_init(ibmveth_module_init); | |
2067 | module_exit(ibmveth_module_exit); | |
8a97de24 DM |
2068 | |
2069 | #ifdef CONFIG_IBMVETH_KUNIT_TEST | |
2070 | #include <kunit/test.h> | |
2071 | ||
2072 | /** | |
2073 | * ibmveth_reset_kunit - reset routine for running in KUnit environment | |
2074 | * | |
2075 | * @w: pointer to work_struct embedded in adapter structure | |
2076 | * | |
2077 | * Context: Called in the KUnit environment. Does nothing. | |
2078 | * | |
2079 | * Return: void | |
2080 | */ | |
2081 | static void ibmveth_reset_kunit(struct work_struct *w) | |
2082 | { | |
2083 | netdev_dbg(NULL, "reset_kunit starting\n"); | |
2084 | netdev_dbg(NULL, "reset_kunit complete\n"); | |
2085 | } | |
2086 | ||
2087 | /** | |
2088 | * ibmveth_remove_buffer_from_pool_test - unit test for some of | |
2089 | * ibmveth_remove_buffer_from_pool | |
2090 | * @test: pointer to kunit structure | |
2091 | * | |
2092 | * Tests the error returns from ibmveth_remove_buffer_from_pool. | |
2093 | * ibmveth_remove_buffer_from_pool also calls WARN_ON, so dmesg should be | |
2094 | * checked to see that these warnings happened. | |
2095 | * | |
2096 | * Return: void | |
2097 | */ | |
2098 | static void ibmveth_remove_buffer_from_pool_test(struct kunit *test) | |
2099 | { | |
2100 | struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL); | |
2101 | struct ibmveth_buff_pool *pool; | |
2102 | u64 correlator; | |
2103 | ||
2104 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter); | |
2105 | ||
2106 | INIT_WORK(&adapter->work, ibmveth_reset_kunit); | |
2107 | ||
2108 | /* Set sane values for buffer pools */ | |
2109 | for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) | |
2110 | ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, | |
2111 | pool_count[i], pool_size[i], | |
2112 | pool_active[i]); | |
2113 | ||
2114 | pool = &adapter->rx_buff_pool[0]; | |
2115 | pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL); | |
2116 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff); | |
2117 | ||
2118 | correlator = ((u64)IBMVETH_NUM_BUFF_POOLS << 32) | 0; | |
2119 | KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false)); | |
2120 | KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true)); | |
2121 | ||
2122 | correlator = ((u64)0 << 32) | adapter->rx_buff_pool[0].size; | |
2123 | KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false)); | |
2124 | KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true)); | |
2125 | ||
2126 | correlator = (u64)0 | 0; | |
2127 | pool->skbuff[0] = NULL; | |
2128 | KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, false)); | |
2129 | KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, true)); | |
2130 | ||
2131 | flush_work(&adapter->work); | |
2132 | } | |
2133 | ||
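The correlator values these tests construct follow the driver's encoding: the pool number sits in the upper 32 bits and the buffer index in the lower 32. A small sketch of the encode/decode pair; the helper names are hypothetical:

	#include <stdint.h>

	static uint64_t make_correlator(uint32_t pool, uint32_t index)
	{
		return ((uint64_t)pool << 32) | index;
	}

	static uint32_t correlator_pool(uint64_t c)
	{
		return (uint32_t)(c >> 32);
	}

	static uint32_t correlator_index(uint64_t c)
	{
		return (uint32_t)c;
	}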
2134 | /** | |
2135 | * ibmveth_rxq_get_buffer_test - unit test for ibmveth_rxq_get_buffer | |
2136 | * @test: pointer to kunit structure | |
2137 | * | |
2138 | * Tests ibmveth_rxq_get_buffer. ibmveth_rxq_get_buffer also calls WARN_ON for | |
2139 | * the NULL returns, so dmesg should be checked to see that these warnings | |
2140 | * happened. | |
2141 | * | |
2142 | * Return: void | |
2143 | */ | |
2144 | static void ibmveth_rxq_get_buffer_test(struct kunit *test) | |
2145 | { | |
2146 | struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL); | |
2147 | struct sk_buff *skb = kunit_kzalloc(test, sizeof(*skb), GFP_KERNEL); | |
2148 | struct ibmveth_buff_pool *pool; | |
2149 | ||
2150 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter); | |
2151 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb); | |
2152 | ||
2153 | INIT_WORK(&adapter->work, ibmveth_reset_kunit); | |
2154 | ||
2155 | adapter->rx_queue.queue_len = 1; | |
2156 | adapter->rx_queue.index = 0; | |
2157 | adapter->rx_queue.queue_addr = kunit_kzalloc(test, sizeof(struct ibmveth_rx_q_entry), | |
2158 | GFP_KERNEL); | |
2159 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter->rx_queue.queue_addr); | |
2160 | ||
2161 | /* Set sane values for buffer pools */ | |
2162 | for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) | |
2163 | ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, | |
2164 | pool_count[i], pool_size[i], | |
2165 | pool_active[i]); | |
2166 | ||
2167 | pool = &adapter->rx_buff_pool[0]; | |
2168 | pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL); | |
2169 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff); | |
2170 | ||
2171 | adapter->rx_queue.queue_addr[0].correlator = (u64)IBMVETH_NUM_BUFF_POOLS << 32 | 0; | |
2172 | KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter)); | |
2173 | ||
2174 | adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | adapter->rx_buff_pool[0].size; | |
2175 | KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter)); | |
2176 | ||
2177 | pool->skbuff[0] = skb; | |
2178 | adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | 0; | |
2179 | KUNIT_EXPECT_PTR_EQ(test, skb, ibmveth_rxq_get_buffer(adapter)); | |
2180 | ||
2181 | flush_work(&adapter->work); | |
2182 | } | |
2183 | ||
2184 | static struct kunit_case ibmveth_test_cases[] = { | |
2185 | KUNIT_CASE(ibmveth_remove_buffer_from_pool_test), | |
2186 | KUNIT_CASE(ibmveth_rxq_get_buffer_test), | |
2187 | {} | |
2188 | }; | |
2189 | ||
2190 | static struct kunit_suite ibmveth_test_suite = { | |
2191 | .name = "ibmveth-kunit-test", | |
2192 | .test_cases = ibmveth_test_cases, | |
2193 | }; | |
2194 | ||
2195 | kunit_test_suite(ibmveth_test_suite); | |
2196 | #endif |