/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
 *          Daniel Martensson / daniel.martensson@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/io.h>

#include <net/caif/caif_device.h>
#include <net/caif/caif_shm.h>

#define NR_TX_BUF 6
#define NR_RX_BUF 6
#define TX_BUF_SZ 0x2000
#define RX_BUF_SZ 0x2000

#define CAIF_NEEDED_HEADROOM 32

#define CAIF_FLOW_ON 1
#define CAIF_FLOW_OFF 0

#define LOW_WATERMARK 3
#define HIGH_WATERMARK 4
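
/*
 * Flow-control hysteresis: flow is switched off when fewer than
 * LOW_WATERMARK Tx buffers remain empty and switched back on only when
 * more than HIGH_WATERMARK are empty again, so the indication does not
 * toggle on every buffer completion.
 */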

/* Maximum number of CAIF buffers per shared memory buffer. */
#define SHM_MAX_FRMS_PER_BUF 10

/*
 * Size in bytes of the descriptor area
 * (With end of descriptor signalling)
 */
#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \
				sizeof(struct shm_pck_desc))

/*
 * Offset to the first CAIF frame within a shared memory buffer.
 * Aligned on 32 bytes.
 */
#define SHM_CAIF_FRM_OFS ALIGN(SHM_CAIF_DESC_SIZE, 32)
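
/*
 * Worked example: struct shm_pck_desc holds two u32s (8 bytes), so the
 * descriptor area is (SHM_MAX_FRMS_PER_BUF + 1) * 8 = 88 bytes and the
 * first CAIF frame starts ALIGN(88, 32) = 96 bytes into the buffer.
 */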

/* Number of bytes for CAIF shared memory header. */
#define SHM_HDR_LEN 1

/* Number of padding bytes for the complete CAIF frame. */
#define SHM_FRM_PAD_LEN 4

#define CAIF_MAX_MTU 4096

#define SHM_SET_FULL(x)	(((x + 1) & 0x0F) << 0)
#define SHM_GET_FULL(x)	(((x >> 0) & 0x0F) - 1)

#define SHM_SET_EMPTY(x)	(((x + 1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x)	(((x >> 4) & 0x0F) - 1)

#define SHM_FULL_MASK		(0x0F << 0)
#define SHM_EMPTY_MASK		(0x0F << 4)
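
/*
 * Mailbox message layout, as implied by the macros above: the low
 * nibble signals "buffer filled", the high nibble "buffer emptied".
 * Indices are stored biased by one so that a zero nibble means "no
 * notification". For example, SHM_SET_FULL(2) == 0x03 and
 * SHM_GET_FULL(0x03) == 2, while a message of 0x30 reports buffer 2
 * as emptied.
 */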

struct shm_pck_desc {
	/*
	 * Offset from start of shared memory area to start of
	 * shared memory CAIF frame.
	 */
	u32 frm_ofs;
	u32 frm_len;
};

struct buf_list {
	unsigned char *desc_vptr;
	u32 phy_addr;
	u32 index;
	u32 len;
	u32 frames;
	u32 frm_ofs;
	struct list_head list;
};

struct shm_caif_frm {
	/* Number of bytes of padding before the CAIF frame. */
	u8 hdr_ofs;
};
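
/*
 * Layout of a shared memory buffer as used below (a sketch; offsets
 * per the macros above):
 *
 *  0                 SHM_CAIF_FRM_OFS
 *  +-----------------+--------------------------------+-- - -
 *  | shm_pck_desc    | SHM_HDR_LEN byte (= hdr_ofs),  | next frame,
 *  | array, ended by | hdr_ofs pad bytes, CAIF frame, | 32-byte aligned
 *  | frm_ofs == 0    | tail pad to SHM_FRM_PAD_LEN    |
 *  +-----------------+--------------------------------+-- - -
 */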

struct shmdrv_layer {
	/* caif_dev_common must always be first in the structure */
	struct caif_dev_common cfdev;

	u32 shm_tx_addr;
	u32 shm_rx_addr;
	u32 shm_base_addr;
	u32 tx_empty_available;
	spinlock_t lock;

	struct list_head tx_empty_list;
	struct list_head tx_pend_list;
	struct list_head tx_full_list;
	struct list_head rx_empty_list;
	struct list_head rx_pend_list;
	struct list_head rx_full_list;

	struct workqueue_struct *pshm_tx_workqueue;
	struct workqueue_struct *pshm_rx_workqueue;

	struct work_struct shm_tx_work;
	struct work_struct shm_rx_work;

	struct sk_buff_head sk_qhead;
	struct shmdev_layer *pshm_dev;
};
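
/*
 * Buffer life cycle, as driven by the code below: Tx buffers move from
 * tx_empty_list to tx_full_list when filled and announced via mailbox,
 * and back to tx_empty_list once the peer reports them emptied. Rx
 * buffers move from rx_empty_list to rx_full_list when the peer fills
 * them, to rx_pend_list once the Rx work function has drained them, and
 * back to rx_empty_list when returned to the peer. tx_pend_list is
 * initialized and freed on removal but otherwise unused here.
 */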

static int shm_netdev_open(struct net_device *shm_netdev)
{
	netif_wake_queue(shm_netdev);
	return 0;
}

static int shm_netdev_close(struct net_device *shm_netdev)
{
	netif_stop_queue(shm_netdev);
	return 0;
}

int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
{
	struct buf_list *pbuf;
	struct shmdrv_layer *pshm_drv;
	struct list_head *pos;
	u32 avail_emptybuff = 0;
	unsigned long flags = 0;

	pshm_drv = priv;

	/* Check for received buffers. */
	if (mbx_msg & SHM_FULL_MASK) {
		int idx;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check whether we have any outstanding buffers. */
		if (list_empty(&pshm_drv->rx_empty_list)) {

			/* Release spin lock. */
			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* We print even in IRQ context... */
			pr_warn("No empty Rx buffers to fill: mbx_msg:%x\n",
				mbx_msg);

			/* Bail out. */
			goto err_sync;
		}

		pbuf =
			list_entry(pshm_drv->rx_empty_list.next,
					struct buf_list, list);
		idx = pbuf->index;

		/* Check buffer synchronization. */
		if (idx != SHM_GET_FULL(mbx_msg)) {

			/* We print even in IRQ context... */
			pr_warn("phyif_shm_mbx_msg_cb: RX full out of sync: idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
				idx, mbx_msg, SHM_GET_FULL(mbx_msg));

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* Bail out. */
			goto err_sync;
		}

		list_del_init(&pbuf->list);
		list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);

		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		/* Schedule RX work queue. */
		if (!work_pending(&pshm_drv->shm_rx_work))
			queue_work(pshm_drv->pshm_rx_workqueue,
					&pshm_drv->shm_rx_work);
	}

	/* Check for emptied buffers. */
	if (mbx_msg & SHM_EMPTY_MASK) {
		int idx;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check whether we have any outstanding buffers. */
		if (list_empty(&pshm_drv->tx_full_list)) {

			/* We print even in IRQ context... */
			pr_warn("No TX to empty: msg:%x\n", mbx_msg);

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* Bail out. */
			goto err_sync;
		}

		pbuf =
			list_entry(pshm_drv->tx_full_list.next,
					struct buf_list, list);
		idx = pbuf->index;

		/* Check buffer synchronization. */
		if (idx != SHM_GET_EMPTY(mbx_msg)) {

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* We print even in IRQ context... */
			pr_warn("TX empty out of sync: idx:%d, msg:%x\n",
				idx, mbx_msg);

			/* Bail out. */
			goto err_sync;
		}
		list_del_init(&pbuf->list);

		/* Reset buffer parameters. */
		pbuf->frames = 0;
		pbuf->frm_ofs = SHM_CAIF_FRM_OFS;

		list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);

		/* Count the available buffers in the empty list. */
		list_for_each(pos, &pshm_drv->tx_empty_list)
			avail_emptybuff++;

		/* Check whether we have to wake up the transmitter. */
		if ((avail_emptybuff > HIGH_WATERMARK) &&
					(!pshm_drv->tx_empty_available)) {
			pshm_drv->tx_empty_available = 1;
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			pshm_drv->cfdev.flowctrl
					(pshm_drv->pshm_dev->pshm_netdev,
								CAIF_FLOW_ON);

			/* Schedule the work queue, if required. */
			if (!work_pending(&pshm_drv->shm_tx_work))
				queue_work(pshm_drv->pshm_tx_workqueue,
						&pshm_drv->shm_tx_work);
		} else
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
	}

	return 0;

err_sync:
	return -EIO;
}

static void shm_rx_work_func(struct work_struct *rx_work)
{
	struct shmdrv_layer *pshm_drv;
	struct buf_list *pbuf;
	unsigned long flags = 0;
	struct sk_buff *skb;
	char *p;
	int ret;

	pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);

	while (1) {

		struct shm_pck_desc *pck_desc;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check for received buffers. */
		if (list_empty(&pshm_drv->rx_full_list)) {
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			break;
		}

		pbuf =
			list_entry(pshm_drv->rx_full_list.next,
					struct buf_list, list);
		list_del_init(&pbuf->list);
		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		/* Retrieve pointer to start of the packet descriptor area. */
		pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;

		/*
		 * Check whether descriptor contains a CAIF shared memory
		 * frame.
		 */
		while (pck_desc->frm_ofs) {
			unsigned int frm_buf_ofs;
			unsigned int frm_pck_ofs;
			unsigned int frm_pck_len;
			/*
			 * Check whether offset is within buffer limits
			 * (lower).
			 */
			if (pck_desc->frm_ofs <
				(pbuf->phy_addr - pshm_drv->shm_base_addr))
				break;
			/*
			 * Check whether offset is within buffer limits
			 * (higher).
			 */
			if (pck_desc->frm_ofs >
				((pbuf->phy_addr - pshm_drv->shm_base_addr) +
					pbuf->len))
				break;

			/* Calculate offset from start of buffer. */
			frm_buf_ofs =
				pck_desc->frm_ofs - (pbuf->phy_addr -
						pshm_drv->shm_base_addr);

			/*
			 * Calculate offset and length of CAIF packet while
			 * taking care of the shared memory header.
			 */
			frm_pck_ofs =
				frm_buf_ofs + SHM_HDR_LEN +
					(*(pbuf->desc_vptr + frm_buf_ofs));
			frm_pck_len =
				(pck_desc->frm_len - SHM_HDR_LEN -
					(*(pbuf->desc_vptr + frm_buf_ofs)));
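
			/*
			 * Worked example: a 13-byte CAIF packet with
			 * hdr_ofs 0 was written as one header byte plus
			 * 13 payload bytes, tail-padded to frm_len = 16;
			 * it is recovered here as frm_pck_ofs =
			 * frm_buf_ofs + 1 and frm_pck_len = 16 - 1 - 0 =
			 * 15 bytes (payload plus tail padding).
			 */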

			/* Check whether CAIF packet is within buffer limits */
			if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
				break;

			/* Get a suitable CAIF packet and copy in data. */
			skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
							frm_pck_len + 1);
			if (skb == NULL)
				break;

			p = skb_put(skb, frm_pck_len);
			memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);

			skb->protocol = htons(ETH_P_CAIF);
			skb_reset_mac_header(skb);
			skb->dev = pshm_drv->pshm_dev->pshm_netdev;

			/* Push received packet up the stack. */
			ret = netif_rx_ni(skb);

			if (!ret) {
				pshm_drv->pshm_dev->pshm_netdev->stats.
								rx_packets++;
				pshm_drv->pshm_dev->pshm_netdev->stats.
						rx_bytes += pck_desc->frm_len;
			} else
				++pshm_drv->pshm_dev->pshm_netdev->stats.
								rx_dropped;
			/* Move to next packet descriptor. */
			pck_desc++;
		}

		spin_lock_irqsave(&pshm_drv->lock, flags);
		list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);

		spin_unlock_irqrestore(&pshm_drv->lock, flags);

	}

	/* Schedule the work queue, if required. */
	if (!work_pending(&pshm_drv->shm_tx_work))
		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);

}

static void shm_tx_work_func(struct work_struct *tx_work)
{
	u32 mbox_msg;
	unsigned int frmlen, avail_emptybuff, append = 0;
	unsigned long flags = 0;
	struct buf_list *pbuf = NULL;
	struct shmdrv_layer *pshm_drv;
	struct shm_caif_frm *frm;
	struct sk_buff *skb;
	struct shm_pck_desc *pck_desc;
	struct list_head *pos;

	pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);

	do {
		/* Initialize mailbox message. */
		mbox_msg = 0x00;
		avail_emptybuff = 0;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check for pending receive buffers. */
		if (!list_empty(&pshm_drv->rx_pend_list)) {

			pbuf = list_entry(pshm_drv->rx_pend_list.next,
						struct buf_list, list);

			list_del_init(&pbuf->list);
			list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
			/*
			 * Value index is never changed,
			 * so read access should be safe.
			 */
			mbox_msg |= SHM_SET_EMPTY(pbuf->index);
		}

		skb = skb_peek(&pshm_drv->sk_qhead);

		if (skb == NULL)
			goto send_msg;
		/* Count the available buffers in the empty list. */
		list_for_each(pos, &pshm_drv->tx_empty_list)
			avail_emptybuff++;

		if ((avail_emptybuff < LOW_WATERMARK) &&
					pshm_drv->tx_empty_available) {
			/* Update blocking condition. */
			pshm_drv->tx_empty_available = 0;
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			pshm_drv->cfdev.flowctrl
					(pshm_drv->pshm_dev->pshm_netdev,
							CAIF_FLOW_OFF);
			spin_lock_irqsave(&pshm_drv->lock, flags);
		}
		/*
		 * We simply return to the caller if we do not have space
		 * either in the Tx pending list or the Tx empty list. In this
		 * case, we hold the received skb in the skb list, waiting to
		 * be transmitted once Tx buffers become available.
		 */
		if (list_empty(&pshm_drv->tx_empty_list))
			goto send_msg;

		/* Get the first free Tx buffer. */
		pbuf = list_entry(pshm_drv->tx_empty_list.next,
						struct buf_list, list);
		do {
			if (append) {
				skb = skb_peek(&pshm_drv->sk_qhead);
				if (skb == NULL)
					break;
			}

			frm = (struct shm_caif_frm *)
					(pbuf->desc_vptr + pbuf->frm_ofs);

			frm->hdr_ofs = 0;
			frmlen = 0;
			frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;

			/* Add tail padding if needed. */
			if (frmlen % SHM_FRM_PAD_LEN)
				frmlen += SHM_FRM_PAD_LEN -
						(frmlen % SHM_FRM_PAD_LEN);
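
			/*
			 * Example: a 13-byte CAIF frame gives
			 * frmlen = 1 + 0 + 13 = 14, tail-padded up to the
			 * next SHM_FRM_PAD_LEN multiple: 16.
			 */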

			/*
			 * Verify that packet, header and additional padding
			 * can fit within the buffer frame area.
			 */
			if (frmlen >= (pbuf->len - pbuf->frm_ofs))
				break;

			if (!append) {
				list_del_init(&pbuf->list);
				append = 1;
			}

			skb = skb_dequeue(&pshm_drv->sk_qhead);
			if (skb == NULL)
				break;
			/* Copy in CAIF frame. */
			skb_copy_bits(skb, 0, pbuf->desc_vptr +
					pbuf->frm_ofs + SHM_HDR_LEN +
						frm->hdr_ofs, skb->len);

			pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
			pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
									frmlen;
			dev_kfree_skb_irq(skb);

			/* Fill in the shared memory packet descriptor area. */
			pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
			/* Forward to current frame. */
			pck_desc += pbuf->frames;
			pck_desc->frm_ofs = (pbuf->phy_addr -
						pshm_drv->shm_base_addr) +
								pbuf->frm_ofs;
			pck_desc->frm_len = frmlen;
			/* Terminate packet descriptor area. */
			pck_desc++;
			pck_desc->frm_ofs = 0;
			/* Update buffer parameters. */
			pbuf->frames++;
			pbuf->frm_ofs += ALIGN(frmlen, 32);

		} while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);

		/* Assign buffer as full. */
		list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
		append = 0;
		mbox_msg |= SHM_SET_FULL(pbuf->index);
send_msg:
		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		if (mbox_msg)
			pshm_drv->pshm_dev->pshmdev_mbxsend
					(pshm_drv->pshm_dev->shm_id, mbox_msg);
	} while (mbox_msg);
}

static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
{
	struct shmdrv_layer *pshm_drv;

	pshm_drv = netdev_priv(shm_netdev);

	skb_queue_tail(&pshm_drv->sk_qhead, skb);

	/* Schedule the Tx work queue for deferred processing of skbs. */
	if (!work_pending(&pshm_drv->shm_tx_work))
		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);

	return 0;
}

static const struct net_device_ops netdev_ops = {
	.ndo_open = shm_netdev_open,
	.ndo_stop = shm_netdev_close,
	.ndo_start_xmit = shm_netdev_tx,
};

static void shm_netdev_setup(struct net_device *pshm_netdev)
{
	struct shmdrv_layer *pshm_drv;

	pshm_netdev->netdev_ops = &netdev_ops;

	pshm_netdev->mtu = CAIF_MAX_MTU;
	pshm_netdev->type = ARPHRD_CAIF;
	pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
	pshm_netdev->tx_queue_len = 0;
	pshm_netdev->destructor = free_netdev;

	pshm_drv = netdev_priv(pshm_netdev);

	/* Initialize structures in a clean state. */
	memset(pshm_drv, 0, sizeof(struct shmdrv_layer));

	pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
}

int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
{
	int result, j;
	struct shmdrv_layer *pshm_drv = NULL;

	pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
						"cfshm%d", shm_netdev_setup);
	if (!pshm_dev->pshm_netdev)
		return -ENOMEM;

	pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
	pshm_drv->pshm_dev = pshm_dev;

	/*
	 * Initialization starts with the verification of the
	 * availability of the MBX driver by calling its setup function.
	 * The MBX driver must be available by this time for proper
	 * functioning of the SHM driver.
	 */
	if ((pshm_dev->pshmdev_mbxsetup
			(caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
		pr_warn("Could not configure SHM mailbox, bailing out\n");
		free_netdev(pshm_dev->pshm_netdev);
		return -ENODEV;
	}

	skb_queue_head_init(&pshm_drv->sk_qhead);

	pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER INSTANCE AT pshm_drv = 0x%p\n",
			pshm_drv->pshm_dev->shm_id, pshm_drv);

	if (pshm_dev->shm_total_sz <
			(NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {

		pr_warn("ERROR, amount of available physical SHM cannot accommodate current SHM driver configuration, bailing out\n");
		free_netdev(pshm_dev->pshm_netdev);
		return -ENOMEM;
	}

	pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
	pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;

	if (pshm_dev->shm_loopback)
		pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
	else
		pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
						(NR_TX_BUF * TX_BUF_SZ);

	spin_lock_init(&pshm_drv->lock);
	INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
	INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
	INIT_LIST_HEAD(&pshm_drv->tx_full_list);

	INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
	INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
	INIT_LIST_HEAD(&pshm_drv->rx_full_list);

	INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
	INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);

	pshm_drv->pshm_tx_workqueue =
				create_singlethread_workqueue("shm_tx_work");
	pshm_drv->pshm_rx_workqueue =
				create_singlethread_workqueue("shm_rx_work");

	for (j = 0; j < NR_TX_BUF; j++) {
		struct buf_list *tx_buf =
				kmalloc(sizeof(struct buf_list), GFP_KERNEL);

		if (tx_buf == NULL) {
			free_netdev(pshm_dev->pshm_netdev);
			return -ENOMEM;
		}
		tx_buf->index = j;
		tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
		tx_buf->len = TX_BUF_SZ;
		tx_buf->frames = 0;
		tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;

		if (pshm_dev->shm_loopback)
			tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
		else
			/*
			 * FIXME: the result of ioremap is not a pointer - arnd
			 */
			tx_buf->desc_vptr =
					ioremap(tx_buf->phy_addr, TX_BUF_SZ);

		list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
	}

	for (j = 0; j < NR_RX_BUF; j++) {
		struct buf_list *rx_buf =
				kmalloc(sizeof(struct buf_list), GFP_KERNEL);

		if (rx_buf == NULL) {
			free_netdev(pshm_dev->pshm_netdev);
			return -ENOMEM;
		}
		rx_buf->index = j;
		rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
		rx_buf->len = RX_BUF_SZ;

		if (pshm_dev->shm_loopback)
			rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
		else
			rx_buf->desc_vptr =
					ioremap(rx_buf->phy_addr, RX_BUF_SZ);
		list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
	}

	pshm_drv->tx_empty_available = 1;
	result = register_netdev(pshm_dev->pshm_netdev);
	if (result)
		pr_warn("ERROR[%d], SHM could not register with network framework, bailing out\n",
			result);

	return result;
}

void caif_shmcore_remove(struct net_device *pshm_netdev)
{
	struct buf_list *pbuf;
	struct shmdrv_layer *pshm_drv = NULL;

	pshm_drv = netdev_priv(pshm_netdev);

	while (!(list_empty(&pshm_drv->tx_pend_list))) {
		pbuf =
			list_entry(pshm_drv->tx_pend_list.next,
					struct buf_list, list);

		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->tx_full_list))) {
		pbuf =
			list_entry(pshm_drv->tx_full_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->tx_empty_list))) {
		pbuf =
			list_entry(pshm_drv->tx_empty_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_full_list))) {
		pbuf =
			list_entry(pshm_drv->rx_full_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_pend_list))) {
		pbuf =
			list_entry(pshm_drv->rx_pend_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_empty_list))) {
		pbuf =
			list_entry(pshm_drv->rx_empty_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	/* Destroy work queues. */
	destroy_workqueue(pshm_drv->pshm_tx_workqueue);
	destroy_workqueue(pshm_drv->pshm_rx_workqueue);

	unregister_netdev(pshm_netdev);
}