Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
37448f7d | 2 | * drivers/net/ibm_emac/ibm_emac_mal.c |
1da177e4 | 3 | * |
37448f7d ES |
4 | * Memory Access Layer (MAL) support |
5 | * | |
6 | * Copyright (c) 2004, 2005 Zultys Technologies. | |
7 | * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> | |
1da177e4 | 8 | * |
37448f7d ES |
9 | * Based on original work by |
10 | * Benjamin Herrenschmidt <benh@kernel.crashing.org>, | |
11 | * David Gibson <hermes@gibson.dropbear.id.au>, | |
12 | * | |
13 | * Armin Kuster <akuster@mvista.com> | |
14 | * Copyright 2002 MontaVista Software Inc. | |
1da177e4 LT |
15 | * |
16 | * This program is free software; you can redistribute it and/or modify it | |
17 | * under the terms of the GNU General Public License as published by the | |
18 | * Free Software Foundation; either version 2 of the License, or (at your | |
19 | * option) any later version. | |
37448f7d | 20 | * |
1da177e4 | 21 | */ |
1da177e4 LT |
22 | #include <linux/module.h> |
23 | #include <linux/kernel.h> | |
24 | #include <linux/errno.h> | |
25 | #include <linux/netdevice.h> | |
26 | #include <linux/init.h> | |
37448f7d | 27 | #include <linux/interrupt.h> |
1da177e4 LT |
28 | #include <linux/dma-mapping.h> |
29 | ||
1da177e4 LT |
30 | #include <asm/ocp.h> |
31 | ||
37448f7d | 32 | #include "ibm_emac_core.h" |
1da177e4 | 33 | #include "ibm_emac_mal.h" |
37448f7d | 34 | #include "ibm_emac_debug.h" |
1da177e4 | 35 | |
37448f7d ES |
36 | int __init mal_register_commac(struct ibm_ocp_mal *mal, |
37 | struct mal_commac *commac) | |
1da177e4 LT |
38 | { |
39 | unsigned long flags; | |
37448f7d | 40 | local_irq_save(flags); |
1da177e4 | 41 | |
37448f7d ES |
42 | MAL_DBG("%d: reg(%08x, %08x)" NL, mal->def->index, |
43 | commac->tx_chan_mask, commac->rx_chan_mask); | |
1da177e4 | 44 | |
37448f7d | 45 | /* Don't let multiple commacs claim the same channel(s) */ |
1da177e4 LT |
46 | if ((mal->tx_chan_mask & commac->tx_chan_mask) || |
47 | (mal->rx_chan_mask & commac->rx_chan_mask)) { | |
37448f7d ES |
48 | local_irq_restore(flags); |
49 | printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n", | |
50 | mal->def->index); | |
1da177e4 LT |
51 | return -EBUSY; |
52 | } | |
53 | ||
54 | mal->tx_chan_mask |= commac->tx_chan_mask; | |
55 | mal->rx_chan_mask |= commac->rx_chan_mask; | |
37448f7d | 56 | list_add(&commac->list, &mal->list); |
1da177e4 | 57 | |
37448f7d | 58 | local_irq_restore(flags); |
1da177e4 LT |
59 | return 0; |
60 | } | |
61 | ||
37448f7d ES |
62 | void __exit mal_unregister_commac(struct ibm_ocp_mal *mal, |
63 | struct mal_commac *commac) | |
1da177e4 LT |
64 | { |
65 | unsigned long flags; | |
37448f7d | 66 | local_irq_save(flags); |
1da177e4 | 67 | |
37448f7d ES |
68 | MAL_DBG("%d: unreg(%08x, %08x)" NL, mal->def->index, |
69 | commac->tx_chan_mask, commac->rx_chan_mask); | |
1da177e4 LT |
70 | |
71 | mal->tx_chan_mask &= ~commac->tx_chan_mask; | |
72 | mal->rx_chan_mask &= ~commac->rx_chan_mask; | |
1da177e4 LT |
73 | list_del_init(&commac->list); |
74 | ||
37448f7d | 75 | local_irq_restore(flags); |
1da177e4 LT |
76 | } |
77 | ||
78 | int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size) | |
79 | { | |
37448f7d ES |
80 | struct ocp_func_mal_data *maldata = mal->def->additions; |
81 | BUG_ON(channel < 0 || channel >= maldata->num_rx_chans || | |
82 | size > MAL_MAX_RX_SIZE); | |
83 | ||
84 | MAL_DBG("%d: set_rbcs(%d, %lu)" NL, mal->def->index, channel, size); | |
85 | ||
86 | if (size & 0xf) { | |
87 | printk(KERN_WARNING | |
88 | "mal%d: incorrect RX size %lu for the channel %d\n", | |
89 | mal->def->index, size, channel); | |
1da177e4 LT |
90 | return -EINVAL; |
91 | } | |
92 | ||
37448f7d | 93 | set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4); |
1da177e4 LT |
94 | return 0; |
95 | } | |
96 | ||
37448f7d | 97 | int mal_tx_bd_offset(struct ibm_ocp_mal *mal, int channel) |
1da177e4 | 98 | { |
37448f7d ES |
99 | struct ocp_func_mal_data *maldata = mal->def->additions; |
100 | BUG_ON(channel < 0 || channel >= maldata->num_tx_chans); | |
101 | return channel * NUM_TX_BUFF; | |
102 | } | |
1da177e4 | 103 | |
37448f7d ES |
104 | int mal_rx_bd_offset(struct ibm_ocp_mal *mal, int channel) |
105 | { | |
106 | struct ocp_func_mal_data *maldata = mal->def->additions; | |
107 | BUG_ON(channel < 0 || channel >= maldata->num_rx_chans); | |
108 | return maldata->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF; | |
109 | } | |
1da177e4 | 110 | |
37448f7d ES |
111 | void mal_enable_tx_channel(struct ibm_ocp_mal *mal, int channel) |
112 | { | |
113 | local_bh_disable(); | |
114 | MAL_DBG("%d: enable_tx(%d)" NL, mal->def->index, channel); | |
115 | set_mal_dcrn(mal, MAL_TXCASR, | |
116 | get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel)); | |
117 | local_bh_enable(); | |
118 | } | |
1da177e4 | 119 | |
37448f7d ES |
120 | void mal_disable_tx_channel(struct ibm_ocp_mal *mal, int channel) |
121 | { | |
122 | set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel)); | |
123 | MAL_DBG("%d: disable_tx(%d)" NL, mal->def->index, channel); | |
124 | } | |
1da177e4 | 125 | |
37448f7d ES |
126 | void mal_enable_rx_channel(struct ibm_ocp_mal *mal, int channel) |
127 | { | |
128 | local_bh_disable(); | |
129 | MAL_DBG("%d: enable_rx(%d)" NL, mal->def->index, channel); | |
130 | set_mal_dcrn(mal, MAL_RXCASR, | |
131 | get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel)); | |
132 | local_bh_enable(); | |
133 | } | |
1da177e4 | 134 | |
37448f7d ES |
135 | void mal_disable_rx_channel(struct ibm_ocp_mal *mal, int channel) |
136 | { | |
137 | set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel)); | |
138 | MAL_DBG("%d: disable_rx(%d)" NL, mal->def->index, channel); | |
139 | } | |
1da177e4 | 140 | |
37448f7d ES |
141 | void mal_poll_add(struct ibm_ocp_mal *mal, struct mal_commac *commac) |
142 | { | |
143 | local_bh_disable(); | |
144 | MAL_DBG("%d: poll_add(%p)" NL, mal->def->index, commac); | |
145 | list_add_tail(&commac->poll_list, &mal->poll_list); | |
146 | local_bh_enable(); | |
1da177e4 LT |
147 | } |
148 | ||
37448f7d ES |
149 | void mal_poll_del(struct ibm_ocp_mal *mal, struct mal_commac *commac) |
150 | { | |
151 | local_bh_disable(); | |
152 | MAL_DBG("%d: poll_del(%p)" NL, mal->def->index, commac); | |
153 | list_del(&commac->poll_list); | |
154 | local_bh_enable(); | |
155 | } | |
156 | ||
157 | /* synchronized by mal_poll() */ | |
158 | static inline void mal_enable_eob_irq(struct ibm_ocp_mal *mal) | |
159 | { | |
160 | MAL_DBG2("%d: enable_irq" NL, mal->def->index); | |
161 | set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE); | |
162 | } | |
163 | ||
164 | /* synchronized by __LINK_STATE_RX_SCHED bit in ndev->state */ | |
165 | static inline void mal_disable_eob_irq(struct ibm_ocp_mal *mal) | |
166 | { | |
167 | set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE); | |
168 | MAL_DBG2("%d: disable_irq" NL, mal->def->index); | |
169 | } | |
170 | ||
7d12e780 | 171 | static irqreturn_t mal_serr(int irq, void *dev_instance) |
1da177e4 LT |
172 | { |
173 | struct ibm_ocp_mal *mal = dev_instance; | |
37448f7d | 174 | u32 esr = get_mal_dcrn(mal, MAL_ESR); |
1da177e4 | 175 | |
37448f7d ES |
176 | /* Clear the error status register */ |
177 | set_mal_dcrn(mal, MAL_ESR, esr); | |
1da177e4 | 178 | |
37448f7d | 179 | MAL_DBG("%d: SERR %08x" NL, mal->def->index, esr); |
1da177e4 | 180 | |
37448f7d ES |
181 | if (esr & MAL_ESR_EVB) { |
182 | if (esr & MAL_ESR_DE) { | |
183 | /* We ignore Descriptor error, | |
184 | * TXDE or RXDE interrupt will be generated anyway. | |
185 | */ | |
186 | return IRQ_HANDLED; | |
1da177e4 | 187 | } |
37448f7d ES |
188 | |
189 | if (esr & MAL_ESR_PEIN) { | |
190 | /* PLB error, it's probably buggy hardware or | |
191 | * incorrect physical address in BD (i.e. bug) | |
192 | */ | |
193 | if (net_ratelimit()) | |
194 | printk(KERN_ERR | |
195 | "mal%d: system error, PLB (ESR = 0x%08x)\n", | |
196 | mal->def->index, esr); | |
197 | return IRQ_HANDLED; | |
198 | } | |
199 | ||
200 | /* OPB error, it's probably buggy hardware or incorrect EBC setup */ | |
201 | if (net_ratelimit()) | |
202 | printk(KERN_ERR | |
203 | "mal%d: system error, OPB (ESR = 0x%08x)\n", | |
204 | mal->def->index, esr); | |
1da177e4 | 205 | } |
37448f7d ES |
206 | return IRQ_HANDLED; |
207 | } | |
208 | ||
209 | static inline void mal_schedule_poll(struct ibm_ocp_mal *mal) | |
210 | { | |
211 | if (likely(netif_rx_schedule_prep(&mal->poll_dev))) { | |
212 | MAL_DBG2("%d: schedule_poll" NL, mal->def->index); | |
213 | mal_disable_eob_irq(mal); | |
214 | __netif_rx_schedule(&mal->poll_dev); | |
215 | } else | |
216 | MAL_DBG2("%d: already in poll" NL, mal->def->index); | |
217 | } | |
1da177e4 | 218 | |
7d12e780 | 219 | static irqreturn_t mal_txeob(int irq, void *dev_instance) |
37448f7d ES |
220 | { |
221 | struct ibm_ocp_mal *mal = dev_instance; | |
222 | u32 r = get_mal_dcrn(mal, MAL_TXEOBISR); | |
223 | MAL_DBG2("%d: txeob %08x" NL, mal->def->index, r); | |
224 | mal_schedule_poll(mal); | |
225 | set_mal_dcrn(mal, MAL_TXEOBISR, r); | |
1da177e4 LT |
226 | return IRQ_HANDLED; |
227 | } | |
228 | ||
7d12e780 | 229 | static irqreturn_t mal_rxeob(int irq, void *dev_instance) |
1da177e4 LT |
230 | { |
231 | struct ibm_ocp_mal *mal = dev_instance; | |
37448f7d ES |
232 | u32 r = get_mal_dcrn(mal, MAL_RXEOBISR); |
233 | MAL_DBG2("%d: rxeob %08x" NL, mal->def->index, r); | |
234 | mal_schedule_poll(mal); | |
235 | set_mal_dcrn(mal, MAL_RXEOBISR, r); | |
236 | return IRQ_HANDLED; | |
237 | } | |
1da177e4 | 238 | |
7d12e780 | 239 | static irqreturn_t mal_txde(int irq, void *dev_instance) |
37448f7d ES |
240 | { |
241 | struct ibm_ocp_mal *mal = dev_instance; | |
242 | u32 deir = get_mal_dcrn(mal, MAL_TXDEIR); | |
243 | set_mal_dcrn(mal, MAL_TXDEIR, deir); | |
1da177e4 | 244 | |
37448f7d | 245 | MAL_DBG("%d: txde %08x" NL, mal->def->index, deir); |
1da177e4 | 246 | |
37448f7d ES |
247 | if (net_ratelimit()) |
248 | printk(KERN_ERR | |
249 | "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n", | |
250 | mal->def->index, deir); | |
1da177e4 LT |
251 | |
252 | return IRQ_HANDLED; | |
253 | } | |
254 | ||
7d12e780 | 255 | static irqreturn_t mal_rxde(int irq, void *dev_instance) |
1da177e4 LT |
256 | { |
257 | struct ibm_ocp_mal *mal = dev_instance; | |
258 | struct list_head *l; | |
37448f7d | 259 | u32 deir = get_mal_dcrn(mal, MAL_RXDEIR); |
1da177e4 | 260 | |
37448f7d | 261 | MAL_DBG("%d: rxde %08x" NL, mal->def->index, deir); |
1da177e4 | 262 | |
37448f7d | 263 | list_for_each(l, &mal->list) { |
1da177e4 | 264 | struct mal_commac *mc = list_entry(l, struct mal_commac, list); |
37448f7d ES |
265 | if (deir & mc->rx_chan_mask) { |
266 | mc->rx_stopped = 1; | |
267 | mc->ops->rxde(mc->dev); | |
1da177e4 LT |
268 | } |
269 | } | |
37448f7d ES |
270 | |
271 | mal_schedule_poll(mal); | |
272 | set_mal_dcrn(mal, MAL_RXDEIR, deir); | |
1da177e4 LT |
273 | |
274 | return IRQ_HANDLED; | |
275 | } | |
276 | ||
37448f7d | 277 | static int mal_poll(struct net_device *ndev, int *budget) |
1da177e4 | 278 | { |
37448f7d | 279 | struct ibm_ocp_mal *mal = ndev->priv; |
1da177e4 | 280 | struct list_head *l; |
37448f7d ES |
281 | int rx_work_limit = min(ndev->quota, *budget), received = 0, done; |
282 | ||
283 | MAL_DBG2("%d: poll(%d) %d ->" NL, mal->def->index, *budget, | |
284 | rx_work_limit); | |
285 | again: | |
286 | /* Process TX skbs */ | |
287 | list_for_each(l, &mal->poll_list) { | |
288 | struct mal_commac *mc = | |
289 | list_entry(l, struct mal_commac, poll_list); | |
290 | mc->ops->poll_tx(mc->dev); | |
291 | } | |
1da177e4 | 292 | |
37448f7d ES |
293 | /* Process RX skbs. |
294 | * We _might_ need something smarter here to enforce polling fairness. |
1da177e4 | 295 | */ |
37448f7d ES |
296 | list_for_each(l, &mal->poll_list) { |
297 | struct mal_commac *mc = | |
298 | list_entry(l, struct mal_commac, poll_list); | |
299 | int n = mc->ops->poll_rx(mc->dev, rx_work_limit); | |
300 | if (n) { | |
301 | received += n; | |
302 | rx_work_limit -= n; | |
303 | if (rx_work_limit <= 0) { | |
304 | done = 0; | |
305 | goto more_work; // XXX What if this is the last one ? | |
306 | } | |
307 | } | |
308 | } | |
1da177e4 | 309 | |
37448f7d ES |
310 | /* We need to disable IRQs to protect from RXDE IRQ here */ |
311 | local_irq_disable(); | |
312 | __netif_rx_complete(ndev); | |
313 | mal_enable_eob_irq(mal); | |
314 | local_irq_enable(); | |
315 | ||
316 | done = 1; | |
317 | ||
318 | /* Check for "rotting" packet(s) */ | |
319 | list_for_each(l, &mal->poll_list) { | |
320 | struct mal_commac *mc = | |
321 | list_entry(l, struct mal_commac, poll_list); | |
322 | if (unlikely(mc->ops->peek_rx(mc->dev) || mc->rx_stopped)) { | |
323 | MAL_DBG2("%d: rotting packet" NL, mal->def->index); | |
324 | if (netif_rx_reschedule(ndev, received)) | |
325 | mal_disable_eob_irq(mal); | |
326 | else | |
327 | MAL_DBG2("%d: already in poll list" NL, | |
328 | mal->def->index); | |
329 | ||
330 | if (rx_work_limit > 0) | |
331 | goto again; | |
332 | else | |
333 | goto more_work; | |
1da177e4 | 334 | } |
37448f7d | 335 | mc->ops->poll_tx(mc->dev); |
1da177e4 | 336 | } |
1da177e4 | 337 | |
37448f7d ES |
338 | more_work: |
339 | ndev->quota -= received; | |
340 | *budget -= received; | |
341 | ||
342 | MAL_DBG2("%d: poll() %d <- %d" NL, mal->def->index, *budget, | |
343 | done ? 0 : 1); | |
344 | return done ? 0 : 1; | |
345 | } | |
346 | ||
347 | static void mal_reset(struct ibm_ocp_mal *mal) | |
348 | { | |
349 | int n = 10; | |
350 | MAL_DBG("%d: reset" NL, mal->def->index); | |
351 | ||
352 | set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR); | |
353 | ||
354 | /* Wait for reset to complete (1 system clock) */ | |
355 | while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n) | |
356 | --n; | |
357 | ||
358 | if (unlikely(!n)) | |
359 | printk(KERN_ERR "mal%d: reset timeout\n", mal->def->index); | |
360 | } | |
361 | ||
362 | int mal_get_regs_len(struct ibm_ocp_mal *mal) | |
363 | { | |
364 | return sizeof(struct emac_ethtool_regs_subhdr) + | |
365 | sizeof(struct ibm_mal_regs); | |
366 | } | |
367 | ||
368 | void *mal_dump_regs(struct ibm_ocp_mal *mal, void *buf) | |
369 | { | |
370 | struct emac_ethtool_regs_subhdr *hdr = buf; | |
371 | struct ibm_mal_regs *regs = (struct ibm_mal_regs *)(hdr + 1); | |
372 | struct ocp_func_mal_data *maldata = mal->def->additions; | |
373 | int i; | |
374 | ||
375 | hdr->version = MAL_VERSION; | |
376 | hdr->index = mal->def->index; | |
377 | ||
378 | regs->tx_count = maldata->num_tx_chans; | |
379 | regs->rx_count = maldata->num_rx_chans; | |
380 | ||
381 | regs->cfg = get_mal_dcrn(mal, MAL_CFG); | |
382 | regs->esr = get_mal_dcrn(mal, MAL_ESR); | |
383 | regs->ier = get_mal_dcrn(mal, MAL_IER); | |
384 | regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR); | |
385 | regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR); | |
386 | regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR); | |
387 | regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR); | |
388 | regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR); | |
389 | regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR); | |
390 | regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR); | |
391 | regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR); | |
392 | ||
393 | for (i = 0; i < regs->tx_count; ++i) | |
394 | regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i)); | |
395 | ||
396 | for (i = 0; i < regs->rx_count; ++i) { | |
397 | regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i)); | |
398 | regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i)); | |
399 | } | |
400 | return regs + 1; | |
1da177e4 LT |
401 | } |
402 | ||
403 | static int __init mal_probe(struct ocp_device *ocpdev) | |
404 | { | |
37448f7d | 405 | struct ibm_ocp_mal *mal; |
1da177e4 | 406 | struct ocp_func_mal_data *maldata; |
37448f7d ES |
407 | int err = 0, i, bd_size; |
408 | ||
409 | MAL_DBG("%d: probe" NL, ocpdev->def->index); | |
1da177e4 | 410 | |
37448f7d | 411 | maldata = ocpdev->def->additions; |
1da177e4 | 412 | if (maldata == NULL) { |
37448f7d | 413 | printk(KERN_ERR "mal%d: missing additional data!\n", |
1da177e4 LT |
414 | ocpdev->def->index); |
415 | return -ENODEV; | |
416 | } | |
417 | ||
37448f7d ES |
418 | mal = kzalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL); |
419 | if (!mal) { | |
1da177e4 | 420 | printk(KERN_ERR |
37448f7d | 421 | "mal%d: out of memory allocating MAL structure!\n", |
1da177e4 LT |
422 | ocpdev->def->index); |
423 | return -ENOMEM; | |
424 | } | |
37448f7d ES |
425 | mal->dcrbase = maldata->dcr_base; |
426 | mal->def = ocpdev->def; | |
1da177e4 | 427 | |
37448f7d ES |
428 | INIT_LIST_HEAD(&mal->poll_list); |
429 | set_bit(__LINK_STATE_START, &mal->poll_dev.state); | |
430 | mal->poll_dev.weight = CONFIG_IBM_EMAC_POLL_WEIGHT; | |
431 | mal->poll_dev.poll = mal_poll; | |
432 | mal->poll_dev.priv = mal; | |
433 | atomic_set(&mal->poll_dev.refcnt, 1); | |
1da177e4 | 434 | |
37448f7d | 435 | INIT_LIST_HEAD(&mal->list); |
1da177e4 | 436 | |
37448f7d ES |
437 | /* Load power-on reset defaults */ |
438 | mal_reset(mal); | |
1da177e4 LT |
439 | |
440 | /* Set the MAL configuration register */ | |
37448f7d ES |
441 | set_mal_dcrn(mal, MAL_CFG, MAL_CFG_DEFAULT | MAL_CFG_PLBB | |
442 | MAL_CFG_OPBBL | MAL_CFG_LEA); | |
443 | ||
444 | mal_enable_eob_irq(mal); | |
445 | ||
446 | /* Allocate space for BD rings */ | |
447 | BUG_ON(maldata->num_tx_chans <= 0 || maldata->num_tx_chans > 32); | |
448 | BUG_ON(maldata->num_rx_chans <= 0 || maldata->num_rx_chans > 32); | |
449 | bd_size = sizeof(struct mal_descriptor) * | |
450 | (NUM_TX_BUFF * maldata->num_tx_chans + | |
451 | NUM_RX_BUFF * maldata->num_rx_chans); | |
452 | mal->bd_virt = | |
453 | dma_alloc_coherent(&ocpdev->dev, bd_size, &mal->bd_dma, GFP_KERNEL); | |
454 | ||
455 | if (!mal->bd_virt) { | |
1da177e4 | 456 | printk(KERN_ERR |
37448f7d ES |
457 | "mal%d: out of memory allocating RX/TX descriptors!\n", |
458 | mal->def->index); | |
1da177e4 LT |
459 | err = -ENOMEM; |
460 | goto fail; | |
461 | } | |
37448f7d | 462 | memset(mal->bd_virt, 0, bd_size); |
1da177e4 | 463 | |
37448f7d ES |
464 | for (i = 0; i < maldata->num_tx_chans; ++i) |
465 | set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma + | |
466 | sizeof(struct mal_descriptor) * | |
467 | mal_tx_bd_offset(mal, i)); | |
468 | ||
469 | for (i = 0; i < maldata->num_rx_chans; ++i) | |
470 | set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma + | |
471 | sizeof(struct mal_descriptor) * | |
472 | mal_rx_bd_offset(mal, i)); | |
1da177e4 LT |
473 | |
474 | err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal); | |
475 | if (err) | |
37448f7d ES |
476 | goto fail2; |
477 | err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE", mal); | |
1da177e4 | 478 | if (err) |
37448f7d | 479 | goto fail3; |
1da177e4 LT |
480 | err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal); |
481 | if (err) | |
37448f7d | 482 | goto fail4; |
1da177e4 LT |
483 | err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal); |
484 | if (err) | |
37448f7d | 485 | goto fail5; |
1da177e4 LT |
486 | err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal); |
487 | if (err) | |
37448f7d | 488 | goto fail6; |
1da177e4 | 489 | |
37448f7d ES |
490 | /* Enable all MAL SERR interrupt sources */ |
491 | set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS); | |
1da177e4 | 492 | |
37448f7d | 493 | /* Advertise this instance to the rest of the world */ |
1da177e4 LT |
494 | ocp_set_drvdata(ocpdev, mal); |
495 | ||
37448f7d | 496 | mal_dbg_register(mal->def->index, mal); |
1da177e4 | 497 | |
37448f7d ES |
498 | printk(KERN_INFO "mal%d: initialized, %d TX channels, %d RX channels\n", |
499 | mal->def->index, maldata->num_tx_chans, maldata->num_rx_chans); | |
1da177e4 LT |
500 | return 0; |
501 | ||
37448f7d ES |
502 | fail6: |
503 | free_irq(maldata->rxde_irq, mal); | |
504 | fail5: | |
505 | free_irq(maldata->txeob_irq, mal); | |
506 | fail4: | |
507 | free_irq(maldata->txde_irq, mal); | |
508 | fail3: | |
509 | free_irq(maldata->serr_irq, mal); | |
510 | fail2: | |
511 | dma_free_coherent(&ocpdev->dev, bd_size, mal->bd_virt, mal->bd_dma); | |
1da177e4 | 512 | fail: |
37448f7d | 513 | kfree(mal); |
1da177e4 LT |
514 | return err; |
515 | } | |
516 | ||
517 | static void __exit mal_remove(struct ocp_device *ocpdev) | |
518 | { | |
519 | struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev); | |
37448f7d ES |
520 | struct ocp_func_mal_data *maldata = mal->def->additions; |
521 | ||
522 | MAL_DBG("%d: remove" NL, mal->def->index); | |
1da177e4 | 523 | |
37448f7d ES |
524 | /* Synchronize with scheduled polling, |
525 | stolen from net/core/dev.c:dev_close() | |
526 | */ | |
527 | clear_bit(__LINK_STATE_START, &mal->poll_dev.state); | |
528 | netif_poll_disable(&mal->poll_dev); | |
529 | ||
530 | if (!list_empty(&mal->list)) { | |
531 | /* This is *very* bad */ | |
532 | printk(KERN_EMERG | |
533 | "mal%d: commac list is not empty on remove!\n", | |
534 | mal->def->index); | |
535 | } | |
1da177e4 LT |
536 | |
537 | ocp_set_drvdata(ocpdev, NULL); | |
538 | ||
1da177e4 LT |
539 | free_irq(maldata->serr_irq, mal); |
540 | free_irq(maldata->txde_irq, mal); | |
541 | free_irq(maldata->txeob_irq, mal); | |
542 | free_irq(maldata->rxde_irq, mal); | |
543 | free_irq(maldata->rxeob_irq, mal); | |
544 | ||
37448f7d | 545 | mal_reset(mal); |
1da177e4 | 546 | |
37448f7d ES |
547 | mal_dbg_register(mal->def->index, NULL); |
548 | ||
549 | dma_free_coherent(&ocpdev->dev, | |
550 | sizeof(struct mal_descriptor) * | |
551 | (NUM_TX_BUFF * maldata->num_tx_chans + | |
552 | NUM_RX_BUFF * maldata->num_rx_chans), mal->bd_virt, | |
553 | mal->bd_dma); | |
1da177e4 LT |
554 | |
555 | kfree(mal); | |
556 | } | |
557 | ||
558 | /* Structure for a device driver */ | |
559 | static struct ocp_device_id mal_ids[] = { | |
37448f7d ES |
560 | { .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_MAL }, |
561 | { .vendor = OCP_VENDOR_INVALID} | |
1da177e4 LT |
562 | }; |
563 | ||
564 | static struct ocp_driver mal_driver = { | |
565 | .name = "mal", | |
566 | .id_table = mal_ids, | |
567 | ||
568 | .probe = mal_probe, | |
569 | .remove = mal_remove, | |
570 | }; | |
571 | ||
37448f7d | 572 | int __init mal_init(void) |
1da177e4 | 573 | { |
37448f7d ES |
574 | MAL_DBG(": init" NL); |
575 | return ocp_register_driver(&mal_driver); | |
1da177e4 LT |
576 | } |
577 | ||
37448f7d | 578 | void __exit mal_exit(void) |
1da177e4 | 579 | { |
37448f7d | 580 | MAL_DBG(": exit" NL); |
1da177e4 LT |
581 | ocp_unregister_driver(&mal_driver); |
582 | } |