drivers/staging/brcm80211/util/hnddma.c
1/*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/kernel.h>
18#include <linux/string.h>
19#include <linux/netdevice.h>
20#include <linux/pci.h>
21#include <bcmdefs.h>
22#include <bcmdevs.h>
23#include <hndsoc.h>
24#include <bcmutils.h>
25#include <siutils.h>
26
27#include <sbhnddma.h>
28#include <hnddma.h>
29
30#if defined(__mips__)
31#include <asm/addrspace.h>
32#endif
33
34#ifdef BRCM_FULLMAC
35#error "hnddma.c shouldn't be needed for FULLMAC"
36#endif
37
38/* debug/trace */
39#ifdef BCMDBG
40#define DMA_ERROR(args) \
41 do { \
42 if (!(*di->msg_level & 1)) \
43 ; \
44 else \
45 printk args; \
46 } while (0)
47#define DMA_TRACE(args) \
48 do { \
49 if (!(*di->msg_level & 2)) \
50 ; \
51 else \
52 printk args; \
53 } while (0)
54#else
55#define DMA_ERROR(args)
56#define DMA_TRACE(args)
57#endif /* BCMDBG */
58
59#define DMA_NONE(args)
60
61#define d64txregs dregs.d64_u.txregs_64
62#define d64rxregs dregs.d64_u.rxregs_64
63#define txd64 dregs.d64_u.txd_64
64#define rxd64 dregs.d64_u.rxd_64
65
66/* default dma message level (if input msg_level pointer is null in dma_attach()) */
67static uint dma_msg_level;
68
69#define MAXNAMEL 8 /* 8 char names */
70
71#define DI_INFO(dmah) ((dma_info_t *)dmah)
72
73#define R_SM(r) (*(r))
74#define W_SM(r, v) (*(r) = (v))
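/*
 * Descriptive note: R_SM()/W_SM() are plain dereferences. The descriptor
 * rings they access are allocated from consistent (coherent) DMA memory
 * via dma_alloc_consistent(), and any byte swapping is done explicitly
 * with BUS_SWAP32() at the call sites.
 */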
75
76/* dma engine software state */
77typedef struct dma_info {
78 struct hnddma_pub hnddma; /* exported structure */
79 uint *msg_level; /* message level pointer */
80 char name[MAXNAMEL]; /* callers name for diag msgs */
81
82 void *pbus; /* bus handle */
83
84 bool dma64; /* this dma engine is operating in 64-bit mode */
85 bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
86
87 union {
88 struct {
89 dma64regs_t *txregs_64; /* 64-bit dma tx engine registers */
90 dma64regs_t *rxregs_64; /* 64-bit dma rx engine registers */
91 dma64dd_t *txd_64; /* pointer to dma64 tx descriptor ring */
92 dma64dd_t *rxd_64; /* pointer to dma64 rx descriptor ring */
93 } d64_u;
94 } dregs;
95
96 u16 dmadesc_align; /* alignment requirement for dma descriptors */
97
98 u16 ntxd; /* # tx descriptors tunable */
99 u16 txin; /* index of next descriptor to reclaim */
100 u16 txout; /* index of next descriptor to post */
101 void **txp; /* pointer to parallel array of pointers to packets */
102 hnddma_seg_map_t *txp_dmah; /* DMA MAP meta-data handle */
103 dmaaddr_t txdpa; /* Aligned physical address of descriptor ring */
104 dmaaddr_t txdpaorig; /* Original physical address of descriptor ring */
105 u16 txdalign; /* #bytes added to alloc'd mem to align txd */
106 u32 txdalloc; /* #bytes allocated for the ring */
107 u32 xmtptrbase; /* When using unaligned descriptors, the ptr register
108 * is not just an index, it needs all 13 bits to be
109 * an offset from the addr register.
110 */
111
112 u16 nrxd; /* # rx descriptors tunable */
113 u16 rxin; /* index of next descriptor to reclaim */
114 u16 rxout; /* index of next descriptor to post */
115 void **rxp; /* pointer to parallel array of pointers to packets */
116 hnddma_seg_map_t *rxp_dmah; /* DMA MAP meta-data handle */
117 dmaaddr_t rxdpa; /* Aligned physical address of descriptor ring */
118 dmaaddr_t rxdpaorig; /* Original physical address of descriptor ring */
119 u16 rxdalign; /* #bytes added to alloc'd mem to align rxd */
120 u32 rxdalloc; /* #bytes allocated for the ring */
121 u32 rcvptrbase; /* Base for ptr reg when using unaligned descriptors */
122
123 /* tunables */
124 unsigned int rxbufsize; /* rx buffer size in bytes,
125 * not including the extra headroom
126 */
127 uint rxextrahdrroom; /* extra rx headroom, reserved to assist upper stack
128 * e.g. some rx pkt buffers will be bridged to tx side
129 * without byte copying. The extra headroom needs to be
130 * large enough to fit txheader needs.
131 * Some dongle driver may not need it.
132 */
133 uint nrxpost; /* # rx buffers to keep posted */
134 unsigned int rxoffset; /* rxcontrol offset */
135 uint ddoffsetlow; /* add to get dma address of descriptor ring, low 32 bits */
136 uint ddoffsethigh; /* high 32 bits */
137 uint dataoffsetlow; /* add to get dma address of data buffer, low 32 bits */
138 uint dataoffsethigh; /* high 32 bits */
139 bool aligndesc_4k; /* descriptor base need to be aligned or not */
140} dma_info_t;
141
142/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
143#ifdef BCMDMASGLISTOSL
144#define DMASGLIST_ENAB true
145#else
146#define DMASGLIST_ENAB false
147#endif /* BCMDMASGLISTOSL */
148
149/* descriptor bumping macros */
150#define XXD(x, n) ((x) & ((n) - 1)) /* faster than %, but n must be power of 2 */
151#define TXD(x) XXD((x), di->ntxd)
152#define RXD(x) XXD((x), di->nrxd)
153#define NEXTTXD(i) TXD((i) + 1)
154#define PREVTXD(i) TXD((i) - 1)
155#define NEXTRXD(i) RXD((i) + 1)
156#define PREVRXD(i) RXD((i) - 1)
157
158#define NTXDACTIVE(h, t) TXD((t) - (h))
159#define NRXDACTIVE(h, t) RXD((t) - (h))
160
161/* macros to convert between byte offsets and indexes */
162#define B2I(bytes, type) ((bytes) / sizeof(type))
163#define I2B(index, type) ((index) * sizeof(type))
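/*
 * Illustrative example: with di->ntxd == 64, NEXTTXD(63) wraps to 0 and
 * PREVTXD(0) wraps to 63, since masking with (n - 1) equals "% n" for a
 * power-of-2 n. B2I()/I2B() convert between a descriptor index and its
 * byte offset within the ring, e.g. I2B(3, dma64dd_t) == 3 * sizeof(dma64dd_t).
 */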
164
165#define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */
166#define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */
167
168#define PCI64ADDR_HIGH 0x80000000 /* address[63] */
169#define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */
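/*
 * Illustrative note: with 32-bit PCI addressing, bits [31:30] of a buffer's
 * physical address (PCI32ADDR_HIGH) cannot be written into the descriptor
 * directly; dma64_dd_upd() and _dma_ddtable_init() shift them down by
 * PCI32ADDR_HIGH_SHIFT and program them into the AE (address extension)
 * field instead.
 */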
170
171/* Common prototypes */
172static bool _dma_isaddrext(dma_info_t *di);
173static bool _dma_descriptor_align(dma_info_t *di);
174static bool _dma_alloc(dma_info_t *di, uint direction);
175static void _dma_detach(dma_info_t *di);
176static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
177static void _dma_rxinit(dma_info_t *di);
178static void *_dma_rx(dma_info_t *di);
179static bool _dma_rxfill(dma_info_t *di);
180static void _dma_rxreclaim(dma_info_t *di);
181static void _dma_rxenable(dma_info_t *di);
182static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
183static void _dma_rx_param_get(dma_info_t *di, u16 *rxoffset,
184 u16 *rxbufsize);
185
186static void _dma_txblock(dma_info_t *di);
187static void _dma_txunblock(dma_info_t *di);
188static uint _dma_txactive(dma_info_t *di);
189static uint _dma_rxactive(dma_info_t *di);
190static uint _dma_txpending(dma_info_t *di);
191static uint _dma_txcommitted(dma_info_t *di);
192
193static void *_dma_peeknexttxp(dma_info_t *di);
194static void *_dma_peeknextrxp(dma_info_t *di);
195static unsigned long _dma_getvar(dma_info_t *di, const char *name);
196static void _dma_counterreset(dma_info_t *di);
197static void _dma_fifoloopbackenable(dma_info_t *di);
198static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
199static u8 dma_align_sizetobits(uint size);
200static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
201 u16 *alignbits, uint *alloced,
202 dmaaddr_t *descpa);
203
204/* Prototypes for 64-bit routines */
205static bool dma64_alloc(dma_info_t *di, uint direction);
206static bool dma64_txreset(dma_info_t *di);
207static bool dma64_rxreset(dma_info_t *di);
208static bool dma64_txsuspendedidle(dma_info_t *di);
209static int dma64_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
210static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
211static void *dma64_getpos(dma_info_t *di, bool direction);
212static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
213static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
214static void dma64_txrotate(dma_info_t *di);
215
216static bool dma64_rxidle(dma_info_t *di);
217static void dma64_txinit(dma_info_t *di);
218static bool dma64_txenabled(dma_info_t *di);
219static void dma64_txsuspend(dma_info_t *di);
220static void dma64_txresume(dma_info_t *di);
221static bool dma64_txsuspended(dma_info_t *di);
222static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
223static bool dma64_txstopped(dma_info_t *di);
224static bool dma64_rxstopped(dma_info_t *di);
225static bool dma64_rxenabled(dma_info_t *di);
226static bool _dma64_addrext(dma64regs_t *dma64regs);
227
228static inline u32 parity32(u32 data);
229
230const di_fcn_t dma64proc = {
231 (di_detach_t) _dma_detach,
232 (di_txinit_t) dma64_txinit,
233 (di_txreset_t) dma64_txreset,
234 (di_txenabled_t) dma64_txenabled,
235 (di_txsuspend_t) dma64_txsuspend,
236 (di_txresume_t) dma64_txresume,
237 (di_txsuspended_t) dma64_txsuspended,
238 (di_txsuspendedidle_t) dma64_txsuspendedidle,
239 (di_txfast_t) dma64_txfast,
240 (di_txunframed_t) dma64_txunframed,
241 (di_getpos_t) dma64_getpos,
242 (di_txstopped_t) dma64_txstopped,
243 (di_txreclaim_t) dma64_txreclaim,
244 (di_getnexttxp_t) dma64_getnexttxp,
245 (di_peeknexttxp_t) _dma_peeknexttxp,
246 (di_txblock_t) _dma_txblock,
247 (di_txunblock_t) _dma_txunblock,
248 (di_txactive_t) _dma_txactive,
249 (di_txrotate_t) dma64_txrotate,
250
251 (di_rxinit_t) _dma_rxinit,
252 (di_rxreset_t) dma64_rxreset,
253 (di_rxidle_t) dma64_rxidle,
254 (di_rxstopped_t) dma64_rxstopped,
255 (di_rxenable_t) _dma_rxenable,
256 (di_rxenabled_t) dma64_rxenabled,
257 (di_rx_t) _dma_rx,
258 (di_rxfill_t) _dma_rxfill,
259 (di_rxreclaim_t) _dma_rxreclaim,
260 (di_getnextrxp_t) _dma_getnextrxp,
261 (di_peeknextrxp_t) _dma_peeknextrxp,
262 (di_rxparam_get_t) _dma_rx_param_get,
263
264 (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
265 (di_getvar_t) _dma_getvar,
266 (di_counterreset_t) _dma_counterreset,
267 (di_ctrlflags_t) _dma_ctrlflags,
268 NULL,
269 NULL,
270 NULL,
271 (di_rxactive_t) _dma_rxactive,
272 (di_txpending_t) _dma_txpending,
273 (di_txcommitted_t) _dma_txcommitted,
274 39
275};
276
277struct hnddma_pub *dma_attach(char *name, si_t *sih,
278 void *dmaregstx, void *dmaregsrx, uint ntxd,
279 uint nrxd, uint rxbufsize, int rxextheadroom,
280 uint nrxpost, uint rxoffset, uint *msg_level)
281{
282 dma_info_t *di;
283 uint size;
284
285 /* allocate private info structure */
286 di = kzalloc(sizeof(dma_info_t), GFP_ATOMIC);
287 if (di == NULL) {
288#ifdef BCMDBG
289 printk(KERN_ERR "dma_attach: out of memory\n");
290#endif
291 return NULL;
292 }
293
294 di->msg_level = msg_level ? msg_level : &dma_msg_level;
295
296 /* old chips w/o sb is no longer supported */
297 ASSERT(sih != NULL);
298
299 di->dma64 = ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
300
301 /* check arguments */
302 ASSERT(ISPOWEROF2(ntxd));
303 ASSERT(ISPOWEROF2(nrxd));
304
305 if (nrxd == 0)
306 ASSERT(dmaregsrx == NULL);
307 if (ntxd == 0)
308 ASSERT(dmaregstx == NULL);
309
310 /* init dma reg pointer */
311 ASSERT(ntxd <= D64MAXDD);
312 ASSERT(nrxd <= D64MAXDD);
313 di->d64txregs = (dma64regs_t *) dmaregstx;
314 di->d64rxregs = (dma64regs_t *) dmaregsrx;
315 di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
316
317 /* Default flags (which can be changed by the driver calling dma_ctrlflags
318 * before enable): For backwards compatibility both Rx Overflow Continue
319 * and Parity are DISABLED; parity is only re-enabled later if the
320 * hardware actually supports it.
321 */
322 di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
323 0);
324
325 DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
326 "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
327 "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
328 di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize,
329 rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
330
331 /* make a private copy of our callers name */
332 strncpy(di->name, name, MAXNAMEL);
333 di->name[MAXNAMEL - 1] = '\0';
334
335 di->pbus = ((struct si_info *)sih)->pbus;
336
337 /* save tunables */
338 di->ntxd = (u16) ntxd;
339 di->nrxd = (u16) nrxd;
340
341 /* the actual dma size doesn't include the extra headroom */
342 di->rxextrahdrroom =
343 (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
344 if (rxbufsize > BCMEXTRAHDROOM)
345 di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
346 else
347 di->rxbufsize = (u16) rxbufsize;
348
349 di->nrxpost = (u16) nrxpost;
350 di->rxoffset = (u8) rxoffset;
351
352 /*
353 * figure out the DMA physical address offset for dd and data
354 * PCI/PCIE: they map silicon backplane address to zero based memory, need offset
355 * Other bus: use zero
356 * SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
357 */
358 di->ddoffsetlow = 0;
359 di->dataoffsetlow = 0;
360 /* for pci bus, add offset */
361 if (sih->bustype == PCI_BUS) {
362 /* pcie with DMA64 */
363 di->ddoffsetlow = 0;
364 di->ddoffsethigh = SI_PCIE_DMA_H32;
a9533e7e
HP
365 di->dataoffsetlow = di->ddoffsetlow;
366 di->dataoffsethigh = di->ddoffsethigh;
367 }
368#if defined(__mips__) && defined(IL_BIGENDIAN)
369 di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
370#endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
371 /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
372 if ((si_coreid(sih) == SDIOD_CORE_ID)
373 && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
374 di->addrext = 0;
375 else if ((si_coreid(sih) == I2S_CORE_ID) &&
376 ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
377 di->addrext = 0;
378 else
379 di->addrext = _dma_isaddrext(di);
380
381 /* do the descriptors need to be aligned, and if yes, on 4K/8K or not */
382 di->aligndesc_4k = _dma_descriptor_align(di);
383 if (di->aligndesc_4k) {
384 di->dmadesc_align = D64RINGALIGN_BITS;
385 if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
386 /* for smaller dd table, HW relax alignment reqmnt */
387 di->dmadesc_align = D64RINGALIGN_BITS - 1;
388 }
389 } else
390 di->dmadesc_align = 4; /* 16 byte alignment */
391
392 DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
393 di->aligndesc_4k, di->dmadesc_align));
394
395 /* allocate tx packet pointer vector */
396 if (ntxd) {
397 size = ntxd * sizeof(void *);
398 di->txp = kzalloc(size, GFP_ATOMIC);
399 if (di->txp == NULL) {
400 DMA_ERROR(("%s: dma_attach: out of tx memory\n", di->name));
401 goto fail;
402 }
403 }
404
405 /* allocate rx packet pointer vector */
406 if (nrxd) {
407 size = nrxd * sizeof(void *);
408 di->rxp = kzalloc(size, GFP_ATOMIC);
409 if (di->rxp == NULL) {
410 DMA_ERROR(("%s: dma_attach: out of rx memory\n", di->name));
411 goto fail;
412 }
413 }
414
415 /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
416 if (ntxd) {
417 if (!_dma_alloc(di, DMA_TX))
418 goto fail;
419 }
420
421 /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
422 if (nrxd) {
423 if (!_dma_alloc(di, DMA_RX))
424 goto fail;
425 }
426
427 if ((di->ddoffsetlow != 0) && !di->addrext) {
428 if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
429 DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->txdpa)));
430 goto fail;
431 }
432 if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
433 DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->rxdpa)));
434 goto fail;
435 }
436 }
437
438 DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, di->addrext));
439
440 /* allocate DMA mapping vectors */
441 if (DMASGLIST_ENAB) {
442 if (ntxd) {
443 size = ntxd * sizeof(hnddma_seg_map_t);
444 di->txp_dmah = kzalloc(size, GFP_ATOMIC);
445 if (di->txp_dmah == NULL)
446 goto fail;
447 }
448
449 if (nrxd) {
450 size = nrxd * sizeof(hnddma_seg_map_t);
451 di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
452 if (di->rxp_dmah == NULL)
453 goto fail;
454 }
455 }
456
457 return (struct hnddma_pub *) di;
458
459 fail:
460 _dma_detach(di);
461 return NULL;
462}
463
464/* Check for odd number of 1's */
465static inline u32 parity32(u32 data)
466{
467 data ^= data >> 16;
468 data ^= data >> 8;
469 data ^= data >> 4;
470 data ^= data >> 2;
471 data ^= data >> 1;
472
473 return data & 1;
474}
475
476#define DMA64_DD_PARITY(dd) parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
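/*
 * Illustrative note: parity32() XOR-folds a word down to one bit, so
 * DMA64_DD_PARITY() is 1 when the four descriptor words contain an odd
 * number of set bits; dma64_dd_upd() then sets D64_CTRL2_PARITY so the
 * descriptor as written contains an even number of set bits.
 */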
477
478static inline void
479dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
480 u32 *flags, u32 bufcount)
481{
482 u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
483
484 /* PCI bus with big(>1G) physical address, use address extension */
485#if defined(__mips__) && defined(IL_BIGENDIAN)
486 if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
487 || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
488#else
489 if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
490#endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
491 ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);
492
493 W_SM(&ddring[outidx].addrlow,
494 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
495 W_SM(&ddring[outidx].addrhigh,
496 BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
497 W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
498 W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
499 } else {
500 /* address extension for 32-bit PCI */
501 u32 ae;
502 ASSERT(di->addrext);
503
504 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
505 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
506 ASSERT(PHYSADDRHI(pa) == 0);
507
508 ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
509 W_SM(&ddring[outidx].addrlow,
510 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
511 W_SM(&ddring[outidx].addrhigh,
512 BUS_SWAP32(0 + di->dataoffsethigh));
513 W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
514 W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
515 }
516 if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
517 if (DMA64_DD_PARITY(&ddring[outidx])) {
518 W_SM(&ddring[outidx].ctrl2,
519 BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
520 }
521 }
522}
523
524static bool _dma_alloc(dma_info_t *di, uint direction)
525{
526 return dma64_alloc(di, direction);
527}
528
529void *dma_alloc_consistent(struct pci_dev *pdev, uint size, u16 align_bits,
530 uint *alloced, unsigned long *pap)
531{
532 if (align_bits) {
533 u16 align = (1 << align_bits);
534 if (!IS_ALIGNED(PAGE_SIZE, align))
535 size += align;
536 *alloced = size;
537 }
538 return pci_alloc_consistent(pdev, size, (dma_addr_t *) pap);
539}
540
541/* !! may be called with core in reset */
542static void _dma_detach(dma_info_t *di)
543{
544
545 DMA_TRACE(("%s: dma_detach\n", di->name));
546
547 /* shouldn't be here if descriptors are unreclaimed */
548 ASSERT(di->txin == di->txout);
549 ASSERT(di->rxin == di->rxout);
550
551 /* free dma descriptor rings */
552 if (di->txd64)
553 pci_free_consistent(di->pbus, di->txdalloc,
554 ((s8 *)di->txd64 - di->txdalign),
555 (di->txdpaorig));
556 if (di->rxd64)
557 pci_free_consistent(di->pbus, di->rxdalloc,
558 ((s8 *)di->rxd64 - di->rxdalign),
559 (di->rxdpaorig));
560
561 /* free packet pointer vectors */
562 kfree(di->txp);
563 kfree(di->rxp);
564
565 /* free tx packet DMA handles */
566 kfree(di->txp_dmah);
567
568 /* free rx packet DMA handles */
569 kfree(di->rxp_dmah);
570
571 /* free our private info structure */
572 kfree(di);
573
574}
575
576static bool _dma_descriptor_align(dma_info_t *di)
577{
578 u32 addrl;
579
580 /* Check to see if the descriptors need to be aligned on 4K/8K or not */
581 if (di->d64txregs != NULL) {
582 W_REG(&di->d64txregs->addrlow, 0xff0);
583 addrl = R_REG(&di->d64txregs->addrlow);
584 if (addrl != 0)
585 return false;
586 } else if (di->d64rxregs != NULL) {
587 W_REG(&di->d64rxregs->addrlow, 0xff0);
588 addrl = R_REG(&di->d64rxregs->addrlow);
589 if (addrl != 0)
590 return false;
591 }
592 return true;
593}
594
595/* return true if this dma engine supports DmaExtendedAddrChanges, otherwise false */
596static bool _dma_isaddrext(dma_info_t *di)
597{
598 /* DMA64 supports full 32- or 64-bit operation. AE is always valid */
599
600 /* not all tx or rx channels are available */
601 if (di->d64txregs != NULL) {
602 if (!_dma64_addrext(di->d64txregs)) {
603 DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
604 "AE set\n", di->name));
605 ASSERT(0);
a9533e7e 606 }
607 return true;
608 } else if (di->d64rxregs != NULL) {
609 if (!_dma64_addrext(di->d64rxregs)) {
610 DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
611 "AE set\n", di->name));
612 ASSERT(0);
613 }
614 return true;
615 }
616 return false;
617}
618
619/* initialize descriptor table base address */
620static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
621{
622 if (!di->aligndesc_4k) {
623 if (direction == DMA_TX)
624 di->xmtptrbase = PHYSADDRLO(pa);
625 else
626 di->rcvptrbase = PHYSADDRLO(pa);
627 }
628
629 if ((di->ddoffsetlow == 0)
630 || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
631 if (direction == DMA_TX) {
632 W_REG(&di->d64txregs->addrlow,
633 (PHYSADDRLO(pa) + di->ddoffsetlow));
634 W_REG(&di->d64txregs->addrhigh,
635 (PHYSADDRHI(pa) + di->ddoffsethigh));
636 } else {
637 W_REG(&di->d64rxregs->addrlow,
638 (PHYSADDRLO(pa) + di->ddoffsetlow));
639 W_REG(&di->d64rxregs->addrhigh,
640 (PHYSADDRHI(pa) + di->ddoffsethigh));
641 }
642 } else {
643 /* DMA64 32bits address extension */
644 u32 ae;
645 ASSERT(di->addrext);
646 ASSERT(PHYSADDRHI(pa) == 0);
647
648 /* shift the high bit(s) from pa to ae */
649 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
650 PCI32ADDR_HIGH_SHIFT;
651 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
652
653 if (direction == DMA_TX) {
654 W_REG(&di->d64txregs->addrlow,
655 (PHYSADDRLO(pa) + di->ddoffsetlow));
656 W_REG(&di->d64txregs->addrhigh,
657 di->ddoffsethigh);
658 SET_REG(&di->d64txregs->control,
659 D64_XC_AE, (ae << D64_XC_AE_SHIFT));
660 } else {
661 W_REG(&di->d64rxregs->addrlow,
662 (PHYSADDRLO(pa) + di->ddoffsetlow));
663 W_REG(&di->d64rxregs->addrhigh,
664 di->ddoffsethigh);
665 SET_REG(&di->d64rxregs->control,
666 D64_RC_AE, (ae << D64_RC_AE_SHIFT));
667 }
668 }
669}
670
671static void _dma_fifoloopbackenable(dma_info_t *di)
672{
673 DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
674
675 OR_REG(&di->d64txregs->control, D64_XC_LE);
676}
677
678static void _dma_rxinit(dma_info_t *di)
679{
680 DMA_TRACE(("%s: dma_rxinit\n", di->name));
681
682 if (di->nrxd == 0)
683 return;
684
685 di->rxin = di->rxout = 0;
686
687 /* clear rx descriptor ring */
688 memset((void *)di->rxd64, '\0',
689 (di->nrxd * sizeof(dma64dd_t)));
690
691 /* DMA engine without alignment requirement requires table to be inited
692 * before enabling the engine
693 */
694 if (!di->aligndesc_4k)
695 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
a9533e7e 696
36e319bd 697 _dma_rxenable(di);
a9533e7e 698
36e319bd
RV
699 if (di->aligndesc_4k)
700 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
a9533e7e
HP
701}
702
703static void _dma_rxenable(dma_info_t *di)
704{
705 uint dmactrlflags = di->hnddma.dmactrlflags;
706 u32 control;
707
708 DMA_TRACE(("%s: dma_rxenable\n", di->name));
709
710 control =
711 (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
712 D64_RC_RE;
713
714 if ((dmactrlflags & DMA_CTRL_PEN) == 0)
715 control |= D64_RC_PD;
716
717 if (dmactrlflags & DMA_CTRL_ROC)
718 control |= D64_RC_OC;
719
720 W_REG(&di->d64rxregs->control,
721 ((di->rxoffset << D64_RC_RO_SHIFT) | control));
722}
723
724static void
725_dma_rx_param_get(dma_info_t *di, u16 *rxoffset, u16 *rxbufsize)
726{
727 /* the normal values fit into 16 bits */
728 *rxoffset = (u16) di->rxoffset;
729 *rxbufsize = (u16) di->rxbufsize;
730}
731
732/* !! rx entry routine
733 * returns a pointer to the next frame received, or NULL if there are no more
734 * if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is supported
735 * with a chain of packets;
736 * otherwise, the frame is treated as a giant packet and will be tossed.
737 * The DMA scattering starts with normal DMA header, followed by first buffer data.
738 * After it reaches the max size of buffer, the data continues in next DMA descriptor
739 * buffer WITHOUT DMA header
740 */
741static void *BCMFASTPATH _dma_rx(dma_info_t *di)
742{
743 struct sk_buff *p, *head, *tail;
744 uint len;
745 uint pkt_len;
746 int resid = 0;
747
748 next_frame:
749 head = _dma_getnextrxp(di, false);
750 if (head == NULL)
751 return NULL;
752
753 len = le16_to_cpu(*(u16 *) (head->data));
754 DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
755
756#if defined(__mips__)
757#define OSL_UNCACHED(va) ((void *)KSEG1ADDR((va)))
758 if (!len) {
759 while (!(len = *(u16 *) OSL_UNCACHED(head->data)))
760 udelay(1);
761
762 *(u16 *) (head->data) = cpu_to_le16((u16) len);
763 }
764#endif /* defined(__mips__) */
765
766 /* set actual length */
767 pkt_len = min((di->rxoffset + len), di->rxbufsize);
768 __skb_trim(head, pkt_len);
769 resid = len - (di->rxbufsize - di->rxoffset);
770
771 /* check for single or multi-buffer rx */
772 if (resid > 0) {
773 tail = head;
774 while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
775 tail->next = p;
776 pkt_len = min(resid, (int)di->rxbufsize);
777 __skb_trim(p, pkt_len);
778
779 tail = p;
780 resid -= di->rxbufsize;
781 }
782
783#ifdef BCMDBG
784 if (resid > 0) {
785 uint cur;
786 ASSERT(p == NULL);
787 cur =
788 B2I(((R_REG(&di->d64rxregs->status0) &
789 D64_RS0_CD_MASK) -
790 di->rcvptrbase) & D64_RS0_CD_MASK,
791 dma64dd_t);
792 DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
793 di->rxin, di->rxout, cur));
794 }
795#endif /* BCMDBG */
796
797 if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
798 DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
799 di->name, len));
800 pkt_buf_free_skb(head);
801 di->hnddma.rxgiants++;
802 goto next_frame;
803 }
804 }
805
806 return head;
807}
808
809/* post receive buffers
810 * return false if refill failed completely and ring is empty
811 * this will stall the rx dma and user might want to call rxfill again asap
812 * This is unlikely to happen on a memory-rich NIC, but often happens on memory-constrained dongles
813 */
814static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
815{
816 struct sk_buff *p;
817 u16 rxin, rxout;
818 u32 flags = 0;
819 uint n;
820 uint i;
821 dmaaddr_t pa;
822 uint extra_offset = 0;
823 bool ring_empty;
824
825 ring_empty = false;
826
827 /*
828 * Determine how many receive buffers we're lacking
829 * from the full complement, allocate, initialize,
830 * and post them, then update the chip rx lastdscr.
831 */
832
833 rxin = di->rxin;
834 rxout = di->rxout;
835
836 n = di->nrxpost - NRXDACTIVE(rxin, rxout);
837
838 DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
839
840 if (di->rxbufsize > BCMEXTRAHDROOM)
841 extra_offset = di->rxextrahdrroom;
842
843 for (i = 0; i < n; i++) {
844 /* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
845 size to be allocated
846 */
847
848 p = pkt_buf_get_skb(di->rxbufsize + extra_offset);
849
850 if (p == NULL) {
851 DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
852 di->name));
853 if (i == 0 && dma64_rxidle(di)) {
854 DMA_ERROR(("%s: rxfill64: ring is empty !\n",
855 di->name));
856 ring_empty = true;
857 }
858 di->hnddma.rxnobuf++;
859 break;
860 }
861 /* reserve an extra headroom, if applicable */
862 if (extra_offset)
863 skb_pull(p, extra_offset);
864
865 /* Do a cached write instead of uncached write since DMA_MAP
866 * will flush the cache.
867 */
868 *(u32 *) (p->data) = 0;
869
870 if (DMASGLIST_ENAB)
871 memset(&di->rxp_dmah[rxout], 0,
872 sizeof(hnddma_seg_map_t));
a9533e7e 873
874 pa = pci_map_single(di->pbus, p->data,
875 di->rxbufsize, PCI_DMA_FROMDEVICE);
876
877 ASSERT(IS_ALIGNED(PHYSADDRLO(pa), 4));
878
879 /* save the free packet pointer */
880 ASSERT(di->rxp[rxout] == NULL);
881 di->rxp[rxout] = p;
882
883 /* reset flags for each descriptor */
884 flags = 0;
885 if (rxout == (di->nrxd - 1))
886 flags = D64_CTRL1_EOT;
887
888 dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
889 di->rxbufsize);
890 rxout = NEXTRXD(rxout);
891 }
892
893 di->rxout = rxout;
894
895 /* update the chip lastdscr pointer */
896 W_REG(&di->d64rxregs->ptr,
897 di->rcvptrbase + I2B(rxout, dma64dd_t));
898
899 return ring_empty;
900}
901
902/* like getnexttxp but no reclaim */
903static void *_dma_peeknexttxp(dma_info_t *di)
904{
905 uint end, i;
906
907 if (di->ntxd == 0)
908 return NULL;
909
910 end =
911 B2I(((R_REG(&di->d64txregs->status0) &
912 D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
913 dma64dd_t);
a9533e7e
HP
914
915 for (i = di->txin; i != end; i = NEXTTXD(i))
916 if (di->txp[i])
917 return di->txp[i];
918
919 return NULL;
920}
921
922/* like getnextrxp but not take off the ring */
7cc4a4c0 923static void *_dma_peeknextrxp(dma_info_t *di)
a9533e7e
HP
924{
925 uint end, i;
926
927 if (di->nrxd == 0)
928 return NULL;
929
930 end =
931 B2I(((R_REG(&di->d64rxregs->status0) &
932 D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
933 dma64dd_t);
a9533e7e
HP
934
935 for (i = di->rxin; i != end; i = NEXTRXD(i))
936 if (di->rxp[i])
937 return di->rxp[i];
938
939 return NULL;
940}
941
942static void _dma_rxreclaim(dma_info_t *di)
943{
944 void *p;
945
946 DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
947
948 while ((p = _dma_getnextrxp(di, true)))
949 pkt_buf_free_skb(p);
950}
951
952static void *BCMFASTPATH _dma_getnextrxp(dma_info_t *di, bool forceall)
953{
954 if (di->nrxd == 0)
955 return NULL;
956
957 return dma64_getnextrxp(di, forceall);
958}
959
960static void _dma_txblock(dma_info_t *di)
961{
962 di->hnddma.txavail = 0;
963}
964
965static void _dma_txunblock(dma_info_t *di)
966{
967 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
968}
969
970static uint _dma_txactive(dma_info_t *di)
971{
972 return NTXDACTIVE(di->txin, di->txout);
973}
974
975static uint _dma_txpending(dma_info_t *di)
976{
977 uint curr;
978
979 curr =
980 B2I(((R_REG(&di->d64txregs->status0) &
981 D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
982 dma64dd_t);
983
984 return NTXDACTIVE(curr, di->txout);
985}
986
987static uint _dma_txcommitted(dma_info_t *di)
988{
989 uint ptr;
990 uint txin = di->txin;
991
992 if (txin == di->txout)
993 return 0;
994
995 ptr = B2I(R_REG(&di->d64txregs->ptr), dma64dd_t);
996
997 return NTXDACTIVE(di->txin, ptr);
998}
999
1000static uint _dma_rxactive(dma_info_t *di)
1001{
1002 return NRXDACTIVE(di->rxin, di->rxout);
1003}
1004
1005static void _dma_counterreset(dma_info_t *di)
1006{
1007 /* reset all software counter */
1008 di->hnddma.rxgiants = 0;
1009 di->hnddma.rxnobuf = 0;
1010 di->hnddma.txnobuf = 0;
1011}
1012
1013static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
1014{
1015 uint dmactrlflags;
1016
1017 /* check the handle before dereferencing it */
1018 if (di == NULL)
1019 return 0;
1020 dmactrlflags = di->hnddma.dmactrlflags;
1021
1022 ASSERT((flags & ~mask) == 0);
1023
1024 dmactrlflags &= ~mask;
1025 dmactrlflags |= flags;
1026
1027 /* If trying to enable parity, check if parity is actually supported */
1028 if (dmactrlflags & DMA_CTRL_PEN) {
66cbd3ab 1029 u32 control;
a9533e7e 1030
ff31c54c
AS
1031 control = R_REG(&di->d64txregs->control);
1032 W_REG(&di->d64txregs->control,
36e319bd 1033 control | D64_XC_PD);
ff31c54c 1034 if (R_REG(&di->d64txregs->control) & D64_XC_PD) {
36e319bd
RV
1035 /* We *can* disable it so it is supported,
1036 * restore control register
1037 */
ff31c54c 1038 W_REG(&di->d64txregs->control,
36e319bd
RV
1039 control);
1040 } else {
1041 /* Not supported, don't allow it to be enabled */
1042 dmactrlflags &= ~DMA_CTRL_PEN;
1043 }
a9533e7e
HP
1044 }
1045
1046 di->hnddma.dmactrlflags = dmactrlflags;
1047
1048 return dmactrlflags;
1049}
1050
1051/* get the address of the var in order to change later */
1052static unsigned long _dma_getvar(dma_info_t *di, const char *name)
1053{
1054 if (!strcmp(name, "&txavail"))
1055 return (unsigned long)&(di->hnddma.txavail);
1056 else {
1057 ASSERT(0);
1058 }
1059 return 0;
1060}
1061
1062static
1063u8 dma_align_sizetobits(uint size)
1064{
1065 u8 bitpos = 0;
1066 ASSERT(size);
1067 ASSERT(!(size & (size - 1)));
1068 while (size >>= 1) {
1069 bitpos++;
1070 }
1071 return bitpos;
1072}
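/*
 * Illustrative example: dma_align_sizetobits(4096) returns 12, i.e. the
 * ring size expressed as an alignment in bits. dma_ringalloc() below uses
 * this to retry the allocation aligned to the ring size when the first
 * attempt straddles a page boundary.
 */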
1073
1074/* This function ensures that the DMA descriptor ring will not get allocated
1075 * across Page boundary. If the allocation is done across the page boundary
1076 * at the first time, then it is freed and the allocation is done at
1077 * descriptor ring size aligned location. This will ensure that the ring will
1078 * not cross page boundary
1079 */
1080static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
1081 u16 *alignbits, uint *alloced,
1082 dmaaddr_t *descpa)
1083{
1084 void *va;
1085 u32 desc_strtaddr;
1086 u32 alignbytes = 1 << *alignbits;
a9533e7e 1087
1088 va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
1089
1090 if (NULL == va)
a9533e7e
HP
1091 return NULL;
1092
1093 desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
1094 if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
1095 & boundary)) {
1096 *alignbits = dma_align_sizetobits(size);
1097 pci_free_consistent(di->pbus, size, va, *descpa);
1098 va = dma_alloc_consistent(di->pbus, size, *alignbits,
1099 alloced, descpa);
1100 }
1101 return va;
1102}
1103
1104/* 64-bit DMA functions */
1105
1106static void dma64_txinit(dma_info_t *di)
1107{
1108 u32 control = D64_XC_XE;
1109
1110 DMA_TRACE(("%s: dma_txinit\n", di->name));
1111
1112 if (di->ntxd == 0)
1113 return;
1114
1115 di->txin = di->txout = 0;
1116 di->hnddma.txavail = di->ntxd - 1;
1117
1118 /* clear tx descriptor ring */
1119 memset((void *)di->txd64, '\0', (di->ntxd * sizeof(dma64dd_t)));
1120
1121 /* DMA engine without alignment requirement requires table to be inited
1122 * before enabling the engine
1123 */
1124 if (!di->aligndesc_4k)
1125 _dma_ddtable_init(di, DMA_TX, di->txdpa);
1126
1127 if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
1128 control |= D64_XC_PD;
1129 OR_REG(&di->d64txregs->control, control);
1130
1131 /* DMA engine with alignment requirement requires table to be inited
1132 * before enabling the engine
1133 */
1134 if (di->aligndesc_4k)
1135 _dma_ddtable_init(di, DMA_TX, di->txdpa);
1136}
1137
1138static bool dma64_txenabled(dma_info_t *di)
1139{
1140 u32 xc;
1141
1142 /* If the chip is dead, it is not enabled :-) */
1143 xc = R_REG(&di->d64txregs->control);
1144 return (xc != 0xffffffff) && (xc & D64_XC_XE);
1145}
1146
1147static void dma64_txsuspend(dma_info_t *di)
1148{
1149 DMA_TRACE(("%s: dma_txsuspend\n", di->name));
1150
1151 if (di->ntxd == 0)
1152 return;
1153
1154 OR_REG(&di->d64txregs->control, D64_XC_SE);
1155}
1156
1157static void dma64_txresume(dma_info_t *di)
1158{
1159 DMA_TRACE(("%s: dma_txresume\n", di->name));
1160
1161 if (di->ntxd == 0)
1162 return;
1163
1164 AND_REG(&di->d64txregs->control, ~D64_XC_SE);
1165}
1166
1167static bool dma64_txsuspended(dma_info_t *di)
1168{
1169 return (di->ntxd == 0) ||
1170 ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
1171 D64_XC_SE);
1172}
1173
1174static void BCMFASTPATH dma64_txreclaim(dma_info_t *di, txd_range_t range)
1175{
1176 void *p;
1177
1178 DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
1179 (range == HNDDMA_RANGE_ALL) ? "all" :
1180 ((range ==
1181 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
1182 "transferred")));
1183
1184 if (di->txin == di->txout)
1185 return;
1186
1187 while ((p = dma64_getnexttxp(di, range))) {
1188 /* For unframed data, we don't have any packets to free */
1189 if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
1190 pkt_buf_free_skb(p);
1191 }
1192}
1193
1194static bool dma64_txstopped(dma_info_t *di)
1195{
1196 return ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) ==
1197 D64_XS0_XS_STOPPED);
1198}
1199
1200static bool dma64_rxstopped(dma_info_t *di)
1201{
1202 return ((R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
1203 D64_RS0_RS_STOPPED);
1204}
1205
1206static bool dma64_alloc(dma_info_t *di, uint direction)
1207{
1208 u16 size;
1209 uint ddlen;
1210 void *va;
1211 uint alloced = 0;
1212 u16 align;
1213 u16 align_bits;
1214
1215 ddlen = sizeof(dma64dd_t);
1216
1217 size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
1218 align_bits = di->dmadesc_align;
1219 align = (1 << align_bits);
1220
1221 if (direction == DMA_TX) {
1222 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
1223 &alloced, &di->txdpaorig);
1224 if (va == NULL) {
1225 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
1226 return false;
1227 }
1228 align = (1 << align_bits);
1229 di->txd64 = (dma64dd_t *) roundup((unsigned long)va, align);
1230 di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
1231 PHYSADDRLOSET(di->txdpa,
1232 PHYSADDRLO(di->txdpaorig) + di->txdalign);
1233 /* Make sure that alignment didn't overflow */
1234 ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));
1235
1236 PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
1237 di->txdalloc = alloced;
1238 ASSERT(IS_ALIGNED((unsigned long)di->txd64, align));
1239 } else {
1240 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
1241 &alloced, &di->rxdpaorig);
1242 if (va == NULL) {
1243 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
1244 return false;
1245 }
1246 align = (1 << align_bits);
1247 di->rxd64 = (dma64dd_t *) roundup((unsigned long)va, align);
1248 di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
1249 PHYSADDRLOSET(di->rxdpa,
1250 PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
1251 /* Make sure that alignment didn't overflow */
1252 ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));
1253
1254 PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
1255 di->rxdalloc = alloced;
1256 ASSERT(IS_ALIGNED((unsigned long)di->rxd64, align));
1257 }
1258
1259 return true;
1260}
1261
1262static bool dma64_txreset(dma_info_t *di)
1263{
1264 u32 status;
1265
1266 if (di->ntxd == 0)
1267 return true;
1268
1269 /* suspend tx DMA first */
1270 W_REG(&di->d64txregs->control, D64_XC_SE);
1271 SPINWAIT(((status =
1272 (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
1273 != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
1274 && (status != D64_XS0_XS_STOPPED), 10000);
1275
1276 W_REG(&di->d64txregs->control, 0);
1277 SPINWAIT(((status =
1278 (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
1279 != D64_XS0_XS_DISABLED), 10000);
1280
1281 /* wait for the last transaction to complete */
1282 udelay(300);
1283
1284 return status == D64_XS0_XS_DISABLED;
1285}
1286
1287static bool dma64_rxidle(dma_info_t *di)
1288{
1289 DMA_TRACE(("%s: dma_rxidle\n", di->name));
1290
1291 if (di->nrxd == 0)
1292 return true;
1293
1294 return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
1295 (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
1296}
1297
1298static bool dma64_rxreset(dma_info_t *di)
1299{
1300 u32 status;
1301
1302 if (di->nrxd == 0)
1303 return true;
1304
1305 W_REG(&di->d64rxregs->control, 0);
1306 SPINWAIT(((status =
1307 (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
1308 != D64_RS0_RS_DISABLED), 10000);
1309
1310 return status == D64_RS0_RS_DISABLED;
1311}
1312
1313static bool dma64_rxenabled(dma_info_t *di)
1314{
1315 u32 rc;
1316
1317 rc = R_REG(&di->d64rxregs->control);
1318 return (rc != 0xffffffff) && (rc & D64_RC_RE);
1319}
1320
1321static bool dma64_txsuspendedidle(dma_info_t *di)
1322{
1323
1324 if (di->ntxd == 0)
1325 return true;
1326
1327 if (!(R_REG(&di->d64txregs->control) & D64_XC_SE))
1328 return 0;
1329
1330 if ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) ==
1331 D64_XS0_XS_IDLE)
1332 return 1;
1333
1334 return 0;
1335}
1336
1337/* Useful when sending unframed data. This allows us to get a progress report from the DMA.
1338 * We return a pointer to the beginning of the DATA buffer of the current descriptor.
1339 * If DMA is idle, we return NULL.
1340 */
1341static void *dma64_getpos(dma_info_t *di, bool direction)
1342{
1343 void *va;
1344 bool idle;
1345 u32 cd_offset;
1346
1347 if (direction == DMA_TX) {
1348 cd_offset =
1349 R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK;
1350 idle = !NTXDACTIVE(di->txin, di->txout);
1351 va = di->txp[B2I(cd_offset, dma64dd_t)];
1352 } else {
1353 cd_offset =
1354 R_REG(&di->d64rxregs->status0) & D64_XS0_CD_MASK;
1355 idle = !NRXDACTIVE(di->rxin, di->rxout);
1356 va = di->rxp[B2I(cd_offset, dma64dd_t)];
1357 }
1358
1359 /* If DMA is IDLE, return NULL */
1360 if (idle) {
1361 DMA_TRACE(("%s: DMA idle, return NULL\n", __func__));
1362 va = NULL;
1363 }
1364
1365 return va;
1366}
1367
1368/* TX of unframed data
1369 *
1370 * Adds a DMA ring descriptor for the data pointed to by "buf".
1371 * This is for DMA of a buffer of data and is unlike other hnddma TX functions
1372 * that take a pointer to a "packet"
1373 * Each call to this results in a single descriptor being added for "len" bytes of
1374 * data starting at "buf", it doesn't handle chained buffers.
1375 */
1376static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
1377{
1378 u16 txout;
1379 u32 flags = 0;
1380 dmaaddr_t pa; /* phys addr */
1381
1382 txout = di->txout;
1383
1384 /* return nonzero if out of tx descriptors */
1385 if (NEXTTXD(txout) == di->txin)
1386 goto outoftxd;
1387
1388 if (len == 0)
1389 return 0;
1390
1391 pa = pci_map_single(di->pbus, buf, len, PCI_DMA_TODEVICE);
1392
1393 flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);
1394
1395 if (txout == (di->ntxd - 1))
1396 flags |= D64_CTRL1_EOT;
1397
1398 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
1399 ASSERT(di->txp[txout] == NULL);
1400
1401 /* save the buffer pointer - used by dma_getpos */
1402 di->txp[txout] = buf;
1403
1404 txout = NEXTTXD(txout);
1405 /* bump the tx descriptor index */
1406 di->txout = txout;
1407
1408 /* kick the chip */
1409 if (commit) {
1410 W_REG(&di->d64txregs->ptr,
1411 di->xmtptrbase + I2B(txout, dma64dd_t));
1412 }
1413
1414 /* tx flow control */
1415 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1416
1417 return 0;
1418
1419 outoftxd:
1420 DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
1421 di->hnddma.txavail = 0;
1422 di->hnddma.txnobuf++;
1423 return -1;
1424}
1425
1426/* !! tx entry routine
1427 * WARNING: the caller must check the return value for errors.
1428 * An error (tossed frames) could be fatal and cause many subsequent hard-to-debug problems
1429 */
1430static int BCMFASTPATH dma64_txfast(dma_info_t *di, struct sk_buff *p0,
1431 bool commit)
1432{
1433 struct sk_buff *p, *next;
1434 unsigned char *data;
1435 uint len;
1436 u16 txout;
1437 u32 flags = 0;
1438 dmaaddr_t pa;
1439
1440 DMA_TRACE(("%s: dma_txfast\n", di->name));
1441
1442 txout = di->txout;
1443
1444 /*
1445 * Walk the chain of packet buffers
1446 * allocating and initializing transmit descriptor entries.
1447 */
1448 for (p = p0; p; p = next) {
1449 uint nsegs, j;
1450 hnddma_seg_map_t *map;
1451
1452 data = p->data;
1453 len = p->len;
a9533e7e
HP
1454#ifdef BCM_DMAPAD
1455 len += PKTDMAPAD(di->osh, p);
1456#endif /* BCM_DMAPAD */
1457 next = p->next;
1458
1459 /* return nonzero if out of tx descriptors */
1460 if (NEXTTXD(txout) == di->txin)
1461 goto outoftxd;
1462
1463 if (len == 0)
1464 continue;
1465
1466 /* get physical address of buffer start */
1467 if (DMASGLIST_ENAB)
1468 memset(&di->txp_dmah[txout], 0,
1469 sizeof(hnddma_seg_map_t));
1470
1471 pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
1472
1473 if (DMASGLIST_ENAB) {
1474 map = &di->txp_dmah[txout];
1475
1476 /* See if all the segments can be accounted for */
1477 if (map->nsegs >
1478 (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
1479 1))
1480 goto outoftxd;
1481
1482 nsegs = map->nsegs;
1483 } else
1484 nsegs = 1;
1485
1486 for (j = 1; j <= nsegs; j++) {
1487 flags = 0;
1488 if (p == p0 && j == 1)
1489 flags |= D64_CTRL1_SOF;
1490
1491 /* With a DMA segment list, Descriptor table is filled
1492 * using the segment list instead of looping over
1493 * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
1494 * end of segment list is reached.
1495 */
1496 if ((!DMASGLIST_ENAB && next == NULL) ||
1497 (DMASGLIST_ENAB && j == nsegs))
1498 flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
1499 if (txout == (di->ntxd - 1))
1500 flags |= D64_CTRL1_EOT;
1501
1502 if (DMASGLIST_ENAB) {
1503 len = map->segs[j - 1].length;
1504 pa = map->segs[j - 1].addr;
1505 }
1506 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
1507 ASSERT(di->txp[txout] == NULL);
1508
1509 txout = NEXTTXD(txout);
1510 }
1511
1512 /* See above. No need to loop over individual buffers */
1513 if (DMASGLIST_ENAB)
1514 break;
1515 }
1516
1517 /* if last txd eof not set, fix it */
1518 if (!(flags & D64_CTRL1_EOF))
1519 W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
1520 BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));
1521
1522 /* save the packet */
1523 di->txp[PREVTXD(txout)] = p0;
1524
1525 /* bump the tx descriptor index */
1526 di->txout = txout;
1527
1528 /* kick the chip */
1529 if (commit)
1530 W_REG(&di->d64txregs->ptr,
1531 di->xmtptrbase + I2B(txout, dma64dd_t));
1532
1533 /* tx flow control */
1534 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1535
1536 return 0;
1537
1538 outoftxd:
1539 DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
1540 pkt_buf_free_skb(p0);
1541 di->hnddma.txavail = 0;
1542 di->hnddma.txnobuf++;
1543 return -1;
1544}
1545
1546/*
1547 * Reclaim next completed txd (txds if using chained buffers) in the range
1548 * specified and return associated packet.
1549 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
1550 * transmitted as noted by the hardware "CurrDescr" pointer.
1551 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
1552 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
1553 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
1554 * return associated packet regardless of the value of hardware pointers.
1555 */
1556static void *BCMFASTPATH dma64_getnexttxp(dma_info_t *di, txd_range_t range)
1557{
1558 u16 start, end, i;
1559 u16 active_desc;
a9533e7e
HP
1560 void *txp;
1561
1562 DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
1563 (range == HNDDMA_RANGE_ALL) ? "all" :
1564 ((range ==
1565 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
1566 "transferred")));
1567
1568 if (di->ntxd == 0)
1569 return NULL;
1570
1571 txp = NULL;
1572
1573 start = di->txin;
1574 if (range == HNDDMA_RANGE_ALL)
1575 end = di->txout;
1576 else {
1577 dma64regs_t *dregs = di->d64txregs;
1578
1579 end =
1580 (u16) (B2I
1581 (((R_REG(&dregs->status0) &
1582 D64_XS0_CD_MASK) -
1583 di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));
1584
1585 if (range == HNDDMA_RANGE_TRANSFERED) {
1586 active_desc =
1587 (u16) (R_REG(&dregs->status1) &
1588 D64_XS1_AD_MASK);
1589 active_desc =
1590 (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
1591 active_desc = B2I(active_desc, dma64dd_t);
1592 if (end != active_desc)
1593 end = PREVTXD(active_desc);
1594 }
1595 }
1596
1597 if ((start == 0) && (end > di->txout))
1598 goto bogus;
1599
1600 for (i = start; i != end && !txp; i = NEXTTXD(i)) {
1601 dmaaddr_t pa;
1602 hnddma_seg_map_t *map = NULL;
1603 uint size, j, nsegs;
1604
1605 PHYSADDRLOSET(pa,
1606 (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
1607 di->dataoffsetlow));
1608 PHYSADDRHISET(pa,
1609 (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) -
1610 di->dataoffsethigh));
1611
1612 if (DMASGLIST_ENAB) {
1613 map = &di->txp_dmah[i];
1614 size = map->origsize;
1615 nsegs = map->nsegs;
1616 } else {
1617 size =
1618 (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
1619 D64_CTRL2_BC_MASK);
1620 nsegs = 1;
1621 }
1622
1623 for (j = nsegs; j > 0; j--) {
1624 W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
1625 W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);
1626
1627 txp = di->txp[i];
1628 di->txp[i] = NULL;
1629 if (j > 1)
1630 i = NEXTTXD(i);
1631 }
1632
1633 pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
1634 }
1635
1636 di->txin = i;
1637
1638 /* tx flow control */
1639 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1640
1641 return txp;
1642
1643 bogus:
1644 DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", start, end, di->txout, forceall));
1645 return NULL;
1646}
1647
1648static void *BCMFASTPATH dma64_getnextrxp(dma_info_t *di, bool forceall)
1649{
1650 uint i, curr;
1651 void *rxp;
1652 dmaaddr_t pa;
1653
1654 /* if forcing, dma engine must be disabled */
1655 ASSERT(!forceall || !dma64_rxenabled(di));
1656
1657 i = di->rxin;
1658
1659 /* return if no packets posted */
1660 if (i == di->rxout)
1661 return NULL;
1662
1663 curr =
1664 B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
1665 di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
1666
1667 /* ignore curr if forceall */
1668 if (!forceall && (i == curr))
1669 return NULL;
1670
1671 /* get the packet pointer that corresponds to the rx descriptor */
1672 rxp = di->rxp[i];
1673 ASSERT(rxp);
1674 di->rxp[i] = NULL;
1675
1676 PHYSADDRLOSET(pa,
1677 (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) -
1678 di->dataoffsetlow));
1679 PHYSADDRHISET(pa,
1680 (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) -
1681 di->dataoffsethigh));
1682
1683 /* clear this packet from the descriptor ring */
1684 pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
1685
1686 W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
1687 W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);
1688
1689 di->rxin = NEXTRXD(i);
1690
1691 return rxp;
1692}
1693
1694static bool _dma64_addrext(dma64regs_t *dma64regs)
1695{
1696 u32 w;
1697 OR_REG(&dma64regs->control, D64_XC_AE);
1698 w = R_REG(&dma64regs->control);
1699 AND_REG(&dma64regs->control, ~D64_XC_AE);
1700 return (w & D64_XC_AE) == D64_XC_AE;
1701}
1702
1703/*
1704 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
1705 */
1706static void dma64_txrotate(dma_info_t *di)
1707{
1708 u16 ad;
1709 uint nactive;
1710 uint rot;
1711 u16 old, new;
1712 u32 w;
1713 u16 first, last;
1714
1715 ASSERT(dma64_txsuspendedidle(di));
1716
1717 nactive = _dma_txactive(di);
1718 ad = (u16) (B2I
1719 ((((R_REG(&di->d64txregs->status1) &
1720 D64_XS1_AD_MASK)
1721 - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
1722 rot = TXD(ad - di->txin);
1723
1724 ASSERT(rot < di->ntxd);
1725
1726 /* full-ring case is a lot harder - don't worry about this */
1727 if (rot >= (di->ntxd - nactive)) {
1728 DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
1729 return;
1730 }
1731
1732 first = di->txin;
1733 last = PREVTXD(di->txout);
1734
1735 /* move entries starting at last and moving backwards to first */
1736 for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
1737 new = TXD(old + rot);
1738
1739 /*
1740 * Move the tx dma descriptor.
1741 * EOT is set only in the last entry in the ring.
1742 */
1743 w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
1744 if (new == (di->ntxd - 1))
1745 w |= D64_CTRL1_EOT;
1746 W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));
1747
1748 w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
1749 W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));
1750
1751 W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
1752 W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));
1753
1754 /* zap the old tx dma descriptor address field */
1755 W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
1756 W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));
1757
1758 /* move the corresponding txp[] entry */
1759 ASSERT(di->txp[new] == NULL);
1760 di->txp[new] = di->txp[old];
1761
1762 /* Move the map */
1763 if (DMASGLIST_ENAB) {
1764 memcpy(&di->txp_dmah[new], &di->txp_dmah[old],
1765 sizeof(hnddma_seg_map_t));
1766 memset(&di->txp_dmah[old], 0, sizeof(hnddma_seg_map_t));
1767 }
1768
1769 di->txp[old] = NULL;
1770 }
1771
1772 /* update txin and txout */
1773 di->txin = ad;
1774 di->txout = TXD(di->txout + rot);
1775 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1776
1777 /* kick the chip */
1778 W_REG(&di->d64txregs->ptr,
1779 di->xmtptrbase + I2B(di->txout, dma64dd_t));
1780}
1781
1782uint dma_addrwidth(si_t *sih, void *dmaregs)
1783{
1784 /* Perform 64-bit checks only if we want to advertise 64-bit (> 32bit) capability) */
1785 /* DMA engine is 64-bit capable */
1786 if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
1787 /* backplane are 64-bit capable */
1788 if (si_backplane64(sih))
1789 /* If bus is System Backplane or PCIE then we can access 64-bits */
1790 if ((sih->bustype == SI_BUS) ||
1791 ((sih->bustype == PCI_BUS) &&
1792 (sih->buscoretype == PCIE_CORE_ID)))
1793 return DMADDRWIDTH_64;
1794 }
1795 ASSERT(0); /* DMA hardware not supported by this driver*/
1796 return DMADDRWIDTH_64;
1797}
1798
1799/*
1800 * Mac80211 initiated actions sometimes require packets in the DMA queue to be
1801 * modified. The modified portion of the packet is not under control of the DMA
1802 * engine. This function calls a caller-supplied function for each packet in
1803 * the caller specified dma chain.
1804 */
1805void dma_walk_packets(struct hnddma_pub *dmah, void (*callback_fnc)
1806 (void *pkt, void *arg_a), void *arg_a)
1807{
1808 dma_info_t *di = (dma_info_t *) dmah;
1809 uint i = di->txin;
1810 uint end = di->txout;
1811 struct sk_buff *skb;
1812 struct ieee80211_tx_info *tx_info;
1813
1814 while (i != end) {
1815 skb = (struct sk_buff *)di->txp[i];
1816 if (skb != NULL) {
1817 tx_info = (struct ieee80211_tx_info *)skb->cb;
1818 (callback_fnc)(tx_info, arg_a);
1819 }
1820 i = NEXTTXD(i);
1821 }
1822}