/* Altera TSE SGDMA and MSGDMA Linux driver
 * Copyright (C) 2014 Altera Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/delay.h>
#include <linux/list.h>
#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdmahw.h"
#include "altera_sgdma.h"
static void sgdma_descrip(struct sgdma_descrip *desc,
			  struct sgdma_descrip *ndesc,
			  dma_addr_t ndesc_phys,
			  dma_addr_t raddr,
			  dma_addr_t waddr,
			  u16 length,
			  int generate_eop,
			  int rfixed,
			  int wfixed);

static int sgdma_async_write(struct altera_tse_private *priv,
			     struct sgdma_descrip *desc);

static int sgdma_async_read(struct altera_tse_private *priv);

static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip *desc);

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip *desc);

static int sgdma_txbusy(struct altera_tse_private *priv);

static int sgdma_rxbusy(struct altera_tse_private *priv);

static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv);

static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv);

static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv);
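/* Note on the descriptor scheme used below: only two descriptors per
 * direction live in descriptor memory. The controller works on
 * descbase[0] (the "current" descriptor) while descbase[1] (the "next"
 * descriptor) has SGDMA_CONTROL_HW_OWNED cleared, so the controller
 * halts there instead of chaining into further transfers.
 */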
int sgdma_initialize(struct altera_tse_private *priv)
{
	priv->txctrlreg = SGDMA_CTRLREG_ILASTD;

	priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
			  SGDMA_CTRLREG_ILASTD;

	INIT_LIST_HEAD(&priv->txlisthd);
	INIT_LIST_HEAD(&priv->rxlisthd);

	priv->rxdescphys = (dma_addr_t) 0;
	priv->txdescphys = (dma_addr_t) 0;

	/* rx descriptors are bidirectional: the CPU writes fresh
	 * descriptors and reads back the status/bytes_xferred that
	 * the hardware deposits
	 */
	priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
					  priv->rxdescmem, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(priv->device, priv->rxdescphys)) {
		sgdma_uninitialize(priv);
		netdev_err(priv->dev, "error mapping rx descriptor memory\n");
		return -EINVAL;
	}

	/* tx descriptors only flow CPU -> device */
	priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
					  priv->txdescmem, DMA_TO_DEVICE);

	if (dma_mapping_error(priv->device, priv->txdescphys)) {
		sgdma_uninitialize(priv);
		netdev_err(priv->dev, "error mapping tx descriptor memory\n");
		return -EINVAL;
	}

	return 0;
}
void sgdma_uninitialize(struct altera_tse_private *priv)
{
	if (priv->rxdescphys)
		dma_unmap_single(priv->device, priv->rxdescphys,
				 priv->rxdescmem, DMA_BIDIRECTIONAL);

	if (priv->txdescphys)
		dma_unmap_single(priv->device, priv->txdescphys,
				 priv->txdescmem, DMA_TO_DEVICE);
}
/* This function resets the SGDMA controller and clears the
 * descriptor memory used for transmits and receives.
 */
void sgdma_reset(struct altera_tse_private *priv)
{
	u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
	u32 txdescriplen = priv->txdescmem;
	u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
	u32 rxdescriplen = priv->rxdescmem;
	struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
	struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;

	/* Initialize descriptor memory to 0 */
	memset(ptxdescripmem, 0, txdescriplen);
	memset(prxdescripmem, 0, rxdescriplen);

	/* pulse the reset bit, then clear the control register */
	iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
	iowrite32(0, &ptxsgdma->control);

	iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
	iowrite32(0, &prxsgdma->control);
}
void sgdma_enable_rxirq(struct altera_tse_private *priv)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
	priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
	tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
}

void sgdma_enable_txirq(struct altera_tse_private *priv)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
	priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
	tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
}

/* for SGDMA, RX interrupts remain enabled after enabling,
 * so disabling is a no-op
 */
void sgdma_disable_rxirq(struct altera_tse_private *priv)
{
}

/* for SGDMA, TX interrupts remain enabled after enabling,
 * so disabling is a no-op
 */
void sgdma_disable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
	tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
}

void sgdma_clear_txirq(struct altera_tse_private *priv)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
	tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
}
/* transmits buffer through SGDMA. Returns number of buffers
 * transmitted, 0 if not possible.
 *
 * tx_lock is held by the caller
 */
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	int pktstx = 0;
	struct sgdma_descrip *descbase =
		(struct sgdma_descrip *)priv->tx_dma_desc;

	struct sgdma_descrip *cdesc = &descbase[0];
	struct sgdma_descrip *ndesc = &descbase[1];

	/* wait 'til the tx sgdma is ready for the next transmit request */
	if (sgdma_txbusy(priv))
		return 0;

	sgdma_descrip(cdesc,			/* current descriptor */
		      ndesc,			/* next descriptor */
		      sgdma_txphysaddr(priv, ndesc),
		      buffer->dma_addr,		/* address of packet to xmit */
		      0,			/* write addr 0 for tx dma */
		      buffer->len,		/* length of packet */
		      SGDMA_CONTROL_EOP,	/* Generate EOP */
		      0,			/* read fixed: NA for tx dma */
		      SGDMA_CONTROL_WR_FIXED);	/* write fixed address */

	pktstx = sgdma_async_write(priv, cdesc);

	/* enqueue the request to the pending transmit queue */
	queue_tx(priv, buffer);

	return 1;
}
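/* Usage sketch (hypothetical caller, names assumed, for illustration
 * only): the netdev xmit path would call this with tx_lock held and
 * back off when the DMA is busy:
 *
 *	spin_lock_irqsave(&priv->tx_lock, flags);
 *	if (sgdma_tx_buffer(priv, buffer) == 0)
 *		netif_stop_queue(dev);	// DMA busy, retry later
 *	spin_unlock_irqrestore(&priv->tx_lock, flags);
 */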
/* tx_lock held to protect access to queued tx list
 */
u32 sgdma_tx_completions(struct altera_tse_private *priv)
{
	u32 ready = 0;
	struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;

	if (!sgdma_txbusy(priv) &&
	    ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
	    (dequeue_tx(priv)))
		ready = 1;

	return ready;
}
int sgdma_add_rx_desc(struct altera_tse_private *priv,
		      struct tse_buffer *rxbuffer)
{
	queue_rx(priv, rxbuffer);
	return sgdma_async_read(priv);
}
/* status is returned on upper 16 bits,
 * length is returned in lower 16 bits
 */
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
	struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
	struct sgdma_descrip *desc = NULL;
	int pktsrx;
	unsigned int rxstatus = 0;
	unsigned int pktlength = 0;
	unsigned int pktstatus = 0;
	struct tse_buffer *rxbuffer = NULL;

	dma_sync_single_for_cpu(priv->device,
				priv->rxdescphys,
				priv->rxdescmem,
				DMA_BIDIRECTIONAL);

	desc = &base[0];
	if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) ||
	    (desc->status & SGDMA_STATUS_EOP)) {
		pktlength = desc->bytes_xferred;
		pktstatus = desc->status & 0x3f;
		rxstatus = pktstatus;
		rxstatus = rxstatus << 16;
		rxstatus |= (pktlength & 0xffff);

		/* clear the status so this packet is not reported again */
		desc->status = 0;

		rxbuffer = dequeue_rx(priv);
		if (rxbuffer == NULL)
			netdev_err(priv->dev,
				   "sgdma rx and rx queue empty!\n");

		/* kick the rx sgdma after reaping this descriptor */
		pktsrx = sgdma_async_read(priv);
	}

	return rxstatus;
}
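/* Unpacking sketch (hypothetical caller, for illustration only):
 *
 *	u32 rxstatus = sgdma_rx_status(priv);
 *	u16 pktlength = rxstatus & 0xffff;	// lower 16 bits: length
 *	u16 pktstatus = rxstatus >> 16;		// upper 16 bits: 6-bit status
 */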
/* Private functions */
static void sgdma_descrip(struct sgdma_descrip *desc,
			  struct sgdma_descrip *ndesc,
			  dma_addr_t ndesc_phys,
			  dma_addr_t raddr,
			  dma_addr_t waddr,
			  u16 length,
			  int generate_eop,
			  int rfixed,
			  int wfixed)
{
	/* Clear the next descriptor as not owned by hardware */
	u32 ctrl = ndesc->control;
	ctrl &= ~SGDMA_CONTROL_HW_OWNED;
	ndesc->control = ctrl;

	/* Build the control word for the current descriptor */
	ctrl = SGDMA_CONTROL_HW_OWNED;
	ctrl |= generate_eop;
	ctrl |= rfixed;
	ctrl |= wfixed;

	/* Channel is implicitly zero, initialized to 0 by default */

	desc->raddr = raddr;
	desc->waddr = waddr;
	desc->next = lower_32_bits(ndesc_phys);
	desc->control = ctrl;
	desc->status = 0;
	desc->rburst = 0;
	desc->wburst = 0;
	desc->bytes = length;
	desc->bytes_xferred = 0;
}
/* If hardware is busy, don't restart async read.
 * If status register is 0 - meaning initial state, restart async read,
 * probably for the first time when populating a receive buffer.
 * If read status indicates not busy and a status, restart the async
 * read.
 */
static int sgdma_async_read(struct altera_tse_private *priv)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
	struct sgdma_descrip *descbase =
		(struct sgdma_descrip *)priv->rx_dma_desc;

	struct sgdma_descrip *cdesc = &descbase[0];
	struct sgdma_descrip *ndesc = &descbase[1];

	unsigned int sts = ioread32(&csr->status);
	struct tse_buffer *rxbuffer = NULL;

	if (!sgdma_rxbusy(priv)) {
		rxbuffer = queue_rx_peekhead(priv);
		if (rxbuffer == NULL)
			return 0;

		sgdma_descrip(cdesc,		/* current descriptor */
			      ndesc,		/* next descriptor */
			      sgdma_rxphysaddr(priv, ndesc),
			      0,		/* read addr 0 for rx dma */
			      rxbuffer->dma_addr, /* write addr for rx dma */
			      0,		/* read 'til EOP */
			      0,		/* EOP: NA for rx dma */
			      0,		/* read fixed: NA for rx dma */
			      0);		/* write fixed: NA for rx dma */

		/* clear control and status */
		iowrite32(0, &csr->control);

		/* If status available, clear those bits */
		if (sts & 0xf)
			iowrite32(0xf, &csr->status);

		dma_sync_single_for_device(priv->device,
					   priv->rxdescphys,
					   priv->rxdescmem,
					   DMA_BIDIRECTIONAL);

		iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
			  &csr->next_descrip);

		iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
			  &csr->control);

		return 1;
	}

	return 0;
}
static int sgdma_async_write(struct altera_tse_private *priv,
			     struct sgdma_descrip *desc)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;

	if (sgdma_txbusy(priv))
		return 0;

	/* clear control and status */
	iowrite32(0, &csr->control);
	iowrite32(0x1f, &csr->status);

	dma_sync_single_for_device(priv->device, priv->txdescphys,
				   priv->txdescmem, DMA_TO_DEVICE);

	iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
		  &csr->next_descrip);

	iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
		  &csr->control);

	return 1;
}
static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip *desc)
{
	dma_addr_t paddr = priv->txdescmem_busaddr;
	dma_addr_t offs = (dma_addr_t)((dma_addr_t)desc -
				       (dma_addr_t)priv->tx_dma_desc);
	return paddr + offs;
}

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip *desc)
{
	dma_addr_t paddr = priv->rxdescmem_busaddr;
	dma_addr_t offs = (dma_addr_t)((dma_addr_t)desc -
				       (dma_addr_t)priv->rx_dma_desc);
	return paddr + offs;
}
#define list_remove_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member);	\
			list_del_init(&entry->member);			\
		}							\
	} while (0)

#define list_peek_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member);	\
		}							\
	} while (0)
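/* The do { } while (0) wrapper makes each macro expand to a single
 * statement, so it can sit safely in an unbraced if/else body.
 */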
/* adds a tse_buffer to the tail of a tx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->txlisthd);
}

/* adds a tse_buffer to the tail of an rx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->rxlisthd);
}
/* dequeues a tse_buffer from the transmit buffer list, otherwise
 * returns NULL if empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* dequeues a tse_buffer from the receive buffer list, otherwise
 * returns NULL if empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* examines the head of the receive buffer list without removing it,
 * otherwise returns NULL if empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list while the
 * head is being examined.
 */
static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}
/* check and return rx sgdma status without polling
 */
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
	return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
}
/* waits for the tx sgdma to finish its current operation, returns 0
 * when it transitions to nonbusy, returns 1 if the operation times out
 */
static int sgdma_txbusy(struct altera_tse_private *priv)
{
	int delay = 0;
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;

	/* if DMA is busy, wait for the current transaction to finish;
	 * worst case this busy-waits 100 * udelay(1), roughly 100 us
	 */
	while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
		udelay(1);

	if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
		netdev_err(priv->dev, "timeout waiting for tx dma\n");
		return 1;
	}
	return 0;
}