Merge tag 'xfs-5.3-merge-12' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
[linux-2.6-block.git] / drivers / net / can / xilinx_can.c
CommitLineData
c942fddf 1// SPDX-License-Identifier: GPL-2.0-or-later
b1201e44
KA
2/* Xilinx CAN device driver
3 *
4 * Copyright (C) 2012 - 2014 Xilinx, Inc.
5 * Copyright (C) 2009 PetaLogix. All rights reserved.
9e5f1b27 6 * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
b1201e44
KA
7 *
8 * Description:
9 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
b1201e44
KA
10 */
11
12#include <linux/clk.h>
13#include <linux/errno.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/io.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/netdevice.h>
20#include <linux/of.h>
620050d9 21#include <linux/of_device.h>
b1201e44
KA
22#include <linux/platform_device.h>
23#include <linux/skbuff.h>
620050d9 24#include <linux/spinlock.h>
b1201e44
KA
25#include <linux/string.h>
26#include <linux/types.h>
27#include <linux/can/dev.h>
28#include <linux/can/error.h>
29#include <linux/can/led.h>
4716620d 30#include <linux/pm_runtime.h>
b1201e44
KA
31
32#define DRIVER_NAME "xilinx_can"
33
34/* CAN registers set */
/* CAN registers set - offsets into the memory-mapped register space */
enum xcan_reg {
	XCAN_SRR_OFFSET		= 0x00, /* Software reset */
	XCAN_MSR_OFFSET		= 0x04, /* Mode select */
	XCAN_BRPR_OFFSET	= 0x08, /* Baud rate prescaler */
	XCAN_BTR_OFFSET		= 0x0C, /* Bit timing */
	XCAN_ECR_OFFSET		= 0x10, /* Error counter */
	XCAN_ESR_OFFSET		= 0x14, /* Error status */
	XCAN_SR_OFFSET		= 0x18, /* Status */
	XCAN_ISR_OFFSET		= 0x1C, /* Interrupt status */
	XCAN_IER_OFFSET		= 0x20, /* Interrupt enable */
	XCAN_ICR_OFFSET		= 0x24, /* Interrupt clear */

	/* not on CAN FD cores */
	XCAN_TXFIFO_OFFSET	= 0x30, /* TX FIFO base */
	XCAN_RXFIFO_OFFSET	= 0x50, /* RX FIFO base */
	XCAN_AFR_OFFSET		= 0x60, /* Acceptance Filter */

	/* only on CAN FD cores */
	XCAN_TRR_OFFSET		= 0x0090, /* TX Buffer Ready Request */
	XCAN_AFR_EXT_OFFSET	= 0x00E0, /* Acceptance Filter */
	XCAN_FSR_OFFSET		= 0x00E8, /* RX FIFO Status */
	XCAN_TXMSG_BASE_OFFSET	= 0x0100, /* TX Message Space */
	XCAN_RXMSG_BASE_OFFSET	= 0x1100, /* RX Message Space */
	XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space (CAN FD 2.0 cores) */
};
60
1598efe5
AH
/* Word offsets of the ID/DLC/data fields within one frame slot */
#define XCAN_FRAME_ID_OFFSET(frame_base)	((frame_base) + 0x00)
#define XCAN_FRAME_DLC_OFFSET(frame_base)	((frame_base) + 0x04)
#define XCAN_FRAME_DW1_OFFSET(frame_base)	((frame_base) + 0x08)
#define XCAN_FRAME_DW2_OFFSET(frame_base)	((frame_base) + 0x0C)

/* Size of one message slot in the CAN FD message spaces */
#define XCAN_CANFD_FRAME_SIZE		0x48
#define XCAN_TXMSG_FRAME_OFFSET(n)	(XCAN_TXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_FRAME_OFFSET(n)	(XCAN_RXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_2_FRAME_OFFSET(n)	(XCAN_RXMSG_2_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))

/* the single TX mailbox used by this driver on CAN FD HW */
#define XCAN_TX_MAILBOX_IDX		0
b1201e44
KA
/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
#define XCAN_SRR_CEN_MASK		0x00000002 /* CAN enable */
#define XCAN_SRR_RESET_MASK		0x00000001 /* Soft Reset the CAN core */
#define XCAN_MSR_LBACK_MASK		0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK		0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK		0x000000FF /* Baud rate prescaler */
#define XCAN_BTR_SJW_MASK		0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK		0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK		0x0000000F /* Time segment 1 */
/* CAN FD cores use wider bit-timing fields at different positions */
#define XCAN_BTR_SJW_MASK_CANFD		0x000F0000 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK_CANFD		0x00000F00 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK_CANFD		0x0000003F /* Time segment 1 */
#define XCAN_ECR_REC_MASK		0x0000FF00 /* Receive error counter */
#define XCAN_ECR_TEC_MASK		0x000000FF /* Transmit error counter */
#define XCAN_ESR_ACKER_MASK		0x00000010 /* ACK error */
#define XCAN_ESR_BERR_MASK		0x00000008 /* Bit error */
#define XCAN_ESR_STER_MASK		0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK		0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK		0x00000001 /* CRC error */
#define XCAN_SR_TXFLL_MASK		0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK		0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK		0x00000040 /* Error warning */
#define XCAN_SR_NORMAL_MASK		0x00000008 /* Normal mode */
#define XCAN_SR_LBACK_MASK		0x00000002 /* Loop back mode */
#define XCAN_SR_CONFIG_MASK		0x00000001 /* Configuration mode */
#define XCAN_IXR_RXMNF_MASK		0x00020000 /* RX match not finished */
#define XCAN_IXR_TXFEMP_MASK		0x00004000 /* TX FIFO Empty */
#define XCAN_IXR_WKUP_MASK		0x00000800 /* Wake up interrupt */
#define XCAN_IXR_SLP_MASK		0x00000400 /* Sleep interrupt */
#define XCAN_IXR_BSOFF_MASK		0x00000200 /* Bus off interrupt */
#define XCAN_IXR_ERROR_MASK		0x00000100 /* Error interrupt */
#define XCAN_IXR_RXNEMP_MASK		0x00000080 /* RX FIFO NotEmpty intr */
#define XCAN_IXR_RXOFLW_MASK		0x00000040 /* RX FIFO Overflow intr */
#define XCAN_IXR_RXOK_MASK		0x00000010 /* Message received intr */
#define XCAN_IXR_TXFLL_MASK		0x00000004 /* Tx FIFO Full intr */
#define XCAN_IXR_TXOK_MASK		0x00000002 /* TX successful intr */
#define XCAN_IXR_ARBLST_MASK		0x00000001 /* Arbitration lost intr */
#define XCAN_IDR_ID1_MASK		0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK		0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK		0x00080000 /* Identifier extension */
#define XCAN_IDR_ID2_MASK		0x0007FFFE /* Extended message ident */
#define XCAN_IDR_RTR_MASK		0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK		0xF0000000 /* Data length code */
#define XCAN_FSR_FL_MASK		0x00003F00 /* RX Fill Level */
#define XCAN_FSR_IRI_MASK		0x00000080 /* RX Increment Read Index */
#define XCAN_FSR_RI_MASK		0x0000001F /* RX Read Index */

/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT		7  /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT		4  /* Time segment 2 */
#define XCAN_BTR_SJW_SHIFT_CANFD	16 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT_CANFD	8  /* Time segment 2 */
#define XCAN_IDR_ID1_SHIFT		21 /* Standard Messg Identifier */
#define XCAN_IDR_ID2_SHIFT		1  /* Extended Message Identifier */
#define XCAN_DLCR_DLC_SHIFT		28 /* Data length code */
#define XCAN_ESR_REC_SHIFT		8  /* Rx Error Count */

/* CAN frame length constants */
#define XCAN_FRAME_MAX_DATA_LEN		8
#define XCAN_TIMEOUT			(1 * HZ)

/* Per-devtype capability flags (struct xcan_devtype_data.flags) */
/* TX-FIFO-empty interrupt available */
#define XCAN_FLAG_TXFEMP	0x0001
/* RX Match Not Finished interrupt available */
#define XCAN_FLAG_RXMNF	0x0002
/* Extended acceptance filters with control at 0xE0 */
#define XCAN_FLAG_EXT_FILTERS	0x0004
/* TX mailboxes instead of TX FIFO */
#define XCAN_FLAG_TX_MAILBOXES	0x0008
/* RX FIFO with each buffer in separate registers at 0x1100
 * instead of the regular FIFO at 0x50
 */
#define XCAN_FLAG_RX_FIFO_MULTI	0x0010
#define XCAN_FLAG_CANFD_2	0x0020
151
/* Constants that differ between the supported HW generations */
struct xcan_devtype_data {
	unsigned int flags;			/* XCAN_FLAG_* capability bits */
	const struct can_bittiming_const *bittiming_const;
	const char *bus_clk_name;		/* clock name to look up for the bus clock */
	unsigned int btr_ts2_shift;		/* TS2 field position in BTR */
	unsigned int btr_sjw_shift;		/* SJW field position in BTR */
};
159
b1201e44
KA
/**
 * struct xcan_priv - CAN driver instance data
 * @can:			CAN private data structure.
 * @tx_lock:			Lock for synchronizing TX interrupt handling
 * @tx_head:			Tx CAN packets ready to send on the queue
 * @tx_tail:			Tx CAN packets successfully sent on the queue
 * @tx_max:			Maximum number packets the driver can send
 * @napi:			NAPI structure
 * @read_reg:			For reading data from CAN registers
 * @write_reg:			For writing data to CAN registers
 * @dev:			Device data structure
 * @reg_base:			Ioremapped address to registers
 * @irq_flags:			For request_irq()
 * @bus_clk:			Pointer to struct clk
 * @can_clk:			Pointer to struct clk
 * @devtype:			Device type specific constants
 */
struct xcan_priv {
	struct can_priv can;
	spinlock_t tx_lock;
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
	struct napi_struct napi;
	/* endianness-specific register accessors, chosen at probe time */
	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
			  u32 val);
	struct device *dev;
	void __iomem *reg_base;
	unsigned long irq_flags;
	struct clk *bus_clk;
	struct clk *can_clk;
	struct xcan_devtype_data devtype;
};
194
/* CAN Bittiming constants as per Xilinx CAN specs (non-FD cores) */
static const struct can_bittiming_const xcan_bittiming_const = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
207
9e5f1b27
AH
/* Bittiming limits for first-generation CAN FD cores */
static const struct can_bittiming_const xcan_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 64,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
219
0db90713
SD
/* Bittiming limits for CAN FD 2.0 cores */
static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
231
b1201e44
KA
/**
 * xcan_write_reg_le - Write a value to the device register little endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32(val, priv->reg_base + reg);
}
245
/**
 * xcan_read_reg_le - Read a value from the device register little endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32(priv->reg_base + reg);
}
258
/**
 * xcan_write_reg_be - Write a value to the device register big endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32be(val, priv->reg_base + reg);
}
272
/**
 * xcan_read_reg_be - Read a value from the device register big endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32be(priv->reg_base + reg);
}
285
9e5f1b27
AH
286/**
287 * xcan_rx_int_mask - Get the mask for the receive interrupt
288 * @priv: Driver private data structure
289 *
290 * Return: The receive interrupt mask used by the driver on this HW
291 */
292static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
293{
294 /* RXNEMP is better suited for our use case as it cannot be cleared
295 * while the FIFO is non-empty, but CAN FD HW does not have it
296 */
297 if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
298 return XCAN_IXR_RXOK_MASK;
299 else
300 return XCAN_IXR_RXNEMP_MASK;
301}
302
b1201e44
KA
/**
 * set_reset_mode - Resets the CAN device mode
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver reset mode routine. The driver
 * enters into configuration mode.
 *
 * Return: 0 on success and failure value on error
 */
static int set_reset_mode(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long timeout;

	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);

	/* Poll until the core reports configuration mode, up to XCAN_TIMEOUT */
	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev, "timed out for config mode\n");
			return -ETIMEDOUT;
		}
		usleep_range(500, 10000);
	}

	/* reset clears FIFOs */
	priv->tx_head = 0;
	priv->tx_tail = 0;

	return 0;
}
334
/**
 * xcan_set_bittiming - CAN set bit timing routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver set bittiming routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_set_bittiming(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct can_bittiming *bt = &priv->can.bittiming;
	u32 btr0, btr1;
	u32 is_config_mode;

	/* Check whether Xilinx CAN is in configuration mode.
	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
	 */
	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
				XCAN_SR_CONFIG_MASK;
	if (!is_config_mode) {
		netdev_alert(ndev,
			     "BUG! Cannot set bittiming - CAN is not in config mode\n");
		return -EPERM;
	}

	/* Setting Baud Rate prescaler value in BRPR Register */
	btr0 = (bt->brp - 1);

	/* Setting Time Segment 1 in BTR Register */
	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);

	/* Setting Time Segment 2 in BTR Register; field position is
	 * devtype-specific (CAN FD cores use wider fields)
	 */
	btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

	/* Setting Synchronous jump width in BTR Register */
	btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;

	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);

	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
		   priv->read_reg(priv, XCAN_BRPR_OFFSET),
		   priv->read_reg(priv, XCAN_BTR_OFFSET));

	return 0;
}
381
382/**
383 * xcan_chip_start - This the drivers start routine
384 * @ndev: Pointer to net_device structure
385 *
386 * This is the drivers start routine.
387 * Based on the State of the CAN device it puts
388 * the CAN device into a proper mode.
389 *
390 * Return: 0 on success and failure value on error
391 */
392static int xcan_chip_start(struct net_device *ndev)
393{
394 struct xcan_priv *priv = netdev_priv(ndev);
fb3ec7ba
SM
395 u32 reg_msr, reg_sr_mask;
396 int err;
b1201e44 397 unsigned long timeout;
9e5f1b27 398 u32 ier;
b1201e44
KA
399
400 /* Check if it is in reset mode */
401 err = set_reset_mode(ndev);
402 if (err < 0)
403 return err;
404
405 err = xcan_set_bittiming(ndev);
406 if (err < 0)
407 return err;
408
409 /* Enable interrupts */
9e5f1b27
AH
410 ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
411 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
412 XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
413 XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
414
415 if (priv->devtype.flags & XCAN_FLAG_RXMNF)
416 ier |= XCAN_IXR_RXMNF_MASK;
417
418 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
b1201e44
KA
419
420 /* Check whether it is loopback mode or normal mode */
421 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
422 reg_msr = XCAN_MSR_LBACK_MASK;
423 reg_sr_mask = XCAN_SR_LBACK_MASK;
424 } else {
425 reg_msr = 0x0;
426 reg_sr_mask = XCAN_SR_NORMAL_MASK;
427 }
428
9e5f1b27
AH
429 /* enable the first extended filter, if any, as cores with extended
430 * filtering default to non-receipt if all filters are disabled
431 */
432 if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
433 priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
434
b1201e44
KA
435 priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
436 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
437
438 timeout = jiffies + XCAN_TIMEOUT;
439 while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) {
440 if (time_after(jiffies, timeout)) {
441 netdev_warn(ndev,
442 "timed out for correct mode\n");
443 return -ETIMEDOUT;
444 }
445 }
446 netdev_dbg(ndev, "status:#x%08x\n",
447 priv->read_reg(priv, XCAN_SR_OFFSET));
448
449 priv->can.state = CAN_STATE_ERROR_ACTIVE;
450 return 0;
451}
452
453/**
454 * xcan_do_set_mode - This sets the mode of the driver
455 * @ndev: Pointer to net_device structure
456 * @mode: Tells the mode of the driver
457 *
458 * This check the drivers state and calls the
459 * the corresponding modes to set.
460 *
461 * Return: 0 on success and failure value on error
462 */
463static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
464{
465 int ret;
466
467 switch (mode) {
468 case CAN_MODE_START:
469 ret = xcan_chip_start(ndev);
470 if (ret < 0) {
471 netdev_err(ndev, "xcan_chip_start failed!\n");
472 return ret;
473 }
474 netif_wake_queue(ndev);
475 break;
476 default:
477 ret = -EOPNOTSUPP;
478 break;
479 }
480
481 return ret;
482}
483
484/**
1598efe5
AH
485 * xcan_write_frame - Write a frame to HW
486 * @skb: sk_buff pointer that contains data to be Txed
487 * @frame_offset: Register offset to write the frame to
b1201e44 488 */
1598efe5
AH
489static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
490 int frame_offset)
b1201e44 491{
b1201e44 492 u32 id, dlc, data[2] = {0, 0};
1598efe5 493 struct can_frame *cf = (struct can_frame *)skb->data;
b1201e44
KA
494
495 /* Watch carefully on the bit sequence */
496 if (cf->can_id & CAN_EFF_FLAG) {
497 /* Extended CAN ID format */
498 id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
499 XCAN_IDR_ID2_MASK;
500 id |= (((cf->can_id & CAN_EFF_MASK) >>
501 (CAN_EFF_ID_BITS-CAN_SFF_ID_BITS)) <<
502 XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
503
504 /* The substibute remote TX request bit should be "1"
505 * for extended frames as in the Xilinx CAN datasheet
506 */
507 id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
508
509 if (cf->can_id & CAN_RTR_FLAG)
510 /* Extended frames remote TX request */
511 id |= XCAN_IDR_RTR_MASK;
512 } else {
513 /* Standard CAN ID format */
514 id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
515 XCAN_IDR_ID1_MASK;
516
517 if (cf->can_id & CAN_RTR_FLAG)
518 /* Standard frames remote TX request */
519 id |= XCAN_IDR_SRR_MASK;
520 }
521
522 dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT;
523
524 if (cf->can_dlc > 0)
525 data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
526 if (cf->can_dlc > 4)
527 data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
528
1598efe5 529 priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
9e5f1b27
AH
530 /* If the CAN frame is RTR frame this write triggers transmission
531 * (not on CAN FD)
532 */
1598efe5
AH
533 priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
534 if (!(cf->can_id & CAN_RTR_FLAG)) {
535 priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset),
536 data[0]);
537 /* If the CAN frame is Standard/Extended frame this
9e5f1b27 538 * write triggers transmission (not on CAN FD)
1598efe5
AH
539 */
540 priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset),
541 data[1]);
542 }
543}
544
/**
 * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if FIFO is full.
 */
static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	/* Check if the TX buffer is full */
	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
		     XCAN_SR_TXFLL_MASK))
		return -ENOSPC;

	/* Queue the echo skb before touching HW so TX-done can loop it back */
	can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);

	spin_lock_irqsave(&priv->tx_lock, flags);

	priv->tx_head++;

	xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET);

	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
	if (priv->tx_max > 1)
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);

	/* Check if the TX buffer is full */
	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
		netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}
580
/**
 * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if there is no space
 */
static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	/* Busy if our single TX mailbox still has a pending request */
	if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
		     BIT(XCAN_TX_MAILBOX_IDX)))
		return -ENOSPC;

	can_put_echo_skb(skb, ndev, 0);

	spin_lock_irqsave(&priv->tx_lock, flags);

	priv->tx_head++;

	xcan_write_frame(priv, skb,
			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));

	/* Mark buffer as ready for transmit */
	priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));

	/* only one mailbox: stop queue until TX-done frees it again */
	netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}
613
/**
 * xcan_start_xmit - Starts the transmission
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * This function is invoked from upper layers to initiate transmission.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
 */
static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	/* dispatch to the devtype-appropriate TX path */
	if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
		ret = xcan_start_xmit_mailbox(skb, ndev);
	else
		ret = xcan_start_xmit_fifo(skb, ndev);

	if (ret < 0) {
		netdev_err(ndev, "BUG!, TX full when queue awake!\n");
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
644
/**
 * xcan_rx -  Is called from CAN isr to complete the received
 *		frame  processing
 * @ndev:	Pointer to net_device structure
 * @frame_base:	Register offset to the frame to be read
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
 * does minimal processing and invokes "netif_receive_skb" to complete further
 * processing.
 * Return: 1 on success and 0 on failure.
 */
static int xcan_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0};

	skb = alloc_can_skb(ndev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Read a frame from Xilinx zynq CANPS */
	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
				XCAN_DLCR_DLC_SHIFT;

	/* Change Xilinx CAN data length format to socketCAN data format */
	cf->can_dlc = get_can_dlc(dlc);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
				XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
				XCAN_IDR_ID1_SHIFT;
		if (id_xcan & XCAN_IDR_SRR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* DW1/DW2 must always be read to remove message from RXFIFO */
	data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
	data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));

	if (!(cf->can_id & CAN_RTR_FLAG)) {
		/* Change Xilinx CAN data format to socketCAN data format */
		if (cf->can_dlc > 0)
			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
		if (cf->can_dlc > 4)
			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
	}

	stats->rx_bytes += cf->can_dlc;
	stats->rx_packets++;
	netif_receive_skb(skb);

	return 1;
}
713
877e0b75
AH
714/**
715 * xcan_current_error_state - Get current error state from HW
716 * @ndev: Pointer to net_device structure
717 *
718 * Checks the current CAN error state from the HW. Note that this
719 * only checks for ERROR_PASSIVE and ERROR_WARNING.
720 *
721 * Return:
722 * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
723 * otherwise.
724 */
725static enum can_state xcan_current_error_state(struct net_device *ndev)
726{
727 struct xcan_priv *priv = netdev_priv(ndev);
728 u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
729
730 if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
731 return CAN_STATE_ERROR_PASSIVE;
732 else if (status & XCAN_SR_ERRWRN_MASK)
733 return CAN_STATE_ERROR_WARNING;
734 else
735 return CAN_STATE_ERROR_ACTIVE;
736}
737
/**
 * xcan_set_error_state - Set new CAN error state
 * @ndev:	Pointer to net_device structure
 * @new_state:	The new CAN state to be set
 * @cf:		Error frame to be populated or NULL
 *
 * Set new CAN error state for the device, updating statistics and
 * populating the error frame if given.
 */
static void xcan_set_error_state(struct net_device *ndev,
				 enum can_state new_state,
				 struct can_frame *cf)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
	/* attribute the state change to the side(s) with the higher
	 * error counter; 0 means "no change" for can_change_state()
	 */
	enum can_state tx_state = txerr >= rxerr ? new_state : 0;
	enum can_state rx_state = txerr <= rxerr ? new_state : 0;

	/* non-ERROR states are handled elsewhere */
	if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
		return;

	can_change_state(ndev, cf, tx_state, rx_state);

	if (cf) {
		/* report the raw error counters in the error frame */
		cf->data[6] = txerr;
		cf->data[7] = rxerr;
	}
}
769
/**
 * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
 * @ndev:	Pointer to net_device structure
 *
 * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
 * the performed RX/TX has caused it to drop to a lesser state and set
 * the interface state accordingly.
 */
static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	enum can_state old_state = priv->can.state;
	enum can_state new_state;

	/* changing error state due to successful frame RX/TX can only
	 * occur from these states
	 */
	if (old_state != CAN_STATE_ERROR_WARNING &&
	    old_state != CAN_STATE_ERROR_PASSIVE)
		return;

	new_state = xcan_current_error_state(ndev);

	if (new_state != old_state) {
		struct sk_buff *skb;
		struct can_frame *cf;

		skb = alloc_can_err_skb(ndev, &cf);

		/* update state even if no skb could be allocated */
		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);

		if (skb) {
			struct net_device_stats *stats = &ndev->stats;

			stats->rx_packets++;
			stats->rx_bytes += cf->can_dlc;
			netif_rx(skb);
		}
	}
}
810
b1201e44
KA
811/**
812 * xcan_err_interrupt - error frame Isr
813 * @ndev: net_device pointer
814 * @isr: interrupt status register value
815 *
816 * This is the CAN error interrupt and it will
817 * check the the type of error and forward the error
818 * frame to upper layers.
819 */
820static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
821{
822 struct xcan_priv *priv = netdev_priv(ndev);
823 struct net_device_stats *stats = &ndev->stats;
824 struct can_frame *cf;
825 struct sk_buff *skb;
877e0b75 826 u32 err_status;
b1201e44
KA
827
828 skb = alloc_can_err_skb(ndev, &cf);
829
830 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
831 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
b1201e44
KA
832
833 if (isr & XCAN_IXR_BSOFF_MASK) {
834 priv->can.state = CAN_STATE_BUS_OFF;
835 priv->can.can_stats.bus_off++;
836 /* Leave device in Config Mode in bus-off state */
837 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
838 can_bus_off(ndev);
839 if (skb)
840 cf->can_id |= CAN_ERR_BUSOFF;
877e0b75
AH
841 } else {
842 enum can_state new_state = xcan_current_error_state(ndev);
843
7e2804aa
AH
844 if (new_state != priv->can.state)
845 xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
b1201e44
KA
846 }
847
848 /* Check for Arbitration lost interrupt */
849 if (isr & XCAN_IXR_ARBLST_MASK) {
850 priv->can.can_stats.arbitration_lost++;
851 if (skb) {
852 cf->can_id |= CAN_ERR_LOSTARB;
853 cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
854 }
855 }
856
857 /* Check for RX FIFO Overflow interrupt */
858 if (isr & XCAN_IXR_RXOFLW_MASK) {
859 stats->rx_over_errors++;
860 stats->rx_errors++;
b1201e44
KA
861 if (skb) {
862 cf->can_id |= CAN_ERR_CRTL;
863 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
864 }
865 }
866
9e5f1b27
AH
867 /* Check for RX Match Not Finished interrupt */
868 if (isr & XCAN_IXR_RXMNF_MASK) {
869 stats->rx_dropped++;
870 stats->rx_errors++;
871 netdev_err(ndev, "RX match not finished, frame discarded\n");
872 if (skb) {
873 cf->can_id |= CAN_ERR_CRTL;
874 cf->data[1] |= CAN_ERR_CRTL_UNSPEC;
875 }
876 }
877
b1201e44
KA
878 /* Check for error interrupt */
879 if (isr & XCAN_IXR_ERROR_MASK) {
a2ec19f8 880 if (skb)
b1201e44 881 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
b1201e44
KA
882
883 /* Check for Ack error interrupt */
884 if (err_status & XCAN_ESR_ACKER_MASK) {
885 stats->tx_errors++;
886 if (skb) {
887 cf->can_id |= CAN_ERR_ACK;
ffd461f8 888 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
b1201e44
KA
889 }
890 }
891
892 /* Check for Bit error interrupt */
893 if (err_status & XCAN_ESR_BERR_MASK) {
894 stats->tx_errors++;
895 if (skb) {
896 cf->can_id |= CAN_ERR_PROT;
897 cf->data[2] = CAN_ERR_PROT_BIT;
898 }
899 }
900
901 /* Check for Stuff error interrupt */
902 if (err_status & XCAN_ESR_STER_MASK) {
903 stats->rx_errors++;
904 if (skb) {
905 cf->can_id |= CAN_ERR_PROT;
906 cf->data[2] = CAN_ERR_PROT_STUFF;
907 }
908 }
909
910 /* Check for Form error interrupt */
911 if (err_status & XCAN_ESR_FMER_MASK) {
912 stats->rx_errors++;
913 if (skb) {
914 cf->can_id |= CAN_ERR_PROT;
915 cf->data[2] = CAN_ERR_PROT_FORM;
916 }
917 }
918
919 /* Check for CRC error interrupt */
920 if (err_status & XCAN_ESR_CRCER_MASK) {
921 stats->rx_errors++;
922 if (skb) {
923 cf->can_id |= CAN_ERR_PROT;
ffd461f8 924 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
b1201e44
KA
925 }
926 }
927 priv->can.can_stats.bus_error++;
928 }
929
930 if (skb) {
931 stats->rx_packets++;
932 stats->rx_bytes += cf->can_dlc;
933 netif_rx(skb);
934 }
935
936 netdev_dbg(ndev, "%s: error status register:0x%x\n",
937 __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
938}
939
/**
 * xcan_state_interrupt - It will check the state of the CAN device
 * @ndev:	net_device pointer
 * @isr:	interrupt status register value
 *
 * This will checks the state of the CAN device
 * and puts the device into appropriate state.
 */
static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Check for Sleep interrupt if set put CAN device in sleep state */
	if (isr & XCAN_IXR_SLP_MASK)
		priv->can.state = CAN_STATE_SLEEPING;

	/* Check for Wake up interrupt if set put CAN device in Active state */
	if (isr & XCAN_IXR_WKUP_MASK)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
960
9e5f1b27
AH
/**
 * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
 * @priv:	Driver private data structure
 *
 * On multi-FIFO (CAN FD) cores the frame offset is computed from the
 * FSR read index; on single-FIFO cores frames are always read from the
 * static RX FIFO offset.
 *
 * Return: Register offset of the next frame in RX FIFO,
 *	   or -ENOENT if the RX FIFO is empty.
 */
static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
{
	int offset;

	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
		u32 fsr;

		/* clear RXOK before the is-empty check so that any newly
		 * received frame will reassert it without a race
		 */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);

		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);

		/* check if RX FIFO is empty */
		if (!(fsr & XCAN_FSR_FL_MASK))
			return -ENOENT;

		/* CAN FD 2.0 cores use a different frame layout/offset */
		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			offset = XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
		else
			offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);

	} else {
		/* check if RX FIFO is empty */
		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
		      XCAN_IXR_RXNEMP_MASK))
			return -ENOENT;

		/* frames are read from a static offset */
		offset = XCAN_RXFIFO_OFFSET;
	}

	return offset;
}
1001
b1201e44
KA
/**
 * xcan_rx_poll - Poll routine for rx packets (NAPI)
 * @napi:	napi structure pointer
 * @quota:	Max number of rx packets to be processed.
 *
 * This is the poll routine for the rx part.
 * It will process at most @quota packets per invocation.
 *
 * Return: number of packets received
 */
static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ier;
	int work_done = 0;
	int frame_offset;

	while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
	       (work_done < quota)) {
		work_done += xcan_rx(ndev, frame_offset);

		if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
			/* increment read index */
			priv->write_reg(priv, XCAN_FSR_OFFSET,
					XCAN_FSR_IRI_MASK);
		else
			/* clear rx-not-empty (will actually clear only if
			 * empty)
			 */
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_RXNEMP_MASK);
	}

	if (work_done) {
		can_led_event(ndev, CAN_LED_EVENT_RX);
		xcan_update_error_state_after_rxtx(ndev);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);
		/* re-enable the RX interrupts that the hard IRQ handler
		 * masked before scheduling NAPI
		 */
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier |= xcan_rx_int_mask(priv);
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
	}
	return work_done;
}
1049
/**
 * xcan_tx_interrupt - Tx Done Isr
 * @ndev:	net_device pointer
 * @isr:	Interrupt status register value
 *
 * Accounts completed TX frames under priv->tx_lock. TXOK only says "at
 * least one frame was sent"; with up to two frames in the HW FIFO the
 * TXFEMP (FIFO empty) bit is used to decide whether one or two frames
 * completed.
 */
static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int frames_in_fifo;
	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
	unsigned long flags;
	int retries = 0;

	/* Synchronize with xmit as we need to know the exact number
	 * of frames in the FIFO to stay in sync due to the TXFEMP
	 * handling.
	 * This also prevents a race between netif_wake_queue() and
	 * netif_stop_queue().
	 */
	spin_lock_irqsave(&priv->tx_lock, flags);

	frames_in_fifo = priv->tx_head - priv->tx_tail;

	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
		/* clear TXOK anyway to avoid getting back here */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return;
	}

	/* Check if 2 frames were sent (TXOK only means that at least 1
	 * frame was sent).
	 */
	if (frames_in_fifo > 1) {
		WARN_ON(frames_in_fifo > priv->tx_max);

		/* Synchronize TXOK and isr so that after the loop:
		 * (1) isr variable is up-to-date at least up to TXOK clear
		 *     time. This avoids us clearing a TXOK of a second frame
		 *     but not noticing that the FIFO is now empty and thus
		 *     marking only a single frame as sent.
		 * (2) No TXOK is left. Having one could mean leaving a
		 *     stray TXOK as we might process the associated frame
		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
		 *     clear to satisfy (1).
		 */
		while ((isr & XCAN_IXR_TXOK_MASK) &&
		       !WARN_ON(++retries == 100)) {
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_TXOK_MASK);
			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
		}

		if (isr & XCAN_IXR_TXFEMP_MASK) {
			/* nothing in FIFO anymore */
			frames_sent = frames_in_fifo;
		}
	} else {
		/* single frame in fifo, just clear TXOK */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
	}

	/* release the echo skbs and update stats for each completed frame */
	while (frames_sent--) {
		stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
						    priv->tx_max);
		priv->tx_tail++;
		stats->tx_packets++;
	}

	netif_wake_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	can_led_event(ndev, CAN_LED_EVENT_TX);
	xcan_update_error_state_after_rxtx(ndev);
}
1125
/**
 * xcan_interrupt - CAN Isr
 * @irq:	irq number
 * @dev_id:	device id pointer (the net_device)
 *
 * This is the xilinx CAN Isr. It checks for the type of interrupt
 * and invokes the corresponding ISR. RX work is deferred to NAPI:
 * the RX interrupts are masked here and re-enabled by xcan_rx_poll().
 *
 * Return:
 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
 */
static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;
	u32 isr_errors;
	u32 rx_int_mask = xcan_rx_int_mask(priv);

	/* Get the interrupt status from Xilinx CAN */
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	if (!isr)
		return IRQ_NONE;

	/* Check for the type of interrupt and Processing it */
	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
					XCAN_IXR_WKUP_MASK));
		xcan_state_interrupt(ndev, isr);
	}

	/* Check for Tx interrupt and Processing it */
	if (isr & XCAN_IXR_TXOK_MASK)
		xcan_tx_interrupt(ndev, isr);

	/* Check for the type of error interrupt and Processing it */
	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
			    XCAN_IXR_RXMNF_MASK);
	if (isr_errors) {
		/* clear the error bits before handling so new ones latch */
		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
		xcan_err_interrupt(ndev, isr);
	}

	/* Check for the type of receive interrupt and Processing it */
	if (isr & rx_int_mask) {
		/* mask RX interrupts until NAPI has drained the FIFO */
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier &= ~rx_int_mask;
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
1179
1180/**
1181 * xcan_chip_stop - Driver stop routine
1182 * @ndev: Pointer to net_device structure
1183 *
1184 * This is the drivers stop routine. It will disable the
1185 * interrupts and put the device into configuration mode.
1186 */
1187static void xcan_chip_stop(struct net_device *ndev)
1188{
1189 struct xcan_priv *priv = netdev_priv(ndev);
b1201e44
KA
1190
1191 /* Disable interrupts and leave the can in configuration mode */
8ebd83bd 1192 set_reset_mode(ndev);
b1201e44
KA
1193 priv->can.state = CAN_STATE_STOPPED;
1194}
1195
1196/**
1197 * xcan_open - Driver open routine
1198 * @ndev: Pointer to net_device structure
1199 *
1200 * This is the driver open routine.
1201 * Return: 0 on success and failure value on error
1202 */
1203static int xcan_open(struct net_device *ndev)
1204{
1205 struct xcan_priv *priv = netdev_priv(ndev);
1206 int ret;
1207
4716620d
KA
1208 ret = pm_runtime_get_sync(priv->dev);
1209 if (ret < 0) {
1210 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1211 __func__, ret);
1212 return ret;
1213 }
1214
b1201e44
KA
1215 ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
1216 ndev->name, ndev);
1217 if (ret < 0) {
1218 netdev_err(ndev, "irq allocation for CAN failed\n");
1219 goto err;
1220 }
1221
b1201e44
KA
1222 /* Set chip into reset mode */
1223 ret = set_reset_mode(ndev);
1224 if (ret < 0) {
1225 netdev_err(ndev, "mode resetting failed!\n");
4716620d 1226 goto err_irq;
b1201e44
KA
1227 }
1228
1229 /* Common open */
1230 ret = open_candev(ndev);
1231 if (ret)
4716620d 1232 goto err_irq;
b1201e44
KA
1233
1234 ret = xcan_chip_start(ndev);
1235 if (ret < 0) {
1236 netdev_err(ndev, "xcan_chip_start failed!\n");
1237 goto err_candev;
1238 }
1239
1240 can_led_event(ndev, CAN_LED_EVENT_OPEN);
1241 napi_enable(&priv->napi);
1242 netif_start_queue(ndev);
1243
1244 return 0;
1245
1246err_candev:
1247 close_candev(ndev);
b1201e44
KA
1248err_irq:
1249 free_irq(ndev->irq, ndev);
1250err:
4716620d
KA
1251 pm_runtime_put(priv->dev);
1252
b1201e44
KA
1253 return ret;
1254}
1255
/**
 * xcan_close - Driver close routine
 * @ndev:	Pointer to net_device structure
 *
 * Stops the queue and NAPI, halts the controller, releases the IRQ
 * and the runtime-PM reference taken in xcan_open().
 *
 * Return: 0 always
 */
static int xcan_close(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	xcan_chip_stop(ndev);
	free_irq(ndev->irq, ndev);
	close_candev(ndev);

	can_led_event(ndev, CAN_LED_EVENT_STOP);
	/* balance the pm_runtime_get_sync() from xcan_open() */
	pm_runtime_put(priv->dev);

	return 0;
}
1277
1278/**
1279 * xcan_get_berr_counter - error counter routine
1280 * @ndev: Pointer to net_device structure
1281 * @bec: Pointer to can_berr_counter structure
1282 *
1283 * This is the driver error counter routine.
1284 * Return: 0 on success and failure value on error
1285 */
1286static int xcan_get_berr_counter(const struct net_device *ndev,
1287 struct can_berr_counter *bec)
1288{
1289 struct xcan_priv *priv = netdev_priv(ndev);
1290 int ret;
1291
4716620d
KA
1292 ret = pm_runtime_get_sync(priv->dev);
1293 if (ret < 0) {
1294 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1295 __func__, ret);
1296 return ret;
1297 }
b1201e44
KA
1298
1299 bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
1300 bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
1301 XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
1302
4716620d 1303 pm_runtime_put(priv->dev);
b1201e44
KA
1304
1305 return 0;
b1201e44
KA
1306}
1307
1308
/* Standard netdev callbacks for the CAN interface */
static const struct net_device_ops xcan_netdev_ops = {
	.ndo_open	= xcan_open,
	.ndo_stop	= xcan_close,
	.ndo_start_xmit	= xcan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};
1315
/**
 * xcan_suspend - Suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	/* quiesce the interface before powering down the controller */
	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		xcan_chip_stop(ndev);
	}

	return pm_runtime_force_suspend(dev);
}
1335
/**
 * xcan_resume - Resume from suspend
 * @dev: Address of the device structure
 *
 * Resume operation after suspend: power the controller back up and,
 * if the interface was running, restart it and the TX queue.
 *
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
		return ret;
	}

	if (netif_running(ndev)) {
		ret = xcan_chip_start(ndev);
		if (ret) {
			dev_err(dev, "xcan_chip_start failed on resume\n");
			return ret;
		}

		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}
1367
/**
 * xcan_runtime_suspend - Runtime suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode by gating the bus and CAN core
 * clocks; xcan_runtime_resume() re-enables them.
 *
 * Return: 0 always
 */
static int __maybe_unused xcan_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);

	clk_disable_unprepare(priv->bus_clk);
	clk_disable_unprepare(priv->can_clk);

	return 0;
}
1385
1386/**
4716620d
KA
1387 * xcan_runtime_resume - Runtime resume from suspend
1388 * @dev: Address of the device structure
b1201e44
KA
1389 *
1390 * Resume operation after suspend.
1391 * Return: 0 on success and failure value on error
1392 */
4716620d 1393static int __maybe_unused xcan_runtime_resume(struct device *dev)
b1201e44 1394{
4716620d 1395 struct net_device *ndev = dev_get_drvdata(dev);
b1201e44
KA
1396 struct xcan_priv *priv = netdev_priv(ndev);
1397 int ret;
1398
4716620d 1399 ret = clk_prepare_enable(priv->bus_clk);
b1201e44
KA
1400 if (ret) {
1401 dev_err(dev, "Cannot enable clock.\n");
1402 return ret;
1403 }
4716620d 1404 ret = clk_prepare_enable(priv->can_clk);
b1201e44
KA
1405 if (ret) {
1406 dev_err(dev, "Cannot enable clock.\n");
1407 clk_disable_unprepare(priv->bus_clk);
1408 return ret;
1409 }
1410
b1201e44
KA
1411 return 0;
1412}
1413
4716620d
KA
/* System sleep and runtime PM callbacks */
static const struct dev_pm_ops xcan_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};
b1201e44 1418
/* Zynq CANPS hard core */
static const struct xcan_devtype_data xcan_zynq_data = {
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "pclk",
};
/* NOTE(review): xcan_probe() tests XCAN_FLAG_TXFEMP for FIFO-based cores;
 * confirm whether this entry should set it (the flag is not visible here).
 */

/* AXI CAN soft core */
static const struct xcan_devtype_data xcan_axi_data = {
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "s_axi_aclk",
};

/* CAN FD 1.0 core: TX mailboxes plus a multi-frame RX FIFO */
static const struct xcan_devtype_data xcan_canfd_data = {
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

/* CAN FD 2.0 core: as FD 1.0 plus the 2.0 register layout flag */
static const struct xcan_devtype_data xcan_canfd2_data = {
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_CANFD_2 |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd2,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
	{ .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);
1465
b1201e44
KA
1466/**
1467 * xcan_probe - Platform registration call
1468 * @pdev: Handle to the platform device structure
1469 *
1470 * This function does all the memory allocation and registration for the CAN
1471 * device.
1472 *
1473 * Return: 0 on success and failure value on error
1474 */
1475static int xcan_probe(struct platform_device *pdev)
1476{
1477 struct resource *res; /* IO mem resources */
1478 struct net_device *ndev;
1479 struct xcan_priv *priv;
620050d9 1480 const struct of_device_id *of_id;
1598efe5 1481 const struct xcan_devtype_data *devtype = &xcan_axi_data;
b1201e44 1482 void __iomem *addr;
9e5f1b27
AH
1483 int ret;
1484 int rx_max, tx_max;
1485 int hw_tx_max, hw_rx_max;
1486 const char *hw_tx_max_property;
b1201e44
KA
1487
1488 /* Get the virtual base address for the device */
1489 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1490 addr = devm_ioremap_resource(&pdev->dev, res);
1491 if (IS_ERR(addr)) {
1492 ret = PTR_ERR(addr);
1493 goto err;
1494 }
1495
620050d9 1496 of_id = of_match_device(xcan_of_match, &pdev->dev);
1598efe5
AH
1497 if (of_id && of_id->data)
1498 devtype = of_id->data;
620050d9 1499
9e5f1b27
AH
1500 hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
1501 "tx-mailbox-count" : "tx-fifo-depth";
1502
1503 ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
1504 &hw_tx_max);
1505 if (ret < 0) {
1506 dev_err(&pdev->dev, "missing %s property\n",
1507 hw_tx_max_property);
1508 goto err;
1509 }
1510
1511 ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1512 &hw_rx_max);
1513 if (ret < 0) {
1514 dev_err(&pdev->dev,
1515 "missing rx-fifo-depth property (mailbox mode is not supported)\n");
1516 goto err;
1517 }
1518
1519 /* With TX FIFO:
1520 *
1521 * There is no way to directly figure out how many frames have been
1522 * sent when the TXOK interrupt is processed. If TXFEMP
620050d9
AH
1523 * is supported, we can have 2 frames in the FIFO and use TXFEMP
1524 * to determine if 1 or 2 frames have been sent.
1525 * Theoretically we should be able to use TXFWMEMP to determine up
1526 * to 3 frames, but it seems that after putting a second frame in the
1527 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
1528 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
1529 * sent), which is not a sensible state - possibly TXFWMEMP is not
1530 * completely synchronized with the rest of the bits?
9e5f1b27
AH
1531 *
1532 * With TX mailboxes:
1533 *
1534 * HW sends frames in CAN ID priority order. To preserve FIFO ordering
1535 * we submit frames one at a time.
620050d9 1536 */
9e5f1b27
AH
1537 if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
1538 (devtype->flags & XCAN_FLAG_TXFEMP))
1539 tx_max = min(hw_tx_max, 2);
620050d9
AH
1540 else
1541 tx_max = 1;
1542
9e5f1b27
AH
1543 rx_max = hw_rx_max;
1544
b1201e44
KA
1545 /* Create a CAN device instance */
1546 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
1547 if (!ndev)
1548 return -ENOMEM;
1549
1550 priv = netdev_priv(ndev);
4716620d 1551 priv->dev = &pdev->dev;
1598efe5 1552 priv->can.bittiming_const = devtype->bittiming_const;
b1201e44
KA
1553 priv->can.do_set_mode = xcan_do_set_mode;
1554 priv->can.do_get_berr_counter = xcan_get_berr_counter;
1555 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1556 CAN_CTRLMODE_BERR_REPORTING;
1557 priv->reg_base = addr;
1558 priv->tx_max = tx_max;
1598efe5 1559 priv->devtype = *devtype;
620050d9 1560 spin_lock_init(&priv->tx_lock);
b1201e44
KA
1561
1562 /* Get IRQ for the device */
1563 ndev->irq = platform_get_irq(pdev, 0);
1564 ndev->flags |= IFF_ECHO; /* We support local echo */
1565
1566 platform_set_drvdata(pdev, ndev);
1567 SET_NETDEV_DEV(ndev, &pdev->dev);
1568 ndev->netdev_ops = &xcan_netdev_ops;
1569
1570 /* Getting the CAN can_clk info */
1571 priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
1572 if (IS_ERR(priv->can_clk)) {
1573 dev_err(&pdev->dev, "Device clock not found.\n");
1574 ret = PTR_ERR(priv->can_clk);
1575 goto err_free;
1576 }
1598efe5
AH
1577
1578 priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
1579 if (IS_ERR(priv->bus_clk)) {
1580 dev_err(&pdev->dev, "bus clock not found\n");
1581 ret = PTR_ERR(priv->bus_clk);
1582 goto err_free;
b1201e44
KA
1583 }
1584
b1201e44
KA
1585 priv->write_reg = xcan_write_reg_le;
1586 priv->read_reg = xcan_read_reg_le;
1587
4716620d
KA
1588 pm_runtime_enable(&pdev->dev);
1589 ret = pm_runtime_get_sync(&pdev->dev);
1590 if (ret < 0) {
1591 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1592 __func__, ret);
1593 goto err_pmdisable;
1594 }
1595
b1201e44
KA
1596 if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
1597 priv->write_reg = xcan_write_reg_be;
1598 priv->read_reg = xcan_read_reg_be;
1599 }
1600
1601 priv->can.clock.freq = clk_get_rate(priv->can_clk);
1602
1603 netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
1604
1605 ret = register_candev(ndev);
1606 if (ret) {
1607 dev_err(&pdev->dev, "fail to register failed (err=%d)\n", ret);
4716620d 1608 goto err_disableclks;
b1201e44
KA
1609 }
1610
1611 devm_can_led_init(ndev);
4716620d
KA
1612
1613 pm_runtime_put(&pdev->dev);
1614
9e5f1b27
AH
1615 netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
1616 priv->reg_base, ndev->irq, priv->can.clock.freq,
1617 hw_tx_max, priv->tx_max);
b1201e44
KA
1618
1619 return 0;
1620
4716620d
KA
1621err_disableclks:
1622 pm_runtime_put(priv->dev);
1623err_pmdisable:
1624 pm_runtime_disable(&pdev->dev);
b1201e44
KA
1625err_free:
1626 free_candev(ndev);
1627err:
1628 return ret;
1629}
1630
/**
 * xcan_remove - Unregister the device after releasing the resources
 * @pdev:	Handle to the platform device structure
 *
 * This function frees all the resources allocated to the device.
 * Return: 0 always
 */
static int xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xcan_priv *priv = netdev_priv(ndev);

	/* unregister first so no new opens race with the teardown below */
	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	netif_napi_del(&priv->napi);
	free_candev(ndev);

	return 0;
}
1650
b1201e44
KA
/* Platform driver glue binding probe/remove, PM ops and the OF match table */
static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove	= xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");