can: at91_can: at91_irq_err_line(): make use of can_change_state() and can_bus_off()
[linux-block.git] / drivers / net / can / at91_can.c
CommitLineData
dd2878aa 1// SPDX-License-Identifier: GPL-2.0-only
99c4a634
DM
2/*
3 * at91_can.c - CAN network driver for AT91 SoC CAN controller
4 *
3e9ebd3c 5 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
0909c1ec 6 * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de>
99c4a634
DM
7 */
8
bd7854e8 9#include <linux/bitfield.h>
99c4a634
DM
10#include <linux/clk.h>
11#include <linux/errno.h>
409c188c 12#include <linux/ethtool.h>
99c4a634 13#include <linux/if_arp.h>
99c4a634
DM
14#include <linux/interrupt.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/netdevice.h>
3078cde7 18#include <linux/of.h>
3ecc0985 19#include <linux/phy/phy.h>
99c4a634 20#include <linux/platform_device.h>
3a5655a5 21#include <linux/rtnetlink.h>
99c4a634
DM
22#include <linux/skbuff.h>
23#include <linux/spinlock.h>
24#include <linux/string.h>
25#include <linux/types.h>
26
99c4a634
DM
27#include <linux/can/dev.h>
28#include <linux/can/error.h>
29
/* Bitmask with the @i lowest bits set, e.g. AT91_MB_MASK(4) == 0xf.
 * Shift an unsigned constant so that the expression is well defined
 * for the full 32-bit register width (1 << 31 on a signed int is UB).
 */
#define AT91_MB_MASK(i) ((1U << (i)) - 1)
99c4a634
DM
31
/* Common registers, byte offsets from priv->reg_base */
enum at91_reg {
	AT91_MR = 0x000,	/* Mode Register (CANEN, ABM, ...) */
	AT91_IER = 0x004,	/* Interrupt Enable Register */
	AT91_IDR = 0x008,	/* Interrupt Disable Register */
	AT91_IMR = 0x00C,	/* Interrupt Mask Register */
	AT91_SR = 0x010,	/* Status Register */
	AT91_BR = 0x014,	/* Baudrate Register */
	AT91_TIM = 0x018,	/* Timer Register */
	AT91_TIMESTP = 0x01C,	/* Timestamp Register */
	AT91_ECR = 0x020,	/* Error Counter Register (REC/TEC) */
	AT91_TCR = 0x024,	/* Transfer Command Register */
	AT91_ACR = 0x028,	/* Abort Command Register */
};
46
/* Mailbox registers (0 <= i <= 15); each mailbox occupies 0x20 bytes */
#define AT91_MMR(i) ((enum at91_reg)(0x200 + ((i) * 0x20)))	/* Mailbox Mode */
#define AT91_MAM(i) ((enum at91_reg)(0x204 + ((i) * 0x20)))	/* Acceptance Mask */
#define AT91_MID(i) ((enum at91_reg)(0x208 + ((i) * 0x20)))	/* Mailbox ID */
#define AT91_MFID(i) ((enum at91_reg)(0x20C + ((i) * 0x20)))	/* Family ID */
#define AT91_MSR(i) ((enum at91_reg)(0x210 + ((i) * 0x20)))	/* Mailbox Status */
#define AT91_MDL(i) ((enum at91_reg)(0x214 + ((i) * 0x20)))	/* Data Low */
#define AT91_MDH(i) ((enum at91_reg)(0x218 + ((i) * 0x20)))	/* Data High */
#define AT91_MCR(i) ((enum at91_reg)(0x21C + ((i) * 0x20)))	/* Mailbox Control */
99c4a634
DM
56
/* Register bits */
#define AT91_MR_CANEN BIT(0)	/* CAN controller enable */
#define AT91_MR_LPM BIT(1)	/* low power mode */
#define AT91_MR_ABM BIT(2)	/* autobaud/listen-only mode */
#define AT91_MR_OVL BIT(3)	/* overload frame enable */
#define AT91_MR_TEOF BIT(4)	/* timestamp at end of frame */
#define AT91_MR_TTM BIT(5)	/* time triggered mode */
#define AT91_MR_TIMFRZ BIT(6)	/* timer freeze */
#define AT91_MR_DRPT BIT(7)	/* disable repeat */

#define AT91_SR_RBSY BIT(29)	/* receiver busy */
#define AT91_SR_TBSY BIT(30)	/* transmitter busy */
#define AT91_SR_OVLSY BIT(31)	/* overload busy */
99c4a634 70
/* Baudrate register fields (bit timing segments, all minus one) */
#define AT91_BR_PHASE2_MASK GENMASK(2, 0)
#define AT91_BR_PHASE1_MASK GENMASK(6, 4)
#define AT91_BR_PROPAG_MASK GENMASK(10, 8)
#define AT91_BR_SJW_MASK GENMASK(13, 12)
#define AT91_BR_BRP_MASK GENMASK(22, 16)
#define AT91_BR_SMP BIT(24)	/* triple sampling of the bus */

#define AT91_TIM_TIMER_MASK GENMASK(15, 0)

/* Error counter register: receive / transmit error counters */
#define AT91_ECR_REC_MASK GENMASK(8, 0)
#define AT91_ECR_TEC_MASK GENMASK(23, 16)

#define AT91_TCR_TIMRST BIT(31)	/* timer reset */

/* Mailbox mode register fields */
#define AT91_MMR_MTIMEMARK_MASK GENMASK(15, 0)
#define AT91_MMR_PRIOR_MASK GENMASK(19, 16)	/* TX priority, 0 = highest */
#define AT91_MMR_MOT_MASK GENMASK(26, 24)	/* mailbox object type */

/* Mailbox ID register: MIDVA holds the standard 11-bit ID, MIDVA|MIDVB
 * together hold the extended 29-bit ID (MIDE set).
 */
#define AT91_MID_MIDVB_MASK GENMASK(17, 0)
#define AT91_MID_MIDVA_MASK GENMASK(28, 18)
#define AT91_MID_MIDE BIT(29)

/* Mailbox status register fields */
#define AT91_MSR_MTIMESTAMP_MASK GENMASK(15, 0)
#define AT91_MSR_MDLC_MASK GENMASK(19, 16)	/* received DLC */
#define AT91_MSR_MRTR BIT(20)	/* received frame was RTR */
#define AT91_MSR_MABT BIT(22)	/* transfer was aborted */
#define AT91_MSR_MRDY BIT(23)	/* mailbox ready / transfer done */
#define AT91_MSR_MMI BIT(24)	/* message ignored (overwrite happened) */

/* Mailbox control register fields */
#define AT91_MCR_MDLC_MASK GENMASK(19, 16)	/* DLC to transmit */
#define AT91_MCR_MRTR BIT(20)	/* send as RTR frame */
#define AT91_MCR_MACR BIT(22)	/* abort request */
#define AT91_MCR_MTCR BIT(23)	/* transfer request */
99c4a634
DM
104
/* Mailbox Modes (written into AT91_MMR_MOT_MASK) */
enum at91_mb_mode {
	AT91_MB_MODE_DISABLED = 0,
	AT91_MB_MODE_RX = 1,
	AT91_MB_MODE_RX_OVRWR = 2,	/* RX with overwrite of old messages */
	AT91_MB_MODE_TX = 3,
	AT91_MB_MODE_CONSUMER = 4,
	AT91_MB_MODE_PRODUCER = 5,
};
114
/* Interrupt mask bits (bits 0..15 are the per-mailbox interrupts) */
#define AT91_IRQ_ERRA BIT(16)	/* error active state */
#define AT91_IRQ_WARN BIT(17)	/* error warning limit */
#define AT91_IRQ_ERRP BIT(18)	/* error passive state */
#define AT91_IRQ_BOFF BIT(19)	/* bus-off state */
#define AT91_IRQ_SLEEP BIT(20)
#define AT91_IRQ_WAKEUP BIT(21)
#define AT91_IRQ_TOVF BIT(22)	/* timer overflow */
#define AT91_IRQ_TSTP BIT(23)	/* timestamp */
#define AT91_IRQ_CERR BIT(24)	/* CRC error */
#define AT91_IRQ_SERR BIT(25)	/* stuffing error */
#define AT91_IRQ_AERR BIT(26)	/* acknowledgment error */
#define AT91_IRQ_FERR BIT(27)	/* form error */
#define AT91_IRQ_BERR BIT(28)	/* bit error */

#define AT91_IRQ_ERR_ALL (0x1fff0000)
/* frame-level bus errors */
#define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \
			    AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR)
/* CAN state-change (line error) interrupts */
#define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \
			   AT91_IRQ_ERRP | AT91_IRQ_BOFF)

#define AT91_IRQ_ALL (0x1fffffff)
99c4a634 137
d3d47264
MKB
/* Supported SoC flavours; they differ in mailbox layout (see
 * struct at91_devtype_data).
 */
enum at91_devtype {
	AT91_DEVTYPE_SAM9263,
	AT91_DEVTYPE_SAM9X5,
};
142
/* Per-SoC mailbox layout: RX mailboxes span rx_first..rx_last and are
 * split into a lower and an upper group at rx_split; the remaining
 * mailboxes (1 << tx_shift of them) are used for TX.
 */
struct at91_devtype_data {
	unsigned int rx_first;	/* index of the first RX mailbox */
	unsigned int rx_split;	/* first mailbox of the upper RX group */
	unsigned int rx_last;	/* index of the last RX mailbox */
	unsigned int tx_shift;	/* log2 of the number of TX mailboxes */
	enum at91_devtype type;
};
150
/* Driver private data, embedded behind the net_device */
struct at91_priv {
	struct can_priv can;	/* must be the first member! */
	struct napi_struct napi;
	struct phy *transceiver;	/* optional CAN transceiver phy */

	void __iomem *reg_base;	/* ioremapped controller registers */

	unsigned int tx_head;	/* next TX slot: prio in upper, mb in lower bits */
	unsigned int tx_tail;	/* oldest not-yet-acked TX slot */
	unsigned int rx_next;	/* next RX mailbox to read */
	struct at91_devtype_data devtype_data;

	struct clk *clk;
	struct at91_can_data *pdata;

	canid_t mb0_id;	/* CAN ID programmed into the unusable mailbox 0 */
};
168
3078cde7
LD
/* sam9263: mb0 unusable (errata), mb1-11 RX (split at 8), mb12-15 TX */
static const struct at91_devtype_data at91_at91sam9263_data = {
	.rx_first = 1,
	.rx_split = 8,
	.rx_last = 11,
	.tx_shift = 2,
	.type = AT91_DEVTYPE_SAM9263,
};
176
/* sam9x5: mb0-5 RX (split at 4), mb6-7 TX */
static const struct at91_devtype_data at91_at91sam9x5_data = {
	.rx_first = 0,
	.rx_split = 4,
	.rx_last = 5,
	.tx_shift = 1,
	.type = AT91_DEVTYPE_SAM9X5,
};
184
/* Bit timing limits of the controller, consumed by the CAN core when
 * computing/validating the bitrate (written to AT91_BR).
 */
static const struct can_bittiming_const at91_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 4,
	.tseg1_max = 16,
	.tseg2_min = 2,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 2,
	.brp_max = 128,
	.brp_inc = 1,
};
196
/* Generate at91_is_sam9263()/at91_is_sam9X5() SoC-type predicates */
#define AT91_IS(_model) \
static inline int __maybe_unused at91_is_sam##_model(const struct at91_priv *priv) \
{ \
	return priv->devtype_data.type == AT91_DEVTYPE_SAM##_model; \
}

AT91_IS(9263);
AT91_IS(9X5);
d3d47264
MKB
205
/* Index of the first RX mailbox for this SoC */
static inline unsigned int get_mb_rx_first(const struct at91_priv *priv)
{
	return priv->devtype_data.rx_first;
}
210
/* Index of the last RX mailbox for this SoC */
static inline unsigned int get_mb_rx_last(const struct at91_priv *priv)
{
	return priv->devtype_data.rx_last;
}
215
/* First mailbox of the upper RX group */
static inline unsigned int get_mb_rx_split(const struct at91_priv *priv)
{
	return priv->devtype_data.rx_split;
}
220
/* Total number of RX mailboxes */
static inline unsigned int get_mb_rx_num(const struct at91_priv *priv)
{
	return get_mb_rx_last(priv) - get_mb_rx_first(priv) + 1;
}
225
79008997
MKB
/* Index of the last mailbox in the lower RX group */
static inline unsigned int get_mb_rx_low_last(const struct at91_priv *priv)
{
	return get_mb_rx_split(priv) - 1;
}
230
/* Bitmask covering the lower RX mailbox group */
static inline unsigned int get_mb_rx_low_mask(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_rx_split(priv)) &
		~AT91_MB_MASK(get_mb_rx_first(priv));
}
236
/* log2 of the number of TX mailboxes */
static inline unsigned int get_mb_tx_shift(const struct at91_priv *priv)
{
	return priv->devtype_data.tx_shift;
}
241
/* Number of TX mailboxes */
static inline unsigned int get_mb_tx_num(const struct at91_priv *priv)
{
	return 1 << get_mb_tx_shift(priv);
}
246
/* Index of the first TX mailbox (directly after the RX mailboxes) */
static inline unsigned int get_mb_tx_first(const struct at91_priv *priv)
{
	return get_mb_rx_last(priv) + 1;
}
251
/* Index of the last TX mailbox */
static inline unsigned int get_mb_tx_last(const struct at91_priv *priv)
{
	return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1;
}
256
/* Bit position of the priority field inside tx_head/tx_tail */
static inline unsigned int get_head_prio_shift(const struct at91_priv *priv)
{
	return get_mb_tx_shift(priv);
}
261
/* Mask of the 4-bit priority field inside tx_head/tx_tail */
static inline unsigned int get_head_prio_mask(const struct at91_priv *priv)
{
	return 0xf << get_mb_tx_shift(priv);
}
266
/* Mask of the mailbox-number field inside tx_head/tx_tail */
static inline unsigned int get_head_mb_mask(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_tx_shift(priv));
}
271
/* Combined mailbox + priority mask; tx_head wraps when it overflows this */
static inline unsigned int get_head_mask(const struct at91_priv *priv)
{
	return get_head_mb_mask(priv) | get_head_prio_mask(priv);
}
276
/* IRQ/status bitmask covering all RX mailboxes */
static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_rx_last(priv) + 1) &
		~AT91_MB_MASK(get_mb_rx_first(priv));
}
282
/* IRQ/status bitmask covering all TX mailboxes */
static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_tx_last(priv) + 1) &
		~AT91_MB_MASK(get_mb_tx_first(priv));
}
288
/* Hardware mailbox number the next TX frame goes into */
static inline unsigned int get_tx_head_mb(const struct at91_priv *priv)
{
	return (priv->tx_head & get_head_mb_mask(priv)) + get_mb_tx_first(priv);
}
293
/* Mailbox priority for the next TX frame (0 = highest) */
static inline unsigned int get_tx_head_prio(const struct at91_priv *priv)
{
	return (priv->tx_head >> get_head_prio_shift(priv)) & 0xf;
}
298
/* Hardware mailbox number of the oldest not-yet-acked TX frame */
static inline unsigned int get_tx_tail_mb(const struct at91_priv *priv)
{
	return (priv->tx_tail & get_head_mb_mask(priv)) + get_mb_tx_first(priv);
}
303
/* Read a controller register; relaxed MMIO ordering is sufficient here */
static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
{
	return readl_relaxed(priv->reg_base + reg);
}
308
/* Write a controller register; relaxed MMIO ordering is sufficient here */
static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
			      u32 value)
{
	writel_relaxed(value, priv->reg_base + reg);
}
314
/* Program a mailbox's object type and TX priority via its mode register */
static inline void set_mb_mode_prio(const struct at91_priv *priv,
				    unsigned int mb, enum at91_mb_mode mode,
				    u8 prio)
{
	const u32 reg_mmr = FIELD_PREP(AT91_MMR_MOT_MASK, mode) |
		FIELD_PREP(AT91_MMR_PRIOR_MASK, prio);

	at91_write(priv, AT91_MMR(mb), reg_mmr);
}
324
/* Program a mailbox's object type with the default (highest) priority */
static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
			       enum at91_mb_mode mode)
{
	set_mb_mode_prio(priv, mb, mode, 0);
}
330
3a5655a5
MKB
331static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
332{
333 u32 reg_mid;
334
335 if (can_id & CAN_EFF_FLAG)
90aa9a25
MKB
336 reg_mid = FIELD_PREP(AT91_MID_MIDVA_MASK | AT91_MID_MIDVB_MASK, can_id) |
337 AT91_MID_MIDE;
3a5655a5 338 else
90aa9a25 339 reg_mid = FIELD_PREP(AT91_MID_MIDVA_MASK, can_id);
3a5655a5
MKB
340
341 return reg_mid;
342}
343
99c4a634
DM
/* Partition the mailboxes into disabled / RX-FIFO / TX groups and
 * reset the driver's tx/rx bookkeeping. Called with the chip disabled.
 */
static void at91_setup_mailboxes(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg_mid;

	/* Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
	 * mailbox is disabled. The next mailboxes are used as a
	 * reception FIFO. The last of the RX mailboxes is configured with
	 * overwrite option. The overwrite flag indicates a FIFO
	 * overflow.
	 */
	reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
	for (i = 0; i < get_mb_rx_first(priv); i++) {
		set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
		at91_write(priv, AT91_MID(i), reg_mid);
		at91_write(priv, AT91_MCR(i), 0x0); /* clear dlc */
	}

	/* all but the last RX mailbox are plain RX; the last one
	 * overwrites so a full FIFO is detectable via MMI
	 */
	for (i = get_mb_rx_first(priv); i < get_mb_rx_last(priv); i++)
		set_mb_mode(priv, i, AT91_MB_MODE_RX);
	set_mb_mode(priv, get_mb_rx_last(priv), AT91_MB_MODE_RX_OVRWR);

	/* reset acceptance mask and id register */
	for (i = get_mb_rx_first(priv); i <= get_mb_rx_last(priv); i++) {
		at91_write(priv, AT91_MAM(i), 0x0);
		at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
	}

	/* The last mailboxes are used for transmitting. */
	for (i = get_mb_tx_first(priv); i <= get_mb_tx_last(priv); i++)
		set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);

	/* Reset tx and rx helper pointers */
	priv->tx_head = priv->tx_tail = 0;
	priv->rx_next = get_mb_rx_first(priv);
}
381
/* Program the baudrate register from the bit timing computed by the
 * CAN core (all segment values are written minus one). Always
 * returns 0.
 */
static int at91_set_bittiming(struct net_device *dev)
{
	const struct at91_priv *priv = netdev_priv(dev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	u32 reg_br = 0;

	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
		reg_br |= AT91_BR_SMP;

	reg_br |= FIELD_PREP(AT91_BR_BRP_MASK, bt->brp - 1) |
		FIELD_PREP(AT91_BR_SJW_MASK, bt->sjw - 1) |
		FIELD_PREP(AT91_BR_PROPAG_MASK, bt->prop_seg - 1) |
		FIELD_PREP(AT91_BR_PHASE1_MASK, bt->phase_seg1 - 1) |
		FIELD_PREP(AT91_BR_PHASE2_MASK, bt->phase_seg2 - 1);

	netdev_dbg(dev, "writing AT91_BR: 0x%08x\n", reg_br);

	at91_write(priv, AT91_BR, reg_br);

	return 0;
}
403
/* Read the RX/TX error counters from the hardware error counter
 * register into @bec. Always returns 0.
 */
static int at91_get_berr_counter(const struct net_device *dev,
				 struct can_berr_counter *bec)
{
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_ecr = at91_read(priv, AT91_ECR);

	bec->rxerr = FIELD_GET(AT91_ECR_REC_MASK, reg_ecr);
	bec->txerr = FIELD_GET(AT91_ECR_TEC_MASK, reg_ecr);

	return 0;
}
415
99c4a634
DM
/* Bring the controller up: program bit timing and mailboxes with the
 * chip disabled, enable it (listen-only if requested) and unmask the
 * RX and error interrupts.
 */
static void at91_chip_start(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_mr, reg_ier;

	/* disable interrupts */
	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);

	/* disable chip */
	reg_mr = at91_read(priv, AT91_MR);
	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);

	at91_set_bittiming(dev);
	at91_setup_mailboxes(dev);

	/* enable chip */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		reg_mr = AT91_MR_CANEN | AT91_MR_ABM;
	else
		reg_mr = AT91_MR_CANEN;
	at91_write(priv, AT91_MR, reg_mr);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* Dummy read to clear latched line error interrupts on
	 * sam9x5 and newer SoCs.
	 */
	at91_read(priv, AT91_SR);

	/* Enable interrupts */
	reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERR_LINE | AT91_IRQ_ERR_FRAME;
	at91_write(priv, AT91_IER, reg_ier);
}
449
/* Shut the controller down: abort pending TX, mask all interrupts,
 * clear CANEN and record the new CAN state (e.g. STOPPED or BUS_OFF).
 */
static void at91_chip_stop(struct net_device *dev, enum can_state state)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_mr;

	/* Abort any pending TX requests. However this doesn't seem to
	 * work in case of bus-off on sama5d3.
	 */
	at91_write(priv, AT91_ACR, get_irq_mb_tx(priv));

	/* disable interrupts */
	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);

	reg_mr = at91_read(priv, AT91_MR);
	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);

	priv->can.state = state;
}
468
5bbe6049 469/* theory of operation:
99c4a634
DM
470 *
471 * According to the datasheet priority 0 is the highest priority, 15
472 * is the lowest. If two mailboxes have the same priority level the
473 * message of the mailbox with the lowest number is sent first.
474 *
475 * We use the first TX mailbox (AT91_MB_TX_FIRST) with prio 0, then
476 * the next mailbox with prio 0, and so on, until all mailboxes are
477 * used. Then we start from the beginning with mailbox
478 * AT91_MB_TX_FIRST, but with prio 1, mailbox AT91_MB_TX_FIRST + 1
479 * prio 1. When we reach the last mailbox with prio 15, we have to
480 * stop sending, waiting for all messages to be delivered, then start
481 * again with mailbox AT91_MB_TX_FIRST prio 0.
482 *
2f1a01a8 483 * We use the priv->tx_head as counter for the next transmission
99c4a634
DM
484 * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits
485 * encode the mailbox number, the upper 4 bits the mailbox priority:
486 *
2f1a01a8 487 * priv->tx_head = (prio << get_next_prio_shift(priv)) |
d3d47264 488 * (mb - get_mb_tx_first(priv));
99c4a634
DM
489 *
490 */
/* Queue one CAN frame for transmission in the next TX mailbox (see
 * "theory of operation" above for the prio/mailbox rotation). Stops
 * the netif queue on prio wrap-around or when the next mailbox is
 * still busy.
 */
static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	struct can_frame *cf = (struct can_frame *)skb->data;
	unsigned int mb, prio;
	u32 reg_mid, reg_mcr;

	if (can_dev_dropped_skb(dev, skb))
		return NETDEV_TX_OK;

	mb = get_tx_head_mb(priv);
	prio = get_tx_head_prio(priv);

	/* the queue should have been stopped before the mailbox ran dry */
	if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
		netif_stop_queue(dev);

		netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}
	reg_mid = at91_can_id_to_reg_mid(cf->can_id);

	reg_mcr = FIELD_PREP(AT91_MCR_MDLC_MASK, cf->len) |
		AT91_MCR_MTCR;

	if (cf->can_id & CAN_RTR_FLAG)
		reg_mcr |= AT91_MCR_MRTR;

	/* disable MB while writing ID (see datasheet) */
	set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED);
	at91_write(priv, AT91_MID(mb), reg_mid);
	set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio);

	at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0));
	at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4));

	/* This triggers transmission */
	at91_write(priv, AT91_MCR(mb), reg_mcr);

	/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
	can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0);

	/* we have to stop the queue and deliver all messages in case
	 * of a prio+mb counter wrap around. This is the case if
	 * tx_head buffer prio and mailbox equals 0.
	 *
	 * also stop the queue if next buffer is still in use
	 * (== not ready)
	 */
	priv->tx_head++;
	if (!(at91_read(priv, AT91_MSR(get_tx_head_mb(priv))) &
	      AT91_MSR_MRDY) ||
	    (priv->tx_head & get_head_mask(priv)) == 0)
		netif_stop_queue(dev);

	/* Enable interrupt for this mailbox */
	at91_write(priv, AT91_IER, 1 << mb);

	return NETDEV_TX_OK;
}
550
/**
 * at91_activate_rx_low - activate lower rx mailboxes
 * @priv: at91 context
 *
 * Reenables the lower mailboxes for reception of new CAN messages
 */
static inline void at91_activate_rx_low(const struct at91_priv *priv)
{
	u32 mask = get_mb_rx_low_mask(priv);

	/* writing the mailbox mask to TCR re-arms those mailboxes */
	at91_write(priv, AT91_TCR, mask);
}
563
/**
 * at91_activate_rx_mb - reactive single rx mailbox
 * @priv: at91 context
 * @mb: mailbox to reactivate
 *
 * Reenables given mailbox for reception of new CAN messages
 */
static inline void at91_activate_rx_mb(const struct at91_priv *priv,
				       unsigned int mb)
{
	u32 mask = 1 << mb;

	at91_write(priv, AT91_TCR, mask);
}
578
/**
 * at91_rx_overflow_err - send error frame due to rx overflow
 * @dev: net device
 *
 * Accounts the overflow in the rx stats and, if an skb can be
 * allocated, delivers a controller error frame to the stack.
 */
static void at91_rx_overflow_err(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	struct can_frame *cf;

	netdev_dbg(dev, "RX buffer overflow\n");
	stats->rx_over_errors++;
	stats->rx_errors++;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	netif_receive_skb(skb);
}
602
/**
 * at91_read_mb - read CAN msg from mailbox (lowlevel impl)
 * @dev: net device
 * @mb: mailbox number to read from
 * @cf: can frame where to store message
 *
 * Reads a CAN message from the given mailbox and stores data into
 * given can frame. "mb" and "cf" must be valid.
 */
static void at91_read_mb(struct net_device *dev, unsigned int mb,
			 struct can_frame *cf)
{
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_msr, reg_mid;

	/* extended frames carry MIDE and use the full 29-bit field */
	reg_mid = at91_read(priv, AT91_MID(mb));
	if (reg_mid & AT91_MID_MIDE)
		cf->can_id = FIELD_GET(AT91_MID_MIDVA_MASK | AT91_MID_MIDVB_MASK, reg_mid) |
			CAN_EFF_FLAG;
	else
		cf->can_id = FIELD_GET(AT91_MID_MIDVA_MASK, reg_mid);

	reg_msr = at91_read(priv, AT91_MSR(mb));
	cf->len = can_cc_dlc2len(FIELD_GET(AT91_MSR_MDLC_MASK, reg_msr));

	/* RTR frames carry no payload */
	if (reg_msr & AT91_MSR_MRTR) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
		*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
	}

	/* allow RX of extended frames */
	at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);

	/* MMI on the overwrite mailbox indicates a FIFO overflow */
	if (unlikely(mb == get_mb_rx_last(priv) && reg_msr & AT91_MSR_MMI))
		at91_rx_overflow_err(dev);
}
641
/**
 * at91_read_msg - read CAN message from mailbox
 * @dev: net device
 * @mb: mail box to read from
 *
 * Reads a CAN message from given mailbox, and put into linux network
 * RX queue, does all housekeeping chores (stats, ...)
 */
static void at91_read_msg(struct net_device *dev, unsigned int mb)
{
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	skb = alloc_can_skb(dev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return;
	}

	at91_read_mb(dev, mb, cf);

	stats->rx_packets++;
	/* RTR frames carry no data, don't count them as bytes */
	if (!(cf->can_id & CAN_RTR_FLAG))
		stats->rx_bytes += cf->len;

	netif_receive_skb(skb);
}
670
671/**
672 * at91_poll_rx - read multiple CAN messages from mailboxes
673 * @dev: net device
674 * @quota: max number of pkgs we're allowed to receive
675 *
676 * Theory of Operation:
677 *
d3d47264
MKB
678 * About 3/4 of the mailboxes (get_mb_rx_first()...get_mb_rx_last())
679 * on the chip are reserved for RX. We split them into 2 groups. The
680 * lower group ranges from get_mb_rx_first() to get_mb_rx_low_last().
99c4a634
DM
681 *
682 * Like it or not, but the chip always saves a received CAN message
683 * into the first free mailbox it finds (starting with the
684 * lowest). This makes it very difficult to read the messages in the
685 * right order from the chip. This is how we work around that problem:
686 *
9e0a2d1c 687 * The first message goes into mb nr. 1 and issues an interrupt. All
99c4a634 688 * rx ints are disabled in the interrupt handler and a napi poll is
88bfb9a7 689 * scheduled. We read the mailbox, but do _not_ re-enable the mb (to
99c4a634
DM
690 * receive another message).
691 *
692 * lower mbxs upper
9e0a2d1c
MKB
693 * ____^______ __^__
694 * / \ / \
99c4a634 695 * +-+-+-+-+-+-+-+-++-+-+-+-+
9e0a2d1c 696 * | |x|x|x|x|x|x|x|| | | | |
99c4a634
DM
697 * +-+-+-+-+-+-+-+-++-+-+-+-+
698 * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail
699 * 0 1 2 3 4 5 6 7 8 9 0 1 / box
9e0a2d1c
MKB
700 * ^
701 * |
702 * \
703 * unused, due to chip bug
99c4a634
DM
704 *
705 * The variable priv->rx_next points to the next mailbox to read a
706 * message from. As long we're in the lower mailboxes we just read the
88bfb9a7 707 * mailbox but not re-enable it.
99c4a634 708 *
88bfb9a7 709 * With completion of the last of the lower mailboxes, we re-enable the
99c4a634
DM
710 * whole first group, but continue to look for filled mailboxes in the
711 * upper mailboxes. Imagine the second group like overflow mailboxes,
712 * which takes CAN messages if the lower goup is full. While in the
88bfb9a7 713 * upper group we re-enable the mailbox right after reading it. Giving
99c4a634
DM
714 * the chip more room to store messages.
715 *
716 * After finishing we look again in the lower group if we've still
717 * quota.
718 *
719 */
/* Read up to @quota frames from the RX mailboxes in FIFO order (see
 * the "Theory of Operation" comment above). Returns the number of
 * frames delivered to the stack.
 */
static int at91_poll_rx(struct net_device *dev, int quota)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_sr = at91_read(priv, AT91_SR);
	const unsigned long *addr = (unsigned long *)&reg_sr;
	unsigned int mb;
	int received = 0;

	/* lower group filled up again before we finished the upper one */
	if (priv->rx_next > get_mb_rx_low_last(priv) &&
	    reg_sr & get_mb_rx_low_mask(priv))
		netdev_info(dev,
			    "order of incoming frames cannot be guaranteed\n");

 again:
	/* iterate over the pending RX mailboxes starting at rx_next */
	for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next);
	     mb < get_mb_tx_first(priv) && quota > 0;
	     reg_sr = at91_read(priv, AT91_SR),
	     mb = find_next_bit(addr, get_mb_tx_first(priv), ++priv->rx_next)) {
		at91_read_msg(dev, mb);

		/* reactivate mailboxes */
		if (mb == get_mb_rx_low_last(priv))
			/* all lower mailboxed, if just finished it */
			at91_activate_rx_low(priv);
		else if (mb > get_mb_rx_low_last(priv))
			/* only the mailbox we read */
			at91_activate_rx_mb(priv, mb);

		received++;
		quota--;
	}

	/* upper group completed, look again in lower */
	if (priv->rx_next > get_mb_rx_low_last(priv) &&
	    mb > get_mb_rx_last(priv)) {
		priv->rx_next = get_mb_rx_first(priv);
		if (quota > 0)
			goto again;
	}

	return received;
}
762
99c4a634
DM
/* NAPI poll callback: drain RX mailboxes, then re-enable the RX and
 * frame error interrupts once below the quota.
 */
static int at91_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_sr = at91_read(priv, AT91_SR);
	int work_done = 0;

	if (reg_sr & get_irq_mb_rx(priv))
		work_done += at91_poll_rx(dev, quota - work_done);

	if (work_done < quota) {
		/* enable IRQs for frame errors and all mailboxes >= rx_next */
		u32 reg_ier = AT91_IRQ_ERR_FRAME;

		reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);

		napi_complete_done(napi, work_done);
		at91_write(priv, AT91_IER, reg_ier);
	}

	return work_done;
}
785
5bbe6049 786/* theory of operation:
99c4a634 787 *
2f1a01a8 788 * priv->tx_tail holds the number of the oldest can_frame put for
99c4a634
DM
789 * transmission into the hardware, but not yet ACKed by the CAN tx
790 * complete IRQ.
791 *
2f1a01a8 792 * We iterate from priv->tx_tail to priv->tx_head and check if the
99c4a634
DM
793 * packet has been transmitted, echo it back to the CAN framework. If
794 * we discover a not yet transmitted package, stop looking for more.
795 *
796 */
/* TX completion handler: walk from tx_tail to tx_head, echo completed
 * frames back to the stack and wake the queue when safe (see "theory
 * of operation" above).
 */
static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_msr;
	unsigned int mb;

	/* masking of reg_sr not needed, already done by at91_irq */

	for (/* nix */; (priv->tx_head - priv->tx_tail) > 0; priv->tx_tail++) {
		mb = get_tx_tail_mb(priv);

		/* no event in mailbox? */
		if (!(reg_sr & (1 << mb)))
			break;

		/* Disable irq for this TX mailbox */
		at91_write(priv, AT91_IDR, 1 << mb);

		/* only echo if mailbox signals us a transfer
		 * complete (MSR_MRDY). Otherwise it's a transfer
		 * abort. "can_bus_off()" takes care about the skbs
		 * parked in the echo queue.
		 */
		reg_msr = at91_read(priv, AT91_MSR(mb));
		if (unlikely(!(reg_msr & AT91_MSR_MRDY &&
			       ~reg_msr & AT91_MSR_MABT)))
			continue;

		/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
		dev->stats.tx_bytes +=
			can_get_echo_skb(dev, mb - get_mb_tx_first(priv), NULL);
		dev->stats.tx_packets++;
	}

	/* restart queue if we don't have a wrap around but restart if
	 * we get a TX int for the last can frame directly before a
	 * wrap around.
	 */
	if ((priv->tx_head & get_head_mask(priv)) != 0 ||
	    (priv->tx_tail & get_head_mask(priv)) == 0)
		netif_wake_queue(dev);
}
839
/* Handle CAN line-state changes: derive the new state from the error
 * counters (plus latched bus-off info in SR), let can_change_state()
 * do the bookkeeping and trigger bus-off handling if needed.
 */
static void at91_irq_err_line(struct net_device *dev, const u32 reg_sr)
{
	enum can_state new_state, rx_state, tx_state;
	struct at91_priv *priv = netdev_priv(dev);
	struct can_berr_counter bec;
	struct sk_buff *skb;
	struct can_frame *cf;

	at91_get_berr_counter(dev, &bec);
	can_state_get_by_berr_counter(dev, &bec, &tx_state, &rx_state);

	/* The chip automatically recovers from bus-off after 128
	 * occurrences of 11 consecutive recessive bits.
	 *
	 * After an auto-recovered bus-off, the error counters no
	 * longer reflect this fact. On the sam9263 the state bits in
	 * the SR register show the current state (based on the
	 * current error counters), while on sam9x5 and newer SoCs
	 * these bits are latched.
	 *
	 * Take any latched bus-off information from the SR register
	 * into account when calculating the CAN new state, to start
	 * the standard CAN bus off handling.
	 */
	if (reg_sr & AT91_IRQ_BOFF)
		rx_state = CAN_STATE_BUS_OFF;

	new_state = max(tx_state, rx_state);

	/* state hasn't changed */
	if (likely(new_state == priv->can.state))
		return;

	/* The skb allocation might fail, but can_change_state()
	 * handles cf == NULL.
	 */
	skb = alloc_can_err_skb(dev, &cf);
	can_change_state(dev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		at91_chip_stop(dev, CAN_STATE_BUS_OFF);
		can_bus_off(dev);
	}

	if (unlikely(!skb))
		return;

	netif_rx(skb);
}
890
e0c9db91
MKB
/* Handle frame-level bus errors (CRC/stuff/ack/form/bit): update the
 * error stats and, when an skb is available, deliver a matching CAN
 * error frame. Stats are updated even if the allocation fails.
 */
static void at91_irq_err_frame(struct net_device *dev, const u32 reg_sr)
{
	struct net_device_stats *stats = &dev->stats;
	struct at91_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	struct can_frame *cf = NULL;

	priv->can.can_stats.bus_error++;

	skb = alloc_can_err_skb(dev, &cf);
	if (cf)
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	if (reg_sr & AT91_IRQ_CERR) {
		netdev_dbg(dev, "CRC error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
	}

	if (reg_sr & AT91_IRQ_SERR) {
		netdev_dbg(dev, "Stuff error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
	}

	if (reg_sr & AT91_IRQ_AERR) {
		netdev_dbg(dev, "NACK error\n");

		stats->tx_errors++;
		if (cf) {
			cf->can_id |= CAN_ERR_ACK;
			cf->data[2] |= CAN_ERR_PROT_TX;
		}
	}

	if (reg_sr & AT91_IRQ_FERR) {
		netdev_dbg(dev, "Format error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_FORM;
	}

	if (reg_sr & AT91_IRQ_BERR) {
		netdev_dbg(dev, "Bit error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT;
	}

	if (!cf)
		return;

	netif_receive_skb(skb);
}
951
/* interrupt handler: demultiplex the status register into RX (napi),
 * TX completion, line error and frame error handling.
 */
static irqreturn_t at91_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct at91_priv *priv = netdev_priv(dev);
	irqreturn_t handled = IRQ_NONE;
	u32 reg_sr, reg_imr;

	reg_sr = at91_read(priv, AT91_SR);
	reg_imr = at91_read(priv, AT91_IMR);

	/* Ignore masked interrupts */
	reg_sr &= reg_imr;
	if (!reg_sr)
		goto exit;

	handled = IRQ_HANDLED;

	/* Receive interrupt? -> napi */
	if (reg_sr & get_irq_mb_rx(priv)) {
		/* mask RX irqs until the napi poll re-enables them */
		at91_write(priv, AT91_IDR,
			   get_irq_mb_rx(priv));
		napi_schedule(&priv->napi);
	}

	/* Transmission complete interrupt */
	if (reg_sr & get_irq_mb_tx(priv))
		at91_irq_tx(dev, reg_sr);

	/* Line Error interrupt: also re-evaluate the state whenever we
	 * are not error active, so auto-recovery is picked up
	 */
	if (reg_sr & AT91_IRQ_ERR_LINE ||
	    priv->can.state > CAN_STATE_ERROR_ACTIVE) {
		at91_irq_err_line(dev, reg_sr);
	}

	/* Frame Error Interrupt */
	if (reg_sr & AT91_IRQ_ERR_FRAME)
		at91_irq_err_frame(dev, reg_sr);

 exit:
	return handled;
}
995
/* at91_open - open the CAN interface
 *
 * Powers on the optional transceiver phy, opens the candev (checks and
 * sets bit timing), enables the clock, requests the IRQ and finally
 * starts the chip and queueing. On failure the already acquired
 * resources are released in reverse order via the goto chain.
 */
static int at91_open(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	int err;

	/* phy_power_on() is a no-op if no transceiver phy is present */
	err = phy_power_on(priv->transceiver);
	if (err)
		return err;

	/* check or determine and set bittime */
	err = open_candev(dev);
	if (err)
		goto out_phy_power_off;

	err = clk_prepare_enable(priv->clk);
	if (err)
		goto out_close_candev;

	/* register interrupt handler */
	err = request_irq(dev->irq, at91_irq, IRQF_SHARED,
			  dev->name, dev);
	if (err)
		goto out_clock_disable_unprepare;

	/* start chip and queuing */
	at91_chip_start(dev);
	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

 out_clock_disable_unprepare:
	clk_disable_unprepare(priv->clk);
 out_close_candev:
	close_candev(dev);
 out_phy_power_off:
	phy_power_off(priv->transceiver);

	return err;
}
1036
/* stop CAN bus activity
 *
 * Exact reverse of at91_open(): stop queueing and NAPI, stop the chip,
 * free the IRQ, gate the clock, power off the transceiver and close
 * the candev.
 */
static int at91_close(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	at91_chip_stop(dev, CAN_STATE_STOPPED);

	free_irq(dev->irq, dev);
	clk_disable_unprepare(priv->clk);
	phy_power_off(priv->transceiver);

	close_candev(dev);

	return 0;
}
1055
1056static int at91_set_mode(struct net_device *dev, enum can_mode mode)
1057{
1058 switch (mode) {
1059 case CAN_MODE_START:
1060 at91_chip_start(dev);
1061 netif_wake_queue(dev);
1062 break;
1063
1064 default:
1065 return -EOPNOTSUPP;
1066 }
1067
1068 return 0;
1069}
1070
/* net_device callbacks for the AT91 CAN controller */
static const struct net_device_ops at91_netdev_ops = {
	.ndo_open = at91_open,
	.ndo_stop = at91_close,
	.ndo_start_xmit = at91_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
1077
409c188c
VM
/* ethtool callbacks: only generic (software) timestamp info is provided */
static const struct ethtool_ops at91_ethtool_ops = {
	.get_ts_info = ethtool_op_get_ts_info,
};
1081
42b9fd6e
ZL
1082static ssize_t mb0_id_show(struct device *dev,
1083 struct device_attribute *attr, char *buf)
3a5655a5
MKB
1084{
1085 struct at91_priv *priv = netdev_priv(to_net_dev(dev));
1086
1087 if (priv->mb0_id & CAN_EFF_FLAG)
7bc9ab0f 1088 return sysfs_emit(buf, "0x%08x\n", priv->mb0_id);
3a5655a5 1089 else
7bc9ab0f 1090 return sysfs_emit(buf, "0x%03x\n", priv->mb0_id);
3a5655a5
MKB
1091}
1092
42b9fd6e 1093static ssize_t mb0_id_store(struct device *dev,
ccc5f1c9
PL
1094 struct device_attribute *attr,
1095 const char *buf, size_t count)
3a5655a5
MKB
1096{
1097 struct net_device *ndev = to_net_dev(dev);
1098 struct at91_priv *priv = netdev_priv(ndev);
1099 unsigned long can_id;
1100 ssize_t ret;
1101 int err;
1102
1103 rtnl_lock();
1104
1105 if (ndev->flags & IFF_UP) {
1106 ret = -EBUSY;
1107 goto out;
1108 }
1109
0672f0ab 1110 err = kstrtoul(buf, 0, &can_id);
3a5655a5
MKB
1111 if (err) {
1112 ret = err;
1113 goto out;
1114 }
1115
1116 if (can_id & CAN_EFF_FLAG)
1117 can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
1118 else
1119 can_id &= CAN_SFF_MASK;
1120
1121 priv->mb0_id = can_id;
1122 ret = count;
1123
1124 out:
1125 rtnl_unlock();
1126 return ret;
1127}
1128
/* sysfs glue: expose the mb0_id attribute (SAM9263 only, see probe) */
static DEVICE_ATTR_RW(mb0_id);

static struct attribute *at91_sysfs_attrs[] = {
	&dev_attr_mb0_id.attr,
	NULL,
};

static const struct attribute_group at91_sysfs_attr_group = {
	.attrs = at91_sysfs_attrs,
};
1139
3078cde7
LD
/* devicetree match table; .data points at the per-SoC devtype data */
#if defined(CONFIG_OF)
static const struct of_device_id at91_can_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9x5-can",
		.data = &at91_at91sam9x5_data,
	}, {
		.compatible = "atmel,at91sam9263-can",
		.data = &at91_at91sam9263_data,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, at91_can_dt_ids);
#endif
1154
1155static const struct at91_devtype_data *at91_can_get_driver_data(struct platform_device *pdev)
1156{
1157 if (pdev->dev.of_node) {
1158 const struct of_device_id *match;
1159
1160 match = of_match_node(at91_can_dt_ids, pdev->dev.of_node);
1161 if (!match) {
1162 dev_err(&pdev->dev, "no matching node found in dtb\n");
1163 return NULL;
1164 }
1165 return (const struct at91_devtype_data *)match->data;
1166 }
1167 return (const struct at91_devtype_data *)
1168 platform_get_device_id(pdev)->driver_data;
1169}
1170
3c8ac0f2 1171static int at91_can_probe(struct platform_device *pdev)
99c4a634 1172{
d3d47264 1173 const struct at91_devtype_data *devtype_data;
3ecc0985 1174 struct phy *transceiver;
99c4a634
DM
1175 struct net_device *dev;
1176 struct at91_priv *priv;
1177 struct resource *res;
1178 struct clk *clk;
1179 void __iomem *addr;
1180 int err, irq;
1181
3078cde7
LD
1182 devtype_data = at91_can_get_driver_data(pdev);
1183 if (!devtype_data) {
1184 dev_err(&pdev->dev, "no driver data\n");
1185 err = -ENODEV;
1186 goto exit;
1187 }
d3d47264 1188
99c4a634
DM
1189 clk = clk_get(&pdev->dev, "can_clk");
1190 if (IS_ERR(clk)) {
1191 dev_err(&pdev->dev, "no clock defined\n");
1192 err = -ENODEV;
1193 goto exit;
1194 }
1195
1196 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1197 irq = platform_get_irq(pdev, 0);
4773a47d 1198 if (!res || irq <= 0) {
99c4a634
DM
1199 err = -ENODEV;
1200 goto exit_put;
1201 }
1202
1203 if (!request_mem_region(res->start,
1204 resource_size(res),
1205 pdev->name)) {
1206 err = -EBUSY;
1207 goto exit_put;
1208 }
1209
4bdc0d67 1210 addr = ioremap(res->start, resource_size(res));
99c4a634
DM
1211 if (!addr) {
1212 err = -ENOMEM;
1213 goto exit_release;
1214 }
1215
d3d47264
MKB
1216 dev = alloc_candev(sizeof(struct at91_priv),
1217 1 << devtype_data->tx_shift);
99c4a634
DM
1218 if (!dev) {
1219 err = -ENOMEM;
1220 goto exit_iounmap;
1221 }
1222
3ecc0985
MKB
1223 transceiver = devm_phy_optional_get(&pdev->dev, NULL);
1224 if (IS_ERR(transceiver)) {
1225 err = PTR_ERR(transceiver);
1226 dev_err_probe(&pdev->dev, err, "failed to get phy\n");
1227 goto exit_iounmap;
1228 }
1229
99c4a634 1230 dev->netdev_ops = &at91_netdev_ops;
409c188c 1231 dev->ethtool_ops = &at91_ethtool_ops;
99c4a634
DM
1232 dev->irq = irq;
1233 dev->flags |= IFF_ECHO;
1234
1235 priv = netdev_priv(dev);
1236 priv->can.clock.freq = clk_get_rate(clk);
1237 priv->can.bittiming_const = &at91_bittiming_const;
99c4a634 1238 priv->can.do_set_mode = at91_set_mode;
33a6f298 1239 priv->can.do_get_berr_counter = at91_get_berr_counter;
17a50ee4
YDR
1240 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
1241 CAN_CTRLMODE_LISTENONLY;
d3d47264
MKB
1242 priv->reg_base = addr;
1243 priv->devtype_data = *devtype_data;
99c4a634 1244 priv->clk = clk;
6cbdb918 1245 priv->pdata = dev_get_platdata(&pdev->dev);
3a5655a5 1246 priv->mb0_id = 0x7ff;
99c4a634 1247
caf6b7f8 1248 netif_napi_add_weight(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
99c4a634 1249
3ecc0985
MKB
1250 if (transceiver)
1251 priv->can.bitrate_max = transceiver->attrs.max_link_rate;
1252
07a648e6
MKB
1253 if (at91_is_sam9263(priv))
1254 dev->sysfs_groups[0] = &at91_sysfs_attr_group;
1255
40f7e0dd 1256 platform_set_drvdata(pdev, dev);
99c4a634
DM
1257 SET_NETDEV_DEV(dev, &pdev->dev);
1258
1259 err = register_candev(dev);
1260 if (err) {
1261 dev_err(&pdev->dev, "registering netdev failed\n");
1262 goto exit_free;
1263 }
1264
1265 dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
1266 priv->reg_base, dev->irq);
1267
1268 return 0;
1269
1270 exit_free:
759a6c76 1271 free_candev(dev);
99c4a634
DM
1272 exit_iounmap:
1273 iounmap(addr);
1274 exit_release:
1275 release_mem_region(res->start, resource_size(res));
1276 exit_put:
1277 clk_put(clk);
1278 exit:
1279 return err;
1280}
1281
/* at91_can_remove - unregister the device and release its resources
 *
 * Mirrors at91_can_probe() in reverse order; the transceiver phy is
 * devm-managed and released automatically.
 */
static void at91_can_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct at91_priv *priv = netdev_priv(dev);
	struct resource *res;

	unregister_netdev(dev);

	iounmap(priv->reg_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	clk_put(priv->clk);

	free_candev(dev);
}
1299
d3d47264
MKB
/* legacy (non-DT) platform device ids; driver_data selects devtype data */
static const struct platform_device_id at91_can_id_table[] = {
	{
		.name = "at91sam9x5_can",
		.driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
	}, {
		.name = "at91_can",
		.driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, at91_can_id_table);
d3d47264 1312
/* platform driver glue and module metadata */
static struct platform_driver at91_can_driver = {
	.probe = at91_can_probe,
	.remove_new = at91_can_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = of_match_ptr(at91_can_dt_ids),
	},
	.id_table = at91_can_id_table,
};

module_platform_driver(at91_can_driver);

MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");