// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014 David Jander, Protonic Holland
 * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
 */
#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
struct can_rx_offload_cb {
	u32 timestamp;
};
static inline struct can_rx_offload_cb *
can_rx_offload_get_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

	return (struct can_rx_offload_cb *)skb->cb;
}
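
/* Drivers scan their mailboxes either upwards (mb_first < mb_last) or
 * downwards; offload->inc records the direction. The two helpers below
 * provide a direction-agnostic "<=" and "++" for the mailbox scan loop
 * in can_rx_offload_irq_offload_timestamp().
 */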
static inline bool
can_rx_offload_le(struct can_rx_offload *offload,
		  unsigned int a, unsigned int b)
{
	if (offload->inc)
		return a <= b;
	else
		return a >= b;
}
static inline unsigned int
can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
	if (offload->inc)
		return (*val)++;
	else
		return (*val)--;
}
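
/* NAPI poll handler: hand up to @quota skbs from the offload queue to
 * the network stack. If the queue ran empty before the quota was
 * exhausted, NAPI is completed; the queue is then re-checked to close
 * the race against an interrupt that queued more skbs in between.
 */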
static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
	struct can_rx_offload *offload = container_of(napi,
						      struct can_rx_offload,
						      napi);
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	int work_done = 0;

	while ((work_done < quota) &&
	       (skb = skb_dequeue(&offload->skb_queue))) {
		struct can_frame *cf = (struct can_frame *)skb->data;

		work_done++;
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);

		/* Check if there was another interrupt */
		if (!skb_queue_empty(&offload->skb_queue))
			napi_reschedule(&offload->napi);
	}

	can_led_event(offload->dev, CAN_LED_EVENT_RX);

	return work_done;
}
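
/* Insert @new into @head, keeping the queue sorted according to
 * @compare. The walk starts at the tail because frames are typically
 * offloaded in (mostly) ascending timestamp order, so the common case
 * terminates after a single comparison.
 */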
static inline void
__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
		     int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
	struct sk_buff *pos, *insert = NULL;

	skb_queue_reverse_walk(head, pos) {
		const struct can_rx_offload_cb *cb_pos, *cb_new;

		cb_pos = can_rx_offload_get_cb(pos);
		cb_new = can_rx_offload_get_cb(new);

		netdev_dbg(new->dev,
			   "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
			   __func__,
			   cb_pos->timestamp, cb_new->timestamp,
			   cb_new->timestamp - cb_pos->timestamp,
			   skb_queue_len(head));

		if (compare(pos, new) < 0)
			continue;
		insert = pos;
		break;
	}
	if (!insert)
		__skb_queue_head(head, new);
	else
		__skb_queue_after(head, insert, new);
}
static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
	const struct can_rx_offload_cb *cb_a, *cb_b;

	cb_a = can_rx_offload_get_cb(a);
	cb_b = can_rx_offload_get_cb(b);

	/* Subtract two u32 and return result as int, to keep
	 * difference steady around the u32 overflow.
	 */
	return cb_b->timestamp - cb_a->timestamp;
}
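
/* Worked example for the wraparound behavior above: with
 * cb_b->timestamp == 0x00000001 and cb_a->timestamp == 0xffffffff the
 * u32 subtraction yields 0x00000002, i.e. +2 as an int, so a frame
 * stamped just after the counter wrapped still sorts as newer than one
 * stamped just before the wrap.
 */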
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
	struct sk_buff *skb = NULL;
	struct can_rx_offload_cb *cb;
	struct can_frame *cf;
	int ret;

	/* If queue is full or skb not available, read to discard mailbox */
	if (likely(skb_queue_len(&offload->skb_queue) <=
		   offload->skb_queue_len_max))
		skb = alloc_can_skb(offload->dev, &cf);

	if (!skb) {
		struct can_frame cf_overflow;
		u32 timestamp;

		ret = offload->mailbox_read(offload, &cf_overflow,
					    &timestamp, n);
		if (ret)
			offload->dev->stats.rx_dropped++;

		return NULL;
	}

	cb = can_rx_offload_get_cb(skb);
	ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
	if (!ret) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}
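
/* IRQ-side entry point for timestamp-capable controllers: @pending is
 * the bitmask of mailboxes holding a received frame. The frames are
 * read into a temporary queue, sorted by timestamp, spliced onto the
 * main queue under the queue lock, and NAPI is scheduled.
 */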
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
{
	struct sk_buff_head skb_queue;
	unsigned int i;

	__skb_queue_head_init(&skb_queue);

	for (i = offload->mb_first;
	     can_rx_offload_le(offload, i, offload->mb_last);
	     can_rx_offload_inc(offload, &i)) {
		struct sk_buff *skb;

		if (!(pending & BIT_ULL(i)))
			continue;

		skb = can_rx_offload_offload_one(offload, i);
		if (!skb)
			break;

		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
	}

	if (!skb_queue_empty(&skb_queue)) {
		unsigned long flags;
		u32 queue_len;

		spin_lock_irqsave(&offload->skb_queue.lock, flags);
		skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
		spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

		queue_len = skb_queue_len(&offload->skb_queue);
		if (queue_len > (offload->skb_queue_len_max / 8))
			netdev_dbg(offload->dev, "%s: queue_len=%d\n",
				   __func__, queue_len);

		can_rx_offload_schedule(offload);
	}

	return skb_queue_len(&skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
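
/* Usage sketch (hypothetical driver, names invented for illustration):
 * the driver fills in mailbox_read, mb_first and mb_last, registers at
 * probe time and forwards the pending-mailbox bitmask from its IRQ
 * handler:
 *
 *	priv->offload.mailbox_read = foo_mailbox_read;
 *	priv->offload.mb_first = 8;
 *	priv->offload.mb_last = 63;
 *	err = can_rx_offload_add_timestamp(dev, &priv->offload);
 *
 *	and in the IRQ handler, after reading the pending bits:
 *	can_rx_offload_irq_offload_timestamp(&priv->offload, pending);
 */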
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
	struct sk_buff *skb;
	int received = 0;

	while ((skb = can_rx_offload_offload_one(offload, 0))) {
		skb_queue_tail(&offload->skb_queue, skb);
		received++;
	}

	if (received)
		can_rx_offload_schedule(offload);

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
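
/* Queue an skb from IRQ context (e.g. an error frame) into the sorted
 * queue by @timestamp, so it is delivered in order relative to the
 * offloaded RX frames. Returns -ENOBUFS if the queue is full.
 */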
int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
				struct sk_buff *skb, u32 timestamp)
{
	struct can_rx_offload_cb *cb;
	unsigned long flags;

	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max)
		return -ENOBUFS;

	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	__skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
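
/* Fetch the echo skb of a completed TX frame and feed it into the
 * sorted queue, so locally sent frames are delivered in timestamp
 * order relative to received ones. Returns the echoed frame length,
 * or 0 if no echo skb was stored at @idx.
 */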
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
					 unsigned int idx, u32 timestamp)
{
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	u8 len;
	int err;

	skb = __can_get_echo_skb(dev, idx, &len);
	if (!skb)
		return 0;

	err = can_rx_offload_queue_sorted(offload, skb, timestamp);
	if (err) {
		stats->rx_errors++;
		stats->tx_fifo_errors++;
	}

	return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
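
/* Unsorted counterpart of can_rx_offload_queue_sorted() for skbs that
 * carry no usable timestamp; the skb is simply appended to the tail.
 */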
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
			      struct sk_buff *skb)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max)
		return -ENOBUFS;

	skb_queue_tail(&offload->skb_queue, skb);
	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
static int can_rx_offload_init_queue(struct net_device *dev,
				     struct can_rx_offload *offload,
				     unsigned int weight)
{
	offload->dev = dev;

	/* Limit queue len to 4x the weight (rounded to next power of two) */
	offload->skb_queue_len_max = 2 << fls(weight);
	offload->skb_queue_len_max *= 4;
	skb_queue_head_init(&offload->skb_queue);

	can_rx_offload_reset(offload);
	netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);

	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
		__func__, offload->skb_queue_len_max);

	return 0;
}
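
/* Sizing example for the calculation above: weight == 64 gives
 * fls(64) == 7, so skb_queue_len_max == (2 << 7) * 4 == 1024 skbs;
 * beyond that can_rx_offload_offload_one() reads mailboxes only to
 * discard them.
 */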
int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
{
	unsigned int weight;

	if (offload->mb_first > BITS_PER_LONG_LONG ||
	    offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
		return -EINVAL;

	if (offload->mb_first < offload->mb_last) {
		offload->inc = true;
		weight = offload->mb_last - offload->mb_first;
	} else {
		offload->inc = false;
		weight = offload->mb_first - offload->mb_last;
	}

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
	if (!offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
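
/* Usage sketch for FIFO mode (hypothetical names): only mailbox_read
 * and a NAPI weight are needed; the mailbox index passed to
 * mailbox_read is always 0:
 *
 *	priv->offload.mailbox_read = foo_fifo_read;
 *	err = can_rx_offload_add_fifo(dev, &priv->offload, 32);
 *
 *	and from the IRQ handler:
 *	can_rx_offload_irq_offload_fifo(&priv->offload);
 */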
void can_rx_offload_enable(struct can_rx_offload *offload)
{
	can_rx_offload_reset(offload);
	napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);
void can_rx_offload_del(struct can_rx_offload *offload)
{
	netif_napi_del(&offload->napi);
	skb_queue_purge(&offload->skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);
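
/* Intentionally left empty: kept as a hook called from enable and
 * queue init; there is currently no per-offload state to reset.
 */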
void can_rx_offload_reset(struct can_rx_offload *offload)
{
}
EXPORT_SYMBOL_GPL(can_rx_offload_reset);