// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014 Protonic Holland,
 *                    David Jander
 * Copyright (C) 2014-2021 Pengutronix,
 *                         Marc Kleine-Budde <kernel@pengutronix.de>
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

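/* Per-skb control block: stores the hardware RX timestamp that is
 * later used to sort frames before they are handed to the stack.
 */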
struct can_rx_offload_cb {
	u32 timestamp;
};

static inline struct can_rx_offload_cb *
can_rx_offload_get_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

	return (struct can_rx_offload_cb *)skb->cb;
}

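/* Helpers to walk the mailbox range in either direction: if
 * offload->inc is set, the indices increase from mb_first to mb_last,
 * otherwise they decrease.
 */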
static inline bool
can_rx_offload_le(struct can_rx_offload *offload,
		  unsigned int a, unsigned int b)
{
	if (offload->inc)
		return a <= b;
	else
		return a >= b;
}

static inline unsigned int
can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
	if (offload->inc)
		return (*val)++;
	else
		return (*val)--;
}

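/* NAPI poll handler: dequeue up to @quota skbs from the offload
 * queue, account RX statistics for data frames and hand the skbs to
 * the networking stack. Error frames (CAN_ERR_FLAG) are delivered but
 * not counted as received packets.
 */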
static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
	struct can_rx_offload *offload = container_of(napi,
						      struct can_rx_offload,
						      napi);
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	int work_done = 0;

	while ((work_done < quota) &&
	       (skb = skb_dequeue(&offload->skb_queue))) {
		struct can_frame *cf = (struct can_frame *)skb->data;

		work_done++;
		if (!(cf->can_id & CAN_ERR_FLAG)) {
			stats->rx_packets++;
			if (!(cf->can_id & CAN_RTR_FLAG))
				stats->rx_bytes += cf->len;
		}
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);

		/* Check if there was another interrupt */
		if (!skb_queue_empty(&offload->skb_queue))
			napi_reschedule(&offload->napi);
	}

	return work_done;
}

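/* Insert @new into @head sorted by @compare. The queue is walked from
 * the tail, as new frames usually carry the most recent timestamps, so
 * the common case terminates after a single comparison.
 */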
static inline void
__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
		     int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
	struct sk_buff *pos, *insert = NULL;

	skb_queue_reverse_walk(head, pos) {
		const struct can_rx_offload_cb *cb_pos, *cb_new;

		cb_pos = can_rx_offload_get_cb(pos);
		cb_new = can_rx_offload_get_cb(new);

		netdev_dbg(new->dev,
			   "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
			   __func__,
			   cb_pos->timestamp, cb_new->timestamp,
			   cb_new->timestamp - cb_pos->timestamp,
			   skb_queue_len(head));

		if (compare(pos, new) < 0)
			continue;
		insert = pos;
		break;
	}
	if (!insert)
		__skb_queue_head(head, new);
	else
		__skb_queue_after(head, insert, new);
}

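/* Order two skbs by the timestamp stored in their control blocks. */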
static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
	const struct can_rx_offload_cb *cb_a, *cb_b;

	cb_a = can_rx_offload_get_cb(a);
	cb_b = can_rx_offload_get_cb(b);

	/* Subtract two u32 and return result as int, to keep
	 * difference steady around the u32 overflow.
	 */
	return cb_b->timestamp - cb_a->timestamp;
}

/**
 * can_rx_offload_offload_one() - Read one CAN frame from HW
 * @offload: pointer to rx_offload context
 * @n: number of mailbox to read
 *
 * The task of this function is to read a CAN frame from mailbox @n
 * from the device and return the mailbox's content as a struct
 * sk_buff.
 *
 * If the struct can_rx_offload::skb_queue exceeds the maximal queue
 * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
 * allocated, the mailbox contents are discarded by reading them into
 * an overflow buffer. This way the mailbox is marked as free by the
 * driver.
 *
 * Return: A pointer to skb containing the CAN frame on success.
 *
 *         NULL if the mailbox @n is empty.
 *
 *         ERR_PTR() in case of an error
 */
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
	struct sk_buff *skb;
	struct can_rx_offload_cb *cb;
	bool drop = false;
	u32 timestamp;

	/* If queue is full drop frame */
	if (unlikely(skb_queue_len(&offload->skb_queue) >
		     offload->skb_queue_len_max))
		drop = true;

	skb = offload->mailbox_read(offload, n, &timestamp, drop);
	/* Mailbox was empty. */
	if (unlikely(!skb))
		return NULL;

	/* There was a problem reading the mailbox, propagate
	 * error value.
	 */
	if (IS_ERR(skb)) {
		offload->dev->stats.rx_dropped++;
		offload->dev->stats.rx_fifo_errors++;

		return skb;
	}

	/* Mailbox was read. */
	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	return skb;
}

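/* Read all mailboxes marked in @pending, in ascending or descending
 * order depending on offload->inc, and add the resulting skbs to the
 * IRQ queue sorted by timestamp. Returns the number of frames read.
 */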
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
					 u64 pending)
{
	unsigned int i;
	int received = 0;

	for (i = offload->mb_first;
	     can_rx_offload_le(offload, i, offload->mb_last);
	     can_rx_offload_inc(offload, &i)) {
		struct sk_buff *skb;

		if (!(pending & BIT_ULL(i)))
			continue;

		skb = can_rx_offload_offload_one(offload, i);
		if (IS_ERR_OR_NULL(skb))
			continue;

		__skb_queue_add_sort(&offload->skb_irq_queue, skb,
				     can_rx_offload_compare);
		received++;
	}

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);

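/* Drain the hardware RX FIFO (mailbox 0) until it is empty and append
 * the frames to the IRQ queue in hardware order. Returns the number of
 * frames read.
 */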
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
	struct sk_buff *skb;
	int received = 0;

	while (1) {
		skb = can_rx_offload_offload_one(offload, 0);
		if (IS_ERR(skb))
			continue;
		if (!skb)
			break;

		__skb_queue_tail(&offload->skb_irq_queue, skb);
		received++;
	}

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);

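/* Queue a driver-supplied skb (e.g. an error frame) into the IRQ
 * queue, sorted by @timestamp. The skb is dropped and -ENOBUFS is
 * returned if the offload queue has already exceeded its limit.
 */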
int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
				   struct sk_buff *skb, u32 timestamp)
{
	struct can_rx_offload_cb *cb;

	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		dev_kfree_skb_any(skb);
		return -ENOBUFS;
	}

	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	__skb_queue_add_sort(&offload->skb_irq_queue, skb,
			     can_rx_offload_compare);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp);

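/* Fetch the echo skb of a completed TX frame and queue it sorted by
 * @timestamp, so locally sent frames are delivered in order with
 * received ones. Returns the data length of the echoed frame, or 0 if
 * no echo skb was stored for @idx.
 */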
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
					 unsigned int idx, u32 timestamp,
					 unsigned int *frame_len_ptr)
{
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	unsigned int len;
	int err;

	skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
	if (!skb)
		return 0;

	err = can_rx_offload_queue_timestamp(offload, skb, timestamp);
	if (err) {
		stats->rx_errors++;
		stats->tx_fifo_errors++;
	}

	return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);

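/* Append an skb to the tail of the IRQ queue without timestamp
 * sorting. Drops the skb and returns -ENOBUFS if the offload queue has
 * already exceeded its limit.
 */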
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
			      struct sk_buff *skb)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		dev_kfree_skb_any(skb);
		return -ENOBUFS;
	}

	__skb_queue_tail(&offload->skb_irq_queue, skb);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);

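/* To be called at the end of the hard IRQ handler: splice the IRQ
 * queue into the NAPI queue under the queue lock and schedule NAPI to
 * deliver the frames.
 */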
void can_rx_offload_irq_finish(struct can_rx_offload *offload)
{
	unsigned long flags;
	int queue_len;

	if (skb_queue_empty_lockless(&offload->skb_irq_queue))
		return;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	queue_len = skb_queue_len(&offload->skb_queue);
	if (queue_len > offload->skb_queue_len_max / 8)
		netdev_dbg(offload->dev, "%s: queue_len=%d\n",
			   __func__, queue_len);

	napi_schedule(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish);

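/* Same as can_rx_offload_irq_finish(), but for threaded IRQ handlers:
 * napi_schedule() is wrapped in local_bh_disable()/local_bh_enable()
 * so the NAPI softirq is raised correctly from process context.
 */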
void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload)
{
	unsigned long flags;
	int queue_len;

	if (skb_queue_empty_lockless(&offload->skb_irq_queue))
		return;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	queue_len = skb_queue_len(&offload->skb_queue);
	if (queue_len > offload->skb_queue_len_max / 8)
		netdev_dbg(offload->dev, "%s: queue_len=%d\n",
			   __func__, queue_len);

	local_bh_disable();
	napi_schedule(&offload->napi);
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(can_rx_offload_threaded_irq_finish);

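/* Common setup for all rx-offload modes: size the skb queue based on
 * the NAPI weight, initialize the locked skb_queue and the unlocked
 * skb_irq_queue, and register the NAPI poll handler.
 */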
static int can_rx_offload_init_queue(struct net_device *dev,
				     struct can_rx_offload *offload,
				     unsigned int weight)
{
	offload->dev = dev;

	/* Limit queue len to 4x the weight (rounded to next power of two) */
	offload->skb_queue_len_max = 2 << fls(weight);
	offload->skb_queue_len_max *= 4;
	skb_queue_head_init(&offload->skb_queue);
	__skb_queue_head_init(&offload->skb_irq_queue);

	netif_napi_add_weight(dev, &offload->napi, can_rx_offload_napi_poll,
			      weight);

	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
		__func__, offload->skb_queue_len_max);

	return 0;
}

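/* Register rx-offload for a controller with timestamp-based mailbox
 * sorting. The NAPI weight and the iteration direction are derived
 * from the mb_first/mb_last mailbox range.
 */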
int can_rx_offload_add_timestamp(struct net_device *dev,
				 struct can_rx_offload *offload)
{
	unsigned int weight;

	if (offload->mb_first > BITS_PER_LONG_LONG ||
	    offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
		return -EINVAL;

	if (offload->mb_first < offload->mb_last) {
		offload->inc = true;
		weight = offload->mb_last - offload->mb_first;
	} else {
		offload->inc = false;
		weight = offload->mb_first - offload->mb_last;
	}

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);

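/* Register rx-offload for a controller with a hardware RX FIFO; a
 * mailbox_read callback is required.
 */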
int can_rx_offload_add_fifo(struct net_device *dev,
			    struct can_rx_offload *offload, unsigned int weight)
{
	if (!offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);

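/* Register rx-offload for drivers that queue skbs themselves via
 * can_rx_offload_queue_timestamp() or can_rx_offload_queue_tail();
 * a mailbox_read callback must not be set.
 */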
int can_rx_offload_add_manual(struct net_device *dev,
			      struct can_rx_offload *offload,
			      unsigned int weight)
{
	if (offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_manual);

void can_rx_offload_enable(struct can_rx_offload *offload)
{
	napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

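/* Tear down rx-offload: remove the NAPI instance and free any skbs
 * still pending in either queue.
 */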
void can_rx_offload_del(struct can_rx_offload *offload)
{
	netif_napi_del(&offload->napi);
	skb_queue_purge(&offload->skb_queue);
	__skb_queue_purge(&offload->skb_irq_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);