// SPDX-License-Identifier: GPL-2.0
/*
 * Generic Counter character device interface
 * Copyright (C) 2020 William Breathitt Gray
 */
#include <linux/cdev.h>
#include <linux/counter.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kfifo.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/nospec.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include "counter-chrdev.h"

struct counter_comp_node {
	struct list_head l;
	struct counter_component component;
	struct counter_comp comp;
	void *parent;
};

#define counter_comp_read_is_equal(a, b) \
	(a.action_read == b.action_read || \
	 a.device_u8_read == b.device_u8_read || \
	 a.count_u8_read == b.count_u8_read || \
	 a.signal_u8_read == b.signal_u8_read || \
	 a.device_u32_read == b.device_u32_read || \
	 a.count_u32_read == b.count_u32_read || \
	 a.signal_u32_read == b.signal_u32_read || \
	 a.device_u64_read == b.device_u64_read || \
	 a.count_u64_read == b.count_u64_read || \
	 a.signal_u64_read == b.signal_u64_read)

#define counter_comp_read_is_set(comp) \
	(comp.action_read || \
	 comp.device_u8_read || \
	 comp.count_u8_read || \
	 comp.signal_u8_read || \
	 comp.device_u32_read || \
	 comp.count_u32_read || \
	 comp.signal_u32_read || \
	 comp.device_u64_read || \
	 comp.count_u64_read || \
	 comp.signal_u64_read)
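
/*
 * Note: the read callbacks of struct counter_comp share a union, so the
 * comparisons above effectively test a single stored function pointer;
 * every member is spelled out so the macros hold for whichever callback
 * type a component happens to use.
 */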

static ssize_t counter_chrdev_read(struct file *filp, char __user *buf,
				   size_t len, loff_t *f_ps)
{
	struct counter_device *const counter = filp->private_data;
	int err;
	unsigned int copied;

	if (!counter->ops)
		return -ENODEV;

	if (len < sizeof(struct counter_event))
		return -EINVAL;

	do {
		if (kfifo_is_empty(&counter->events)) {
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			err = wait_event_interruptible(counter->events_wait,
					!kfifo_is_empty(&counter->events) ||
					!counter->ops);
			if (err < 0)
				return err;
			if (!counter->ops)
				return -ENODEV;
		}

		if (mutex_lock_interruptible(&counter->events_out_lock))
			return -ERESTARTSYS;
		err = kfifo_to_user(&counter->events, buf, len, &copied);
		mutex_unlock(&counter->events_out_lock);
		if (err < 0)
			return err;
	} while (!copied);

	return copied;
}
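
/*
 * Userspace consumes events by read()ing struct counter_event records from
 * the character device. A minimal sketch of a blocking reader (hypothetical
 * userspace program, not part of this file; the device node path is an
 * assumption):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <linux/counter.h>
 *
 *	int main(void)
 *	{
 *		struct counter_event event;
 *		int fd = open("/dev/counter0", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		// Blocks until a watched event is pushed
 *		if (read(fd, &event, sizeof(event)) == sizeof(event))
 *			printf("event %u, channel %u, value %llu\n",
 *			       event.watch.event, event.watch.channel,
 *			       (unsigned long long)event.value);
 *		close(fd);
 *		return 0;
 *	}
 */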

static __poll_t counter_chrdev_poll(struct file *filp,
				    struct poll_table_struct *pollt)
{
	struct counter_device *const counter = filp->private_data;
	__poll_t events = 0;

	if (!counter->ops)
		return events;

	poll_wait(filp, &counter->events_wait, pollt);

	if (!kfifo_is_empty(&counter->events))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}
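
/*
 * Because this driver implements .poll, a consumer can multiplex the
 * counter character device with poll(2) or epoll(7). A sketch (hypothetical
 * userspace code; assumes fd was opened with O_NONBLOCK and handle() is a
 * placeholder):
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		struct counter_event ev;
 *
 *		// Drain all queued events before polling again
 *		while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
 *			handle(&ev);
 *	}
 */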

static void counter_events_list_free(struct list_head *const events_list)
{
	struct counter_event_node *p, *n;
	struct counter_comp_node *q, *o;

	list_for_each_entry_safe(p, n, events_list, l) {
		/* Free associated component nodes */
		list_for_each_entry_safe(q, o, &p->comp_list, l) {
			list_del(&q->l);
			kfree(q);
		}

		/* Free event node */
		list_del(&p->l);
		kfree(p);
	}
}

static int counter_set_event_node(struct counter_device *const counter,
				  struct counter_watch *const watch,
				  const struct counter_comp_node *const cfg)
{
	struct counter_event_node *event_node;
	int err = 0;
	struct counter_comp_node *comp_node;

	/* Search for event in the list */
	list_for_each_entry(event_node, &counter->next_events_list, l)
		if (event_node->event == watch->event &&
		    event_node->channel == watch->channel)
			break;

	/* If event is not already in the list */
	if (&event_node->l == &counter->next_events_list) {
		/* Allocate new event node */
		event_node = kmalloc(sizeof(*event_node), GFP_KERNEL);
		if (!event_node)
			return -ENOMEM;

		/* Configure event node and add to the list */
		event_node->event = watch->event;
		event_node->channel = watch->channel;
		INIT_LIST_HEAD(&event_node->comp_list);
		list_add(&event_node->l, &counter->next_events_list);
	}

	/* Check if component watch has already been set before */
	list_for_each_entry(comp_node, &event_node->comp_list, l)
		if (comp_node->parent == cfg->parent &&
		    counter_comp_read_is_equal(comp_node->comp, cfg->comp)) {
			err = -EINVAL;
			goto exit_free_event_node;
		}

	/* Allocate component node */
	comp_node = kmalloc(sizeof(*comp_node), GFP_KERNEL);
	if (!comp_node) {
		err = -ENOMEM;
		goto exit_free_event_node;
	}
	*comp_node = *cfg;

	/* Add component node to event node */
	list_add_tail(&comp_node->l, &event_node->comp_list);

exit_free_event_node:
	/* Free event node if no one else is watching */
	if (list_empty(&event_node->comp_list)) {
		list_del(&event_node->l);
		kfree(event_node);
	}

	return err;
}

static int counter_enable_events(struct counter_device *const counter)
{
	unsigned long flags;
	int err = 0;

	mutex_lock(&counter->n_events_list_lock);
	spin_lock_irqsave(&counter->events_list_lock, flags);

	counter_events_list_free(&counter->events_list);
	list_replace_init(&counter->next_events_list,
			  &counter->events_list);

	if (counter->ops->events_configure)
		err = counter->ops->events_configure(counter);

	spin_unlock_irqrestore(&counter->events_list_lock, flags);
	mutex_unlock(&counter->n_events_list_lock);

	return err;
}

static int counter_disable_events(struct counter_device *const counter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&counter->events_list_lock, flags);

	counter_events_list_free(&counter->events_list);

	if (counter->ops->events_configure)
		err = counter->ops->events_configure(counter);

	spin_unlock_irqrestore(&counter->events_list_lock, flags);

	mutex_lock(&counter->n_events_list_lock);

	counter_events_list_free(&counter->next_events_list);

	mutex_unlock(&counter->n_events_list_lock);

	return err;
}

static int counter_add_watch(struct counter_device *const counter,
			     const unsigned long arg)
{
	void __user *const uwatch = (void __user *)arg;
	struct counter_watch watch;
	struct counter_comp_node comp_node = {};
	size_t parent, id;
	struct counter_comp *ext;
	size_t num_ext;
	int err = 0;

	if (copy_from_user(&watch, uwatch, sizeof(watch)))
		return -EFAULT;

	if (watch.component.type == COUNTER_COMPONENT_NONE)
		goto no_component;

	parent = watch.component.parent;

	/* Configure parent component info for comp node */
	switch (watch.component.scope) {
	case COUNTER_SCOPE_DEVICE:
		ext = counter->ext;
		num_ext = counter->num_ext;
		break;
	case COUNTER_SCOPE_SIGNAL:
		if (parent >= counter->num_signals)
			return -EINVAL;
		parent = array_index_nospec(parent, counter->num_signals);

		comp_node.parent = counter->signals + parent;

		ext = counter->signals[parent].ext;
		num_ext = counter->signals[parent].num_ext;
		break;
	case COUNTER_SCOPE_COUNT:
		if (parent >= counter->num_counts)
			return -EINVAL;
		parent = array_index_nospec(parent, counter->num_counts);

		comp_node.parent = counter->counts + parent;

		ext = counter->counts[parent].ext;
		num_ext = counter->counts[parent].num_ext;
		break;
	default:
		return -EINVAL;
	}

	id = watch.component.id;

	/* Configure component info for comp node */
	switch (watch.component.type) {
	case COUNTER_COMPONENT_SIGNAL:
		if (watch.component.scope != COUNTER_SCOPE_SIGNAL)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_SIGNAL_LEVEL;
		comp_node.comp.signal_u32_read = counter->ops->signal_read;
		break;
	case COUNTER_COMPONENT_COUNT:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_U64;
		comp_node.comp.count_u64_read = counter->ops->count_read;
		break;
	case COUNTER_COMPONENT_FUNCTION:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_FUNCTION;
		comp_node.comp.count_u32_read = counter->ops->function_read;
		break;
	case COUNTER_COMPONENT_SYNAPSE_ACTION:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;
		if (id >= counter->counts[parent].num_synapses)
			return -EINVAL;
		id = array_index_nospec(id, counter->counts[parent].num_synapses);

		comp_node.comp.type = COUNTER_COMP_SYNAPSE_ACTION;
		comp_node.comp.action_read = counter->ops->action_read;
		comp_node.comp.priv = counter->counts[parent].synapses + id;
		break;
	case COUNTER_COMPONENT_EXTENSION:
		if (id >= num_ext)
			return -EINVAL;
		id = array_index_nospec(id, num_ext);

		comp_node.comp = ext[id];
		break;
	default:
		return -EINVAL;
	}
	if (!counter_comp_read_is_set(comp_node.comp))
		return -EOPNOTSUPP;

no_component:
	mutex_lock(&counter->n_events_list_lock);

	if (counter->ops->watch_validate) {
		err = counter->ops->watch_validate(counter, &watch);
		if (err < 0)
			goto err_exit;
	}

	comp_node.component = watch.component;

	err = counter_set_event_node(counter, &watch, &comp_node);

err_exit:
	mutex_unlock(&counter->n_events_list_lock);

	return err;
}
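
/*
 * Watches are registered from userspace with COUNTER_ADD_WATCH_IOCTL and
 * armed with COUNTER_ENABLE_EVENTS_IOCTL. A minimal sketch (hypothetical
 * userspace code; watching the overflow event on Count 0 is just an
 * example, and fd is an open counter character device):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/counter.h>
 *
 *	struct counter_watch watch = {
 *		.component.type = COUNTER_COMPONENT_COUNT,
 *		.component.scope = COUNTER_SCOPE_COUNT,
 *		.component.parent = 0,	// Count 0
 *		.event = COUNTER_EVENT_OVERFLOW,
 *		.channel = 0,
 *	};
 *
 *	if (ioctl(fd, COUNTER_ADD_WATCH_IOCTL, &watch) < 0)
 *		return 1;
 *	if (ioctl(fd, COUNTER_ENABLE_EVENTS_IOCTL) < 0)
 *		return 1;
 */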

static long counter_chrdev_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	struct counter_device *const counter = filp->private_data;
	int ret = -ENODEV;

	mutex_lock(&counter->ops_exist_lock);

	if (!counter->ops)
		goto out_unlock;

	switch (cmd) {
	case COUNTER_ADD_WATCH_IOCTL:
		ret = counter_add_watch(counter, arg);
		break;
	case COUNTER_ENABLE_EVENTS_IOCTL:
		ret = counter_enable_events(counter);
		break;
	case COUNTER_DISABLE_EVENTS_IOCTL:
		ret = counter_disable_events(counter);
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}

out_unlock:
	mutex_unlock(&counter->ops_exist_lock);

	return ret;
}

static int counter_chrdev_open(struct inode *inode, struct file *filp)
{
	struct counter_device *const counter = container_of(inode->i_cdev,
							    typeof(*counter),
							    chrdev);

	get_device(&counter->dev);
	filp->private_data = counter;

	return nonseekable_open(inode, filp);
}

static int counter_chrdev_release(struct inode *inode, struct file *filp)
{
	struct counter_device *const counter = filp->private_data;
	int ret = 0;

	mutex_lock(&counter->ops_exist_lock);

	if (!counter->ops) {
		/* Free any lingering held memory */
		counter_events_list_free(&counter->events_list);
		counter_events_list_free(&counter->next_events_list);
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = counter_disable_events(counter);
	if (ret < 0) {
		mutex_unlock(&counter->ops_exist_lock);
		return ret;
	}

out_unlock:
	mutex_unlock(&counter->ops_exist_lock);

	put_device(&counter->dev);

	return ret;
}

static const struct file_operations counter_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = counter_chrdev_read,
	.poll = counter_chrdev_poll,
	.unlocked_ioctl = counter_chrdev_ioctl,
	.open = counter_chrdev_open,
	.release = counter_chrdev_release,
};

int counter_chrdev_add(struct counter_device *const counter)
{
	/* Initialize Counter events lists */
	INIT_LIST_HEAD(&counter->events_list);
	INIT_LIST_HEAD(&counter->next_events_list);
	spin_lock_init(&counter->events_list_lock);
	mutex_init(&counter->n_events_list_lock);
	init_waitqueue_head(&counter->events_wait);
	spin_lock_init(&counter->events_in_lock);
	mutex_init(&counter->events_out_lock);

	/* Initialize character device */
	cdev_init(&counter->chrdev, &counter_fops);

	/* Allocate Counter events queue */
	return kfifo_alloc(&counter->events, 64, GFP_KERNEL);
}

void counter_chrdev_remove(struct counter_device *const counter)
{
	kfifo_free(&counter->events);
}

static int counter_get_data(struct counter_device *const counter,
			    const struct counter_comp_node *const comp_node,
			    u64 *const value)
{
	const struct counter_comp *const comp = &comp_node->comp;
	void *const parent = comp_node->parent;
	u8 value_u8 = 0;
	u32 value_u32 = 0;
	int ret;

	if (comp_node->component.type == COUNTER_COMPONENT_NONE)
		return 0;

	switch (comp->type) {
	case COUNTER_COMP_U8:
	case COUNTER_COMP_BOOL:
		switch (comp_node->component.scope) {
		case COUNTER_SCOPE_DEVICE:
			ret = comp->device_u8_read(counter, &value_u8);
			break;
		case COUNTER_SCOPE_SIGNAL:
			ret = comp->signal_u8_read(counter, parent, &value_u8);
			break;
		case COUNTER_SCOPE_COUNT:
			ret = comp->count_u8_read(counter, parent, &value_u8);
			break;
		default:
			return -EINVAL;
		}
		*value = value_u8;
		return ret;
	case COUNTER_COMP_SIGNAL_LEVEL:
	case COUNTER_COMP_FUNCTION:
	case COUNTER_COMP_ENUM:
	case COUNTER_COMP_COUNT_DIRECTION:
	case COUNTER_COMP_COUNT_MODE:
		switch (comp_node->component.scope) {
		case COUNTER_SCOPE_DEVICE:
			ret = comp->device_u32_read(counter, &value_u32);
			break;
		case COUNTER_SCOPE_SIGNAL:
			ret = comp->signal_u32_read(counter, parent,
						    &value_u32);
			break;
		case COUNTER_SCOPE_COUNT:
			ret = comp->count_u32_read(counter, parent, &value_u32);
			break;
		default:
			return -EINVAL;
		}
		*value = value_u32;
		return ret;
	case COUNTER_COMP_U64:
		switch (comp_node->component.scope) {
		case COUNTER_SCOPE_DEVICE:
			return comp->device_u64_read(counter, value);
		case COUNTER_SCOPE_SIGNAL:
			return comp->signal_u64_read(counter, parent, value);
		case COUNTER_SCOPE_COUNT:
			return comp->count_u64_read(counter, parent, value);
		default:
			return -EINVAL;
		}
	case COUNTER_COMP_SYNAPSE_ACTION:
		ret = comp->action_read(counter, parent, comp->priv,
					&value_u32);
		*value = value_u32;
		return ret;
	default:
		return -EINVAL;
	}
}

/**
 * counter_push_event - queue event for userspace reading
 * @counter: pointer to Counter structure
 * @event: triggered event
 * @channel: event channel
 *
 * Note: If no one is watching for the respective event, it is silently
 * discarded.
 */
void counter_push_event(struct counter_device *const counter, const u8 event,
			const u8 channel)
{
	struct counter_event ev;
	unsigned int copied = 0;
	unsigned long flags;
	struct counter_event_node *event_node;
	struct counter_comp_node *comp_node;

	ev.timestamp = ktime_get_ns();
	ev.watch.event = event;
	ev.watch.channel = channel;

	/* Could be in an interrupt context, so use a spin lock */
	spin_lock_irqsave(&counter->events_list_lock, flags);

	/* Search for event in the list */
	list_for_each_entry(event_node, &counter->events_list, l)
		if (event_node->event == event &&
		    event_node->channel == channel)
			break;

	/* If event is not in the list */
	if (&event_node->l == &counter->events_list)
		goto exit_early;

	/* Read and queue relevant comp for userspace */
	list_for_each_entry(comp_node, &event_node->comp_list, l) {
		ev.watch.component = comp_node->component;
		ev.status = -counter_get_data(counter, comp_node, &ev.value);

		copied += kfifo_in_spinlocked_noirqsave(&counter->events, &ev,
							1, &counter->events_in_lock);
	}

exit_early:
	spin_unlock_irqrestore(&counter->events_list_lock, flags);

	if (copied)
		wake_up_poll(&counter->events_wait, EPOLLIN);
}
EXPORT_SYMBOL_GPL(counter_push_event);
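
/*
 * Drivers report hardware events by calling counter_push_event(), typically
 * from an interrupt handler. A minimal sketch (hypothetical driver code; the
 * handler name and the dev_id wiring to the counter device are assumptions):
 *
 *	#include <linux/interrupt.h>
 *
 *	static irqreturn_t foo_counter_irq(int irq, void *dev_id)
 *	{
 *		struct counter_device *counter = dev_id;
 *
 *		// Safe in hard-irq context: events_list_lock is irq-safe
 *		counter_push_event(counter, COUNTER_EVENT_OVERFLOW, 0);
 *
 *		return IRQ_HANDLED;
 *	}
 */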