/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include "iio.h"
#include "trigger_consumer.h"

#define IIO_ID_PREFIX "device"
#define IIO_ID_FORMAT IIO_ID_PREFIX "%d"

/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);
/* IDA to allocate character device minor numbers */
static DEFINE_IDA(iio_chrdev_ida);
/* Lock used to protect both of the above */
static DEFINE_SPINLOCK(iio_ida_lock);

dev_t iio_devt;
EXPORT_SYMBOL(iio_devt);

#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);

void __iio_change_event(struct iio_detected_event_list *ev,
			int ev_code,
			s64 timestamp)
{
	ev->ev.id = ev_code;
	ev->ev.timestamp = timestamp;
}
EXPORT_SYMBOL(__iio_change_event);

/* Used both in the interrupt line put events and the ring buffer ones */

/* Note that in its current form someone has to be listening before events
 * are queued. Hence a client MUST open the chrdev before the ring buffer is
 * switched on.
 */
int __iio_push_event(struct iio_event_interface *ev_int,
		     int ev_code,
		     s64 timestamp,
		     struct iio_shared_ev_pointer *shared_pointer_p)
{
	struct iio_detected_event_list *ev;
	int ret = 0;

	/* Does anyone care? */
	mutex_lock(&ev_int->event_list_lock);
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags)) {
		if (ev_int->current_events == ev_int->max_events) {
			mutex_unlock(&ev_int->event_list_lock);
			return 0;
		}
		ev = kmalloc(sizeof(*ev), GFP_KERNEL);
		if (ev == NULL) {
			ret = -ENOMEM;
			mutex_unlock(&ev_int->event_list_lock);
			goto error_ret;
		}
		ev->ev.id = ev_code;
		ev->ev.timestamp = timestamp;
		ev->shared_pointer = shared_pointer_p;
		if (ev->shared_pointer)
			shared_pointer_p->ev_p = ev;

		list_add_tail(&ev->list, &ev_int->det_events.list);
		ev_int->current_events++;
		mutex_unlock(&ev_int->event_list_lock);
		wake_up_interruptible(&ev_int->wait);
	} else
		mutex_unlock(&ev_int->event_list_lock);

error_ret:
	return ret;
}
EXPORT_SYMBOL(__iio_push_event);

int iio_push_event(struct iio_dev *dev_info,
		   int ev_line,
		   int ev_code,
		   s64 timestamp)
{
	return __iio_push_event(&dev_info->event_interfaces[ev_line],
				ev_code, timestamp, NULL);
}
EXPORT_SYMBOL(iio_push_event);
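
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver that has registered event line 0 might push an event from its
 * own bottom-half handler roughly as below; "st" and MY_THRESH_EV_CODE
 * are illustrative names only. Note that __iio_push_event takes a mutex
 * and allocates with GFP_KERNEL, so this must run in a context that can
 * sleep (e.g. a workqueue), not in hard interrupt context.
 *
 *	static void my_thresh_work(struct work_struct *work)
 *	{
 *		struct my_state *st = container_of(work, struct my_state,
 *						   thresh_work);
 *
 *		iio_push_event(st->indio_dev, 0, MY_THRESH_EV_CODE,
 *			       iio_get_time_ns());
 *	}
 */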

/* Generic interrupt line interrupt handler */
static irqreturn_t iio_interrupt_handler(int irq, void *_int_info)
{
	struct iio_interrupt *int_info = _int_info;
	struct iio_dev *dev_info = int_info->dev_info;
	struct iio_event_handler_list *p;
	s64 time_ns;
	unsigned long flags;

	spin_lock_irqsave(&int_info->ev_list_lock, flags);
	if (list_empty(&int_info->ev_list)) {
		spin_unlock_irqrestore(&int_info->ev_list_lock, flags);
		return IRQ_NONE;
	}

	time_ns = iio_get_time_ns();
	list_for_each_entry(p, &int_info->ev_list, list) {
		disable_irq_nosync(irq);
		p->handler(dev_info, 1, time_ns, !(p->refcount > 1));
	}
	spin_unlock_irqrestore(&int_info->ev_list_lock, flags);

	return IRQ_HANDLED;
}

static struct iio_interrupt *iio_allocate_interrupt(void)
{
	struct iio_interrupt *i = kmalloc(sizeof(*i), GFP_KERNEL);

	if (i) {
		spin_lock_init(&i->ev_list_lock);
		INIT_LIST_HEAD(&i->ev_list);
	}
	return i;
}

/* Confirming the validity of the supplied irq is left to drivers. */
int iio_register_interrupt_line(unsigned int irq,
				struct iio_dev *dev_info,
				int line_number,
				unsigned long type,
				const char *name)
{
	int ret;

	dev_info->interrupts[line_number] = iio_allocate_interrupt();
	if (dev_info->interrupts[line_number] == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	dev_info->interrupts[line_number]->line_number = line_number;
	dev_info->interrupts[line_number]->irq = irq;
	dev_info->interrupts[line_number]->dev_info = dev_info;

	/* Possibly only request on demand?
	 * That might complicate the handling of interrupts, but with the
	 * current approach we may end up handling lots of events no-one
	 * cares about.
	 */
	ret = request_irq(irq,
			  &iio_interrupt_handler,
			  type,
			  name,
			  dev_info->interrupts[line_number]);

error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_register_interrupt_line);
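
/*
 * Registration sketch (hypothetical driver code): a driver exposing one
 * event line would typically set num_interrupt_lines before
 * iio_device_register() and then claim the irq; "client" and the name
 * string are illustrative.
 *
 *	ret = iio_register_interrupt_line(client->irq, indio_dev, 0,
 *					  IRQF_TRIGGER_RISING,
 *					  "my_dev_event");
 *	if (ret)
 *		goto error_unregister_device;
 */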

/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%s\n", to_iio_const_attr(attr)->string);
}
EXPORT_SYMBOL(iio_read_const_attr);
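
/*
 * Typical pairing (sketch): drivers normally declare fixed-string
 * attributes with the IIO_CONST_ATTR() helper, whose .show is
 * iio_read_const_attr; the attribute name and value here are
 * illustrative.
 *
 *	static IIO_CONST_ATTR(sampling_frequency_available, "100 200 400");
 */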

/* Before this runs the interrupt generator must have been disabled */
void iio_unregister_interrupt_line(struct iio_dev *dev_info, int line_number)
{
	/* make sure the interrupt handlers are all done */
	flush_scheduled_work();
	free_irq(dev_info->interrupts[line_number]->irq,
		 dev_info->interrupts[line_number]);
	kfree(dev_info->interrupts[line_number]);
}
EXPORT_SYMBOL(iio_unregister_interrupt_line);

/* Reference counted add and remove */
void iio_add_event_to_list(struct iio_event_handler_list *el,
			   struct list_head *head)
{
	unsigned long flags;
	struct iio_interrupt *inter = to_iio_interrupt(head);

	/* take mutex to protect this element */
	mutex_lock(&el->exist_lock);
	if (el->refcount == 0) {
		/* Take the event list spin lock */
		spin_lock_irqsave(&inter->ev_list_lock, flags);
		list_add(&el->list, head);
		spin_unlock_irqrestore(&inter->ev_list_lock, flags);
	}
	el->refcount++;
	mutex_unlock(&el->exist_lock);
}
EXPORT_SYMBOL(iio_add_event_to_list);

void iio_remove_event_from_list(struct iio_event_handler_list *el,
				struct list_head *head)
{
	unsigned long flags;
	struct iio_interrupt *inter = to_iio_interrupt(head);

	mutex_lock(&el->exist_lock);
	el->refcount--;
	if (el->refcount == 0) {
		/* Take the event list spin lock */
		spin_lock_irqsave(&inter->ev_list_lock, flags);
		list_del_init(&el->list);
		spin_unlock_irqrestore(&inter->ev_list_lock, flags);
	}
	mutex_unlock(&el->exist_lock);
}
EXPORT_SYMBOL(iio_remove_event_from_list);
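
/*
 * Pairing sketch (hypothetical driver code): an event-enable attribute
 * would add its handler when enabled and remove it when disabled, the
 * refcount collapsing repeated enables into a single list entry; names
 * are illustrative.
 *
 *	iio_add_event_to_list(&st->thresh_handler,
 *			      &indio_dev->interrupts[0]->ev_list);
 *	...
 *	iio_remove_event_from_list(&st->thresh_handler,
 *				   &indio_dev->interrupts[0]->ev_list);
 */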

static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_event_interface *ev_int = filep->private_data;
	struct iio_detected_event_list *el;
	int ret;
	size_t len;

	mutex_lock(&ev_int->event_list_lock);
	if (list_empty(&ev_int->det_events.list)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_mutex_unlock;
		}
		mutex_unlock(&ev_int->event_list_lock);
		/* Blocking on device; waiting for something to be there */
		ret = wait_event_interruptible(ev_int->wait,
					       !list_empty(&ev_int
							   ->det_events.list));
		if (ret)
			goto error_ret;
		/* Single access device so no one else can get the data */
		mutex_lock(&ev_int->event_list_lock);
	}

	el = list_first_entry(&ev_int->det_events.list,
			      struct iio_detected_event_list,
			      list);
	len = sizeof(el->ev);
	if (copy_to_user(buf, &el->ev, len)) {
		ret = -EFAULT;
		goto error_mutex_unlock;
	}
	list_del(&el->list);
	ev_int->current_events--;
	mutex_unlock(&ev_int->event_list_lock);
	/*
	 * Possible concurrency issue if an update of this event is on its
	 * way through. May lead to the new event being removed whilst the
	 * reported event was the unescalated one. In the typical use case
	 * this is not a problem, as userspace will, say, read half the
	 * buffer due to a 50% full event, which would make the correct
	 * 100% full event incorrect anyway.
	 */
	if (el->shared_pointer) {
		spin_lock(&el->shared_pointer->lock);
		el->shared_pointer->ev_p = NULL;
		spin_unlock(&el->shared_pointer->lock);
	}
	kfree(el);

	return len;

error_mutex_unlock:
	mutex_unlock(&ev_int->event_list_lock);
error_ret:
	return ret;
}
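
/*
 * Userspace sketch (not kernel code): events are read out one
 * struct iio_event_data (id + timestamp, as copied above) at a time
 * from the event chrdev; the device node path is illustrative.
 *
 *	struct iio_event_data ev;
 *	int fd = open("/dev/device0:event0", O_RDONLY);
 *
 *	if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
 *		printf("event %d at %lld\n", ev.id, (long long)ev.timestamp);
 */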

static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev);
	struct iio_event_interface *ev_int = hand->private;
	struct iio_detected_event_list *el, *t;

	mutex_lock(&ev_int->event_list_lock);
	clear_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags);
	/*
	 * In order to maintain a clean state for reopening,
	 * clear out any awaiting events. The mask will prevent
	 * any new __iio_push_event calls running.
	 */
	list_for_each_entry_safe(el, t, &ev_int->det_events.list, list) {
		list_del(&el->list);
		kfree(el);
	}
	mutex_unlock(&ev_int->event_list_lock);

	return 0;
}

static int iio_event_chrdev_open(struct inode *inode, struct file *filep)
{
	struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev);
	struct iio_event_interface *ev_int = hand->private;

	mutex_lock(&ev_int->event_list_lock);
	if (test_and_set_bit(IIO_BUSY_BIT_POS, &hand->flags)) {
		fops_put(filep->f_op);
		mutex_unlock(&ev_int->event_list_lock);
		return -EBUSY;
	}
	filep->private_data = hand->private;
	mutex_unlock(&ev_int->event_list_lock);

	return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
	.read = iio_event_chrdev_read,
	.release = iio_event_chrdev_release,
	.open = iio_event_chrdev_open,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static void iio_event_dev_release(struct device *dev)
{
	struct iio_event_interface *ev_int
		= container_of(dev, struct iio_event_interface, dev);

	cdev_del(&ev_int->handler.chrdev);
	iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_event_type = {
	.release = iio_event_dev_release,
};

int iio_device_get_chrdev_minor(void)
{
	int ret, val;

ida_again:
	if (unlikely(ida_pre_get(&iio_chrdev_ida, GFP_KERNEL) == 0))
		return -ENOMEM;
	spin_lock(&iio_ida_lock);
	ret = ida_get_new(&iio_chrdev_ida, &val);
	spin_unlock(&iio_ida_lock);
	if (unlikely(ret == -EAGAIN))
		goto ida_again;
	else if (unlikely(ret))
		return ret;
	/* Minors run from 0 to IIO_DEV_MAX - 1; free the id if we overrun */
	if (val >= IIO_DEV_MAX) {
		iio_device_free_chrdev_minor(val);
		return -ENOMEM;
	}
	return val;
}

void iio_device_free_chrdev_minor(int val)
{
	spin_lock(&iio_ida_lock);
	ida_remove(&iio_chrdev_ida, val);
	spin_unlock(&iio_ida_lock);
}

int iio_setup_ev_int(struct iio_event_interface *ev_int,
		     const char *name,
		     struct module *owner,
		     struct device *dev)
{
	int ret, minor;

	ev_int->dev.bus = &iio_bus_type;
	ev_int->dev.parent = dev;
	ev_int->dev.type = &iio_event_type;
	device_initialize(&ev_int->dev);

	minor = iio_device_get_chrdev_minor();
	if (minor < 0) {
		ret = minor;
		goto error_device_put;
	}
	ev_int->dev.devt = MKDEV(MAJOR(iio_devt), minor);
	dev_set_name(&ev_int->dev, "%s", name);

	ret = device_add(&ev_int->dev);
	if (ret)
		goto error_free_minor;

	cdev_init(&ev_int->handler.chrdev, &iio_event_chrdev_fileops);
	ev_int->handler.chrdev.owner = owner;

	mutex_init(&ev_int->event_list_lock);
	/* discussion point - make this variable? */
	ev_int->max_events = 10;
	ev_int->current_events = 0;
	INIT_LIST_HEAD(&ev_int->det_events.list);
	init_waitqueue_head(&ev_int->wait);
	ev_int->handler.private = ev_int;
	ev_int->handler.flags = 0;

	ret = cdev_add(&ev_int->handler.chrdev, ev_int->dev.devt, 1);
	if (ret)
		goto error_unreg_device;

	return 0;

error_unreg_device:
	device_unregister(&ev_int->dev);
error_free_minor:
	iio_device_free_chrdev_minor(minor);
error_device_put:
	put_device(&ev_int->dev);

	return ret;
}

void iio_free_ev_int(struct iio_event_interface *ev_int)
{
	device_unregister(&ev_int->dev);
	put_device(&ev_int->dev);
}

static int __init iio_dev_init(void)
{
	int err;

	err = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (err < 0)
		printk(KERN_ERR "%s: failed to allocate char dev region\n",
		       __FILE__);

	return err;
}

static void __exit iio_dev_exit(void)
{
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
}

static int __init iio_init(void)
{
	int ret;

	/* Register sysfs bus */
	ret = bus_register(&iio_bus_type);
	if (ret < 0) {
		printk(KERN_ERR "%s: could not register bus type\n",
		       __FILE__);
		goto error_nothing;
	}

	ret = iio_dev_init();
	if (ret < 0)
		goto error_unregister_bus_type;

	return 0;

error_unregister_bus_type:
	bus_unregister(&iio_bus_type);
error_nothing:
	return ret;
}

static void __exit iio_exit(void)
{
	iio_dev_exit();
	bus_unregister(&iio_bus_type);
}

static int iio_device_register_sysfs(struct iio_dev *dev_info)
{
	int ret;

	ret = sysfs_create_group(&dev_info->dev.kobj, dev_info->attrs);
	if (ret)
		dev_err(dev_info->dev.parent,
			"Failed to register sysfs hooks\n");

	return ret;
}

static void iio_device_unregister_sysfs(struct iio_dev *dev_info)
{
	sysfs_remove_group(&dev_info->dev.kobj, dev_info->attrs);
}

/* Return a negative errno on failure */
int iio_get_new_ida_val(struct ida *this_ida)
{
	int ret;
	int val;

ida_again:
	if (unlikely(ida_pre_get(this_ida, GFP_KERNEL) == 0))
		return -ENOMEM;

	spin_lock(&iio_ida_lock);
	ret = ida_get_new(this_ida, &val);
	spin_unlock(&iio_ida_lock);
	if (unlikely(ret == -EAGAIN))
		goto ida_again;
	else if (unlikely(ret))
		return ret;

	return val;
}
EXPORT_SYMBOL(iio_get_new_ida_val);

void iio_free_ida_val(struct ida *this_ida, int id)
{
	spin_lock(&iio_ida_lock);
	ida_remove(this_ida, id);
	spin_unlock(&iio_ida_lock);
}
EXPORT_SYMBOL(iio_free_ida_val);

static int iio_device_register_id(struct iio_dev *dev_info,
				  struct ida *this_ida)
{
	dev_info->id = iio_get_new_ida_val(this_ida);
	if (dev_info->id < 0)
		return dev_info->id;
	return 0;
}

static void iio_device_unregister_id(struct iio_dev *dev_info)
{
	iio_free_ida_val(&iio_ida, dev_info->id);
}

static inline int __iio_add_event_config_attrs(struct iio_dev *dev_info, int i)
{
	int ret;
	/* p for adding, q for removing */
	struct attribute **attrp, **attrq;

	if (dev_info->event_conf_attrs && dev_info->event_conf_attrs[i].attrs) {
		attrp = dev_info->event_conf_attrs[i].attrs;
		while (*attrp) {
			ret = sysfs_add_file_to_group(&dev_info->dev.kobj,
						      *attrp,
						      dev_info
						      ->event_attrs[i].name);
			if (ret)
				goto error_ret;
			attrp++;
		}
	}
	return 0;

error_ret:
	attrq = dev_info->event_conf_attrs[i].attrs;
	while (attrq != attrp) {
		sysfs_remove_file_from_group(&dev_info->dev.kobj,
					     *attrq,
					     dev_info->event_attrs[i].name);
		attrq++;
	}

	return ret;
}

static inline int __iio_remove_event_config_attrs(struct iio_dev *dev_info,
						  int i)
{
	struct attribute **attrq;

	if (dev_info->event_conf_attrs
	    && dev_info->event_conf_attrs[i].attrs) {
		attrq = dev_info->event_conf_attrs[i].attrs;
		while (*attrq) {
			sysfs_remove_file_from_group(&dev_info->dev.kobj,
						     *attrq,
						     dev_info
						     ->event_attrs[i].name);
			attrq++;
		}
	}

	return 0;
}

static int iio_device_register_eventset(struct iio_dev *dev_info)
{
	int ret = 0, i, j;

	if (dev_info->num_interrupt_lines == 0)
		return 0;

	dev_info->event_interfaces =
		kzalloc(sizeof(struct iio_event_interface)
			* dev_info->num_interrupt_lines,
			GFP_KERNEL);
	if (dev_info->event_interfaces == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	dev_info->interrupts = kzalloc(sizeof(struct iio_interrupt *)
				       * dev_info->num_interrupt_lines,
				       GFP_KERNEL);
	if (dev_info->interrupts == NULL) {
		ret = -ENOMEM;
		goto error_free_event_interfaces;
	}

	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
		dev_info->event_interfaces[i].owner = dev_info->driver_module;

		snprintf(dev_info->event_interfaces[i]._name, 20,
			 "%s:event%d",
			 dev_name(&dev_info->dev),
			 i);

		ret = iio_setup_ev_int(&dev_info->event_interfaces[i],
				       (const char *)(dev_info
						      ->event_interfaces[i]
						      ._name),
				       dev_info->driver_module,
				       &dev_info->dev);
		if (ret) {
			dev_err(&dev_info->dev,
				"Could not get chrdev interface\n");
			goto error_free_setup_ev_ints;
		}

		dev_set_drvdata(&dev_info->event_interfaces[i].dev,
				(void *)dev_info);
		ret = sysfs_create_group(&dev_info
					 ->event_interfaces[i]
					 .dev.kobj,
					 &dev_info->event_attrs[i]);
		if (ret) {
			dev_err(&dev_info->dev,
				"Failed to register sysfs for event attrs\n");
			iio_free_ev_int(&dev_info->event_interfaces[i]);
			goto error_remove_sysfs_interfaces;
		}
	}

	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
		ret = __iio_add_event_config_attrs(dev_info, i);
		if (ret)
			goto error_unregister_config_attrs;
	}

	return 0;

error_unregister_config_attrs:
	for (j = 0; j < i; j++)
		__iio_remove_event_config_attrs(dev_info, j);
	i = dev_info->num_interrupt_lines;
error_remove_sysfs_interfaces:
	for (j = 0; j < i; j++)
		sysfs_remove_group(&dev_info
				   ->event_interfaces[j].dev.kobj,
				   &dev_info->event_attrs[j]);
error_free_setup_ev_ints:
	for (j = 0; j < i; j++)
		iio_free_ev_int(&dev_info->event_interfaces[j]);
	kfree(dev_info->interrupts);
error_free_event_interfaces:
	kfree(dev_info->event_interfaces);
error_ret:
	return ret;
}

static void iio_device_unregister_eventset(struct iio_dev *dev_info)
{
	int i;

	if (dev_info->num_interrupt_lines == 0)
		return;
	for (i = 0; i < dev_info->num_interrupt_lines; i++)
		sysfs_remove_group(&dev_info
				   ->event_interfaces[i].dev.kobj,
				   &dev_info->event_attrs[i]);

	for (i = 0; i < dev_info->num_interrupt_lines; i++)
		iio_free_ev_int(&dev_info->event_interfaces[i]);
	kfree(dev_info->interrupts);
	kfree(dev_info->event_interfaces);
}

static void iio_dev_release(struct device *device)
{
	struct iio_dev *dev = to_iio_dev(device);

	iio_put();
	kfree(dev);
}

static struct device_type iio_dev_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};

struct iio_dev *iio_allocate_device(void)
{
	struct iio_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (dev) {
		dev->dev.type = &iio_dev_type;
		dev->dev.bus = &iio_bus_type;
		device_initialize(&dev->dev);
		dev_set_drvdata(&dev->dev, (void *)dev);
		mutex_init(&dev->mlock);
		iio_get();
	}

	return dev;
}
EXPORT_SYMBOL(iio_allocate_device);

void iio_free_device(struct iio_dev *dev)
{
	if (dev)
		iio_put_device(dev);
}
EXPORT_SYMBOL(iio_free_device);

int iio_device_register(struct iio_dev *dev_info)
{
	int ret;

	ret = iio_device_register_id(dev_info, &iio_ida);
	if (ret) {
		dev_err(&dev_info->dev, "Failed to get id\n");
		goto error_ret;
	}
	dev_set_name(&dev_info->dev, IIO_ID_FORMAT, dev_info->id);

	ret = device_add(&dev_info->dev);
	if (ret)
		goto error_free_ida;
	ret = iio_device_register_sysfs(dev_info);
	if (ret) {
		dev_err(dev_info->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_del_device;
	}
	ret = iio_device_register_eventset(dev_info);
	if (ret) {
		dev_err(dev_info->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (dev_info->modes & INDIO_RING_TRIGGERED)
		iio_device_register_trigger_consumer(dev_info);

	return 0;

error_free_sysfs:
	iio_device_unregister_sysfs(dev_info);
error_del_device:
	device_del(&dev_info->dev);
error_free_ida:
	iio_device_unregister_id(dev_info);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_device_register);
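
/*
 * Probe-time sketch (hypothetical driver code): allocate a device, fill
 * in the fields this core consumes, then register; all names here are
 * illustrative.
 *
 *	indio_dev = iio_allocate_device();
 *	if (indio_dev == NULL)
 *		return -ENOMEM;
 *	indio_dev->dev.parent = &client->dev;
 *	indio_dev->attrs = &my_attribute_group;
 *	indio_dev->driver_module = THIS_MODULE;
 *	indio_dev->modes = INDIO_DIRECT_MODE;
 *	ret = iio_device_register(indio_dev);
 *	if (ret)
 *		iio_free_device(indio_dev);
 */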

void iio_device_unregister(struct iio_dev *dev_info)
{
	if (dev_info->modes & INDIO_RING_TRIGGERED)
		iio_device_unregister_trigger_consumer(dev_info);
	iio_device_unregister_eventset(dev_info);
	iio_device_unregister_sysfs(dev_info);
	iio_device_unregister_id(dev_info);
	device_unregister(&dev_info->dev);
}
EXPORT_SYMBOL(iio_device_unregister);

void iio_put(void)
{
	module_put(THIS_MODULE);
}

void iio_get(void)
{
	__module_get(THIS_MODULE);
}

subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");