/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here:
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include "iio.h"
#include "iio_core.h"
#include "sysfs.h"
#include "buffer.h"

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}
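
/*
 * Usage sketch (illustrative only, not part of this file): a userspace
 * consumer typically opens the character device, poll()s for POLLIN and
 * then read()s whole scans. The device path and scan size are assumptions.
 *
 *	struct pollfd pfd = {
 *		.fd = open("/dev/iio:device0", O_RDONLY),
 *		.events = POLLIN,
 *	};
 *	char scan[16];			// sized to bytes_per_datum
 *
 *	while (poll(&pfd, 1, -1) > 0)
 *		read(pfd.fd, scan, sizeof(scan));
 */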

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);
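
/*
 * A buffer implementation is expected to call iio_buffer_init() on the
 * struct iio_buffer it embeds. A minimal sketch; the my_ring structure
 * and allocator names are hypothetical, not part of this file:
 *
 *	struct my_ring {
 *		struct iio_buffer buffer;	// must be first, see chrdev read above
 *		// implementation-specific state ...
 *	};
 *
 *	struct iio_buffer *my_ring_allocate(void)
 *	{
 *		struct my_ring *ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 *
 *		if (!ring)
 *			return NULL;
 *		iio_buffer_init(&ring->buffer);
 *		return &ring->buffer;
 *	}
 */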

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}
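
/*
 * Example: a signed 12-bit sample stored little-endian in 16 bits with a
 * 4-bit right shift reads back from the _type attribute as "le:s12/16>>4".
 */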

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret = 0;
	bool state;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}
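
/*
 * From userspace the scan elements are toggled through sysfs, e.g.
 * (the device and channel names are illustrative):
 *
 *	echo 1 > /sys/bus/iio/devices/iio:device0/scan_elements/in_voltage0_en
 *
 * Any write not starting with '0' enables the element; writes are
 * rejected with -EBUSY while the buffer is enabled.
 */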

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret = 0;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	bool state;

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}
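
/*
 * For each channel the above creates index, type and en attributes under
 * scan_elements/, e.g. in_voltage0_index, in_voltage0_type and
 * in_voltage0_en (the names derive from the channel spec; the voltage
 * example is illustrative). Timestamp channels get the dedicated ts
 * handlers, so enabling them flips buffer->scan_timestamp rather than a
 * scan_mask bit.
 */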

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* Build the dynamic, per-channel scan element attributes. */
		for (i = 0; i < num_channels; i++) {
			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				buffer->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs,
		       buffer->scan_el_attrs->attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
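
/*
 * Typical use from a driver's probe path (a sketch; the allocator name
 * and surrounding error handling are assumptions, not part of this file):
 *
 *	indio_dev->buffer = my_ring_allocate();	// hypothetical, see above
 *	if (!indio_dev->buffer)
 *		return -ENOMEM;
 *	ret = iio_buffer_register(indio_dev, indio_dev->channels,
 *				  indio_dev->num_channels);
 *	if (ret)
 *		goto error_free_buffer;
 *
 * iio_buffer_unregister() below undoes this on remove.
 */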

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	unsigned long val;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	mutex_lock(&indio_dev->mlock);
	previous_mode = indio_dev->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = iio_buffer_enabled(indio_dev);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-buffer, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (indio_dev->setup_ops->preenable) {
			ret = indio_dev->setup_ops->preenable(indio_dev);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: buffer preenable failed\n");
				goto error_ret;
			}
		}
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed\n");
				goto error_ret;
			}
		}
		/* Definitely possible for devices to support both of these. */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
			if (!indio_dev->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				goto error_ret;
			}
			indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
		} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
			indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
		} else { /* Should never be reached. */
			ret = -EINVAL;
			goto error_ret;
		}

		if (indio_dev->setup_ops->postenable) {
			ret = indio_dev->setup_ops->postenable(indio_dev);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: postenable failed\n");
				indio_dev->currentmode = previous_mode;
				if (indio_dev->setup_ops->postdisable)
					indio_dev->setup_ops->
						postdisable(indio_dev);
				goto error_ret;
			}
		}
	} else {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&indio_dev->mlock);
	return len;

error_ret:
	mutex_unlock(&indio_dev->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
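
/*
 * Enable sequence summary: preenable -> request_update -> mode switch ->
 * postenable; disable runs predisable -> INDIO_DIRECT_MODE -> postdisable.
 * From userspace this is driven via sysfs (illustrative path):
 *
 *	echo 1 > /sys/bus/iio/devices/iio:device0/buffer/enable
 */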

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", iio_buffer_enabled(indio_dev));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note: NULL is used as the error indicator, since it can never be a
 * valid match.
 */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
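
/*
 * Worked example (values illustrative): with masklength 4 and
 * available_scan_masks = { 0b0011, 0b1111, 0 }, a requested mask of
 * 0b0010 matches 0b0011 (the first available superset), while 0b0100
 * only matches 0b1111. The demux code below later strips out the
 * channels the device captured but the user did not request.
 */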

int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, buffer->scan_mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (buffer->scan_timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      buffer->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	buffer->access->set_bytes_per_datum(buffer, bytes);

	/* What scan mask do we actually have? */
	if (indio_dev->available_scan_masks)
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    buffer->scan_mask);
	else
		indio_dev->active_scan_mask = buffer->scan_mask;
	iio_update_demux(indio_dev);

	if (indio_dev->info->update_scan_mode)
		return indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);
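
/*
 * Sizing example (illustrative): two enabled 16-bit channels followed by
 * a 64-bit timestamp give bytes = 2 + 2 = 4, then ALIGN(4, 8) = 8 for the
 * timestamp, so bytes_per_datum = 8 + 8 = 16.
 */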

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device whose masklength and available masks apply
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 **/
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		return -EINVAL;
	}
	trialmask = kmalloc(sizeof(*trialmask) *
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask) {
			kfree(trialmask);
			return -EINVAL;
		}
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit >= indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
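
/*
 * Example (illustrative): if the device captures channels 0-2 as three
 * consecutive 2-byte samples but only channels 0 and 2 are wanted in the
 * buffer, the demux list holds { .from = 0, .to = 0, .length = 2 } and
 * { .from = 4, .to = 2, .length = 2 }.
 */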

static unsigned char *iio_demux(struct iio_buffer *buffer,
				unsigned char *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data,
		       s64 timestamp)
{
	unsigned char *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout, timestamp);
}
EXPORT_SYMBOL_GPL(iio_push_to_buffer);
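
/*
 * Typical producer-side use, e.g. from a trigger's bottom-half handler
 * (a sketch; the handler name and scan[] layout are assumptions, and
 * pf->timestamp assumes the pollfunc was set up with
 * iio_pollfunc_store_time):
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		unsigned char scan[16];		// sized to bytes_per_datum
 *
 *		// ... read the enabled channels into scan[] ...
 *		iio_push_to_buffer(indio_dev->buffer, scan, pf->timestamp);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */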

int iio_update_demux(struct iio_dev *indio_dev)
{
	const struct iio_chan_spec *ch;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p, *q;

	/* Clear out any old demux */
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			/* Account for the channel being skipped over. */
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			if (in_loc % length)
				in_loc += length - in_loc % length;
			in_loc += length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
			buffer->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);