Lines matching refs: c — references to the channel pointer `c` in the MOST cdev component (most_cdev.c), grouped by the function containing them; lines elided from the original listing are marked /* ... */.

static inline bool ch_has_mbo(struct comp_channel *c)
{
	return channel_has_mbo(c->iface, c->channel_id, &comp.cc) > 0;
}

/* Reuse a buffer already parked in the fifo, or fetch a fresh one
 * from the core and cache it there. */
static inline struct mbo *ch_get_mbo(struct comp_channel *c, struct mbo **mbo)
{
	if (!kfifo_peek(&c->fifo, mbo)) {
		*mbo = most_get_mbo(c->iface, c->channel_id, &comp.cc);
		if (*mbo)
			kfifo_in(&c->fifo, mbo, 1);
	}
	return *mbo;
}

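ch_get_mbo() is a peek-or-fetch helper: a buffer already sitting in the channel's fifo is reused, so a partially filled MBO survives across write() calls, and only when the fifo is empty is a fresh buffer requested from the core and cached. A minimal userspace sketch of the same pattern, with a hypothetical one-slot cache standing in for the kfifo:

#include <stdio.h>
#include <stdlib.h>

struct buf { char data[64]; };

/* One-slot cache standing in for the channel's kfifo (hypothetical). */
static struct buf *cached;

/* Stand-in for most_get_mbo(): hand out a fresh buffer, or NULL. */
static struct buf *core_get_buf(void)
{
	return calloc(1, sizeof(struct buf));
}

/* Peek-or-fetch: reuse the cached buffer if present, else fetch and cache. */
static struct buf *get_buf(void)
{
	if (!cached)
		cached = core_get_buf();
	return cached;
}

int main(void)
{
	struct buf *a = get_buf();
	struct buf *b = get_buf();	/* same buffer: still cached */

	printf("reused: %s\n", a == b ? "yes" : "no");
	free(a);
	return 0;
}
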
static struct comp_channel *get_channel(struct most_interface *iface, int id)
{
	struct comp_channel *c, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ch_list_lock, flags);
	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		if ((c->iface == iface) && (c->channel_id == id)) {
			spin_unlock_irqrestore(&ch_list_lock, flags);
			return c;
		}
	}
	spin_unlock_irqrestore(&ch_list_lock, flags);
	return NULL;
}

/* Drain all queued buffers back to the core and stop the channel. */
static void stop_channel(struct comp_channel *c)
{
	struct mbo *mbo;

	while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
		most_put_mbo(mbo);
	most_stop_channel(c->iface, c->channel_id, &comp.cc);
}

static void destroy_cdev(struct comp_channel *c)
{
	unsigned long flags;

	device_destroy(comp.class, c->devno);
	cdev_del(&c->cdev);
	spin_lock_irqsave(&ch_list_lock, flags);
	list_del(&c->list);
	spin_unlock_irqrestore(&ch_list_lock, flags);
}

static void destroy_channel(struct comp_channel *c)
{
	ida_simple_remove(&comp.minor_id, MINOR(c->devno));
	kfifo_free(&c->fifo);
	kfree(c);
}

static int comp_open(struct inode *inode, struct file *filp)
{
	struct comp_channel *c;
	int ret;

	c = to_channel(inode->i_cdev);
	filp->private_data = c;
	/* The access mode must match the channel direction. */
	if (((c->cfg->direction == MOST_CH_RX) &&
	     ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
	    ((c->cfg->direction == MOST_CH_TX) &&
	     ((filp->f_flags & O_ACCMODE) != O_WRONLY)))
		return -EACCES;

	mutex_lock(&c->io_mutex);
	if (!c->dev) {				/* device already gone */
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}
	if (c->access_ref) {			/* one open reference only */
		mutex_unlock(&c->io_mutex);
		return -EBUSY;
	}
	c->mbo_offs = 0;
	ret = most_start_channel(c->iface, c->channel_id, &comp.cc);
	if (!ret)
		c->access_ref = 1;
	mutex_unlock(&c->io_mutex);
	return ret;
}

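comp_open() enforces two rules: the open mode must match the channel direction, and only one open reference is allowed at a time. A userspace sketch, assuming a hypothetical device node name (real names are chosen when the channel is registered):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical node; real names depend on the registered channel. */
	const char *node = "/dev/most_ch0";

	/* A TX channel must be opened O_WRONLY, an RX channel O_RDONLY;
	 * anything else fails with EACCES. A second open fails with EBUSY. */
	int fd = open(node, O_WRONLY);
	if (fd < 0) {
		fprintf(stderr, "open %s: %s\n", node, strerror(errno));
		return 1;
	}
	close(fd);
	return 0;
}
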
static int comp_close(struct inode *inode, struct file *filp)
{
	struct comp_channel *c = to_channel(inode->i_cdev);

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->access_ref = 0;
	spin_unlock(&c->unlink);
	if (c->dev) {
		stop_channel(c);
		mutex_unlock(&c->io_mutex);
	} else {
		/* Device disappeared while open: finish tearing it down. */
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}

static ssize_t comp_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *offset)
{
	int ret;
	size_t to_copy, left;
	struct mbo *mbo = NULL;
	struct comp_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !ch_get_mbo(c, &mbo)) {
		mutex_unlock(&c->io_mutex);
		if ((filp->f_flags & O_NONBLOCK))
			return -EAGAIN;
		if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}
	if (unlikely(!c->dev)) {
		ret = -ENODEV;
		goto unlock;
	}
	to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
	left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
	if (left == to_copy) {
		ret = -EFAULT;
		goto unlock;
	}
	c->mbo_offs += to_copy - left;
	if (c->mbo_offs >= c->cfg->buffer_size ||
	    c->cfg->data_type == MOST_CH_CONTROL ||
	    c->cfg->data_type == MOST_CH_ASYNC) {
		/* Buffer complete, or a message-oriented channel: ship it. */
		kfifo_skip(&c->fifo);
		mbo->buffer_length = c->mbo_offs;
		c->mbo_offs = 0;
		most_submit_mbo(mbo);
	}
	ret = to_copy - left;
unlock:
	mutex_unlock(&c->io_mutex);
	return ret;
}

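A single write() consumes at most the space left in the current MBO, so callers should expect short writes and loop. A sketch of a robust writer, reusing the hypothetical node name from above:

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

/* Write all of buf, retrying on short writes and EINTR. */
static int write_all(int fd, const char *buf, size_t len)
{
	while (len > 0) {
		ssize_t n = write(fd, buf, len);
		if (n < 0) {
			if (errno == EINTR)
				continue;	/* interrupted: retry */
			return -1;
		}
		buf += n;
		len -= (size_t)n;
	}
	return 0;
}

int main(void)
{
	int fd = open("/dev/most_ch0", O_WRONLY);	/* hypothetical node */
	if (fd < 0)
		return 1;
	const char msg[] = "hello";
	int ret = write_all(fd, msg, sizeof(msg));
	close(fd);
	return ret ? 1 : 0;
}
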
static ssize_t
comp_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
	size_t to_copy, not_copied, copied;
	struct mbo *mbo = NULL;
	struct comp_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
		mutex_unlock(&c->io_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		if (wait_event_interruptible(c->wq,
					     (!kfifo_is_empty(&c->fifo) ||
					      (!c->dev))))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}
	/* make sure we don't submit to gone devices */
	if (unlikely(!c->dev)) {
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}
	to_copy = min_t(size_t, count,
			mbo->processed_length - c->mbo_offs);
	not_copied = copy_to_user(buf,
				  mbo->virt_address + c->mbo_offs,
				  to_copy);
	copied = to_copy - not_copied;
	c->mbo_offs += copied;
	/* MBO fully consumed: drop it from the fifo and return it. */
	if (c->mbo_offs >= mbo->processed_length) {
		kfifo_skip(&c->fifo);
		most_put_mbo(mbo);
		c->mbo_offs = 0;
	}
	mutex_unlock(&c->io_mutex);
	return copied;
}

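Likewise, read() returns at most the unread remainder of the MBO at the head of the fifo, so short reads are normal and a blocking reader simply loops. A sketch, again with a hypothetical node name:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[2048];
	int fd = open("/dev/most_ch0_rx", O_RDONLY);	/* hypothetical node */
	if (fd < 0)
		return 1;
	for (;;) {
		ssize_t n = read(fd, buf, sizeof(buf));	/* blocks until data */
		if (n <= 0)
			break;	/* error, e.g. ENODEV after disconnect */
		fwrite(buf, 1, (size_t)n, stdout);
	}
	close(fd);
	return 0;
}
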
static __poll_t comp_poll(struct file *filp, poll_table *wait)
{
	struct comp_channel *c = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &c->wq, wait);
	mutex_lock(&c->io_mutex);
	if (c->cfg->direction == MOST_CH_RX) {
		if (!c->dev || !kfifo_is_empty(&c->fifo))
			mask |= EPOLLIN | EPOLLRDNORM;
	} else {
		if (!c->dev || !kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
			mask |= EPOLLOUT | EPOLLWRNORM;
	}
	mutex_unlock(&c->io_mutex);
	return mask;
}

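The mask also signals readiness once c->dev is cleared, so a poller wakes up and sees ENODEV on its next read() or write() instead of blocking forever. A nonblocking consumer built on poll(2), with the same hypothetical node name:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[2048];
	struct pollfd pfd = {
		.fd = open("/dev/most_ch0_rx", O_RDONLY | O_NONBLOCK),
		.events = POLLIN,
	};
	if (pfd.fd < 0)
		return 1;
	while (poll(&pfd, 1, -1) >= 0) {
		if (pfd.revents & POLLIN) {
			ssize_t n = read(pfd.fd, buf, sizeof(buf));
			if (n < 0)
				break;	/* e.g. device disconnected */
			fwrite(buf, 1, (size_t)n, stdout);
		}
	}
	close(pfd.fd);
	return 0;
}
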
static int comp_disconnect_channel(struct most_interface *iface, int channel_id)
{
	struct comp_channel *c;

	c = get_channel(iface, channel_id);
	if (!c)
		return -ENXIO;

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->dev = NULL;		/* readers/writers now see the device as gone */
	spin_unlock(&c->unlink);
	destroy_cdev(c);
	if (c->access_ref) {
		/* Still open: stop I/O and let comp_close() free the channel. */
		stop_channel(c);
		wake_up_interruptible(&c->wq);
		mutex_unlock(&c->io_mutex);
	} else {
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}

static int comp_rx_completion(struct mbo *mbo)
{
	struct comp_channel *c;

	if (!mbo)
		return -EINVAL;

	c = get_channel(mbo->ifp, mbo->hdm_channel_id);
	if (!c)
		return -ENXIO;

	spin_lock(&c->unlink);
	if (!c->access_ref || !c->dev) {
		spin_unlock(&c->unlink);
		return -ENODEV;
	}
	kfifo_in(&c->fifo, &mbo, 1);
	spin_unlock(&c->unlink);
#ifdef DEBUG_MESG
	if (kfifo_is_full(&c->fifo))
		dev_warn(c->dev, "Fifo is full\n");
#endif
	wake_up_interruptible(&c->wq);
	return 0;
}

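The completion handler is the producer side of the fifo: it parks the finished buffer under the unlink spinlock, so it cannot race with close/disconnect clearing access_ref, and then wakes any sleeping reader. A toy pthread analogue of that hand-off (all names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static int fifo_fill;		/* stands in for the MBO kfifo */
static int channel_open = 1;	/* stands in for access_ref */

/* Producer side: what comp_rx_completion() does in spirit. */
static void rx_complete(void)
{
	pthread_mutex_lock(&lock);
	if (channel_open)
		fifo_fill++;		/* kfifo_in(&c->fifo, &mbo, 1) */
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&wq);	/* wake_up_interruptible(&c->wq) */
}

/* Consumer side: what a blocking read waits on. */
static void *reader(void *arg)
{
	pthread_mutex_lock(&lock);
	while (fifo_fill == 0)
		pthread_cond_wait(&wq, &lock);
	fifo_fill--;
	pthread_mutex_unlock(&lock);
	puts("got a buffer");
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);
	rx_complete();
	pthread_join(t, NULL);
	return 0;
}
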
static int comp_tx_completion(struct most_interface *iface, int channel_id)
{
	struct comp_channel *c;

	c = get_channel(iface, channel_id);
	if (!c)
		return -ENXIO;

	if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
		dev_warn(c->dev, "Channel ID out of range\n");
		return -EINVAL;
	}
	wake_up_interruptible(&c->wq);	/* a TX buffer became available */
	return 0;
}

static int comp_probe(struct most_interface *iface, int channel_id,
		      struct most_channel_config *cfg, char *name, char *args)
{
	struct comp_channel *c;
	unsigned long cl_flags;
	int current_minor;
	int retval;

	/* ... NULL-argument checks elided ... */
	c = get_channel(iface, channel_id);
	if (c)
		return -EEXIST;

	current_minor = ida_simple_get(&comp.minor_id, 0, 0, GFP_KERNEL);
	if (current_minor < 0)
		return current_minor;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		retval = -ENOMEM;
		goto err_remove_ida;
	}

	c->devno = MKDEV(comp.major, current_minor);
	cdev_init(&c->cdev, &channel_fops);
	c->cdev.owner = THIS_MODULE;
	retval = cdev_add(&c->cdev, c->devno, 1);
	if (retval < 0)
		goto err_free_c;
	c->iface = iface;
	c->cfg = cfg;
	c->channel_id = channel_id;
	c->access_ref = 0;
	spin_lock_init(&c->unlink);
	INIT_KFIFO(c->fifo);
	retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
	if (retval)
		goto err_del_cdev;
	init_waitqueue_head(&c->wq);
	mutex_init(&c->io_mutex);
	spin_lock_irqsave(&ch_list_lock, cl_flags);
	list_add_tail(&c->list, &channel_list);
	spin_unlock_irqrestore(&ch_list_lock, cl_flags);
	c->dev = device_create(comp.class, NULL, c->devno, NULL, "%s", name);
	if (IS_ERR(c->dev)) {
		retval = PTR_ERR(c->dev);
		goto err_free_kfifo;
	}
	kobject_uevent(&c->dev->kobj, KOBJ_ADD);
	return 0;

	/* unwind in reverse order of setup; label names abridged here */
err_free_kfifo:
	kfifo_free(&c->fifo);
	list_del(&c->list);
err_del_cdev:
	cdev_del(&c->cdev);
err_free_c:
	kfree(c);
err_remove_ida:
	ida_simple_remove(&comp.minor_id, current_minor);
	return retval;
}

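The error path unwinds each setup step in reverse order (kfifo, list entry, cdev, allocation, minor number), the usual kernel goto-unwind idiom. A distilled, self-contained illustration of the shape, using hypothetical resources:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical two-step setup using the kernel's goto-unwind idiom:
 * each error label releases everything acquired before the failure. */
static int setup(void)
{
	char *a, *b;

	a = malloc(16);			/* step 1 */
	if (!a)
		return -1;

	b = malloc(16);			/* step 2 */
	if (!b)
		goto err_free_a;	/* undo step 1 only */

	printf("setup ok\n");
	free(b);
	free(a);
	return 0;

err_free_a:
	free(a);
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}
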
static void __exit most_cdev_exit(void)
{
	struct comp_channel *c, *tmp;

	most_deregister_component(&comp.cc);
	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		destroy_cdev(c);
		destroy_channel(c);
	}
	/* ... class, chrdev region and ida teardown elided ... */
}