/*
 * Public API and common code for kernel->userspace relay file support.
 *
 * See Documentation/filesystems/relay.rst for an overview.
 *
 * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
 * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
 *
 * Moved to kernel/relay.c by Paul Mundt, 2006.
 * November 2006 - CPU hotplug support by Mathieu Desnoyers
 *	(mathieu.desnoyers@polymtl.ca)
 *
 * This file is released under the GPL.
 */
#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/relay.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/splice.h>

/* list of open channels, for cpu hotplug */
static DEFINE_MUTEX(relay_channels_mutex);
static LIST_HEAD(relay_channels);

/*
 * fault() vm_op implementation for relay file mapping.
 */
static vm_fault_t relay_buf_fault(struct vm_fault *vmf)
{
	struct page *page;
	struct rchan_buf *buf = vmf->vma->vm_private_data;
	pgoff_t pgoff = vmf->pgoff;

	if (!buf)
		return VM_FAULT_OOM;

	page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	return 0;
}

/*
 * vm_ops for relay file mappings.
 */
static const struct vm_operations_struct relay_file_mmap_ops = {
	.fault = relay_buf_fault,
};

/*
 * allocate an array of pointers of struct page
 */
static struct page **relay_alloc_page_array(unsigned int n_pages)
{
	return kvcalloc(n_pages, sizeof(struct page *), GFP_KERNEL);
}

/*
 * free an array of pointers of struct page
 */
static void relay_free_page_array(struct page **array)
{
	kvfree(array);
}

/**
 * relay_mmap_buf: - mmap channel buffer to process address space
 * @buf: relay channel buffer
 * @vma: vm_area_struct describing memory to be mapped
 *
 * Returns 0 if ok, negative on error
 *
 * Caller should already have grabbed mmap_lock.
 */
static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
{
	unsigned long length = vma->vm_end - vma->vm_start;

	if (!buf)
		return -EBADF;

	if (length != (unsigned long)buf->chan->alloc_size)
		return -EINVAL;

	vma->vm_ops = &relay_file_mmap_ops;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_private_data = buf;

	return 0;
}

/**
 * relay_alloc_buf - allocate a channel buffer
 * @buf: the buffer struct
 * @size: total size of the buffer
 *
 * Returns a pointer to the resulting buffer, %NULL if unsuccessful. The
 * passed in size will get page aligned, if it isn't already.
 */
static void *relay_alloc_buf(struct rchan_buf *buf, size_t *size)
{
	void *mem;
	unsigned int i, j, n_pages;

	*size = PAGE_ALIGN(*size);
	n_pages = *size >> PAGE_SHIFT;

	buf->page_array = relay_alloc_page_array(n_pages);
	if (!buf->page_array)
		return NULL;

	for (i = 0; i < n_pages; i++) {
		buf->page_array[i] = alloc_page(GFP_KERNEL);
		if (unlikely(!buf->page_array[i]))
			goto depopulate;
		set_page_private(buf->page_array[i], (unsigned long)buf);
	}
	mem = vmap(buf->page_array, n_pages, VM_MAP, PAGE_KERNEL);
	if (!mem)
		goto depopulate;

	memset(mem, 0, *size);
	buf->page_count = n_pages;
	return mem;

depopulate:
	for (j = 0; j < i; j++)
		__free_page(buf->page_array[j]);
	relay_free_page_array(buf->page_array);
	return NULL;
}

/**
 * relay_create_buf - allocate and initialize a channel buffer
 * @chan: the relay channel
 *
 * Returns channel buffer if successful, %NULL otherwise.
 */
static struct rchan_buf *relay_create_buf(struct rchan *chan)
{
	struct rchan_buf *buf;

	if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t))
		return NULL;

	buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
	if (!buf)
		return NULL;
	buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t),
				     GFP_KERNEL);
	if (!buf->padding)
		goto free_buf;

	buf->start = relay_alloc_buf(buf, &chan->alloc_size);
	if (!buf->start)
		goto free_buf;

	buf->chan = chan;
	kref_get(&buf->chan->kref);
	return buf;

free_buf:
	kfree(buf->padding);
	kfree(buf);
	return NULL;
}

/**
 * relay_destroy_channel - free the channel struct
 * @kref: target kernel reference that contains the relay channel
 *
 * Should only be called from kref_put().
 */
static void relay_destroy_channel(struct kref *kref)
{
	struct rchan *chan = container_of(kref, struct rchan, kref);
	free_percpu(chan->buf);
	kfree(chan);
}

/**
 * relay_destroy_buf - destroy an rchan_buf struct and associated buffer
 * @buf: the buffer struct
 */
static void relay_destroy_buf(struct rchan_buf *buf)
{
	struct rchan *chan = buf->chan;
	unsigned int i;

	if (likely(buf->start)) {
		vunmap(buf->start);
		for (i = 0; i < buf->page_count; i++)
			__free_page(buf->page_array[i]);
		relay_free_page_array(buf->page_array);
	}
	*per_cpu_ptr(chan->buf, buf->cpu) = NULL;
	kfree(buf->padding);
	kfree(buf);
	kref_put(&chan->kref, relay_destroy_channel);
}

/**
 * relay_remove_buf - remove a channel buffer
 * @kref: target kernel reference that contains the relay buffer
 *
 * Removes the file from the filesystem, which also frees the
 * rchan_buf struct and the channel buffer. Should only be called from
 * kref_put().
 */
static void relay_remove_buf(struct kref *kref)
{
	struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
	relay_destroy_buf(buf);
}

/**
 * relay_buf_empty - boolean, is the channel buffer empty?
 * @buf: channel buffer
 *
 * Returns 1 if the buffer is empty, 0 otherwise.
 */
static int relay_buf_empty(struct rchan_buf *buf)
{
	return (buf->subbufs_produced - buf->subbufs_consumed) ? 0 : 1;
}

/**
 * relay_buf_full - boolean, is the channel buffer full?
 * @buf: channel buffer
 *
 * Returns 1 if the buffer is full, 0 otherwise.
 */
int relay_buf_full(struct rchan_buf *buf)
{
	size_t ready = buf->subbufs_produced - buf->subbufs_consumed;
	return (ready >= buf->chan->n_subbufs) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(relay_buf_full);
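
/*
 * Example (illustrative sketch only, not part of this file's API): a client
 * that wants "no-overwrite" mode typically builds its subbuf_start callback
 * on relay_buf_full(), returning 0 to suppress the sub-buffer switch and
 * drop data while readers lag behind:
 *
 *	static int example_subbuf_start(struct rchan_buf *buf, void *subbuf,
 *					void *prev_subbuf, size_t prev_padding)
 *	{
 *		if (relay_buf_full(buf))
 *			return 0;
 *		return 1;
 *	}
 */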

/*
 * High-level relay kernel API and associated functions.
 */

static int relay_subbuf_start(struct rchan_buf *buf, void *subbuf,
			      void *prev_subbuf, size_t prev_padding)
{
	if (!buf->chan->cb->subbuf_start)
		return !relay_buf_full(buf);

	return buf->chan->cb->subbuf_start(buf, subbuf,
					   prev_subbuf, prev_padding);
}

/**
 * wakeup_readers - wake up readers waiting on a channel
 * @work: contains the channel buffer
 *
 * This is the function used to defer reader waking
 */
static void wakeup_readers(struct irq_work *work)
{
	struct rchan_buf *buf;

	buf = container_of(work, struct rchan_buf, wakeup_work);
	wake_up_interruptible(&buf->read_wait);
}

/**
 * __relay_reset - reset a channel buffer
 * @buf: the channel buffer
 * @init: 1 if this is a first-time initialization
 *
 * See relay_reset() for description of effect.
 */
static void __relay_reset(struct rchan_buf *buf, unsigned int init)
{
	size_t i;

	if (init) {
		init_waitqueue_head(&buf->read_wait);
		kref_init(&buf->kref);
		init_irq_work(&buf->wakeup_work, wakeup_readers);
	} else {
		irq_work_sync(&buf->wakeup_work);
	}

	buf->subbufs_produced = 0;
	buf->subbufs_consumed = 0;
	buf->bytes_consumed = 0;
	buf->finalized = 0;
	buf->data = buf->start;
	buf->offset = 0;

	for (i = 0; i < buf->chan->n_subbufs; i++)
		buf->padding[i] = 0;

	relay_subbuf_start(buf, buf->data, NULL, 0);
}

/**
 * relay_reset - reset the channel
 * @chan: the channel
 *
 * This has the effect of erasing all data from all channel buffers
 * and restarting the channel in its initial state.  The buffers
 * are not freed, so any mappings are still in effect.
 *
 * NOTE. Care should be taken that the channel isn't actually
 * being used by anything when this call is made.
 */
void relay_reset(struct rchan *chan)
{
	struct rchan_buf *buf;
	unsigned int i;

	if (!chan)
		return;

	if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
		__relay_reset(buf, 0);
		return;
	}

	mutex_lock(&relay_channels_mutex);
	for_each_possible_cpu(i)
		if ((buf = *per_cpu_ptr(chan->buf, i)))
			__relay_reset(buf, 0);
	mutex_unlock(&relay_channels_mutex);
}
EXPORT_SYMBOL_GPL(relay_reset);

static inline void relay_set_buf_dentry(struct rchan_buf *buf,
					struct dentry *dentry)
{
	buf->dentry = dentry;
	d_inode(buf->dentry)->i_size = buf->early_bytes;
}

static struct dentry *relay_create_buf_file(struct rchan *chan,
					    struct rchan_buf *buf,
					    unsigned int cpu)
{
	struct dentry *dentry;
	char *tmpname;

	tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!tmpname)
		return NULL;
	snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);

	/* Create file in fs */
	dentry = chan->cb->create_buf_file(tmpname, chan->parent,
					   S_IRUSR, buf,
					   &chan->is_global);
	if (IS_ERR(dentry))
		dentry = NULL;

	kfree(tmpname);

	return dentry;
}

/*
 * relay_open_buf - create a new relay channel buffer
 *
 * used by relay_open() and CPU hotplug.
 */
static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
{
	struct rchan_buf *buf = NULL;
	struct dentry *dentry;

	if (chan->is_global)
		return *per_cpu_ptr(chan->buf, 0);

	buf = relay_create_buf(chan);
	if (!buf)
		return NULL;

	if (chan->has_base_filename) {
		dentry = relay_create_buf_file(chan, buf, cpu);
		if (!dentry)
			goto free_buf;
		relay_set_buf_dentry(buf, dentry);
	} else {
		/* Only retrieve global info, nothing more, nothing less */
		dentry = chan->cb->create_buf_file(NULL, NULL,
						   S_IRUSR, buf,
						   &chan->is_global);
		if (IS_ERR_OR_NULL(dentry))
			goto free_buf;
	}

	buf->cpu = cpu;
	__relay_reset(buf, 1);

	if (chan->is_global) {
		*per_cpu_ptr(chan->buf, 0) = buf;
		buf->cpu = 0;
	}

	return buf;

free_buf:
	relay_destroy_buf(buf);
	return NULL;
}

/**
 * relay_close_buf - close a channel buffer
 * @buf: channel buffer
 *
 * Marks the buffer finalized and restores the default callbacks.
 * The channel buffer and channel buffer data structure are then freed
 * automatically when the last reference is given up.
 */
static void relay_close_buf(struct rchan_buf *buf)
{
	buf->finalized = 1;
	irq_work_sync(&buf->wakeup_work);
	buf->chan->cb->remove_buf_file(buf->dentry);
	kref_put(&buf->kref, relay_remove_buf);
}

int relay_prepare_cpu(unsigned int cpu)
{
	struct rchan *chan;
	struct rchan_buf *buf;

	mutex_lock(&relay_channels_mutex);
	list_for_each_entry(chan, &relay_channels, list) {
		if (*per_cpu_ptr(chan->buf, cpu))
			continue;
		buf = relay_open_buf(chan, cpu);
		if (!buf) {
			pr_err("relay: cpu %d buffer creation failed\n", cpu);
			mutex_unlock(&relay_channels_mutex);
			return -ENOMEM;
		}
		*per_cpu_ptr(chan->buf, cpu) = buf;
	}
	mutex_unlock(&relay_channels_mutex);
	return 0;
}

/**
 * relay_open - create a new relay channel
 * @base_filename: base name of files to create, %NULL for buffering only
 * @parent: dentry of parent directory, %NULL for root directory or buffer
 * @subbuf_size: size of sub-buffers
 * @n_subbufs: number of sub-buffers
 * @cb: client callback functions
 * @private_data: user-defined data
 *
 * Returns channel pointer if successful, %NULL otherwise.
 *
 * Creates a channel buffer for each cpu using the sizes and
 * attributes specified.  The created channel buffer files
 * will be named base_filename0...base_filenameN-1.  File
 * permissions will be %S_IRUSR.
 *
 * If opening a buffer (@parent = NULL) that you later wish to register
 * in a filesystem, call relay_late_setup_files() once the @parent dentry
 * is available.
 */
struct rchan *relay_open(const char *base_filename,
			 struct dentry *parent,
			 size_t subbuf_size,
			 size_t n_subbufs,
			 const struct rchan_callbacks *cb,
			 void *private_data)
{
	unsigned int i;
	struct rchan *chan;
	struct rchan_buf *buf;

	if (!(subbuf_size && n_subbufs))
		return NULL;
	if (subbuf_size > UINT_MAX / n_subbufs)
		return NULL;
	if (!cb || !cb->create_buf_file || !cb->remove_buf_file)
		return NULL;

	chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->buf = alloc_percpu(struct rchan_buf *);
	if (!chan->buf) {
		kfree(chan);
		return NULL;
	}

	chan->version = RELAYFS_CHANNEL_VERSION;
	chan->n_subbufs = n_subbufs;
	chan->subbuf_size = subbuf_size;
	chan->alloc_size = PAGE_ALIGN(subbuf_size * n_subbufs);
	chan->parent = parent;
	chan->private_data = private_data;
	if (base_filename) {
		chan->has_base_filename = 1;
		strlcpy(chan->base_filename, base_filename, NAME_MAX);
	}
	chan->cb = cb;
	kref_init(&chan->kref);

	mutex_lock(&relay_channels_mutex);
	for_each_online_cpu(i) {
		buf = relay_open_buf(chan, i);
		if (!buf)
			goto free_bufs;
		*per_cpu_ptr(chan->buf, i) = buf;
	}
	list_add(&chan->list, &relay_channels);
	mutex_unlock(&relay_channels_mutex);

	return chan;

free_bufs:
	for_each_possible_cpu(i) {
		if ((buf = *per_cpu_ptr(chan->buf, i)))
			relay_close_buf(buf);
	}

	kref_put(&chan->kref, relay_destroy_channel);
	mutex_unlock(&relay_channels_mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(relay_open);
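
/*
 * Example (illustrative sketch, not taken from any in-tree user; the
 * example_* names are hypothetical): a typical client backs the two
 * mandatory callbacks with debugfs and then opens the channel:
 *
 *	static struct dentry *example_create_buf_file(const char *filename,
 *						      struct dentry *parent,
 *						      umode_t mode,
 *						      struct rchan_buf *buf,
 *						      int *is_global)
 *	{
 *		return debugfs_create_file(filename, mode, parent, buf,
 *					   &relay_file_operations);
 *	}
 *
 *	static int example_remove_buf_file(struct dentry *dentry)
 *	{
 *		debugfs_remove(dentry);
 *		return 0;
 *	}
 *
 *	static const struct rchan_callbacks example_cb = {
 *		.create_buf_file	= example_create_buf_file,
 *		.remove_buf_file	= example_remove_buf_file,
 *	};
 *
 *	chan = relay_open("example", parent, 4096, 8, &example_cb, NULL);
 */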

struct rchan_percpu_buf_dispatcher {
	struct rchan_buf *buf;
	struct dentry *dentry;
};

/* Called in atomic context. */
static void __relay_set_buf_dentry(void *info)
{
	struct rchan_percpu_buf_dispatcher *p = info;

	relay_set_buf_dentry(p->buf, p->dentry);
}

/**
 * relay_late_setup_files - triggers file creation
 * @chan: channel to operate on
 * @base_filename: base name of files to create
 * @parent: dentry of parent directory, %NULL for root directory
 *
 * Returns 0 if successful, non-zero otherwise.
 *
 * Use to setup files for a previously buffer-only channel created
 * by relay_open() with a NULL parent dentry.
 *
 * For example, this is useful for performing early tracing in kernel,
 * before VFS is up and then exposing the early results once the dentry
 * is available.
 */
int relay_late_setup_files(struct rchan *chan,
			   const char *base_filename,
			   struct dentry *parent)
{
	int err = 0;
	unsigned int i, curr_cpu;
	unsigned long flags;
	struct dentry *dentry;
	struct rchan_buf *buf;
	struct rchan_percpu_buf_dispatcher disp;

	if (!chan || !base_filename)
		return -EINVAL;

	strlcpy(chan->base_filename, base_filename, NAME_MAX);

	mutex_lock(&relay_channels_mutex);
	/* Is chan already set up? */
	if (unlikely(chan->has_base_filename)) {
		mutex_unlock(&relay_channels_mutex);
		return -EEXIST;
	}
	chan->has_base_filename = 1;
	chan->parent = parent;

	if (chan->is_global) {
		err = -EINVAL;
		buf = *per_cpu_ptr(chan->buf, 0);
		if (!WARN_ON_ONCE(!buf)) {
			dentry = relay_create_buf_file(chan, buf, 0);
			if (dentry && !WARN_ON_ONCE(!chan->is_global)) {
				relay_set_buf_dentry(buf, dentry);
				err = 0;
			}
		}
		mutex_unlock(&relay_channels_mutex);
		return err;
	}

	curr_cpu = get_cpu();
	/*
	 * The CPU hotplug notifier ran before us and created buffers with
	 * no files associated. So it's safe to call relay_setup_buf_file()
	 * on all currently online CPUs.
	 */
	for_each_online_cpu(i) {
		buf = *per_cpu_ptr(chan->buf, i);
		if (unlikely(!buf)) {
			WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n");
			err = -EINVAL;
			break;
		}

		dentry = relay_create_buf_file(chan, buf, i);
		if (unlikely(!dentry)) {
			err = -EINVAL;
			break;
		}

		if (curr_cpu == i) {
			local_irq_save(flags);
			relay_set_buf_dentry(buf, dentry);
			local_irq_restore(flags);
		} else {
			disp.buf = buf;
			disp.dentry = dentry;
			smp_mb();
			/* relay_channels_mutex must be held, so wait. */
			err = smp_call_function_single(i,
						       __relay_set_buf_dentry,
						       &disp, 1);
		}
		if (unlikely(err))
			break;
	}
	put_cpu();
	mutex_unlock(&relay_channels_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(relay_late_setup_files);
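
/*
 * Example (illustrative sketch; example_cb and debugfs_dir are hypothetical):
 * early boot code can trace into a buffer-only channel and attach files
 * later, once the target filesystem is available.  Note that in the
 * buffer-only case the create_buf_file() callback is still invoked with a
 * NULL filename, solely to query is_global.
 *
 *	chan = relay_open(NULL, NULL, subbuf_size, n_subbufs,
 *			  &example_cb, NULL);
 *	...
 *	err = relay_late_setup_files(chan, "example", debugfs_dir);
 */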

/**
 * relay_switch_subbuf - switch to a new sub-buffer
 * @buf: channel buffer
 * @length: size of current event
 *
 * Returns either the length passed in or 0 if full.
 *
 * Performs sub-buffer-switch tasks such as invoking callbacks,
 * updating padding counts, waking up readers, etc.
 */
size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
{
	void *old, *new;
	size_t old_subbuf, new_subbuf;

	if (unlikely(length > buf->chan->subbuf_size))
		goto toobig;

	if (buf->offset != buf->chan->subbuf_size + 1) {
		buf->prev_padding = buf->chan->subbuf_size - buf->offset;
		old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
		buf->padding[old_subbuf] = buf->prev_padding;
		buf->subbufs_produced++;
		if (buf->dentry)
			d_inode(buf->dentry)->i_size +=
				buf->chan->subbuf_size -
				buf->padding[old_subbuf];
		else
			buf->early_bytes += buf->chan->subbuf_size -
					    buf->padding[old_subbuf];
		smp_mb();
		if (waitqueue_active(&buf->read_wait)) {
			/*
			 * Calling wake_up_interruptible() from here
			 * will deadlock if we happen to be logging
			 * from the scheduler (trying to re-grab
			 * rq->lock), so defer it.
			 */
			irq_work_queue(&buf->wakeup_work);
		}
	}

	old = buf->data;
	new_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
	new = buf->start + new_subbuf * buf->chan->subbuf_size;
	buf->offset = 0;
	if (!relay_subbuf_start(buf, new, old, buf->prev_padding)) {
		buf->offset = buf->chan->subbuf_size + 1;
		return 0;
	}
	buf->data = new;
	buf->padding[new_subbuf] = 0;

	if (unlikely(length + buf->offset > buf->chan->subbuf_size))
		goto toobig;

	return length;

toobig:
	buf->chan->last_toobig = length;
	return 0;
}
EXPORT_SYMBOL_GPL(relay_switch_subbuf);
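
/*
 * Rough sketch of how the inline write helpers in <linux/relay.h> use
 * relay_switch_subbuf() (illustrative only, not the exact header code):
 * when an event doesn't fit in the current sub-buffer, switch first,
 * then copy into the new one.
 *
 *	if (unlikely(buf->offset + length > buf->chan->subbuf_size))
 *		length = relay_switch_subbuf(buf, length);
 *	if (length) {
 *		memcpy(buf->data + buf->offset, data, length);
 *		buf->offset += length;
 *	}
 */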

/**
 * relay_subbufs_consumed - update the buffer's sub-buffers-consumed count
 * @chan: the channel
 * @cpu: the cpu associated with the channel buffer to update
 * @subbufs_consumed: number of sub-buffers to add to current buf's count
 *
 * Adds to the channel buffer's consumed sub-buffer count.
 * subbufs_consumed should be the number of sub-buffers newly consumed,
 * not the total consumed.
 *
 * NOTE. Kernel clients don't need to call this function if the channel
 * mode is 'overwrite'.
 */
void relay_subbufs_consumed(struct rchan *chan,
			    unsigned int cpu,
			    size_t subbufs_consumed)
{
	struct rchan_buf *buf;

	if (!chan || cpu >= NR_CPUS)
		return;

	buf = *per_cpu_ptr(chan->buf, cpu);
	if (!buf || subbufs_consumed > chan->n_subbufs)
		return;

	if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed)
		buf->subbufs_consumed = buf->subbufs_produced;
	else
		buf->subbufs_consumed += subbufs_consumed;
}
EXPORT_SYMBOL_GPL(relay_subbufs_consumed);
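
/*
 * Illustrative sketch (the example_* helper is hypothetical): a kernel-side
 * consumer in no-overwrite mode reports each sub-buffer it has fully drained
 * so the producer can reuse it:
 *
 *	example_drain_one_subbuf(chan, cpu);
 *	relay_subbufs_consumed(chan, cpu, 1);
 */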

/**
 * relay_close - close the channel
 * @chan: the channel
 *
 * Closes all channel buffers and frees the channel.
 */
void relay_close(struct rchan *chan)
{
	struct rchan_buf *buf;
	unsigned int i;

	if (!chan)
		return;

	mutex_lock(&relay_channels_mutex);
	if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0)))
		relay_close_buf(buf);
	else
		for_each_possible_cpu(i)
			if ((buf = *per_cpu_ptr(chan->buf, i)))
				relay_close_buf(buf);

	if (chan->last_toobig)
		printk(KERN_WARNING "relay: one or more items not logged "
		       "[item size (%zd) > sub-buffer size (%zd)]\n",
		       chan->last_toobig, chan->subbuf_size);

	list_del(&chan->list);
	kref_put(&chan->kref, relay_destroy_channel);
	mutex_unlock(&relay_channels_mutex);
}
EXPORT_SYMBOL_GPL(relay_close);

/**
 * relay_flush - flush the channel
 * @chan: the channel
 *
 * Flushes all channel buffers, i.e. forces buffer switch.
 */
void relay_flush(struct rchan *chan)
{
	struct rchan_buf *buf;
	unsigned int i;

	if (!chan)
		return;

	if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
		relay_switch_subbuf(buf, 0);
		return;
	}

	mutex_lock(&relay_channels_mutex);
	for_each_possible_cpu(i)
		if ((buf = *per_cpu_ptr(chan->buf, i)))
			relay_switch_subbuf(buf, 0);
	mutex_unlock(&relay_channels_mutex);
}
EXPORT_SYMBOL_GPL(relay_flush);

/**
 * relay_file_open - open file op for relay files
 * @inode: the inode
 * @filp: the file
 *
 * Increments the channel buffer refcount.
 */
static int relay_file_open(struct inode *inode, struct file *filp)
{
	struct rchan_buf *buf = inode->i_private;
	kref_get(&buf->kref);
	filp->private_data = buf;

	return nonseekable_open(inode, filp);
}

/**
 * relay_file_mmap - mmap file op for relay files
 * @filp: the file
 * @vma: the vma describing what to map
 *
 * Calls upon relay_mmap_buf() to map the file into user space.
 */
static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct rchan_buf *buf = filp->private_data;
	return relay_mmap_buf(buf, vma);
}

/**
 * relay_file_poll - poll file op for relay files
 * @filp: the file
 * @wait: poll table
 *
 * Poll implementation.
 */
static __poll_t relay_file_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask = 0;
	struct rchan_buf *buf = filp->private_data;

	if (buf->finalized)
		return EPOLLERR;

	if (filp->f_mode & FMODE_READ) {
		poll_wait(filp, &buf->read_wait, wait);
		if (!relay_buf_empty(buf))
			mask |= EPOLLIN | EPOLLRDNORM;
	}

	return mask;
}

/**
 * relay_file_release - release file op for relay files
 * @inode: the inode
 * @filp: the file
 *
 * Decrements the channel refcount, as the filesystem is
 * no longer using it.
 */
static int relay_file_release(struct inode *inode, struct file *filp)
{
	struct rchan_buf *buf = filp->private_data;
	kref_put(&buf->kref, relay_remove_buf);

	return 0;
}

/*
 * relay_file_read_consume - update the consumed count for the buffer
 */
static void relay_file_read_consume(struct rchan_buf *buf,
				    size_t read_pos,
				    size_t bytes_consumed)
{
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;
	size_t read_subbuf;

	if (buf->subbufs_produced == buf->subbufs_consumed &&
	    buf->offset == buf->bytes_consumed)
		return;

	if (buf->bytes_consumed + bytes_consumed > subbuf_size) {
		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
		buf->bytes_consumed = 0;
	}

	buf->bytes_consumed += bytes_consumed;
	if (!read_pos)
		read_subbuf = buf->subbufs_consumed % n_subbufs;
	else
		read_subbuf = read_pos / buf->chan->subbuf_size;
	if (buf->bytes_consumed + buf->padding[read_subbuf] == subbuf_size) {
		if ((read_subbuf == buf->subbufs_produced % n_subbufs) &&
		    (buf->offset == subbuf_size))
			return;
		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
		buf->bytes_consumed = 0;
	}
}

/*
 * relay_file_read_avail - boolean, are there unconsumed bytes available?
 */
static int relay_file_read_avail(struct rchan_buf *buf)
{
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;
	size_t produced = buf->subbufs_produced;
	size_t consumed;

	relay_file_read_consume(buf, 0, 0);

	consumed = buf->subbufs_consumed;

	if (unlikely(buf->offset > subbuf_size)) {
		if (produced == consumed)
			return 0;
		return 1;
	}

	if (unlikely(produced - consumed >= n_subbufs)) {
		consumed = produced - n_subbufs + 1;
		buf->subbufs_consumed = consumed;
		buf->bytes_consumed = 0;
	}

	produced = (produced % n_subbufs) * subbuf_size + buf->offset;
	consumed = (consumed % n_subbufs) * subbuf_size + buf->bytes_consumed;

	if (consumed > produced)
		produced += n_subbufs * subbuf_size;

	if (consumed == produced) {
		if (buf->offset == subbuf_size &&
		    buf->subbufs_produced > buf->subbufs_consumed)
			return 1;
		return 0;
	}

	return 1;
}

/**
 * relay_file_read_subbuf_avail - return bytes available in sub-buffer
 * @read_pos: file read position
 * @buf: relay channel buffer
 */
static size_t relay_file_read_subbuf_avail(size_t read_pos,
					   struct rchan_buf *buf)
{
	size_t padding, avail = 0;
	size_t read_subbuf, read_offset, write_subbuf, write_offset;
	size_t subbuf_size = buf->chan->subbuf_size;

	write_subbuf = (buf->data - buf->start) / subbuf_size;
	write_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset;
	read_subbuf = read_pos / subbuf_size;
	read_offset = read_pos % subbuf_size;
	padding = buf->padding[read_subbuf];

	if (read_subbuf == write_subbuf) {
		if (read_offset + padding < write_offset)
			avail = write_offset - (read_offset + padding);
	} else
		avail = (subbuf_size - padding) - read_offset;

	return avail;
}

/**
 * relay_file_read_start_pos - find the first available byte to read
 * @buf: relay channel buffer
 *
 * If the read_pos is in the middle of padding, return the
 * position of the first actually available byte, otherwise
 * return the original value.
 */
static size_t relay_file_read_start_pos(struct rchan_buf *buf)
{
	size_t read_subbuf, padding, padding_start, padding_end;
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;
	size_t consumed = buf->subbufs_consumed % n_subbufs;
	size_t read_pos = consumed * subbuf_size + buf->bytes_consumed;

	read_subbuf = read_pos / subbuf_size;
	padding = buf->padding[read_subbuf];
	padding_start = (read_subbuf + 1) * subbuf_size - padding;
	padding_end = (read_subbuf + 1) * subbuf_size;
	if (read_pos >= padding_start && read_pos < padding_end) {
		read_subbuf = (read_subbuf + 1) % n_subbufs;
		read_pos = read_subbuf * subbuf_size;
	}

	return read_pos;
}

/**
 * relay_file_read_end_pos - return the new read position
 * @read_pos: file read position
 * @buf: relay channel buffer
 * @count: number of bytes to be read
 */
static size_t relay_file_read_end_pos(struct rchan_buf *buf,
				      size_t read_pos,
				      size_t count)
{
	size_t read_subbuf, padding, end_pos;
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;

	read_subbuf = read_pos / subbuf_size;
	padding = buf->padding[read_subbuf];
	if (read_pos % subbuf_size + count + padding == subbuf_size)
		end_pos = (read_subbuf + 1) * subbuf_size;
	else
		end_pos = read_pos + count;
	if (end_pos >= subbuf_size * n_subbufs)
		end_pos = 0;

	return end_pos;
}

static ssize_t relay_file_read(struct file *filp,
			       char __user *buffer,
			       size_t count,
			       loff_t *ppos)
{
	struct rchan_buf *buf = filp->private_data;
	size_t read_start, avail;
	size_t written = 0;
	int ret;

	if (!count)
		return 0;

	inode_lock(file_inode(filp));
	do {
		void *from;

		if (!relay_file_read_avail(buf))
			break;

		read_start = relay_file_read_start_pos(buf);
		avail = relay_file_read_subbuf_avail(read_start, buf);
		if (!avail)
			break;

		avail = min(count, avail);
		from = buf->start + read_start;
		ret = avail;
		if (copy_to_user(buffer, from, avail))
			break;

		buffer += ret;
		written += ret;
		count -= ret;

		relay_file_read_consume(buf, read_start, ret);
		*ppos = relay_file_read_end_pos(buf, read_start, ret);
	} while (count);
	inode_unlock(file_inode(filp));

	return written;
}

static void relay_consume_bytes(struct rchan_buf *rbuf, int bytes_consumed)
{
	rbuf->bytes_consumed += bytes_consumed;

	if (rbuf->bytes_consumed >= rbuf->chan->subbuf_size) {
		relay_subbufs_consumed(rbuf->chan, rbuf->cpu, 1);
		rbuf->bytes_consumed %= rbuf->chan->subbuf_size;
	}
}

static void relay_pipe_buf_release(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	struct rchan_buf *rbuf;

	rbuf = (struct rchan_buf *)page_private(buf->page);
	relay_consume_bytes(rbuf, buf->private);
}

static const struct pipe_buf_operations relay_pipe_buf_ops = {
	.release	= relay_pipe_buf_release,
	.try_steal	= generic_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

static void relay_page_release(struct splice_pipe_desc *spd, unsigned int i)
{
}

/*
 * subbuf_splice_actor - splice up to one subbuf's worth of data
 */
static ssize_t subbuf_splice_actor(struct file *in,
				   loff_t *ppos,
				   struct pipe_inode_info *pipe,
				   size_t len,
				   unsigned int flags,
				   int *nonpad_ret)
{
	unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
	struct rchan_buf *rbuf = in->private_data;
	unsigned int subbuf_size = rbuf->chan->subbuf_size;
	uint64_t pos = (uint64_t) *ppos;
	uint32_t alloc_size = (uint32_t) rbuf->chan->alloc_size;
	size_t read_start = (size_t) do_div(pos, alloc_size);
	size_t read_subbuf = read_start / subbuf_size;
	size_t padding = rbuf->padding[read_subbuf];
	size_t nonpad_end = read_subbuf * subbuf_size + subbuf_size - padding;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.nr_pages = 0,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.partial = partial,
		.ops = &relay_pipe_buf_ops,
		.spd_release = relay_page_release,
	};
	ssize_t ret;

	if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
		return 0;
	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/*
	 * Adjust read len, if longer than what is available
	 */
	if (len > (subbuf_size - read_start % subbuf_size))
		len = subbuf_size - read_start % subbuf_size;

	subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT;
	pidx = (read_start / PAGE_SIZE) % subbuf_pages;
	poff = read_start & ~PAGE_MASK;
	nr_pages = min_t(unsigned int, subbuf_pages, spd.nr_pages_max);

	for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) {
		unsigned int this_len, this_end, private;
		unsigned int cur_pos = read_start + total_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_SIZE - poff);
		private = this_len;

		spd.pages[spd.nr_pages] = rbuf->page_array[pidx];
		spd.partial[spd.nr_pages].offset = poff;

		this_end = cur_pos + this_len;
		if (this_end >= nonpad_end) {
			this_len = nonpad_end - cur_pos;
			private = this_len + padding;
		}
		spd.partial[spd.nr_pages].len = this_len;
		spd.partial[spd.nr_pages].private = private;

		len -= this_len;
		total_len += this_len;
		poff = 0;
		pidx = (pidx + 1) % subbuf_pages;

		if (this_end >= nonpad_end) {
			spd.nr_pages++;
			break;
		}
	}

	ret = 0;
	if (!spd.nr_pages)
		goto out;

	ret = *nonpad_ret = splice_to_pipe(pipe, &spd);
	if (ret < 0 || ret < total_len)
		goto out;

	if (read_start + ret == nonpad_end)
		ret += padding;

out:
	splice_shrink_spd(&spd);
	return ret;
}

static ssize_t relay_file_splice_read(struct file *in,
				      loff_t *ppos,
				      struct pipe_inode_info *pipe,
				      size_t len,
				      unsigned int flags)
{
	ssize_t spliced;
	int ret;
	int nonpad_ret = 0;

	ret = 0;
	spliced = 0;

	while (len && !spliced) {
		ret = subbuf_splice_actor(in, ppos, pipe, len, flags, &nonpad_ret);
		if (ret < 0)
			break;
		else if (!ret) {
			if (flags & SPLICE_F_NONBLOCK)
				ret = -EAGAIN;
			break;
		}

		*ppos += ret;
		if (ret > len)
			len = 0;
		else
			len -= ret;
		spliced += nonpad_ret;
		nonpad_ret = 0;
	}

	if (spliced)
		return spliced;

	return ret;
}

const struct file_operations relay_file_operations = {
	.open		= relay_file_open,
	.poll		= relay_file_poll,
	.mmap		= relay_file_mmap,
	.read		= relay_file_read,
	.llseek		= no_llseek,
	.release	= relay_file_release,
	.splice_read	= relay_file_splice_read,
};
EXPORT_SYMBOL_GPL(relay_file_operations);