// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Debugfs Interface
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon-dbgfs: " fmt

#include <linux/damon.h>
#include <linux/debugfs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page_idle.h>
#include <linux/slab.h>

static struct damon_ctx **dbgfs_ctxs;
static int dbgfs_nr_ctxs;
static struct dentry **dbgfs_dirs;
static DEFINE_MUTEX(damon_dbgfs_lock);

/*
 * Returns a '\0'-terminated kernel copy of the user input on success, or an
 * ERR_PTR()-encoded negative error code otherwise.
 */
static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t ret;

	/* We do not accept continuous write */
	if (*ppos)
		return ERR_PTR(-EINVAL);

	kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return ERR_PTR(-ENOMEM);

	ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count);
	if (ret != count) {
		kfree(kbuf);
		return ERR_PTR(-EIO);
	}
	kbuf[ret] = '\0';

	return kbuf;
}

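/*
 * The 'attrs' file holds five space-separated values, in this order:
 * sample_interval, aggr_interval, ops_update_interval (all in microseconds),
 * min_nr_regions, and max_nr_regions.  A purely illustrative example:
 *
 *	# echo "5000 100000 1000000 10 1000" > <debugfs>/damon/attrs
 */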
static ssize_t dbgfs_attrs_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char kbuf[128];
	int ret;

	mutex_lock(&ctx->kdamond_lock);
	ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n",
			ctx->sample_interval, ctx->aggr_interval,
			ctx->ops_update_interval, ctx->min_nr_regions,
			ctx->max_nr_regions);
	mutex_unlock(&ctx->kdamond_lock);

	return simple_read_from_buffer(buf, count, ppos, kbuf, ret);
}

static ssize_t dbgfs_attrs_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	unsigned long s, a, r, minr, maxr;
	char *kbuf;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	if (sscanf(kbuf, "%lu %lu %lu %lu %lu",
				&s, &a, &r, &minr, &maxr) != 5) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ret = -EBUSY;
		goto unlock_out;
	}

	ret = damon_set_attrs(ctx, s, a, r, minr, maxr);
	if (!ret)
		ret = count;
unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
out:
	kfree(kbuf);
	return ret;
}

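/*
 * Each line of the 'schemes' file shows one scheme as, in order:
 * min_sz_region max_sz_region min_nr_accesses max_nr_accesses
 * min_age_region max_age_region action quota.ms quota.sz
 * quota.reset_interval quota.weight_sz quota.weight_nr_accesses
 * quota.weight_age wmarks.metric wmarks.interval wmarks.high wmarks.mid
 * wmarks.low stat.nr_tried stat.sz_tried stat.nr_applied stat.sz_applied
 * stat.qt_exceeds
 */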
static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len)
{
	struct damos *s;
	int written = 0;
	int rc;

	damon_for_each_scheme(s, c) {
		rc = scnprintf(&buf[written], len - written,
				"%lu %lu %u %u %u %u %d %lu %lu %lu %u %u %u %d %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
				s->min_sz_region, s->max_sz_region,
				s->min_nr_accesses, s->max_nr_accesses,
				s->min_age_region, s->max_age_region,
				s->action,
				s->quota.ms, s->quota.sz,
				s->quota.reset_interval,
				s->quota.weight_sz,
				s->quota.weight_nr_accesses,
				s->quota.weight_age,
				s->wmarks.metric, s->wmarks.interval,
				s->wmarks.high, s->wmarks.mid, s->wmarks.low,
				s->stat.nr_tried, s->stat.sz_tried,
				s->stat.nr_applied, s->stat.sz_applied,
				s->stat.qt_exceeds);
		if (!rc)
			return -ENOMEM;

		written += rc;
	}
	return written;
}

static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t len;

	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return -ENOMEM;

	mutex_lock(&ctx->kdamond_lock);
	len = sprint_schemes(ctx, kbuf, count);
	mutex_unlock(&ctx->kdamond_lock);
	if (len < 0)
		goto out;
	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);

out:
	kfree(kbuf);
	return len;
}

static void free_schemes_arr(struct damos **schemes, ssize_t nr_schemes)
{
	ssize_t i;

	for (i = 0; i < nr_schemes; i++)
		kfree(schemes[i]);
	kfree(schemes);
}

static bool damos_action_valid(int action)
{
	switch (action) {
	case DAMOS_WILLNEED:
	case DAMOS_COLD:
	case DAMOS_PAGEOUT:
	case DAMOS_HUGEPAGE:
	case DAMOS_NOHUGEPAGE:
	case DAMOS_STAT:
		return true;
	default:
		return false;
	}
}

/*
 * Converts a string into an array of struct damos pointers
 *
 * Returns an array of struct damos pointers if the conversion succeeds, or
 * NULL otherwise.
 */
static struct damos **str_to_schemes(const char *str, ssize_t len,
				ssize_t *nr_schemes)
{
	struct damos *scheme, **schemes;
	const int max_nr_schemes = 256;
	int pos = 0, parsed, ret;
	unsigned long min_sz, max_sz;
	unsigned int min_nr_a, max_nr_a, min_age, max_age;
	unsigned int action;

	schemes = kmalloc_array(max_nr_schemes, sizeof(scheme),
			GFP_KERNEL);
	if (!schemes)
		return NULL;

	*nr_schemes = 0;
	while (pos < len && *nr_schemes < max_nr_schemes) {
		struct damos_quota quota = {};
		struct damos_watermarks wmarks;

		ret = sscanf(&str[pos],
				"%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n",
				&min_sz, &max_sz, &min_nr_a, &max_nr_a,
				&min_age, &max_age, &action, &quota.ms,
				&quota.sz, &quota.reset_interval,
				&quota.weight_sz, &quota.weight_nr_accesses,
				&quota.weight_age, &wmarks.metric,
				&wmarks.interval, &wmarks.high, &wmarks.mid,
				&wmarks.low, &parsed);
		if (ret != 18)
			break;
		if (!damos_action_valid(action))
			goto fail;

		if (min_sz > max_sz || min_nr_a > max_nr_a || min_age > max_age)
			goto fail;

		if (wmarks.high < wmarks.mid || wmarks.high < wmarks.low ||
		    wmarks.mid < wmarks.low)
			goto fail;

		pos += parsed;
		scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a,
				min_age, max_age, action, &quota, &wmarks);
		if (!scheme)
			goto fail;

		schemes[*nr_schemes] = scheme;
		*nr_schemes += 1;
	}
	return schemes;
fail:
	free_schemes_arr(schemes, *nr_schemes);
	return NULL;
}

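/*
 * A scheme written to the 'schemes' file consists of the first 18 fields of
 * the read format above, i.e. everything up to and including wmarks.low (the
 * stat fields are output only).  The action is given as the numeric value of
 * one of the DAMOS_* actions accepted by damos_action_valid().  A purely
 * illustrative example, assuming DAMOS_PAGEOUT is 2 and DAMOS_WMARK_NONE is
 * 0 (zeroed quota fields mean "no limit"):
 *
 *	# echo "4096 8192 0 0 10 20 2 0 0 0 0 0 0 0 0 0 0 0" \
 *		> <debugfs>/damon/schemes
 */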
static ssize_t dbgfs_schemes_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	struct damos **schemes;
	ssize_t nr_schemes = 0, ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	schemes = str_to_schemes(kbuf, count, &nr_schemes);
	if (!schemes) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ret = -EBUSY;
		goto unlock_out;
	}

	ret = damon_set_schemes(ctx, schemes, nr_schemes);
	if (!ret) {
		ret = count;
		nr_schemes = 0;
	}

unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	free_schemes_arr(schemes, nr_schemes);
out:
	kfree(kbuf);
	return ret;
}

static inline bool target_has_pid(const struct damon_ctx *ctx)
{
	return ctx->ops.id == DAMON_OPS_VADDR;
}

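/*
 * 'target_ids' reads show one id per monitoring target: the pid number for
 * virtual address space monitoring targets, or the constant 42 for a
 * physical address space monitoring context.
 */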
static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len)
{
	struct damon_target *t;
	int id;
	int written = 0;
	int rc;

	damon_for_each_target(t, ctx) {
		if (target_has_pid(ctx))
			/* Show pid numbers to debugfs users */
			id = pid_vnr(t->pid);
		else
			/* Show 42 for physical address space, just for fun */
			id = 42;

		rc = scnprintf(&buf[written], len - written, "%d ", id);
		if (!rc)
			return -ENOMEM;
		written += rc;
	}
	if (written)
		written -= 1;
	written += scnprintf(&buf[written], len - written, "\n");
	return written;
}

static ssize_t dbgfs_target_ids_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	ssize_t len;
	char ids_buf[320];

	mutex_lock(&ctx->kdamond_lock);
	len = sprint_target_ids(ctx, ids_buf, 320);
	mutex_unlock(&ctx->kdamond_lock);
	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, count, ppos, ids_buf, len);
}

/*
 * Converts a string into an array of integers
 *
 * Returns an array of integers if the conversion succeeds, or NULL otherwise.
 */
static int *str_to_ints(const char *str, ssize_t len, ssize_t *nr_ints)
{
	int *array;
	const int max_nr_ints = 32;
	int nr;
	int pos = 0, parsed, ret;

	*nr_ints = 0;
	array = kmalloc_array(max_nr_ints, sizeof(*array), GFP_KERNEL);
	if (!array)
		return NULL;
	while (*nr_ints < max_nr_ints && pos < len) {
		ret = sscanf(&str[pos], "%d%n", &nr, &parsed);
		pos += parsed;
		if (ret != 1)
			break;
		array[*nr_ints] = nr;
		*nr_ints += 1;
	}

	return array;
}

static void dbgfs_put_pids(struct pid **pids, int nr_pids)
{
	int i;

	for (i = 0; i < nr_pids; i++)
		put_pid(pids[i]);
}

/*
 * Converts a string into an array of struct pid pointers
 *
 * Returns an array of struct pid pointers if the conversion succeeds, or NULL
 * otherwise.
 */
static struct pid **str_to_pids(const char *str, ssize_t len, ssize_t *nr_pids)
{
	int *ints;
	ssize_t nr_ints;
	struct pid **pids;

	*nr_pids = 0;

	ints = str_to_ints(str, len, &nr_ints);
	if (!ints)
		return NULL;

	pids = kmalloc_array(nr_ints, sizeof(*pids), GFP_KERNEL);
	if (!pids)
		goto out;

	for (; *nr_pids < nr_ints; (*nr_pids)++) {
		pids[*nr_pids] = find_get_pid(ints[*nr_pids]);
		if (!pids[*nr_pids]) {
			dbgfs_put_pids(pids, *nr_pids);
			kfree(ints);
			kfree(pids);
			return NULL;
		}
	}

out:
	kfree(ints);
	return pids;
}

/*
 * dbgfs_set_targets() - Set monitoring targets.
 * @ctx:	monitoring context
 * @nr_targets:	number of targets
 * @pids:	array of target pids (its size must be the same as @nr_targets)
 *
 * This function should not be called while the kdamond is running.  @pids is
 * ignored if the context is not configured to have pid in each target.  On
 * failure, reference counts of all pids in @pids are decremented.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int dbgfs_set_targets(struct damon_ctx *ctx, ssize_t nr_targets,
		struct pid **pids)
{
	ssize_t i;
	struct damon_target *t, *next;

	damon_for_each_target_safe(t, next, ctx) {
		if (target_has_pid(ctx))
			put_pid(t->pid);
		damon_destroy_target(t);
	}

	for (i = 0; i < nr_targets; i++) {
		t = damon_new_target();
		if (!t) {
			damon_for_each_target_safe(t, next, ctx)
				damon_destroy_target(t);
			if (target_has_pid(ctx))
				dbgfs_put_pids(pids, nr_targets);
			return -ENOMEM;
		}
		if (target_has_pid(ctx))
			t->pid = pids[i];
		damon_add_target(ctx, t);
	}

	return 0;
}

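/*
 * Writes to 'target_ids' configure the monitoring targets of the context:
 * either a list of pid numbers for virtual address space monitoring, or the
 * literal string "paddr" for physical address space monitoring.  Purely
 * illustrative examples:
 *
 *	# echo "1234 5678" > <debugfs>/damon/target_ids
 *	# echo "paddr" > <debugfs>/damon/target_ids
 */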
static ssize_t dbgfs_target_ids_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	bool id_is_pid = true;
	char *kbuf;
	struct pid **target_pids = NULL;
	ssize_t nr_targets;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	if (!strncmp(kbuf, "paddr\n", count)) {
		id_is_pid = false;
		nr_targets = 1;
	}

	if (id_is_pid) {
		target_pids = str_to_pids(kbuf, count, &nr_targets);
		if (!target_pids) {
			ret = -ENOMEM;
			goto out;
		}
	}

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		if (id_is_pid)
			dbgfs_put_pids(target_pids, nr_targets);
		ret = -EBUSY;
		goto unlock_out;
	}

	/* remove previously set targets */
	dbgfs_set_targets(ctx, 0, NULL);
	if (!nr_targets) {
		ret = count;
		goto unlock_out;
	}

	/* Configure the context for the address space type */
	if (id_is_pid)
		ret = damon_select_ops(ctx, DAMON_OPS_VADDR);
	else
		ret = damon_select_ops(ctx, DAMON_OPS_PADDR);
	if (ret)
		goto unlock_out;

	ret = dbgfs_set_targets(ctx, nr_targets, target_pids);
	if (!ret)
		ret = count;

unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	kfree(target_pids);
out:
	kfree(kbuf);
	return ret;
}

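/*
 * Each line of the 'init_regions' file describes one initial monitoring
 * region as "<target idx> <start address> <end address>", where the target
 * index is the position of the target within the context.
 */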
static ssize_t sprint_init_regions(struct damon_ctx *c, char *buf, ssize_t len)
{
	struct damon_target *t;
	struct damon_region *r;
	int target_idx = 0;
	int written = 0;
	int rc;

	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			rc = scnprintf(&buf[written], len - written,
					"%d %lu %lu\n",
					target_idx, r->ar.start, r->ar.end);
			if (!rc)
				return -ENOMEM;
			written += rc;
		}
		target_idx++;
	}
	return written;
}

static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t len;

	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return -ENOMEM;

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		mutex_unlock(&ctx->kdamond_lock);
		len = -EBUSY;
		goto out;
	}

	len = sprint_init_regions(ctx, kbuf, count);
	mutex_unlock(&ctx->kdamond_lock);
	if (len < 0)
		goto out;
	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);

out:
	kfree(kbuf);
	return len;
}

static int add_init_region(struct damon_ctx *c, int target_idx,
		struct damon_addr_range *ar)
{
	struct damon_target *t;
	struct damon_region *r, *prev;
	unsigned long idx = 0;
	int rc = -EINVAL;

	if (ar->start >= ar->end)
		return -EINVAL;

	damon_for_each_target(t, c) {
		if (idx++ == target_idx) {
			r = damon_new_region(ar->start, ar->end);
			if (!r)
				return -ENOMEM;
			damon_add_region(r, t);
			if (damon_nr_regions(t) > 1) {
				prev = damon_prev_region(r);
				if (prev->ar.end > r->ar.start) {
					damon_destroy_region(r, t);
					return -EINVAL;
				}
			}
			rc = 0;
		}
	}
	return rc;
}

static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len)
{
	struct damon_target *t;
	struct damon_region *r, *next;
	int pos = 0, parsed, ret;
	int target_idx;
	struct damon_addr_range ar;
	int err;

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	while (pos < len) {
		ret = sscanf(&str[pos], "%d %lu %lu%n",
				&target_idx, &ar.start, &ar.end, &parsed);
		if (ret != 3)
			break;
		err = add_init_region(c, target_idx, &ar);
		if (err)
			goto fail;
		pos += parsed;
	}

	return 0;

fail:
	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}
	return err;
}

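/*
 * Writes to 'init_regions' replace all existing initial regions with the
 * given ones.  Regions of a target must be passed in increasing address
 * order and must not overlap; otherwise the whole input is rejected with
 * -EINVAL.  A purely illustrative example setting two regions on the first
 * target:
 *
 *	# echo "0 4096 8192
 *	0 12288 16384" > <debugfs>/damon/init_regions
 */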
static ssize_t dbgfs_init_regions_write(struct file *file,
					  const char __user *buf, size_t count,
					  loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t ret = count;
	int err;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ret = -EBUSY;
		goto unlock_out;
	}

	err = set_init_regions(ctx, kbuf, ret);
	if (err)
		ret = err;

unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	kfree(kbuf);
	return ret;
}

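/*
 * 'kdamond_pid' reads show the pid of the kdamond thread currently running
 * this context, or "none" if monitoring is turned off.
 */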
static ssize_t dbgfs_kdamond_pid_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t len;

	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return -ENOMEM;

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond)
		len = scnprintf(kbuf, count, "%d\n", ctx->kdamond->pid);
	else
		len = scnprintf(kbuf, count, "none\n");
	mutex_unlock(&ctx->kdamond_lock);
	if (!len)
		goto out;
	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);

out:
	kfree(kbuf);
	return len;
}

static int damon_dbgfs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;

	return nonseekable_open(inode, file);
}

static const struct file_operations attrs_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_attrs_read,
	.write = dbgfs_attrs_write,
};

static const struct file_operations schemes_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_schemes_read,
	.write = dbgfs_schemes_write,
};

static const struct file_operations target_ids_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_target_ids_read,
	.write = dbgfs_target_ids_write,
};

static const struct file_operations init_regions_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_init_regions_read,
	.write = dbgfs_init_regions_write,
};

static const struct file_operations kdamond_pid_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_kdamond_pid_read,
};

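/*
 * Populate a per-context debugfs directory with the 'attrs', 'schemes',
 * 'target_ids', 'init_regions', and 'kdamond_pid' files, each taking the
 * context as its private data.
 */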
static void dbgfs_fill_ctx_dir(struct dentry *dir, struct damon_ctx *ctx)
{
	const char * const file_names[] = {"attrs", "schemes", "target_ids",
		"init_regions", "kdamond_pid"};
	const struct file_operations *fops[] = {&attrs_fops, &schemes_fops,
		&target_ids_fops, &init_regions_fops, &kdamond_pid_fops};
	int i;

	for (i = 0; i < ARRAY_SIZE(file_names); i++)
		debugfs_create_file(file_names[i], 0600, dir, ctx, fops[i]);
}

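/*
 * Callback invoked right before a kdamond terminates.  Targets of virtual
 * address space monitoring contexts hold references to their processes'
 * struct pid, so drop those references and destroy the targets here.
 */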
static void dbgfs_before_terminate(struct damon_ctx *ctx)
{
	struct damon_target *t, *next;

	if (!target_has_pid(ctx))
		return;

	mutex_lock(&ctx->kdamond_lock);
	damon_for_each_target_safe(t, next, ctx) {
		put_pid(t->pid);
		damon_destroy_target(t);
	}
	mutex_unlock(&ctx->kdamond_lock);
}

static struct damon_ctx *dbgfs_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = damon_new_ctx();
	if (!ctx)
		return NULL;

	if (damon_select_ops(ctx, DAMON_OPS_VADDR) &&
			damon_select_ops(ctx, DAMON_OPS_PADDR)) {
		damon_destroy_ctx(ctx);
		return NULL;
	}
	ctx->callback.before_terminate = dbgfs_before_terminate;
	return ctx;
}

static void dbgfs_destroy_ctx(struct damon_ctx *ctx)
{
	damon_destroy_ctx(ctx);
}

/*
 * Make a context of @name and create a debugfs directory for it.
 *
 * This function should be called while holding damon_dbgfs_lock.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int dbgfs_mk_context(char *name)
{
	struct dentry *root, **new_dirs, *new_dir;
	struct damon_ctx **new_ctxs, *new_ctx;

	if (damon_nr_running_ctxs())
		return -EBUSY;

	new_ctxs = krealloc(dbgfs_ctxs, sizeof(*dbgfs_ctxs) *
			(dbgfs_nr_ctxs + 1), GFP_KERNEL);
	if (!new_ctxs)
		return -ENOMEM;
	dbgfs_ctxs = new_ctxs;

	new_dirs = krealloc(dbgfs_dirs, sizeof(*dbgfs_dirs) *
			(dbgfs_nr_ctxs + 1), GFP_KERNEL);
	if (!new_dirs)
		return -ENOMEM;
	dbgfs_dirs = new_dirs;

	root = dbgfs_dirs[0];
	if (!root)
		return -ENOENT;

	new_dir = debugfs_create_dir(name, root);
	/* Below check is required for a potential duplicated name case */
	if (IS_ERR(new_dir))
		return PTR_ERR(new_dir);
	dbgfs_dirs[dbgfs_nr_ctxs] = new_dir;

	new_ctx = dbgfs_new_ctx();
	if (!new_ctx) {
		debugfs_remove(new_dir);
		dbgfs_dirs[dbgfs_nr_ctxs] = NULL;
		return -ENOMEM;
	}

	dbgfs_ctxs[dbgfs_nr_ctxs] = new_ctx;
	dbgfs_fill_ctx_dir(dbgfs_dirs[dbgfs_nr_ctxs],
			dbgfs_ctxs[dbgfs_nr_ctxs]);
	dbgfs_nr_ctxs++;

	return 0;
}

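/*
 * Writes to 'mk_contexts' create an additional monitoring context.  For
 * example (name illustrative only),
 *
 *	# echo foo > <debugfs>/damon/mk_contexts
 *
 * creates <debugfs>/damon/foo/ with its own 'attrs', 'schemes',
 * 'target_ids', 'init_regions', and 'kdamond_pid' files.  'rm_contexts'
 * removes a context made this way.
 */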
static ssize_t dbgfs_mk_context_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	char *kbuf;
	char *ctx_name;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);
	ctx_name = kmalloc(count + 1, GFP_KERNEL);
	if (!ctx_name) {
		kfree(kbuf);
		return -ENOMEM;
	}

	/* Trim white space */
	if (sscanf(kbuf, "%s", ctx_name) != 1) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&damon_dbgfs_lock);
	ret = dbgfs_mk_context(ctx_name);
	if (!ret)
		ret = count;
	mutex_unlock(&damon_dbgfs_lock);

out:
	kfree(kbuf);
	kfree(ctx_name);
	return ret;
}

/*
 * Remove a context of @name and its debugfs directory.
 *
 * This function should be called while holding damon_dbgfs_lock.
 *
 * Return 0 on success, negative error code otherwise.
 */
static int dbgfs_rm_context(char *name)
{
	struct dentry *root, *dir, **new_dirs;
	struct damon_ctx **new_ctxs;
	int i, j;

	if (damon_nr_running_ctxs())
		return -EBUSY;

	root = dbgfs_dirs[0];
	if (!root)
		return -ENOENT;

	dir = debugfs_lookup(name, root);
	if (!dir)
		return -ENOENT;

	new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
			GFP_KERNEL);
	if (!new_dirs)
		return -ENOMEM;

	new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs),
			GFP_KERNEL);
	if (!new_ctxs) {
		kfree(new_dirs);
		return -ENOMEM;
	}

	for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) {
		if (dbgfs_dirs[i] == dir) {
			debugfs_remove(dbgfs_dirs[i]);
			dbgfs_destroy_ctx(dbgfs_ctxs[i]);
			continue;
		}
		new_dirs[j] = dbgfs_dirs[i];
		new_ctxs[j++] = dbgfs_ctxs[i];
	}

	kfree(dbgfs_dirs);
	kfree(dbgfs_ctxs);

	dbgfs_dirs = new_dirs;
	dbgfs_ctxs = new_ctxs;
	dbgfs_nr_ctxs--;

	return 0;
}

static ssize_t dbgfs_rm_context_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t ret;
	char *ctx_name;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);
	ctx_name = kmalloc(count + 1, GFP_KERNEL);
	if (!ctx_name) {
		kfree(kbuf);
		return -ENOMEM;
	}

	/* Trim white space */
	if (sscanf(kbuf, "%s", ctx_name) != 1) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&damon_dbgfs_lock);
	ret = dbgfs_rm_context(ctx_name);
	if (!ret)
		ret = count;
	mutex_unlock(&damon_dbgfs_lock);

out:
	kfree(kbuf);
	kfree(ctx_name);
	return ret;
}

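/*
 * The 'monitor_on' file reads back "on" while any DAMON context is running
 * and "off" otherwise.  Writing "on" starts every debugfs-made context
 * (failing with -EINVAL if any of them has no monitoring target), and
 * writing "off" stops them all.
 */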
static ssize_t dbgfs_monitor_on_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	char monitor_on_buf[5];
	bool monitor_on = damon_nr_running_ctxs() != 0;
	int len;

	len = scnprintf(monitor_on_buf, 5, monitor_on ? "on\n" : "off\n");

	return simple_read_from_buffer(buf, count, ppos, monitor_on_buf, len);
}

static ssize_t dbgfs_monitor_on_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	ssize_t ret;
	char *kbuf;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/* Remove white space */
	if (sscanf(kbuf, "%s", kbuf) != 1) {
		kfree(kbuf);
		return -EINVAL;
	}

	mutex_lock(&damon_dbgfs_lock);
	if (!strncmp(kbuf, "on", count)) {
		int i;

		for (i = 0; i < dbgfs_nr_ctxs; i++) {
			if (damon_targets_empty(dbgfs_ctxs[i])) {
				kfree(kbuf);
				mutex_unlock(&damon_dbgfs_lock);
				return -EINVAL;
			}
		}
		ret = damon_start(dbgfs_ctxs, dbgfs_nr_ctxs, true);
	} else if (!strncmp(kbuf, "off", count)) {
		ret = damon_stop(dbgfs_ctxs, dbgfs_nr_ctxs);
	} else {
		ret = -EINVAL;
	}
	mutex_unlock(&damon_dbgfs_lock);

	if (!ret)
		ret = count;
	kfree(kbuf);
	return ret;
}

static const struct file_operations mk_contexts_fops = {
	.write = dbgfs_mk_context_write,
};

static const struct file_operations rm_contexts_fops = {
	.write = dbgfs_rm_context_write,
};

static const struct file_operations monitor_on_fops = {
	.read = dbgfs_monitor_on_read,
	.write = dbgfs_monitor_on_write,
};

static int __init __damon_dbgfs_init(void)
{
	struct dentry *dbgfs_root;
	const char * const file_names[] = {"mk_contexts", "rm_contexts",
		"monitor_on"};
	const struct file_operations *fops[] = {&mk_contexts_fops,
		&rm_contexts_fops, &monitor_on_fops};
	int i;

	dbgfs_root = debugfs_create_dir("damon", NULL);

	for (i = 0; i < ARRAY_SIZE(file_names); i++)
		debugfs_create_file(file_names[i], 0600, dbgfs_root, NULL,
				fops[i]);
	dbgfs_fill_ctx_dir(dbgfs_root, dbgfs_ctxs[0]);

	dbgfs_dirs = kmalloc_array(1, sizeof(dbgfs_root), GFP_KERNEL);
	if (!dbgfs_dirs) {
		debugfs_remove(dbgfs_root);
		return -ENOMEM;
	}
	dbgfs_dirs[0] = dbgfs_root;

	return 0;
}

/*
 * Functions for the initialization
 */

static int __init damon_dbgfs_init(void)
{
	int rc = -ENOMEM;

	mutex_lock(&damon_dbgfs_lock);
	dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL);
	if (!dbgfs_ctxs)
		goto out;
	dbgfs_ctxs[0] = dbgfs_new_ctx();
	if (!dbgfs_ctxs[0]) {
		kfree(dbgfs_ctxs);
		goto out;
	}
	dbgfs_nr_ctxs = 1;

	rc = __damon_dbgfs_init();
	if (rc) {
		kfree(dbgfs_ctxs[0]);
		kfree(dbgfs_ctxs);
		pr_err("%s: dbgfs init failed\n", __func__);
	}

out:
	mutex_unlock(&damon_dbgfs_lock);
	return rc;
}

module_init(damon_dbgfs_init);

#include "dbgfs-test.h"