/linux-6.1.9/drivers/net/wireguard/ |
D | ratelimiter.c |
    20  static DECLARE_DEFERRABLE_WORK(gc_work, wg_ratelimiter_gc_entries);
    81  queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);   in wg_ratelimiter_gc_entries()
   191  queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);   in wg_ratelimiter_init()
   211  cancel_delayed_work_sync(&gc_work);   in wg_ratelimiter_uninit()
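All of the wireguard hits are one self-rearming garbage-collection work item: a file-scope deferrable work declared statically, queued on system_power_efficient_wq about once per second, re-armed by its own handler, and cancelled synchronously on uninit. A minimal sketch of that shape follows; the my_* names are invented for illustration, not taken from ratelimiter.c.

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void my_gc_entries(struct work_struct *work);
static DECLARE_DEFERRABLE_WORK(my_gc_work, my_gc_entries);

static void my_gc_entries(struct work_struct *work)
{
        /* ... drop entries that have not been used recently ... */

        /* Re-arm: run another pass roughly one second from now. */
        queue_delayed_work(system_power_efficient_wq, &my_gc_work, HZ);
}

static int my_init(void)
{
        /* Kick off the first pass one second after init. */
        queue_delayed_work(system_power_efficient_wq, &my_gc_work, HZ);
        return 0;
}

static void my_uninit(void)
{
        /* Cancel the pending pass and wait for a running one to finish;
         * this also copes with the handler re-queueing itself. */
        cancel_delayed_work_sync(&my_gc_work);
}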
|
/linux-6.1.9/net/netfilter/ |
D | nf_conntrack_core.c |
  1468  struct conntrack_gc_work *gc_work;   in gc_worker() local
  1474  gc_work = container_of(work, struct conntrack_gc_work, dwork.work);   in gc_worker()
  1476  i = gc_work->next_bucket;   in gc_worker()
  1477  if (gc_work->early_drop)   in gc_worker()
  1481  gc_work->avg_timeout = GC_SCAN_INTERVAL_INIT;   in gc_worker()
  1482  gc_work->count = GC_SCAN_INITIAL_COUNT;   in gc_worker()
  1483  gc_work->start_time = start_time;   in gc_worker()
  1486  next_run = gc_work->avg_timeout;   in gc_worker()
  1487  count = gc_work->count;   in gc_worker()
  1520  gc_work->next_bucket = i;   in gc_worker()
  [all …]
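The conntrack GC keeps its scan state (next bucket, adaptive timeout, entry count) in a dedicated struct that embeds the delayed_work, and the worker recovers that struct from the work_struct pointer with container_of(). A rough sketch of the idiom; the struct and field names are made up, only loosely mirroring struct conntrack_gc_work.

#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_gc_ctx {
        struct delayed_work     dwork;
        u32                     next_bucket;    /* where the next pass resumes */
        bool                    early_drop;
};

static void my_gc_worker(struct work_struct *work)
{
        /* The workqueue hands us the embedded work_struct; container_of()
         * recovers the surrounding per-table context. */
        struct my_gc_ctx *gc_work =
                container_of(work, struct my_gc_ctx, dwork.work);
        u32 i = gc_work->next_bucket;

        /* ... scan a bounded slice of the table starting at bucket i,
         * evicting expired entries, so one pass never runs too long ... */

        gc_work->next_bucket = i;               /* resume here next time */
        queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, HZ);
}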
|
D | nf_flow_table_core.c |
   449  flow_table = container_of(work, struct nf_flowtable, gc_work.work);   in nf_flow_offload_work_gc()
   451  queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);   in nf_flow_offload_work_gc()
   546  INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);   in nf_flow_table_init()
   556  &flowtable->gc_work, HZ);   in nf_flow_table_init()
   586  flush_delayed_work(&flowtable->gc_work);   in nf_flow_table_gc_cleanup()
   607  cancel_delayed_work_sync(&flow_table->gc_work);   in nf_flow_table_free()
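nf_flow_table_core.c shows the whole lifecycle of a per-instance GC work: INIT_DELAYED_WORK() plus the first queueing at init time, flush_delayed_work() during GC cleanup so one final pass completes before state is torn down, and cancel_delayed_work_sync() when the table is freed. A condensed sketch under hypothetical names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_flowtable {
        struct delayed_work gc_work;
        /* ... flow hash table, locks ... */
};

static void my_flow_gc(struct work_struct *work)
{
        struct my_flowtable *ft =
                container_of(work, struct my_flowtable, gc_work.work);

        /* ... walk the table and tear down expired flows ... */
        queue_delayed_work(system_power_efficient_wq, &ft->gc_work, HZ);
}

static void my_flowtable_init(struct my_flowtable *ft)
{
        INIT_DELAYED_WORK(&ft->gc_work, my_flow_gc);
        queue_delayed_work(system_power_efficient_wq, &ft->gc_work, HZ);
}

static void my_flowtable_gc_cleanup(struct my_flowtable *ft)
{
        /* Force one GC pass to run to completion before state changes. */
        flush_delayed_work(&ft->gc_work);
}

static void my_flowtable_free(struct my_flowtable *ft)
{
        /* Stop the self-rearming work for good before freeing memory. */
        cancel_delayed_work_sync(&ft->gc_work);
}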
|
D | nft_set_rbtree.c |
    22  struct delayed_work gc_work;   member
   569  priv = container_of(work, struct nft_rbtree, gc_work.work);   in nft_rbtree_gc()
   633  queue_delayed_work(system_power_efficient_wq, &priv->gc_work,   in nft_rbtree_gc()
   653  INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);   in nft_rbtree_init()
   655  queue_delayed_work(system_power_efficient_wq, &priv->gc_work,   in nft_rbtree_init()
   667  cancel_delayed_work_sync(&priv->gc_work);   in nft_rbtree_destroy()
|
D | nf_conncount.c |
    62  struct work_struct gc_work;   member
   307  schedule_work(&data->gc_work);   in schedule_gc_worker()
   457  struct nf_conncount_data *data = container_of(work, struct nf_conncount_data, gc_work);   in tree_gc_worker()
   555  INIT_WORK(&data->gc_work, tree_gc_worker);   in nf_conncount_init()
   591  cancel_work_sync(&data->gc_work);   in nf_conncount_destroy()
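nf_conncount.c differs from the neighbouring entries in that it uses a plain work_struct, not a delayed_work: garbage collection is kicked on demand with schedule_work() rather than on a timer, and torn down with cancel_work_sync(). A minimal sketch of that variant, names invented:

#include <linux/workqueue.h>

struct my_count_data {
        struct work_struct gc_work;
        /* ... per-zone connection trees ... */
};

static void my_tree_gc_worker(struct work_struct *work)
{
        struct my_count_data *data =
                container_of(work, struct my_count_data, gc_work);

        /* ... prune closed connections from the trees ... */
}

static void my_schedule_gc(struct my_count_data *data)
{
        /* No-op if already queued; otherwise runs ASAP on system_wq. */
        schedule_work(&data->gc_work);
}

static void my_count_init(struct my_count_data *data)
{
        INIT_WORK(&data->gc_work, my_tree_gc_worker);
}

static void my_count_destroy(struct my_count_data *data)
{
        cancel_work_sync(&data->gc_work);
}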
|
D | nft_set_hash.c |
    26  struct delayed_work gc_work;   member
   323  priv = container_of(work, struct nft_rhash, gc_work.work);   in nft_rhash_gc()
   363  queue_delayed_work(system_power_efficient_wq, &priv->gc_work,   in nft_rhash_gc()
   377  queue_delayed_work(system_power_efficient_wq, &priv->gc_work,   in nft_rhash_gc_init()
   396  INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rhash_gc);   in nft_rhash_init()
   412  cancel_delayed_work_sync(&priv->gc_work);   in nft_rhash_destroy()
|
D | xt_hashlimit.c |
   128  struct delayed_work gc_work;   member
   352  INIT_DEFERRABLE_WORK(&hinfo->gc_work, htable_gc);   in htable_create()
   353  queue_delayed_work(system_power_efficient_wq, &hinfo->gc_work,   in htable_create()
   383  ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);   in htable_gc()
   388  &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));   in htable_gc()
   429  cancel_delayed_work_sync(&hinfo->gc_work);   in htable_put()
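xt_hashlimit.c is the one GC here with a user-configurable period: the worker re-arms itself with msecs_to_jiffies(cfg.gc_interval) instead of a fixed HZ. Roughly, under hypothetical names:

#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_htable {
        struct {
                u32 gc_interval;        /* milliseconds, from userspace config */
        } cfg;
        struct delayed_work gc_work;
};

static void my_htable_gc(struct work_struct *work)
{
        struct my_htable *ht =
                container_of(work, struct my_htable, gc_work.work);

        /* ... drop expired rate-limit buckets ... */

        queue_delayed_work(system_power_efficient_wq, &ht->gc_work,
                           msecs_to_jiffies(ht->cfg.gc_interval));
}

static void my_htable_create(struct my_htable *ht)
{
        INIT_DEFERRABLE_WORK(&ht->gc_work, my_htable_gc);
        queue_delayed_work(system_power_efficient_wq, &ht->gc_work,
                           msecs_to_jiffies(ht->cfg.gc_interval));
}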
|
/linux-6.1.9/drivers/gpu/drm/qxl/ |
D | qxl_kms.c |
   102  struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);   in qxl_gc_work()
   260  INIT_WORK(&qdev->gc_work, qxl_gc_work);   in qxl_device_init()
   288  if (!qdev->gc_work.func)   in qxl_device_fini()
   308  flush_work(&qdev->gc_work);   in qxl_device_fini()
|
D | qxl_cmd.c |
   204  schedule_work(&qdev->gc_work);   in qxl_queue_garbage_collect()
   206  flush_work(&qdev->gc_work);   in qxl_queue_garbage_collect()
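The qxl hits pair schedule_work() with flush_work(): release-ring GC is queued whenever the device reports progress, and callers that need the memory back right away can block until the pass has run. A sketch of that shape; the can_wait parameter and my_* names are illustrative, not the driver's own:

#include <linux/types.h>
#include <linux/workqueue.h>

struct my_device {
        struct work_struct gc_work;
};

static void my_queue_garbage_collect(struct my_device *dev, bool can_wait)
{
        schedule_work(&dev->gc_work);
        if (can_wait)
                /* Block until the queued GC pass has finished running. */
                flush_work(&dev->gc_work);
}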
|
D | qxl_drv.h | 252 struct work_struct gc_work; member
|
/linux-6.1.9/net/bridge/ |
D | br_stp_if.c |
    56  mod_delayed_work(system_long_wq, &br->gc_work, HZ / 10);   in br_stp_enable_bridge()
    87  cancel_delayed_work_sync(&br->gc_work);   in br_stp_disable_bridge()
|
D | br_ioctl.c | 186 b.gc_timer_value = br_timer_value(&br->gc_work.timer); in br_dev_siocdevprivate()
|
D | br_device.c | 533 INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup); in br_dev_setup()
|
D | br_if.c | 392 cancel_delayed_work_sync(&br->gc_work); in br_dev_delete()
|
D | br_stp.c | 643 mod_delayed_work(system_long_wq, &br->gc_work, 0); in br_set_ageing_time()
|
D | br_fdb.c |
   518  gc_work.work);   in br_fdb_cleanup()
   558  mod_delayed_work(system_long_wq, &br->gc_work, work_delay);   in br_fdb_cleanup()
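The bridge FDB cleanup runs on system_long_wq and is rescheduled with mod_delayed_work(), which adjusts the timer of an already-pending work instead of queueing a second instance; that is how br_set_ageing_time() (delay 0) and br_stp_enable_bridge() (HZ / 10) pull the next pass forward. A small sketch of the idiom with invented names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_bridge {
        struct delayed_work gc_work;
        unsigned long ageing_time;      /* in jiffies */
};

static void my_fdb_cleanup(struct work_struct *work)
{
        struct my_bridge *br =
                container_of(work, struct my_bridge, gc_work.work);
        unsigned long work_delay = br->ageing_time;

        /* ... expire stale forwarding entries, track the soonest expiry ... */

        /* Re-arm for the next expiry; a pending timer is simply moved. */
        mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
}

static void my_set_ageing_time(struct my_bridge *br, unsigned long ageing)
{
        br->ageing_time = ageing;
        /* Run cleanup immediately so a shorter limit takes effect now. */
        mod_delayed_work(system_long_wq, &br->gc_work, 0);
}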
|
D | br_sysfs_br.c | 294 return sprintf(buf, "%ld\n", br_timer_value(&br->gc_work.timer)); in gc_timer_show()
|
D | br_private.h | 530 struct delayed_work gc_work; member
|
D | br_netlink.c | 1594 clockval = br_timer_value(&br->gc_work.timer); in br_fill_info()
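br_ioctl.c, br_sysfs_br.c and br_netlink.c all export the same value to userspace: the time remaining until the next FDB cleanup, read from the timer embedded in the delayed_work (gc_work.timer). As an assumption about its shape (not a copy of the bridge helper), br_timer_value() can be pictured roughly as:

#include <linux/jiffies.h>
#include <linux/timer.h>

/* Hypothetical helper: remaining time on a pending timer, in clock_t units. */
static inline unsigned long my_timer_value(const struct timer_list *timer)
{
        return timer_pending(timer) ?
               jiffies_delta_to_clock_t(timer->expires - jiffies) : 0;
}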
|
/linux-6.1.9/include/net/netfilter/ |
D | nf_flow_table.h | 78 struct delayed_work gc_work; member
|
/linux-6.1.9/include/net/ |
D | neighbour.h | 224 struct delayed_work gc_work; member
|
/linux-6.1.9/net/core/ |
D | neighbour.c |
   961  struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);   in neigh_periodic_work()
  1036  queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,   in neigh_periodic_work()
  1814  INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);   in neigh_table_init()
  1815  queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,   in neigh_table_init()
  1836  cancel_delayed_work_sync(&tbl->gc_work);   in neigh_table_clear()
|