Cross-reference search hits for the identifier "wrk" in linux-6.6.21, grouped by directory and file. The leading number on each hit is the source line; "in foo()" names the enclosing function, and "argument"/"local"/"member" is the symbol kind reported by the indexer.

/linux-6.6.21/net/smc/

smc_ism.c
    341  static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)  in smcd_handle_sw_event()  argument
    345  ev_info.info = wrk->event.info;  in smcd_handle_sw_event()
    346  switch (wrk->event.code) {  in smcd_handle_sw_event()
    348  smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id);  in smcd_handle_sw_event()
    353  wrk->smcd->ops->signal_event(wrk->smcd,  in smcd_handle_sw_event()
    354  wrk->event.tok,  in smcd_handle_sw_event()
    366  struct smc_ism_event_work *wrk =  in smc_ism_event_work()  local
    369  switch (wrk->event.type) {  in smc_ism_event_work()
    371  smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK);  in smc_ism_event_work()
    376  smcd_handle_sw_event(wrk);  in smc_ism_event_work()
    [all …]
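The smc_ism.c hits follow the common kernel shape of packaging event data in a heap-allocated work item and recovering the wrapper inside the handler (the truncated hit at line 366 appears to be exactly that recovery). A minimal, generic sketch of that shape; the type and function names below are illustrative, not SMC's:

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/types.h>

/* hypothetical event wrapper: one heap-allocated work item per event */
struct my_event_work {
	struct work_struct work;
	u32 code;
};

static void my_event_work_fn(struct work_struct *work)
{
	/* recover the wrapper from the embedded work_struct */
	struct my_event_work *wrk = container_of(work, struct my_event_work, work);

	/* ... handle wrk->code ... */
	kfree(wrk);
}

static int my_queue_event(u32 code)
{
	struct my_event_work *wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC);

	if (!wrk)
		return -ENOMEM;
	wrk->code = code;
	INIT_WORK(&wrk->work, my_event_work_fn);
	schedule_work(&wrk->work);	/* handler runs later in process context */
	return 0;
}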
/linux-6.6.21/drivers/mtd/ubi/

fastmap-wl.c
    12   static void update_fastmap_work_fn(struct work_struct *wrk)  in update_fastmap_work_fn()  argument
    14   struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);  in update_fastmap_work_fn()
    364  struct ubi_work *wrk;  in ubi_ensure_anchor_pebs()  local
    392  wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);  in ubi_ensure_anchor_pebs()
    393  if (!wrk) {  in ubi_ensure_anchor_pebs()
    400  wrk->func = &wear_leveling_worker;  in ubi_ensure_anchor_pebs()
    401  __schedule_ubi_work(ubi, wrk);  in ubi_ensure_anchor_pebs()
    449  int ubi_is_erase_work(struct ubi_work *wrk)  in ubi_is_erase_work()  argument
    451  return wrk->func == erase_worker;  in ubi_is_erase_work()

wl.c
    191  struct ubi_work *wrk;  in do_work()  local
    209  wrk = list_entry(ubi->works.next, struct ubi_work, list);  in do_work()
    210  list_del(&wrk->list);  in do_work()
    220  err = wrk->func(ubi, wrk, 0);  in do_work()
    542  static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)  in __schedule_ubi_work()  argument
    545  list_add_tail(&wrk->list, &ubi->works);  in __schedule_ubi_work()
    561  static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)  in schedule_ubi_work()  argument
    564  __schedule_ubi_work(ubi, wrk);  in schedule_ubi_work()
    647  static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,  argument
    657  kfree(wrk);
    [all …]

wl.h
    5    static void update_fastmap_work_fn(struct work_struct *wrk);

ubi.h
    803  int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int shutdown);
    916  int ubi_is_erase_work(struct ubi_work *wrk);
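Unlike the workqueue-based hits elsewhere, the ubi.h func pointer and the wl.c do_work()/__schedule_ubi_work() hits outline UBI's own background-work list: callers allocate a struct ubi_work, set ->func, and link it onto ubi->works; the background thread later pops entries and invokes the callback. A rough, self-contained sketch of that producer/consumer shape, with stand-in names and the real code's locking, wakeups and shutdown handling omitted:

#include <linux/list.h>
#include <linux/slab.h>

struct my_flash;			/* stand-in for struct ubi_device */

struct my_work {			/* stand-in for struct ubi_work */
	struct list_head list;
	int (*func)(struct my_flash *dev, struct my_work *wrk, int shutdown);
};

static LIST_HEAD(works);		/* stand-in for ubi->works */

static void schedule_my_work(struct my_work *wrk)
{
	list_add_tail(&wrk->list, &works);	/* producer: just link it in */
}

static int do_one_work(struct my_flash *dev)
{
	struct my_work *wrk;

	if (list_empty(&works))
		return 0;
	wrk = list_entry(works.next, struct my_work, list);
	list_del(&wrk->list);
	/* consumer: the callback owns wrk and is expected to kfree() it */
	return wrk->func(dev, wrk, 0);
}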
/linux-6.6.21/net/core/

link_watch.c
    196  LIST_HEAD(wrk);  in __linkwatch_run_queue()
    218  list_splice_init(&lweventlist, &wrk);  in __linkwatch_run_queue()
    220  while (!list_empty(&wrk) && do_dev > 0) {  in __linkwatch_run_queue()
    222  dev = list_first_entry(&wrk, struct net_device, link_watch_list);  in __linkwatch_run_queue()
    241  list_splice_init(&wrk, &lweventlist);  in __linkwatch_run_queue()
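Here wrk is not a work item at all but a local list head: __linkwatch_run_queue() splices the global lweventlist onto it, processes entries up to a budget, and splices any leftovers back. A hedged sketch of that drain-to-local-list idiom; my_lock, global_list and struct item are placeholders, not the link-watch names:

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static LIST_HEAD(global_list);

struct item {
	struct list_head node;
};

static void process_pending(int budget)
{
	LIST_HEAD(wrk);			/* local list, walked without the lock held */
	struct item *it;

	spin_lock(&my_lock);
	list_splice_init(&global_list, &wrk);	/* grab everything at once */
	spin_unlock(&my_lock);

	while (!list_empty(&wrk) && budget-- > 0) {
		it = list_first_entry(&wrk, struct item, node);
		list_del_init(&it->node);
		/* ... handle it ... */
	}

	spin_lock(&my_lock);
	list_splice_init(&wrk, &global_list);	/* requeue whatever is left */
	spin_unlock(&my_lock);
}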
/linux-6.6.21/drivers/scsi/

storvsc_drv.c
    471  struct storvsc_scan_work *wrk;  in storvsc_device_scan()  local
    474  wrk = container_of(work, struct storvsc_scan_work, work);  in storvsc_device_scan()
    476  sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);  in storvsc_device_scan()
    483  kfree(wrk);  in storvsc_device_scan()
    517  struct storvsc_scan_work *wrk;  in storvsc_remove_lun()  local
    520  wrk = container_of(work, struct storvsc_scan_work, work);  in storvsc_remove_lun()
    521  if (!scsi_host_get(wrk->host))  in storvsc_remove_lun()
    524  sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);  in storvsc_remove_lun()
    530  scsi_host_put(wrk->host);  in storvsc_remove_lun()
    533  kfree(wrk);  in storvsc_remove_lun()
    [all …]
/linux-6.6.21/drivers/hv/

hv_balloon.c
    456   struct work_struct wrk;  member
    462   struct work_struct wrk;  member
    1557  schedule_work(&dm_device.balloon_wrk.wrk);  in balloon_onchannelcallback()
    1597  schedule_work(&dm_device.ha_wrk.wrk);  in balloon_onchannelcallback()
    1998  INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);  in balloon_probe()
    1999  INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);  in balloon_probe()
    2052  cancel_work_sync(&dm->balloon_wrk.wrk);  in balloon_remove()
    2053  cancel_work_sync(&dm->ha_wrk.wrk);  in balloon_remove()
    2088  cancel_work_sync(&dm->balloon_wrk.wrk);  in balloon_suspend()
    2089  cancel_work_sync(&dm->ha_wrk.wrk);  in balloon_suspend()
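The hv_balloon.c hits show the full lifecycle of a work_struct embedded in long-lived device state (the two "member" hits): initialized once at probe, scheduled from the channel callback, and cancelled synchronously on remove/suspend. A minimal sketch of that lifecycle with hypothetical names:

#include <linux/workqueue.h>

struct my_dev_state {
	struct work_struct balloon_wrk;	/* embedded, lives as long as the device */
};

static struct my_dev_state my_dev;

static void balloon_work_fn(struct work_struct *work)
{
	/* ... heavy lifting deferred out of the callback path ... */
}

static int my_probe(void)
{
	INIT_WORK(&my_dev.balloon_wrk, balloon_work_fn);	/* once, at probe time */
	return 0;
}

static void my_channel_callback(void)
{
	schedule_work(&my_dev.balloon_wrk);	/* fast path: just queue it */
}

static void my_remove(void)
{
	cancel_work_sync(&my_dev.balloon_wrk);	/* wait for any running instance */
}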
/linux-6.6.21/arch/m68k/ifpsp060/

MISC
    194  wrk/fskeleton.s: 2.2
    195  wrk/iskeleton.s: 2.2
    196  wrk/os.s : 2.1
/linux-6.6.21/drivers/gpu/drm/i915/

intel_wakeref.c
    86   static void __intel_wakeref_put_work(struct work_struct *wrk)  in __intel_wakeref_put_work()  argument
    88   struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);  in __intel_wakeref_put_work()

i915_request.c
    183  static void irq_execute_cb(struct irq_work *wrk)  in irq_execute_cb()  argument
    185  struct execute_cb *cb = container_of(wrk, typeof(*cb), work);  in irq_execute_cb()
    192  __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))  in __notify_execute_cb()  argument
    210  static bool irq_work_imm(struct irq_work *wrk)  in irq_work_imm()  argument
    212  wrk->func(wrk);  in irq_work_imm()

i915_sw_fence.c
    463  static void irq_i915_sw_fence_work(struct irq_work *wrk)  in irq_i915_sw_fence_work()  argument
    466  container_of(wrk, typeof(*cb), work);  in irq_i915_sw_fence_work()

i915_active.c
    177  active_work(struct work_struct *wrk)  in active_work()  argument
    179  struct i915_active *ref = container_of(wrk, typeof(*ref), work);  in active_work()
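The i915 hits mix two deferral mechanisms: ordinary work_structs and the lighter-weight irq_work, whose callbacks run in hard-interrupt-like context shortly after irq_work_queue(). A hedged sketch of the irq_work variant, again with illustrative names rather than the driver's own execute_cb type:

#include <linux/irq_work.h>

struct my_cb {
	struct irq_work work;
	/* ... completion/notification state ... */
};

static void my_irq_cb(struct irq_work *wrk)
{
	struct my_cb *cb = container_of(wrk, struct my_cb, work);

	/* runs in hard-irq-like context: keep it short, never sleep */
	/* ... signal whoever is waiting on cb ... */
}

static void my_arm(struct my_cb *cb)
{
	init_irq_work(&cb->work, my_irq_cb);
	irq_work_queue(&cb->work);	/* callback fires asynchronously, very soon */
}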
/linux-6.6.21/drivers/dma-buf/

dma-fence-array.c
    45   static void irq_dma_fence_array_work(struct irq_work *wrk)  in irq_dma_fence_array_work()  argument
    47   struct dma_fence_array *array = container_of(wrk, typeof(*array), work);  in irq_dma_fence_array_work()
/linux-6.6.21/drivers/gpu/drm/i915/gt/

intel_gt_buffer_pool.c
    87   static void pool_free_work(struct work_struct *wrk)  in pool_free_work()  argument
    90   container_of(wrk, typeof(*pool), work.work);  in pool_free_work()

selftest_rps.c
    28    static void dummy_rps_work(struct work_struct *wrk)  in dummy_rps_work()  argument
    223   void (*saved_work)(struct work_struct *wrk);  in live_rps_clock_interval()
    375   void (*saved_work)(struct work_struct *wrk);  in live_rps_control()
    603   void (*saved_work)(struct work_struct *wrk);  in live_rps_frequency_cs()
    742   void (*saved_work)(struct work_struct *wrk);  in live_rps_frequency_srm()
    1022  void (*saved_work)(struct work_struct *wrk);  in live_rps_interrupt()
    1132  void (*saved_work)(struct work_struct *wrk);  in live_rps_power()

intel_engine_heartbeat.c
    135  static void heartbeat(struct work_struct *wrk)  in heartbeat()  argument
    139  container_of(wrk, typeof(*engine), heartbeat.work.work);  in heartbeat()

intel_execlists_submission.c
    3604  static void rcu_virtual_context_destroy(struct work_struct *wrk)  in rcu_virtual_context_destroy()  argument
    3607  container_of(wrk, typeof(*ve), rcu.work);  in rcu_virtual_context_destroy()
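In several of these gt/ hits (intel_gt_buffer_pool.c, intel_engine_heartbeat.c) the container_of() member path ends in ".work.work": the trailing .work is the work_struct embedded inside a struct delayed_work, so the handler has to name the full path back to the outer object. A sketch of that delayed_work pattern with made-up names (the extra nesting level in the real paths comes from the driver wrapping the delayed_work in another struct):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_engine {
	struct delayed_work heartbeat;	/* delayed_work embeds a work_struct as .work */
};

static void heartbeat_fn(struct work_struct *wrk)
{
	/* wrk points at heartbeat.work, so the member path is "heartbeat.work" */
	struct my_engine *engine = container_of(wrk, struct my_engine, heartbeat.work);

	/* ... periodic check ... */
	schedule_delayed_work(&engine->heartbeat, HZ);	/* re-arm for one second later */
}

static void start_heartbeat(struct my_engine *engine)
{
	INIT_DELAYED_WORK(&engine->heartbeat, heartbeat_fn);
	schedule_delayed_work(&engine->heartbeat, HZ);
}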
/linux-6.6.21/drivers/gpu/drm/scheduler/

sched_entity.c
    164  static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)  in drm_sched_entity_kill_jobs_work()  argument
    166  struct drm_sched_job *job = container_of(wrk, typeof(*job), work);  in drm_sched_entity_kill_jobs_work()
/linux-6.6.21/drivers/pci/controller/

pci-hyperv.c
    527   struct work_struct wrk;  member
    559   struct work_struct wrk;  member
    2587  dr_wrk = container_of(work, struct hv_dr_work, wrk);  in pci_devices_present_work()
    2723  INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);  in hv_pci_start_relations_work()
    2739  queue_work(hbus->wq, &dr_wrk->wrk);  in hv_pci_start_relations_work()
    2841  hpdev = container_of(work, struct hv_pci_dev, wrk);  in hv_eject_device_work()
    2905  INIT_WORK(&hpdev->wrk, hv_eject_device_work);  in hv_pci_eject_device()
    2906  queue_work(hbus->wq, &hpdev->wrk);  in hv_pci_eject_device()
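pci-hyperv.c uses the same embed-and-container_of pattern but queues onto a driver-owned workqueue (hbus->wq) via queue_work() instead of schedule_work(), which lets the driver flush and order its own work independently of the system workqueue. A rough sketch of that shape, assuming a hypothetical bus type that owns its queue:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_bus {
	struct workqueue_struct *wq;	/* driver-private workqueue */
};

struct my_rescan_work {
	struct work_struct wrk;
	struct my_bus *bus;
};

static void my_rescan_fn(struct work_struct *work)
{
	struct my_rescan_work *dr_wrk = container_of(work, struct my_rescan_work, wrk);

	/* ... rescan dr_wrk->bus ... */
	kfree(dr_wrk);
}

static int my_start_rescan(struct my_bus *bus)
{
	struct my_rescan_work *dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_KERNEL);

	if (!dr_wrk)
		return -ENOMEM;
	dr_wrk->bus = bus;
	INIT_WORK(&dr_wrk->wrk, my_rescan_fn);
	queue_work(bus->wq, &dr_wrk->wrk);	/* not schedule_work(): use our own queue */
	return 0;
}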
/linux-6.6.21/drivers/net/ethernet/marvell/octeontx2/nic/

otx2_common.c
    1078  struct refill_work *wrk;  in otx2_pool_refill_task()  local
    1082  wrk = container_of(work, struct refill_work, pool_refill_work.work);  in otx2_pool_refill_task()
    1083  pfvf = wrk->pf;  in otx2_pool_refill_task()
    1084  qidx = wrk - pfvf->refill_wrk;  in otx2_pool_refill_task()
    1090  napi_schedule(wrk->napi);  in otx2_pool_refill_task()

otx2_pf.c
    1949  int qidx, vec, wrk;  in otx2_stop()  local
    1992  for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)  in otx2_stop()
    1993  cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);  in otx2_stop()
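The otx2 hits show an array of per-queue refill_work entries: the handler recovers which queue it serves by plain pointer arithmetic (wrk - pfvf->refill_wrk), and otx2_stop() cancels every element before teardown. A hedged sketch of that per-array-element delayed-work pattern, with stand-in names:

#include <linux/workqueue.h>

struct my_nic;				/* forward declaration, illustrative */

struct refill_work {
	struct delayed_work pool_refill_work;
	struct my_nic *pf;		/* back-pointer to the device */
};

struct my_nic {
	struct refill_work *refill_wrk;	/* array, one entry per queue */
	int queue_count;
};

static void refill_fn(struct work_struct *work)
{
	struct refill_work *wrk = container_of(work, struct refill_work,
					       pool_refill_work.work);
	struct my_nic *pf = wrk->pf;
	int qidx = wrk - pf->refill_wrk;	/* array index == queue index */

	/* ... refill buffers for queue qidx ... */
	(void)qidx;
}

static void my_stop(struct my_nic *pf)
{
	int i;

	/* make sure no refill work is still running before freeing the queues */
	for (i = 0; i < pf->queue_count; i++)
		cancel_delayed_work_sync(&pf->refill_wrk[i].pool_refill_work);
}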
/linux-6.6.21/kernel/power/

swap.c
    640  unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */  member
    664  d->wrk);  in lzo_compress_threadfn()
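In kernel/power/swap.c, wrk is not a work item at all: it is the per-thread LZO scratch buffer (LZO1X_1_MEM_COMPRESS bytes) that lzo1x_1_compress() requires the caller to supply. A minimal sketch of that API usage; the buffer sizes and struct layout here are illustrative, not the hibernation code's:

#include <linux/lzo.h>
#include <linux/mm.h>

/* per-thread compression state, mirroring the "workspace as a member" idea */
struct cmp_data {
	unsigned char unc[PAGE_SIZE];				/* uncompressed input */
	unsigned char cmp[lzo1x_worst_compress(PAGE_SIZE)];	/* worst-case output */
	unsigned char wrk[LZO1X_1_MEM_COMPRESS];		/* scratch for lzo1x_1_compress() */
};

static int compress_page(struct cmp_data *d, size_t *out_len)
{
	size_t cmp_len = sizeof(d->cmp);
	int ret;

	/* the workspace is handed to the compressor on every call */
	ret = lzo1x_1_compress(d->unc, PAGE_SIZE, d->cmp, &cmp_len, d->wrk);
	if (ret == LZO_E_OK)
		*out_len = cmp_len;
	return ret;
}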
/linux-6.6.21/drivers/gpu/drm/i915/gt/uc/

intel_guc_submission.c
    1424  static void guc_timestamp_ping(struct work_struct *wrk)  in guc_timestamp_ping()  argument
    1426  struct intel_guc *guc = container_of(wrk, typeof(*guc),  in guc_timestamp_ping()
    3137  static void __delay_sched_disable(struct work_struct *wrk)  in __delay_sched_disable()  argument
    3140  container_of(wrk, typeof(*ce), guc_state.sched_disable_delay_work.work);  in __delay_sched_disable()
    3544  static void submit_work_cb(struct irq_work *wrk)  in submit_work_cb()  argument
    3546  struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);  in submit_work_cb()