/linux-6.1.9/net/smc/ |
D | smc_ism.c |
  317  static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)  in smcd_handle_sw_event() argument
  321  ev_info.info = wrk->event.info;  in smcd_handle_sw_event()
  322  switch (wrk->event.code) {  in smcd_handle_sw_event()
  324  smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id);  in smcd_handle_sw_event()
  329  wrk->smcd->ops->signal_event(wrk->smcd,  in smcd_handle_sw_event()
  330  wrk->event.tok,  in smcd_handle_sw_event()
  360  struct smc_ism_event_work *wrk =  in smc_ism_event_work() local
  363  switch (wrk->event.type) {  in smc_ism_event_work()
  365  smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK);  in smc_ism_event_work()
  370  smcd_handle_sw_event(wrk);  in smc_ism_event_work()
  [all …]
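
Note: the matches above follow the common kernel pattern of copying an event into a dedicated, per-event work item and dispatching on its code from a workqueue handler. A minimal, hypothetical sketch of that pattern (my_event_work, my_event_fn and my_queue_event are invented names, not from smc_ism.c):

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct my_event_work {
    struct work_struct work;    /* embedded work item */
    u32 code;                   /* event code copied in the notification path */
    u64 info;                   /* event payload */
};

static void my_event_fn(struct work_struct *work)
{
    /* recover the wrapping structure from the embedded work_struct */
    struct my_event_work *wrk = container_of(work, struct my_event_work, work);

    switch (wrk->code) {
    case 1:
        /* handle one event type using wrk->info ... */
        break;
    default:
        break;
    }
    kfree(wrk);                 /* one allocation per event */
}

/* called from the event/IRQ path: copy the event and defer to process context */
static int my_queue_event(u32 code, u64 info)
{
    struct my_event_work *wrk = kzalloc(sizeof(*wrk), GFP_ATOMIC);

    if (!wrk)
        return -ENOMEM;
    INIT_WORK(&wrk->work, my_event_fn);
    wrk->code = code;
    wrk->info = info;
    schedule_work(&wrk->work);
    return 0;
}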
|
/linux-6.1.9/drivers/mtd/ubi/ |
D | fastmap-wl.c |
  12   static void update_fastmap_work_fn(struct work_struct *wrk)  in update_fastmap_work_fn() argument
  14   struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);  in update_fastmap_work_fn()
  362  struct ubi_work *wrk;  in ubi_ensure_anchor_pebs() local
  390  wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);  in ubi_ensure_anchor_pebs()
  391  if (!wrk) {  in ubi_ensure_anchor_pebs()
  398  wrk->func = &wear_leveling_worker;  in ubi_ensure_anchor_pebs()
  399  __schedule_ubi_work(ubi, wrk);  in ubi_ensure_anchor_pebs()
  447  int ubi_is_erase_work(struct ubi_work *wrk)  in ubi_is_erase_work() argument
  449  return wrk->func == erase_worker;  in ubi_is_erase_work()
|
D | wl.c |
  191  struct ubi_work *wrk;  in do_work() local
  209  wrk = list_entry(ubi->works.next, struct ubi_work, list);  in do_work()
  210  list_del(&wrk->list);  in do_work()
  220  err = wrk->func(ubi, wrk, 0);  in do_work()
  542  static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)  in __schedule_ubi_work() argument
  545  list_add_tail(&wrk->list, &ubi->works);  in __schedule_ubi_work()
  561  static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)  in schedule_ubi_work() argument
  564  __schedule_ubi_work(ubi, wrk);  in schedule_ubi_work()
  647  static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,  argument
  657  kfree(wrk);
  [all …]
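
Note: fastmap-wl.c and wl.c above show UBI's own list-based background work machinery rather than the generic workqueue API: each ubi_work carries a function pointer and sits on ubi->works until do_work() pops and runs it. A simplified, hypothetical sketch of such a list-based work queue (my_ctx/my_work are invented names, and the locking of the real code is omitted):

#include <linux/list.h>

struct my_ctx;

struct my_work {
    struct list_head list;
    /* worker returns 0 on success; shutdown != 0 means "cancel only" */
    int (*func)(struct my_ctx *ctx, struct my_work *wrk, int shutdown);
};

struct my_ctx {
    struct list_head works;     /* pending my_work items */
};

static void my_ctx_init(struct my_ctx *ctx)
{
    INIT_LIST_HEAD(&ctx->works);
}

static void my_schedule_work(struct my_ctx *ctx, struct my_work *wrk)
{
    list_add_tail(&wrk->list, &ctx->works);
}

/* pop one pending item and run it; mirrors the shape of UBI's do_work() */
static int my_do_one_work(struct my_ctx *ctx)
{
    struct my_work *wrk;

    if (list_empty(&ctx->works))
        return 0;
    wrk = list_first_entry(&ctx->works, struct my_work, list);
    list_del(&wrk->list);
    return wrk->func(ctx, wrk, 0);  /* func is expected to free wrk */
}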
|
D | wl.h | 5 static void update_fastmap_work_fn(struct work_struct *wrk);
|
D | ubi.h |
  803  int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int shutdown);
  916  int ubi_is_erase_work(struct ubi_work *wrk);
|
/linux-6.1.9/net/core/ |
D | link_watch.c |
  182  LIST_HEAD(wrk);  in __linkwatch_run_queue()
  204  list_splice_init(&lweventlist, &wrk);  in __linkwatch_run_queue()
  206  while (!list_empty(&wrk) && do_dev > 0) {  in __linkwatch_run_queue()
  208  dev = list_first_entry(&wrk, struct net_device, link_watch_list);  in __linkwatch_run_queue()
  227  list_splice_init(&wrk, &lweventlist);  in __linkwatch_run_queue()
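
Note: __linkwatch_run_queue() above uses the standard "splice and drain" idiom: the global pending list is moved onto a local list head in one step, processed entry by entry within a budget, and whatever is left is spliced back. A minimal, hypothetical illustration of the idiom (pending_list/entry are invented names, and the real code's locking is omitted):

#include <linux/list.h>

struct entry {
    struct list_head node;
};

static LIST_HEAD(pending_list);     /* global list fed by producers */

static void process_entry(struct entry *e)
{
    /* ... handle one entry ... */
}

static void drain_some(int budget)
{
    LIST_HEAD(wrk);                 /* local, stack-allocated list head */

    /* take every pending entry in one O(1) operation */
    list_splice_init(&pending_list, &wrk);

    while (!list_empty(&wrk) && budget-- > 0) {
        struct entry *e = list_first_entry(&wrk, struct entry, node);

        list_del_init(&e->node);
        process_entry(e);
    }

    /* anything unprocessed goes back on the global list */
    list_splice_init(&wrk, &pending_list);
}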
|
/linux-6.1.9/drivers/scsi/ |
D | storvsc_drv.c |
  465  struct storvsc_scan_work *wrk;  in storvsc_device_scan() local
  468  wrk = container_of(work, struct storvsc_scan_work, work);  in storvsc_device_scan()
  470  sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);  in storvsc_device_scan()
  477  kfree(wrk);  in storvsc_device_scan()
  511  struct storvsc_scan_work *wrk;  in storvsc_remove_lun() local
  514  wrk = container_of(work, struct storvsc_scan_work, work);  in storvsc_remove_lun()
  515  if (!scsi_host_get(wrk->host))  in storvsc_remove_lun()
  518  sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);  in storvsc_remove_lun()
  524  scsi_host_put(wrk->host);  in storvsc_remove_lun()
  527  kfree(wrk);  in storvsc_remove_lun()
  [all …]
|
/linux-6.1.9/drivers/hv/ |
D | hv_balloon.c |
  455   struct work_struct wrk;  member
  461   struct work_struct wrk;  member
  1541  schedule_work(&dm_device.balloon_wrk.wrk);  in balloon_onchannelcallback()
  1581  schedule_work(&dm_device.ha_wrk.wrk);  in balloon_onchannelcallback()
  1950  INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);  in balloon_probe()
  1951  INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);  in balloon_probe()
  2005  cancel_work_sync(&dm->balloon_wrk.wrk);  in balloon_remove()
  2006  cancel_work_sync(&dm->ha_wrk.wrk);  in balloon_remove()
  2044  cancel_work_sync(&dm->balloon_wrk.wrk);  in balloon_suspend()
  2045  cancel_work_sync(&dm->ha_wrk.wrk);  in balloon_suspend()
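
Note: hv_balloon.c shows the full lifecycle of a work_struct embedded in device state: INIT_WORK() at probe time, schedule_work() from the channel callback, and cancel_work_sync() on remove/suspend. A compact, hypothetical sketch of that lifecycle (my_device, balloon_up_fn and the surrounding helpers are illustrative names):

#include <linux/workqueue.h>
#include <linux/printk.h>

struct my_device {
    struct work_struct balloon_wrk;
    /* ... other device state ... */
};

static struct my_device dm_device;

static void balloon_up_fn(struct work_struct *work)
{
    struct my_device *dm = container_of(work, struct my_device, balloon_wrk);

    /* ... adjust the balloon using the state in *dm ... */
    pr_info("balloon work ran for device %p\n", dm);
}

static int my_probe(void)
{
    INIT_WORK(&dm_device.balloon_wrk, balloon_up_fn);   /* once, at probe time */
    return 0;
}

/* message/interrupt callback: only queue the work, never block here */
static void my_onchannelcallback(void)
{
    schedule_work(&dm_device.balloon_wrk);
}

static void my_remove(void)
{
    /* guarantee the handler is neither pending nor running before teardown */
    cancel_work_sync(&dm_device.balloon_wrk);
}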
|
/linux-6.1.9/arch/m68k/ifpsp060/ |
D | MISC |
  194  wrk/fskeleton.s: 2.2
  195  wrk/iskeleton.s: 2.2
  196  wrk/os.s : 2.1
|
/linux-6.1.9/drivers/dma-buf/ |
D | dma-fence-array.c |
  45  static void irq_dma_fence_array_work(struct irq_work *wrk)  in irq_dma_fence_array_work() argument
  47  struct dma_fence_array *array = container_of(wrk, typeof(*array), work);  in irq_dma_fence_array_work()
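
Note: this entry (and several i915 entries below) uses irq_work rather than a workqueue: the callback runs in hard-IRQ-like context shortly after irq_work_queue(), and again recovers its container with container_of(). A minimal, hypothetical sketch (my_obj and its helpers are invented names):

#include <linux/irq_work.h>

struct my_obj {
    struct irq_work work;
    int pending_value;
};

static void my_irq_cb(struct irq_work *wrk)
{
    struct my_obj *obj = container_of(wrk, struct my_obj, work);

    /* runs very soon after queueing, in IRQ-like context: keep it short */
    obj->pending_value = 0;
}

static void my_obj_init(struct my_obj *obj)
{
    init_irq_work(&obj->work, my_irq_cb);
}

static void my_obj_kick(struct my_obj *obj)
{
    irq_work_queue(&obj->work);     /* safe to call from any context */
}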
|
/linux-6.1.9/drivers/gpu/drm/i915/ |
D | intel_wakeref.c |
  85  static void __intel_wakeref_put_work(struct work_struct *wrk)  in __intel_wakeref_put_work() argument
  87  struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);  in __intel_wakeref_put_work()
|
D | i915_request.c |
  186  static void irq_execute_cb(struct irq_work *wrk)  in irq_execute_cb() argument
  188  struct execute_cb *cb = container_of(wrk, typeof(*cb), work);  in irq_execute_cb()
  195  __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))  in __notify_execute_cb() argument
  213  static bool irq_work_imm(struct irq_work *wrk)  in irq_work_imm() argument
  215  wrk->func(wrk);  in irq_work_imm()
|
D | i915_sw_fence.c |
  463  static void irq_i915_sw_fence_work(struct irq_work *wrk)  in irq_i915_sw_fence_work() argument
  466  container_of(wrk, typeof(*cb), work);  in irq_i915_sw_fence_work()
|
D | i915_active.c |
  178  active_work(struct work_struct *wrk)  in active_work() argument
  180  struct i915_active *ref = container_of(wrk, typeof(*ref), work);  in active_work()
|
/linux-6.1.9/drivers/gpu/drm/i915/gt/ |
D | intel_gt_buffer_pool.c |
  87  static void pool_free_work(struct work_struct *wrk)  in pool_free_work() argument
  90  container_of(wrk, typeof(*pool), work.work);  in pool_free_work()
|
D | selftest_rps.c |
  27    static void dummy_rps_work(struct work_struct *wrk)  in dummy_rps_work() argument
  222   void (*saved_work)(struct work_struct *wrk);  in live_rps_clock_interval()
  374   void (*saved_work)(struct work_struct *wrk);  in live_rps_control()
  602   void (*saved_work)(struct work_struct *wrk);  in live_rps_frequency_cs()
  743   void (*saved_work)(struct work_struct *wrk);  in live_rps_frequency_srm()
  1025  void (*saved_work)(struct work_struct *wrk);  in live_rps_interrupt()
  1129  void (*saved_work)(struct work_struct *wrk);  in live_rps_power()
|
D | intel_engine_heartbeat.c |
  107  static void heartbeat(struct work_struct *wrk)  in heartbeat() argument
  111  container_of(wrk, typeof(*engine), heartbeat.work.work);  in heartbeat()
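
Note: heartbeat() above is a self-rearming delayed work: the handler recovers the engine through the nested delayed_work (hence the heartbeat.work.work member chain in the original) and queues itself again. A simplified, hypothetical sketch of a periodic heartbeat built the same way, with the delayed_work embedded directly (my_engine and HEARTBEAT_DELAY are invented):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define HEARTBEAT_DELAY msecs_to_jiffies(500)   /* illustrative period */

struct my_engine {
    struct delayed_work heartbeat;
};

static void heartbeat_fn(struct work_struct *wrk)
{
    /* wrk is the inner work_struct of the delayed_work, hence the
     * heartbeat.work member path in container_of() */
    struct my_engine *engine =
        container_of(wrk, struct my_engine, heartbeat.work);

    /* ... check that the engine is still making progress ... */

    schedule_delayed_work(&engine->heartbeat, HEARTBEAT_DELAY); /* re-arm */
}

static void start_heartbeat(struct my_engine *engine)
{
    INIT_DELAYED_WORK(&engine->heartbeat, heartbeat_fn);
    schedule_delayed_work(&engine->heartbeat, HEARTBEAT_DELAY);
}

static void stop_heartbeat(struct my_engine *engine)
{
    cancel_delayed_work_sync(&engine->heartbeat);   /* also stops the re-arming */
}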
|
D | intel_execlists_submission.c |
  3590  static void rcu_virtual_context_destroy(struct work_struct *wrk)  in rcu_virtual_context_destroy() argument
  3593  container_of(wrk, typeof(*ve), rcu.work);  in rcu_virtual_context_destroy()
|
/linux-6.1.9/drivers/gpu/drm/scheduler/ |
D | sched_entity.c |
  193  static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)  in drm_sched_entity_kill_jobs_work() argument
  195  struct drm_sched_job *job = container_of(wrk, typeof(*job), work);  in drm_sched_entity_kill_jobs_work()
|
/linux-6.1.9/drivers/pci/controller/ |
D | pci-hyperv.c |
  532   struct work_struct wrk;  member
  573   struct work_struct wrk;  member
  2514  dr_wrk = container_of(work, struct hv_dr_work, wrk);  in pci_devices_present_work()
  2646  INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);  in hv_pci_start_relations_work()
  2662  queue_work(hbus->wq, &dr_wrk->wrk);  in hv_pci_start_relations_work()
  2764  hpdev = container_of(work, struct hv_pci_dev, wrk);  in hv_eject_device_work()
  2827  INIT_WORK(&hpdev->wrk, hv_eject_device_work);  in hv_pci_eject_device()
  2828  queue_work(hbus->wq, &hpdev->wrk);  in hv_pci_eject_device()
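
Note: unlike the hv_balloon entry, pci-hyperv.c queues its work items on the bus's own workqueue (hbus->wq) rather than the system one, which keeps hot-plug and eject handling serialised per bus. A hypothetical sketch of that arrangement (my_bus and the helper names are invented; the sketch creates the queue with alloc_ordered_workqueue(), which is only one way to obtain a serialising queue):

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct my_bus {
    struct workqueue_struct *wq;    /* per-bus, ordered work queue */
};

struct my_hotplug_work {
    struct work_struct wrk;
    struct my_bus *bus;
};

static void my_hotplug_fn(struct work_struct *work)
{
    struct my_hotplug_work *dr_wrk =
        container_of(work, struct my_hotplug_work, wrk);

    /* ... rescan the bus described by dr_wrk->bus ... */
    kfree(dr_wrk);
}

static int my_bus_init(struct my_bus *bus)
{
    /* ordered queue: at most one hot-plug work item runs at a time */
    bus->wq = alloc_ordered_workqueue("my_bus_wq", 0);
    return bus->wq ? 0 : -ENOMEM;
}

static int my_bus_schedule_rescan(struct my_bus *bus)
{
    struct my_hotplug_work *dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_KERNEL);

    if (!dr_wrk)
        return -ENOMEM;
    INIT_WORK(&dr_wrk->wrk, my_hotplug_fn);
    dr_wrk->bus = bus;
    queue_work(bus->wq, &dr_wrk->wrk);
    return 0;
}

static void my_bus_fini(struct my_bus *bus)
{
    destroy_workqueue(bus->wq);     /* drains any queued rescan work first */
}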
|
/linux-6.1.9/drivers/net/ethernet/marvell/octeontx2/nic/ |
D | otx2_common.c |
  1003  struct refill_work *wrk;  in otx2_pool_refill_task() local
  1008  wrk = container_of(work, struct refill_work, pool_refill_work.work);  in otx2_pool_refill_task()
  1009  pfvf = wrk->pf;  in otx2_pool_refill_task()
  1010  qidx = wrk - pfvf->refill_wrk;  in otx2_pool_refill_task()
  1023  dwork = &wrk->pool_refill_work;  in otx2_pool_refill_task()
|
D | otx2_pf.c |
  1873  int qidx, vec, wrk;  in otx2_stop() local
  1921  for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)  in otx2_stop()
  1922  cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);  in otx2_stop()
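
Note: the two otx2 entries together show an array of per-queue delayed work items: the handler recovers its item with container_of() through the nested delayed_work and derives the queue index by pointer subtraction, while the stop path cancels every item synchronously. A hypothetical sketch of the same shape (refill_item, NUM_QUEUES and the helpers are invented names):

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

#define NUM_QUEUES 8                    /* illustrative queue count */

struct refill_item {
    struct delayed_work dwork;
    void *owner;
};

static struct refill_item *refill;      /* array, one item per queue */

static void refill_fn(struct work_struct *work)
{
    struct refill_item *item =
        container_of(work, struct refill_item, dwork.work);
    /* the queue index is simply the item's position in the array */
    int qidx = item - refill;

    /* ... refill queue qidx, optionally re-arming with schedule_delayed_work() ... */
    (void)qidx;
}

static int start_refill(void)
{
    int i;

    refill = kcalloc(NUM_QUEUES, sizeof(*refill), GFP_KERNEL);
    if (!refill)
        return -ENOMEM;
    for (i = 0; i < NUM_QUEUES; i++)
        INIT_DELAYED_WORK(&refill[i].dwork, refill_fn);
    return 0;
}

static void stop_refill(void)
{
    int i;

    /* mirror otx2_stop(): wait for every per-queue item to finish */
    for (i = 0; i < NUM_QUEUES; i++)
        cancel_delayed_work_sync(&refill[i].dwork);
    kfree(refill);
}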
|
/linux-6.1.9/kernel/power/ |
D | swap.c |
  640  unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */  member
  664  d->wrk);  in lzo_compress_threadfn()
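
Note: here wrk is not a work item at all but the per-thread LZO scratch buffer that lzo1x_1_compress() requires. A hedged sketch of that call pattern (CHUNK_SIZE and compress_chunk are illustrative; the per-thread structure is far too large for the stack and would be allocated with vmalloc() in practice):

#include <linux/lzo.h>
#include <linux/types.h>
#include <linux/errno.h>

#define CHUNK_SIZE (128 * 1024)         /* illustrative uncompressed chunk size */

struct cmp_data {
    unsigned char unc[CHUNK_SIZE];                          /* uncompressed data */
    unsigned char cmp[lzo1x_worst_compress(CHUNK_SIZE)];    /* compressed output */
    unsigned char wrk[LZO1X_1_MEM_COMPRESS];                /* per-thread scratch */
    size_t unc_len;
    size_t cmp_len;
};

/* compress one chunk; each thread owns its own cmp_data (and hence its own
 * wrk[] workspace), so no locking is needed around the scratch memory */
static int compress_chunk(struct cmp_data *d)
{
    int ret = lzo1x_1_compress(d->unc, d->unc_len,
                               d->cmp, &d->cmp_len, d->wrk);

    return ret == LZO_E_OK ? 0 : -EIO;
}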
|
/linux-6.1.9/drivers/gpu/drm/i915/gt/uc/ |
D | intel_guc_submission.c |
  1379  static void guc_timestamp_ping(struct work_struct *wrk)  in guc_timestamp_ping() argument
  1381  struct intel_guc *guc = container_of(wrk, typeof(*guc),  in guc_timestamp_ping()
  3383  static void submit_work_cb(struct irq_work *wrk)  in submit_work_cb() argument
  3385  struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);  in submit_work_cb()
|