Searched refs:workitem (Results 1 – 4 of 4) sorted by relevance
682 struct dj_workitem *workitem) in logi_dj_recv_destroy_djhid_device() argument
689 dj_dev = djrcv_dev->paired_dj_devices[workitem->device_index]; in logi_dj_recv_destroy_djhid_device()
690 djrcv_dev->paired_dj_devices[workitem->device_index] = NULL; in logi_dj_recv_destroy_djhid_device()
703 struct dj_workitem *workitem) in logi_dj_recv_add_djhid_device() argument
709 u8 device_index = workitem->device_index; in logi_dj_recv_add_djhid_device()
735 dj_hiddev->product = (workitem->quad_id_msb << 8) | in logi_dj_recv_add_djhid_device()
736 workitem->quad_id_lsb; in logi_dj_recv_add_djhid_device()
737 if (workitem->device_type) { in logi_dj_recv_add_djhid_device()
740 switch (workitem->device_type) { in logi_dj_recv_add_djhid_device()
774 dj_dev->reports_supported = workitem->reports_supported; in logi_dj_recv_add_djhid_device()
[all …]
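These hits are from the Logitech DJ receiver driver: the work item describes a paired device (index, quad ID bytes, device type, supported reports), and the add path combines the two quad ID bytes into a 16-bit product ID for the new HID device. The sketch below only illustrates that byte packing; the struct layout and names are illustrative, not the driver's actual definitions.

/* Illustrative sketch of the field usage visible in the hits above.
 * struct example_workitem is NOT the driver's real dj_workitem layout.
 */
#include <linux/types.h>

struct example_workitem {
	u8 device_index;
	u8 device_type;
	u8 quad_id_msb;
	u8 quad_id_lsb;
	u32 reports_supported;
};

static u16 example_product_id(const struct example_workitem *workitem)
{
	/* Same byte packing as the dj_hiddev->product assignment in the snippet. */
	return (workitem->quad_id_msb << 8) | workitem->quad_id_lsb;
}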
108 struct work_struct workitem; member
450 cancel_work_sync(&ep->workitem); in endpoint_quiesce()
501 INIT_WORK(&ep->workitem, work); in endpoint_alloc()
638 flush_work(&chan->xdev->in_ep->workitem); in safely_assign_in_fifo()
668 queue_work(ep->xdev->workq, &ep->workitem); in bulk_in_completer()
689 queue_work(ep->xdev->workq, &ep->workitem); in bulk_out_completer()
872 workitem); in bulk_out_work()
1022 container_of(work, struct xillyusb_endpoint, workitem); in bulk_in_work()
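Taken together, the xillyusb hits trace the full work_struct lifecycle: a work item embedded in the endpoint structure, initialized in endpoint_alloc(), queued onto the driver's workqueue from the bulk completion callbacks, recovered with container_of() in the work functions, and flushed or cancelled on teardown. A minimal sketch of that pattern, using illustrative example_* names rather than the driver's own types:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_endpoint {
	struct workqueue_struct *workq;		/* driver-private workqueue */
	struct work_struct workitem;		/* embedded work item, as at line 108 */
};

static void example_work(struct work_struct *work)
{
	/* Recover the owning endpoint, as in bulk_in_work()/bulk_out_work(). */
	struct example_endpoint *ep =
		container_of(work, struct example_endpoint, workitem);

	/* ... drain or refill the endpoint's FIFO here ... */
	(void)ep;
}

static struct example_endpoint *example_endpoint_alloc(struct workqueue_struct *workq)
{
	struct example_endpoint *ep = kzalloc(sizeof(*ep), GFP_KERNEL);

	if (!ep)
		return NULL;

	ep->workq = workq;
	INIT_WORK(&ep->workitem, example_work);	/* as in endpoint_alloc() */
	return ep;
}

static void example_completer(struct example_endpoint *ep)
{
	/* As in bulk_in_completer()/bulk_out_completer(): defer to a kworker. */
	queue_work(ep->workq, &ep->workitem);
}

static void example_quiesce(struct example_endpoint *ep)
{
	/* As in endpoint_quiesce(): make sure no handler is still running. */
	cancel_work_sync(&ep->workitem);
}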
1187 struct delayed_work *workitem = container_of( in xillybus_autoflush() local
1190 workitem, struct xilly_channel, rd_workitem); in xillybus_autoflush()
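The xillybus_autoflush() hit shows the delayed-work variant of the same idea: the handler receives a plain work_struct pointer, converts it to its delayed_work wrapper, and then recovers the owning channel from the embedded rd_workitem member. A sketch under those assumptions (to_delayed_work() stands in for the open-coded container_of() in the hit, and example_channel is illustrative):

#include <linux/workqueue.h>

struct example_channel {
	struct delayed_work rd_workitem;	/* embedded delayed work item */
	/* ... read buffer state ... */
};

static void example_autoflush(struct work_struct *work)
{
	struct delayed_work *workitem = to_delayed_work(work);
	struct example_channel *channel =
		container_of(workitem, struct example_channel, rd_workitem);

	/* ... flush any partially filled read buffer for this channel ... */
	(void)channel;
}

static void example_channel_setup(struct example_channel *channel)
{
	/* Initialized once at setup time ... */
	INIT_DELAYED_WORK(&channel->rd_workitem, example_autoflush);
}

static void example_arm_autoflush(struct example_channel *channel, unsigned long delay_jiffies)
{
	/* ... then re-armed whenever a pending buffer should auto-flush later. */
	schedule_delayed_work(&channel->rd_workitem, delay_jiffies);
}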
43 parallel. After receiving each command a separated kernel workitem is prepared
45 So, each SMB workitem is queued to the kworkers. This allows the benefit of load
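These ksmbd documentation lines describe per-command dispatch: every received SMB command gets its own work item and is queued to the kworker pool, so commands can be handled in parallel and load-balanced across CPUs. A minimal sketch of that dispatch; the example_* names are illustrative, not ksmbd's real API:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_smb_work {
	struct work_struct workitem;
	void *request_buf;	/* the received SMB command */
};

static void example_handle_command(struct work_struct *work)
{
	struct example_smb_work *smb_work =
		container_of(work, struct example_smb_work, workitem);

	/* ... parse the SMB command and send the response ... */
	kfree(smb_work->request_buf);
	kfree(smb_work);
}

static int example_queue_command(struct workqueue_struct *wq, void *request_buf)
{
	struct example_smb_work *smb_work = kzalloc(sizeof(*smb_work), GFP_KERNEL);

	if (!smb_work)
		return -ENOMEM;

	smb_work->request_buf = request_buf;
	INIT_WORK(&smb_work->workitem, example_handle_command);
	queue_work(wq, &smb_work->workitem);	/* one workitem per received command */
	return 0;
}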