/linux-6.1.9/net/x25/ |
D | x25_in.c |
    210  int queued = 0;  in x25_state3_machine() local
    277  queued = 1;  in x25_state3_machine()
    315  queued = !sock_queue_rcv_skb(sk, skb);  in x25_state3_machine()
    319  queued = 1;  in x25_state3_machine()
    330  return queued;  in x25_state3_machine()
    418  int queued = 0, frametype, ns, nr, q, d, m;  in x25_process_rx_frame() local
    427  queued = x25_state1_machine(sk, skb, frametype);  in x25_process_rx_frame()
    430  queued = x25_state2_machine(sk, skb, frametype);  in x25_process_rx_frame()
    433  queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);  in x25_process_rx_frame()
    436  queued = x25_state4_machine(sk, skb, frametype);  in x25_process_rx_frame()
    [all …]
|
D | x25_dev.c |
    51  int queued = 1;  in x25_receive_data() local
    56  queued = x25_process_rx_frame(sk, skb);  in x25_receive_data()
    58  queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));  in x25_receive_data()
    62  return queued;  in x25_receive_data()
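The x25_receive_data() hits above show the convention shared by the state-machine files in this listing: the `queued` return value tells the caller whether the frame was consumed (queued somewhere, directly or via the socket backlog) or is still the caller's to free. A minimal, self-contained C sketch of that pattern, with invented names (conn, frame, add_backlog) standing in for the real socket and skb types:

    #include <stdbool.h>
    #include <stdio.h>

    struct frame { int type; };
    struct conn  { int state; bool owned_by_user; };

    /* per-state handlers: return true when they keep (queue) the frame */
    static bool state1_machine(struct conn *c, struct frame *f) { return false; }
    static bool state3_machine(struct conn *c, struct frame *f) { return true; }
    static bool add_backlog(struct conn *c, struct frame *f)    { return true; }

    static bool process_rx_frame(struct conn *c, struct frame *f)
    {
        bool queued = false;

        switch (c->state) {          /* dispatch on connection state */
        case 1: queued = state1_machine(c, f); break;
        case 3: queued = state3_machine(c, f); break;
        }
        return queued;               /* false: caller still owns f and must free it */
    }

    static bool receive_data(struct conn *c, struct frame *f)
    {
        if (!c->owned_by_user)
            return process_rx_frame(c, f);   /* handle immediately */
        return add_backlog(c, f);            /* defer until the socket is released */
    }

    int main(void)
    {
        struct conn c = { .state = 3, .owned_by_user = false };
        struct frame f = { .type = 0 };

        printf("queued = %d\n", receive_data(&c, &f));
        return 0;
    }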
|
/linux-6.1.9/net/rose/ |
D | rose_in.c |
    105  int queued = 0;  in rose_state3_machine() local
    168  queued = 1;  in rose_state3_machine()
    205  return queued;  in rose_state3_machine()
    266  int queued = 0, frametype, ns, nr, q, d, m;  in rose_process_rx_frame() local
    275  queued = rose_state1_machine(sk, skb, frametype);  in rose_process_rx_frame()
    278  queued = rose_state2_machine(sk, skb, frametype);  in rose_process_rx_frame()
    281  queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m);  in rose_process_rx_frame()
    284  queued = rose_state4_machine(sk, skb, frametype);  in rose_process_rx_frame()
    287  queued = rose_state5_machine(sk, skb, frametype);  in rose_process_rx_frame()
    293  return queued;  in rose_process_rx_frame()
|
/linux-6.1.9/net/dccp/ |
D | input.c |
    45  int queued = 0;  in dccp_rcv_close() local
    76  queued = 1;  in dccp_rcv_close()
    86  return queued;  in dccp_rcv_close()
    91  int queued = 0;  in dccp_rcv_closereq() local
    101  return queued;  in dccp_rcv_closereq()
    113  queued = 1;  in dccp_rcv_closereq()
    120  return queued;  in dccp_rcv_closereq()
    524  int queued = 0;  in dccp_rcv_respond_partopen_state_process() local
    562  queued = 1; /* packet was queued  in dccp_rcv_respond_partopen_state_process()
    568  return queued;  in dccp_rcv_respond_partopen_state_process()
    [all …]
|
/linux-6.1.9/net/ax25/ |
D | ax25_std_in.c |
    143  int queued = 0;  in ax25_std_state3_machine() local
    225  queued = ax25_rx_iframe(ax25, skb);  in ax25_std_state3_machine()
    258  return queued;  in ax25_std_state3_machine()
    268  int queued = 0;  in ax25_std_state4_machine() local
    380  queued = ax25_rx_iframe(ax25, skb);  in ax25_std_state4_machine()
    413  return queued;  in ax25_std_state4_machine()
    421  int queued = 0, frametype, ns, nr, pf;  in ax25_std_frame_in() local
    427  queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type);  in ax25_std_frame_in()
    430  queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type);  in ax25_std_frame_in()
    433  queued = ax25_std_state3_machine(ax25, skb, frametype, ns, nr, pf, type);  in ax25_std_frame_in()
    [all …]
|
D | ax25_ds_in.c |
    147  int queued = 0;  in ax25_ds_state3_machine() local
    240  queued = ax25_rx_iframe(ax25, skb);  in ax25_ds_state3_machine()
    273  return queued;  in ax25_ds_state3_machine()
    281  int queued = 0, frametype, ns, nr, pf;  in ax25_ds_frame_in() local
    287  queued = ax25_ds_state1_machine(ax25, skb, frametype, pf, type);  in ax25_ds_frame_in()
    290  queued = ax25_ds_state2_machine(ax25, skb, frametype, pf, type);  in ax25_ds_frame_in()
    293  queued = ax25_ds_state3_machine(ax25, skb, frametype, ns, nr, pf, type);  in ax25_ds_frame_in()
    297  return queued;  in ax25_ds_frame_in()
|
D | ax25_in.c |
    103  int queued = 0;  in ax25_rx_iframe() local
    145  queued = 1;  in ax25_rx_iframe()
    151  return queued;  in ax25_rx_iframe()
    159  int queued = 0;  in ax25_process_rx_frame() local
    167  queued = ax25_std_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
    173  queued = ax25_ds_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
    175  queued = ax25_std_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
    180  return queued;  in ax25_process_rx_frame()
|
/linux-6.1.9/net/netrom/ |
D | nr_in.c |
    153  int queued = 0;  in nr_state3_machine() local
    225  queued = 1;  in nr_state3_machine()
    272  return queued;  in nr_state3_machine()
    279  int queued = 0, frametype;  in nr_process_rx_frame() local
    288  queued = nr_state1_machine(sk, skb, frametype);  in nr_process_rx_frame()
    291  queued = nr_state2_machine(sk, skb, frametype);  in nr_process_rx_frame()
    294  queued = nr_state3_machine(sk, skb, frametype);  in nr_process_rx_frame()
    300  return queued;  in nr_process_rx_frame()
|
/linux-6.1.9/drivers/gpu/drm/ |
D | drm_flip_work.c |
    63  list_add_tail(&task->node, &work->queued);  in drm_flip_work_queue_task()
    107  list_splice_tail(&work->queued, &work->commited);  in drm_flip_work_commit()
    108  INIT_LIST_HEAD(&work->queued);  in drm_flip_work_commit()
    151  INIT_LIST_HEAD(&work->queued);  in drm_flip_work_init()
    168  WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));  in drm_flip_work_cleanup()
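The drm_flip_work hits show a two-list hand-off: tasks accumulate on the `queued` list and are moved to the committed list in one splice when the caller commits them. A rough kernel-style sketch of that shape (illustrative names; the real code also holds a spinlock and kicks a worker):

    #include <linux/list.h>

    struct flip_work { struct list_head queued; struct list_head committed; };
    struct flip_task { struct list_head node; };

    static void queue_task(struct flip_work *w, struct flip_task *t)
    {
        list_add_tail(&t->node, &w->queued);        /* collect until commit time */
    }

    static void commit_tasks(struct flip_work *w)
    {
        /* move every queued task onto the committed list in one operation ... */
        list_splice_tail(&w->queued, &w->committed);
        /* ... and reset the source list, since list_splice_tail() does not */
        INIT_LIST_HEAD(&w->queued);
    }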
|
/linux-6.1.9/drivers/net/wireless/mediatek/mt76/ |
D | debugfs.c |
    67  i, q->queued, q->head, q->tail);  in mt76_queues_read()
    77  int i, queued;  in mt76_rx_queues_read() local
    83  queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued;  in mt76_rx_queues_read()
    85  i, queued, q->head, q->tail);  in mt76_rx_queues_read()
|
D | sdio.c |
    317  q->queued = 0;  in mt76s_alloc_rx_queue()
    372  if (q->queued > 0) {  in mt76s_get_next_rx_entry()
    375  q->queued--;  in mt76s_get_next_rx_entry()
    439  while (q->queued > 0) {  in mt76s_process_tx_queue()
    455  if (!q->queued)  in mt76s_process_tx_queue()
    527  if (q->queued == q->ndesc)  in mt76s_tx_queue_skb()
    542  q->queued++;  in mt76s_tx_queue_skb()
    553  if (q->queued == q->ndesc)  in mt76s_tx_queue_skb_raw()
    566  q->queued++;  in mt76s_tx_queue_skb_raw()
|
/linux-6.1.9/security/integrity/ima/ |
D | ima_asymmetric_keys.c |
    33  bool queued = false;  in ima_post_key_create_or_update() local
    43  queued = ima_queue_key(keyring, payload, payload_len);  in ima_post_key_create_or_update()
    45  if (queued)  in ima_post_key_create_or_update()
|
D | ima_queue_keys.c |
    107  bool queued = false;  in ima_queue_key() local
    117  queued = true;  in ima_queue_key()
    121  if (!queued)  in ima_queue_key()
    124  return queued;  in ima_queue_key()
|
/linux-6.1.9/tools/testing/selftests/net/mptcp/ |
D | mptcp_inq.c |
    207  int nsd, ret, queued = -1;  in wait_for_ack() local
    210  ret = ioctl(fd, TIOCOUTQ, &queued);  in wait_for_ack()
    218  if ((size_t)queued > total)  in wait_for_ack()
    219  xerror("TIOCOUTQ %u, but only %zu expected\n", queued, total);  in wait_for_ack()
    220  assert(nsd <= queued);  in wait_for_ack()
    222  if (queued == 0)  in wait_for_ack()
    355  unsigned int queued;  in process_one_client() local
    357  ret = ioctl(fd, FIONREAD, &queued);  in process_one_client()
    360  if (queued > expect_len)  in process_one_client()
    362  queued, expect_len);  in process_one_client()
    [all …]
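This selftest exercises two standard socket ioctls: TIOCOUTQ (bytes still held in the send queue) and FIONREAD (bytes queued and ready to read). A minimal userspace sketch of their use, assuming `fd` is an already-connected stream socket such as TCP or MPTCP:

    #include <stdio.h>
    #include <sys/ioctl.h>

    static void report_queue_depths(int fd)
    {
        int out_queued = -1;   /* bytes still sitting in the send queue */
        int in_queued  = -1;   /* bytes queued for reading */

        if (ioctl(fd, TIOCOUTQ, &out_queued) == 0)
            printf("TIOCOUTQ: %d bytes still queued for sending\n", out_queued);
        if (ioctl(fd, FIONREAD, &in_queued) == 0)
            printf("FIONREAD: %d bytes queued for reading\n", in_queued);
    }

The test above waits until TIOCOUTQ reports 0, i.e. until everything written has drained from the send queue.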
|
/linux-6.1.9/Documentation/userspace-api/media/mediactl/ |
D | media-request-ioc-queue.rst |
    34  If the request was successfully queued, then the file descriptor can be
    37  If the request was already queued before, then ``EBUSY`` is returned.
    42  Once a request is queued, then the driver is required to gracefully handle
    49  queued directly and you next try to queue a request, or vice versa.
    62  The request was already queued or the application queued the first
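A hedged userspace sketch of what this page describes, assuming `req_fd` was obtained with MEDIA_IOC_REQUEST_ALLOC and the request's buffers and controls were prepared beforehand; completion is waited for by polling the request fd:

    #include <poll.h>
    #include <sys/ioctl.h>
    #include <linux/media.h>

    static int queue_request_and_wait(int req_fd)
    {
        struct pollfd pfd = { .fd = req_fd, .events = POLLPRI };

        /* fails with EBUSY if this request was already queued */
        if (ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE) < 0)
            return -1;

        /* the request fd signals POLLPRI once the driver has completed it */
        if (poll(&pfd, 1, -1) != 1)
            return -1;

        return 0;
    }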
|
D | media-request-ioc-reinit.rst |
    40  A request can only be re-initialized if it either has not been queued
    41  yet, or if it was queued and completed. Otherwise it will set ``errno``
    51  The request is queued but not yet completed.
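Per the text above, a completed request fd can be recycled rather than reallocated; a small sketch under the same assumptions as the queue example:

    #include <sys/ioctl.h>
    #include <linux/media.h>

    /* only valid if the request has not been queued, or was queued and has
     * completed; otherwise the ioctl fails with EBUSY */
    static int recycle_request(int req_fd)
    {
        return ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT);
    }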
|
/linux-6.1.9/virt/kvm/ |
D | async_pf.c |
    134  vcpu->async_pf.queued = 0;  in kvm_clear_async_pf_completion_queue()
    154  vcpu->async_pf.queued--;  in kvm_check_async_pf_completion()
    168  if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)  in kvm_setup_async_pf()
    195  vcpu->async_pf.queued++;  in kvm_setup_async_pf()
    226  vcpu->async_pf.queued++;  in kvm_async_pf_wakeup_all()
|
/linux-6.1.9/drivers/md/ |
D | dm-cache-background-tracker.c |
    26  struct list_head queued;  member
    47  INIT_LIST_HEAD(&b->queued);  in btracker_create()
    205  list_add(&w->list, &b->queued);  in btracker_queue()
    219  if (list_empty(&b->queued))  in btracker_issue()
    222  w = list_first_entry(&b->queued, struct bt_work, list);  in btracker_issue()
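The background tracker keeps pending work on an intrusive `queued` list and hands it out one item at a time. A rough sketch of that queue/issue shape (illustrative names; the real code also moves issued work onto a separate list and holds a lock):

    #include <linux/list.h>

    struct tracker   { struct list_head queued; };
    struct work_item { struct list_head list; };

    static void tracker_queue(struct tracker *t, struct work_item *w)
    {
        list_add_tail(&w->list, &t->queued);            /* pending until issued */
    }

    static struct work_item *tracker_issue(struct tracker *t)
    {
        struct work_item *w;

        if (list_empty(&t->queued))
            return NULL;                                /* nothing waiting */

        w = list_first_entry(&t->queued, struct work_item, list);
        list_del_init(&w->list);                        /* caller owns it now */
        return w;
    }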
|
/linux-6.1.9/fs/xfs/ |
D | xfs_mru_cache.c |
    102  unsigned int queued; /* work has been queued */  member
    204  if (!mru->queued) {  in _xfs_mru_cache_list_insert()
    205  mru->queued = 1;  in _xfs_mru_cache_list_insert()
    280  mru->queued = next;  in _xfs_mru_cache_reap()
    281  if ((mru->queued > 0)) {  in _xfs_mru_cache_reap()
    388  if (mru->queued) {  in xfs_mru_cache_flush()
|
/linux-6.1.9/drivers/media/platform/renesas/vsp1/ |
D | vsp1_dl.c |
    224  struct vsp1_dl_list *queued;  member
    841  if (!dlm->queued)  in vsp1_dl_list_hw_update_pending()
    899  __vsp1_dl_list_put(dlm->queued);  in vsp1_dl_list_commit_continuous()
    900  dlm->queued = dl;  in vsp1_dl_list_commit_continuous()
    1020  if (dlm->queued) {  in vsp1_dlm_irq_frame_end()
    1021  if (dlm->queued->flags & VSP1_DL_FRAME_END_INTERNAL)  in vsp1_dlm_irq_frame_end()
    1023  dlm->queued->flags &= ~VSP1_DL_FRAME_END_INTERNAL;  in vsp1_dlm_irq_frame_end()
    1026  dlm->active = dlm->queued;  in vsp1_dlm_irq_frame_end()
    1027  dlm->queued = NULL;  in vsp1_dlm_irq_frame_end()
    1038  dlm->queued = dlm->pending;  in vsp1_dlm_irq_frame_end()
    [all …]
|
/linux-6.1.9/Documentation/userspace-api/media/v4l/ |
D | vidioc-streamon.rst |
    51  If ``VIDIOC_STREAMON`` fails then any already queued buffers will remain
    52  queued.
    63  If buffers have been queued with :ref:`VIDIOC_QBUF` and
    65  ``VIDIOC_STREAMON``, then those queued buffers will also be removed from
    77  but ``VIDIOC_STREAMOFF`` will return queued buffers to their starting
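A minimal userspace sketch of the ioctls this page describes, assuming `fd` is a V4L2 capture device whose buffers were already queued with VIDIOC_QBUF:

    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int run_stream(int fd)
    {
        int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

        /* on failure, buffers already queued with VIDIOC_QBUF stay queued */
        if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
            return -1;

        /* ... VIDIOC_DQBUF / VIDIOC_QBUF capture loop would go here ... */

        /* removes all buffers from both queues and returns them to the
         * unqueued state, as the excerpt above notes */
        return ioctl(fd, VIDIOC_STREAMOFF, &type);
    }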
|
/linux-6.1.9/sound/firewire/fireworks/ |
D | fireworks_hwdep.c |
    128  bool queued;  in hwdep_read() local
    133  queued = efw->push_ptr != efw->pull_ptr;  in hwdep_read()
    135  while (!dev_lock_changed && !queued) {  in hwdep_read()
    144  queued = efw->push_ptr != efw->pull_ptr;  in hwdep_read()
    151  else if (queued)  in hwdep_read()
|
/linux-6.1.9/Documentation/features/locking/queued-rwlocks/ |
D | arch-support.txt |
    2  # Feature name: queued-rwlocks
    4  # description: arch supports queued rwlocks
|
/linux-6.1.9/Documentation/features/locking/queued-spinlocks/ |
D | arch-support.txt |
    2  # Feature name: queued-spinlocks
    4  # description: arch supports queued spinlocks
|
/linux-6.1.9/Documentation/usb/ |
D | ohci.rst |
    22  - interrupt transfers can be larger, and can be queued
    28  types can be queued. That was also true in "usb-ohci", except for interrupt
    30  to overhead in IRQ processing. When interrupt transfers are queued, those
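For context, queuing an interrupt transfer from a driver is an ordinary URB submission; a hedged kernel-style sketch, where the device, endpoint, buffer, completion handler, and interval are all assumed to come from the caller:

    #include <linux/usb.h>
    #include <linux/slab.h>

    /* submit one interrupt URB; several of these can be queued per endpoint */
    static int queue_int_transfer(struct usb_device *udev, unsigned int ep,
                                  void *buf, int len, usb_complete_t done,
                                  void *ctx, int interval)
    {
        struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);

        if (!urb)
            return -ENOMEM;

        usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, ep),
                         buf, len, done, ctx, interval);
        return usb_submit_urb(urb, GFP_KERNEL);
    }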
|