/linux-6.6.21/drivers/md/ |
D | dm-delay.c |
     83  struct dm_delay_info *delayed, *next;  in flush_delayed_bios() local
     89  list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {  in flush_delayed_bios()
     90  if (flush_all || time_after_eq(jiffies, delayed->expires)) {  in flush_delayed_bios()
     91  struct bio *bio = dm_bio_from_per_bio_data(delayed,  in flush_delayed_bios()
     93  list_del(&delayed->list);  in flush_delayed_bios()
     95  delayed->class->ops--;  in flush_delayed_bios()
    101  next_expires = delayed->expires;  in flush_delayed_bios()
    103  next_expires = min(next_expires, delayed->expires);  in flush_delayed_bios()
    247  struct dm_delay_info *delayed;  in delay_bio() local
    253  delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));  in delay_bio()
    [all …]
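The dm-delay hits above trace a common kernel idiom: walk a list of timestamped entries with list_for_each_entry_safe(), flush everything whose deadline has passed, and remember the earliest remaining deadline so the caller can re-arm a timer. A minimal userspace C sketch of that expiry-scan shape (struct and function names are illustrative, not dm-delay's; a plain singly linked list stands in for the kernel list API):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for dm_delay_info: a payload plus a deadline. */
struct delayed_entry {
	struct delayed_entry *next;
	unsigned long expires;	/* deadline in abstract "jiffies" */
	int id;
};

/*
 * Flush every entry whose deadline has passed (or all of them when
 * flush_all is set), and report the earliest remaining deadline --
 * the same shape as flush_delayed_bios().
 */
static bool flush_expired(struct delayed_entry **head, unsigned long now,
			  bool flush_all, unsigned long *next_expires)
{
	struct delayed_entry **pp = head;
	bool pending = false;

	while (*pp) {
		struct delayed_entry *e = *pp;

		if (flush_all || now >= e->expires) {
			*pp = e->next;	/* unlink, like list_del() */
			printf("flushing entry %d\n", e->id);
			free(e);
			continue;
		}
		/* Still pending: remember the soonest deadline. */
		if (!pending || e->expires < *next_expires)
			*next_expires = e->expires;
		pending = true;
		pp = &e->next;
	}
	return pending;
}

int main(void)
{
	struct delayed_entry *head = NULL;
	unsigned long next = 0;

	for (int i = 0; i < 3; i++) {
		struct delayed_entry *e = malloc(sizeof(*e));
		e->expires = 100 + 10 * i;
		e->id = i;
		e->next = head;
		head = e;
	}
	if (flush_expired(&head, 105, false, &next))
		printf("next deadline: %lu\n", next);
	return 0;
}

dm-delay itself attaches the tracking struct to each bio via dm_per_bio_data()/dm_bio_from_per_bio_data() rather than allocating separately, but the scan-and-rearm shape is the same.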
|
/linux-6.6.21/sound/pci/mixart/ |
D | mixart_core.h |
    217  u32 delayed;  member
    230  u32 delayed;  member
    387  u32 delayed;  member
    437  u32 delayed;  member
    497  u32 delayed;  member
    542  u32 delayed;  member
|
/linux-6.6.21/Documentation/admin-guide/device-mapper/ |
D | delay.rst |
     24  echo "0 `blockdev --getsz $1` delay $1 0 500" | dmsetup create delayed
     31  echo "0 `blockdev --getsz $1` delay $1 0 0 $2 0 500" | dmsetup create delayed
|
/linux-6.6.21/Documentation/ABI/testing/ |
D | sysfs-devices-xenbus |
     20  pv device has been delayed in order to avoid stalls due to
     30  trigger delayed EOI processing.
     37  before delayed EOI processing is triggered for a Xen pv
|
/linux-6.6.21/tools/virtio/ |
D | virtio_test.c |
    170  bool delayed, int batch, int reset_n, int bufs)  in run_test() argument
    262  if (delayed) {  in run_test()
    351  bool delayed = false;  in main() local
    372  delayed = true;  in main()
    401  run_test(&dev, &dev.vqs[0], delayed, batch, reset, 0x100000);  in main()
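virtio_test.c is a userspace tool, and the matches trace the usual flag plumbing: a bool delayed defaulting to false in main(), flipped by a command-line option, then threaded down into run_test(). A hedged sketch of that pattern with getopt_long() (the option name and the run() helper are made up for illustration, not copied from virtio_test.c):

#include <getopt.h>
#include <stdbool.h>
#include <stdio.h>

static void run(bool delayed)
{
	printf("running with delayed=%d\n", delayed);
}

int main(int argc, char *argv[])
{
	bool delayed = false;	/* default, as in virtio_test.c */
	static const struct option opts[] = {
		{ "delayed-interrupt", no_argument, NULL, 'D' },
		{ NULL },
	};
	int c;

	while ((c = getopt_long(argc, argv, "D", opts, NULL)) != -1) {
		if (c == 'D')
			delayed = true;	/* flag flips once, stays set */
	}
	run(delayed);
	return 0;
}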
|
/linux-6.6.21/Documentation/devicetree/bindings/gpio/ |
D | gpio-delay.yaml |
     14  is delayed by some external setup, e.g. RC circuit.
     47  description: Array of GPIOs which output signal change is delayed
|
/linux-6.6.21/fs/xfs/scrub/ |
D | fscounters.c |
    302  uint64_t delayed;  in xchk_fscount_aggregate_agcounts() local
    364  delayed = percpu_counter_sum(&mp->m_delalloc_blks);  in xchk_fscount_aggregate_agcounts()
    365  fsc->fdblocks -= delayed;  in xchk_fscount_aggregate_agcounts()
    368  delayed);  in xchk_fscount_aggregate_agcounts()
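The fscounters.c hits show the XFS scrub code folding delayed-allocation blocks out of the free-block count: percpu_counter_sum() visits every CPU's local delta to get an exact total, which is then subtracted from fdblocks. A userspace C sketch of that exact-sum-then-adjust step, with a plain array standing in for the per-CPU counter (names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for a percpu counter: one signed delta per CPU. */
static int64_t delalloc_pcpu[NR_CPUS] = { 100, -20, 35, 5 };

/* Like percpu_counter_sum(): fold all per-CPU deltas into one total. */
static int64_t pcpu_sum(const int64_t *pcpu)
{
	int64_t sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += pcpu[cpu];
	return sum;
}

int main(void)
{
	uint64_t fdblocks = 10000;	/* aggregated free-block count */
	int64_t delayed = pcpu_sum(delalloc_pcpu);

	/* Delalloc reservations are not really free: subtract them. */
	fdblocks -= delayed;
	printf("delayed=%lld fdblocks=%llu\n",
	       (long long)delayed, (unsigned long long)fdblocks);
	return 0;
}

The exact sum matters here because scrub is cross-checking counters, which is why the code pays for percpu_counter_sum() rather than using the cheaper, approximate percpu_counter_read().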
|
/linux-6.6.21/Documentation/networking/ |
D | proc_net_tcp.rst |
     40  | | | | | | (delayed ACK control data)
     53  2 another timer (e.g. delayed ack or keepalive) is pending
|
/linux-6.6.21/Documentation/misc-devices/ |
D | bh1770glc.rst |
     36  by using a delayed work. As long as there is proximity threshold above
     37  interrupts the delayed work is pushed forward. So, when proximity level goes
     38  below the threshold value, there is no interrupt and the delayed work will
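The bh1770glc text describes the classic "push the deadline forward" use of delayed work: each interrupt re-arms the work item, so the handler only runs once interrupts stop arriving. A minimal kernel-style sketch of that idiom (the prox_work name and the timeout value are illustrative, not taken from the driver):

#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>

#define PROX_TIMEOUT_MS	500	/* illustrative timeout */

static void prox_work_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(prox_work, prox_work_fn);

/* Runs only after PROX_TIMEOUT_MS elapse with no new interrupt. */
static void prox_work_fn(struct work_struct *work)
{
	/* Proximity has stayed below threshold: report "far" here. */
}

static irqreturn_t prox_irq(int irq, void *data)
{
	/*
	 * mod_delayed_work() re-arms a pending work item, pushing its
	 * expiry forward -- so prox_work_fn() fires only once the
	 * above-threshold interrupts stop coming.
	 */
	mod_delayed_work(system_wq, &prox_work,
			 msecs_to_jiffies(PROX_TIMEOUT_MS));
	return IRQ_HANDLED;
}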
|
/linux-6.6.21/fs/btrfs/ |
D | Makefile |
     31  compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
|
/linux-6.6.21/Documentation/devicetree/bindings/memory-controllers/ |
D | ti,gpmc-child.yaml |
    129  description: ADV signal is delayed by half GPMC clock
    133  description: CS signal is delayed by half GPMC clock
    149  description: OE signal is delayed by half GPMC clock
    153  description: WE signal is delayed by half GPMC clock
|
/linux-6.6.21/net/sched/ |
D | sch_fq.c |
    101  struct rb_root delayed; /* for rate limited flows */  member
    174  rb_erase(&f->rate_node, &q->delayed);  in fq_flow_unset_throttled()
    181  struct rb_node **p = &q->delayed.rb_node, *parent = NULL;  in fq_flow_set_throttled()
    194  rb_insert_color(&f->rate_node, &q->delayed);  in fq_flow_set_throttled()
    515  while ((p = rb_first(&q->delayed)) != NULL) {  in fq_check_throttled()
    691  q->delayed = RB_ROOT;  in fq_reset()
    943  q->delayed = RB_ROOT;  in fq_init()
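The sch_fq hits show a red-black tree (q->delayed) holding rate-limited flows sorted by the time they may next send; lines 181-194 are the canonical kernel rbtree insertion walk. A hedged kernel-style sketch of that walk, keyed by a time_next_packet field (the struct is simplified from sch_fq's fq_flow):

#include <linux/rbtree.h>
#include <linux/types.h>

struct flow {
	struct rb_node rate_node;
	u64 time_next_packet;	/* sort key: when the flow may send again */
};

/* Insert f into a tree ordered by time_next_packet, as sch_fq does. */
static void throttle_flow(struct rb_root *delayed, struct flow *f)
{
	struct rb_node **p = &delayed->rb_node, *parent = NULL;

	while (*p) {
		struct flow *cur;

		parent = *p;
		cur = rb_entry(parent, struct flow, rate_node);
		/* Equal keys go right so the leftmost node is the soonest. */
		if (f->time_next_packet >= cur->time_next_packet)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&f->rate_node, parent, p);
	rb_insert_color(&f->rate_node, delayed);
}

The matching drain is visible in the listing: fq_check_throttled() repeatedly takes rb_first() (the earliest deadline) and rb_erase()s it until it hits a flow whose send time is still in the future, and fq_reset()/fq_init() drop the whole tree by reassigning RB_ROOT.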
|
/linux-6.6.21/fs/gfs2/ |
D | util.c |
    373  bool delayed)  in gfs2_assert_withdraw_i() argument
    388  delayed = false;  in gfs2_assert_withdraw_i()
    390  if (delayed)  in gfs2_assert_withdraw_i()
|
/linux-6.6.21/drivers/gpu/vga/ |
D | Kconfig |
     12  X isn't running and delayed switching until the next logoff. This
|
/linux-6.6.21/drivers/cpufreq/ |
D | sa1110-cpufreq.c |
    130  static inline void set_mdcas(u_int *mdcas, int delayed, u_int rcd)  in set_mdcas() argument
    135  shift = delayed + 1 + rcd;  in set_mdcas()
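The sa1110 helper derives a bit position from SDRAM timing parameters (shift = delayed + 1 + rcd) and then patterns the multi-word MDCAS register array accordingly. A small userspace C sketch of placing a bit at a computed position inside a 3 x 32-bit array (the real MDCAS waveform construction is not reproduced here, only the shift arithmetic):

#include <stdint.h>
#include <stdio.h>

/*
 * Set the bit at position 'shift' inside an array of 32-bit words,
 * where shift is derived from timing parameters as in set_mdcas():
 * shift = delayed + 1 + rcd.
 */
static void set_bit_at(uint32_t *words, int delayed, int rcd)
{
	int shift = delayed + 1 + rcd;

	words[shift / 32] |= 1u << (shift % 32);
}

int main(void)
{
	uint32_t mdcas[3] = { 0, 0, 0 };

	set_bit_at(mdcas, 1, 2);	/* shift = 4, lands in word 0 */
	printf("%08x %08x %08x\n", mdcas[0], mdcas[1], mdcas[2]);
	return 0;
}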
|
/linux-6.6.21/drivers/usb/serial/ |
D | usb-wwan.h |
     51  struct usb_anchor delayed;  member
|
D | usb_wwan.c |
    182  usb_anchor_urb(this_urb, &portdata->delayed);  in usb_wwan_write()
    402  urb = usb_get_from_anchor(&portdata->delayed);  in usb_wwan_close()
    456  init_usb_anchor(&portdata->delayed);  in usb_wwan_port_probe()
    578  urb = usb_get_from_anchor(&portdata->delayed);  in usb_wwan_submit_delayed_urbs()
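usb_wwan.c, sierra.c and cdc-acm.h (below) all share the same idiom: while the device is suspended, outgoing write URBs are parked on a struct usb_anchor named "delayed", then drained and submitted on resume. A hedged kernel-style sketch of the park-and-drain pattern (locking and most error handling trimmed; the helper names are illustrative):

#include <linux/usb.h>
#include <linux/printk.h>

static struct usb_anchor delayed;	/* init_usb_anchor(&delayed) in probe */

/* While suspended: park the URB on the anchor instead of submitting it. */
static void queue_write(struct urb *urb, bool suspended)
{
	if (suspended) {
		usb_anchor_urb(urb, &delayed);
		return;
	}
	usb_submit_urb(urb, GFP_ATOMIC);
}

/* On resume: drain the anchor and submit everything that was parked. */
static void submit_delayed_urbs(void)
{
	struct urb *urb;

	while ((urb = usb_get_from_anchor(&delayed))) {
		int err = usb_submit_urb(urb, GFP_ATOMIC);

		if (err)
			pr_err("delayed resubmit failed: %d\n", err);
		usb_free_urb(urb);	/* drop the ref usb_get_from_anchor() took */
	}
}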
|
D | sierra.c |
    272  struct usb_anchor delayed;  member
    481  usb_anchor_urb(urb, &portdata->delayed);  in sierra_write()
    747  urb = usb_get_from_anchor(&portdata->delayed);  in sierra_close()
    870  init_usb_anchor(&portdata->delayed);  in sierra_port_probe()
    961  urb = usb_get_from_anchor(&portdata->delayed);  in sierra_submit_delayed_urbs()
|
/linux-6.6.21/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_vm_sdma.c |
     57  : &p->vm->delayed;  in amdgpu_vm_sdma_alloc_job()
    124  ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,  in amdgpu_vm_sdma_commit()
|
/linux-6.6.21/drivers/crypto/intel/qat/qat_common/ |
D | icp_qat_hw.h |
    365  #define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(dir, delayed, \  argument
    369  (((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) << \
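The QAT header builds a hardware compression config word by masking each parameter to its field width and shifting it to its field position. A generic sketch of that mask-and-shift packing style (the field widths and positions here are invented for illustration and are NOT QAT's real register layout):

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout: not the real QAT config word. */
#define CFG_DIR_MASK		0x1
#define CFG_DIR_SHIFT		0
#define CFG_DELAYED_MASK	0x1
#define CFG_DELAYED_SHIFT	1
#define CFG_LEVEL_MASK		0xF
#define CFG_LEVEL_SHIFT		4

/* Mask each argument, shift it into place, OR the fields together. */
#define CFG_BUILD(dir, delayed, level)					\
	((((dir)     & CFG_DIR_MASK)     << CFG_DIR_SHIFT) |		\
	 (((delayed) & CFG_DELAYED_MASK) << CFG_DELAYED_SHIFT) |	\
	 (((level)   & CFG_LEVEL_MASK)   << CFG_LEVEL_SHIFT))

int main(void)
{
	uint32_t cfg = CFG_BUILD(1, 1, 9);

	printf("cfg = 0x%08x\n", cfg);	/* 0x00000093 */
	return 0;
}

Masking before shifting, as the QAT macro does, keeps an out-of-range argument from corrupting neighboring fields.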
|
/linux-6.6.21/drivers/usb/class/ |
D | cdc-acm.h |
    103  struct usb_anchor delayed;  /* writes queued for a device about to be woken */  member
|
/linux-6.6.21/tools/testing/selftests/drivers/net/mlxsw/ |
D | tc_action_hw_stats.sh |
     86  skip_sw dst_ip 192.0.2.2 action drop hw_stats delayed
|
/linux-6.6.21/Documentation/driver-api/mmc/ |
D | mmc-async-req.rst |
     86  * the transfer is delayed, guesstimate max 4k as first chunk size.
     96  * before this call, the transfer is delayed.
|
/linux-6.6.21/Documentation/RCU/ |
D | rcuref.rst |
    133  that a call to delete() is not delayed even if there are an arbitrarily
    135  object that delete() was invoked on. Instead, all that is delayed is
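The rcuref.rst passage makes the point that delete() itself returns immediately; only the reclamation of the memory is delayed until all pre-existing readers are done. A minimal kernel-style sketch of that split using call_rcu() (the struct and list names are illustrative; the caller is assumed to hold the update-side lock):

#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/container_of.h>

struct obj {
	struct list_head node;
	struct rcu_head rcu;
};

static void obj_free_cb(struct rcu_head *rcu)
{
	/* Runs after a grace period: no reader can still see the object. */
	kfree(container_of(rcu, struct obj, rcu));
}

/* delete() is not delayed: unlink now, defer only the kfree(). */
static void obj_delete(struct obj *o)
{
	list_del_rcu(&o->node);		/* readers can no longer find it */
	call_rcu(&o->rcu, obj_free_cb);	/* returns immediately */
}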
|
/linux-6.6.21/fs/ext4/ |
D | Kconfig |
     44  physical block numbers. The ext4 filesystem also supports delayed
     51  are some performance gains from the delayed allocation and inode
|