
Search results for refs:rings (results 1–25 of 92), sorted by relevance


/linux-6.1.9/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
flowring.c
142 if (flow->rings[i] == NULL) in brcmf_flowring_create()
160 flow->rings[i] = ring; in brcmf_flowring_create()
172 ring = flow->rings[flowid]; in brcmf_flowring_tid()
192 ring = flow->rings[flowid]; in brcmf_flowring_block()
201 if ((flow->rings[i]) && (i != flowid)) { in brcmf_flowring_block()
202 ring = flow->rings[i]; in brcmf_flowring_block()
212 flow->rings[flowid]->blocked = blocked; in brcmf_flowring_block()
236 ring = flow->rings[flowid]; in brcmf_flowring_delete()
247 flow->rings[flowid] = NULL; in brcmf_flowring_delete()
264 ring = flow->rings[flowid]; in brcmf_flowring_enqueue()
[all …]
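
The flowring snippets above show brcmfmac keeping a fixed-size array of ring pointers indexed by flow id: creation claims the first NULL slot and deletion resets it so the id can be reused. A minimal sketch of that slot-table pattern, with hypothetical types rather than the driver's real ones:

    #include <stdlib.h>

    #define MAX_FLOWRINGS 256                   /* hypothetical table size */

    struct ring { int flowid; };                /* stand-in for the real ring state */
    struct flow { struct ring *rings[MAX_FLOWRINGS]; };

    /* Claim the first free slot, as brcmf_flowring_create() does. */
    static int flow_create(struct flow *flow)
    {
            for (int i = 0; i < MAX_FLOWRINGS; i++) {
                    if (flow->rings[i] == NULL) {
                            struct ring *ring = calloc(1, sizeof(*ring));

                            if (!ring)
                                    return -1;
                            ring->flowid = i;
                            flow->rings[i] = ring;
                            return i;           /* the new flowid */
                    }
            }
            return -1;                          /* table full */
    }

    /* Tear down one ring and free its slot for reuse. */
    static void flow_delete(struct flow *flow, int flowid)
    {
            free(flow->rings[flowid]);
            flow->rings[flowid] = NULL;
    }
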
/linux-6.1.9/net/9p/
trans_xen.c
58 struct xen_9pfs_dataring *rings; member
135 ring = &priv->rings[num]; in p9_xen_request()
281 if (!priv->rings[i].intf) in xen_9pfs_front_free()
283 if (priv->rings[i].irq > 0) in xen_9pfs_front_free()
284 unbind_from_irqhandler(priv->rings[i].irq, priv->dev); in xen_9pfs_front_free()
285 if (priv->rings[i].data.in) { in xen_9pfs_front_free()
287 j < (1 << priv->rings[i].intf->ring_order); in xen_9pfs_front_free()
291 ref = priv->rings[i].intf->ref[j]; in xen_9pfs_front_free()
294 free_pages_exact(priv->rings[i].data.in, in xen_9pfs_front_free()
295 1UL << (priv->rings[i].intf->ring_order + in xen_9pfs_front_free()
[all …]
/linux-6.1.9/drivers/i3c/master/mipi-i3c-hci/
dma.c
167 struct hci_rings_data *rings = hci->io_data; in hci_dma_cleanup() local
171 if (!rings) in hci_dma_cleanup()
174 for (i = 0; i < rings->total; i++) { in hci_dma_cleanup()
175 rh = &rings->headers[i]; in hci_dma_cleanup()
204 kfree(rings); in hci_dma_cleanup()
210 struct hci_rings_data *rings; in hci_dma_init() local
226 rings = kzalloc(struct_size(rings, headers, nr_rings), GFP_KERNEL); in hci_dma_init()
227 if (!rings) in hci_dma_init()
229 hci->io_data = rings; in hci_dma_init()
230 rings->total = nr_rings; in hci_dma_init()
[all …]
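
hci_dma_init() above sizes a single allocation for the header plus a flexible array of ring headers using struct_size(). A minimal sketch of that kernel idiom, with illustrative members rather than the real mipi-i3c-hci layout:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct ring_header { void *base; };         /* illustrative */

    struct rings_data {
            unsigned int total;
            struct ring_header headers[];       /* flexible array member */
    };

    static struct rings_data *rings_alloc(unsigned int nr_rings)
    {
            struct rings_data *rings;

            /*
             * struct_size() computes sizeof(*rings) + nr_rings * sizeof(headers[0])
             * and saturates on overflow, so a bogus nr_rings makes kzalloc()
             * fail rather than return a short buffer.
             */
            rings = kzalloc(struct_size(rings, headers, nr_rings), GFP_KERNEL);
            if (!rings)
                    return NULL;

            rings->total = nr_rings;
            return rings;
    }
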
/linux-6.1.9/tools/testing/selftests/net/
psock_fanout.c
235 static int sock_fanout_read(int fds[], char *rings[], const int expect[]) in sock_fanout_read() argument
239 ret[0] = sock_fanout_read_ring(fds[0], rings[0]); in sock_fanout_read()
240 ret[1] = sock_fanout_read_ring(fds[1], rings[1]); in sock_fanout_read()
412 char *rings[2]; in test_datapath() local
431 rings[0] = sock_fanout_open_ring(fds[0]); in test_datapath()
432 rings[1] = sock_fanout_open_ring(fds[1]); in test_datapath()
435 sock_fanout_read(fds, rings, expect0); in test_datapath()
440 ret = sock_fanout_read(fds, rings, expect1); in test_datapath()
445 ret |= sock_fanout_read(fds, rings, expect2); in test_datapath()
447 if (munmap(rings[1], RING_NUM_FRAMES * getpagesize()) || in test_datapath()
[all …]
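
The fanout test above mmap()s one packet ring per socket and later unmaps it with RING_NUM_FRAMES * getpagesize() bytes. A minimal user-space sketch of creating such a PACKET_RX_RING on an AF_PACKET socket; the frame/block sizing here is illustrative:

    #include <sys/socket.h>
    #include <sys/mman.h>
    #include <linux/if_packet.h>
    #include <unistd.h>
    #include <stddef.h>

    /* fd must be an AF_PACKET socket. */
    static char *open_rx_ring(int fd, unsigned int frames)
    {
            struct tpacket_req req = {
                    .tp_block_size = getpagesize(), /* one frame per page-sized block */
                    .tp_block_nr   = frames,
                    .tp_frame_size = getpagesize(),
                    .tp_frame_nr   = frames,
            };
            char *ring;

            if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)))
                    return NULL;

            ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
                        PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            return ring == MAP_FAILED ? NULL : ring;
    }

Unmapping then mirrors the test above: munmap(ring, frames * getpagesize()).
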
toeplitz.c
104 static struct ring_state rings[RSS_MAX_CPUS]; variable
250 do {} while (recv_block(&rings[i])); in process_rings()
404 rings[i].cpu = i; in setup_rings()
405 rings[i].fd = create_ring(&rings[i].mmap); in setup_rings()
410 set_filter(rings[i].fd); in setup_rings()
418 if (munmap(rings[i].mmap, ring_block_nr * ring_block_sz)) in cleanup_rings()
420 if (close(rings[i].fd)) in cleanup_rings()
/linux-6.1.9/Documentation/devicetree/bindings/soc/ti/
k3-ringacc.yaml
49 ti,num-rings:
51 description: Number of rings supported by RA
53 ti,sci-rm-range-gp-rings:
70 - ti,num-rings
71 - ti,sci-rm-range-gp-rings
90 ti,num-rings = <818>;
91 ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
/linux-6.1.9/Documentation/mhi/
mhi.rst
58 Transfer rings: Used by the host to schedule work items for a channel. The
59 transfer rings are organized as a circular queue of Transfer Descriptors (TD).
64 Event rings: Used by the device to send completion and state transition messages
70 Command rings: Used by the host to send MHI commands to the device. The command
71 rings are organized as a circular queue of Command Descriptors (CD).
81 Two unidirectional channels with their associated transfer rings form a
87 Transfer rings
91 Transfer Descriptors (TD). TDs are managed through transfer rings, which are
101 Below is the basic usage of transfer rings:
110 buffer information, increments the WP to the next element and rings the
[all …]
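
The MHI excerpt above describes transfer rings as circular queues of Transfer Descriptors: the host fills an element, advances its write pointer (WP) and rings the doorbell, and the device consumes elements up to WP. A minimal sketch of the producer side, with hypothetical names rather than the MHI core's API:

    #include <stdbool.h>

    #define RING_ELEMENTS 64                    /* hypothetical ring size */

    struct td { void *buf; unsigned int len; }; /* transfer descriptor */

    struct xfer_ring {
            struct td elems[RING_ELEMENTS];
            unsigned int wp;                    /* host write index  */
            unsigned int rp;                    /* device read index */
    };

    /* Full when advancing WP would collide with RP. */
    static bool ring_full(const struct xfer_ring *r)
    {
            return (r->wp + 1) % RING_ELEMENTS == r->rp;
    }

    static int ring_queue(struct xfer_ring *r, void *buf, unsigned int len)
    {
            if (ring_full(r))
                    return -1;
            r->elems[r->wp] = (struct td){ .buf = buf, .len = len };
            r->wp = (r->wp + 1) % RING_ELEMENTS;
            /*
             * A real client would now publish WP in the shared channel
             * context and write the channel doorbell so the device starts
             * processing the new element.
             */
            return 0;
    }
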
/linux-6.1.9/drivers/soc/ti/
k3-ringacc.c
219 struct k3_ring *rings; member
355 !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED)) in k3_ringacc_request_ring()
357 else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED) in k3_ringacc_request_ring()
369 ringacc->rings[id].proxy_id = proxy_id; in k3_ringacc_request_ring()
378 ringacc->rings[id].use_count++; in k3_ringacc_request_ring()
380 return &ringacc->rings[id]; in k3_ringacc_request_ring()
408 *fwd_ring = &ringacc->rings[fwd_id]; in k3_dmaring_request_dual_ring()
409 *compl_ring = &ringacc->rings[fwd_id + ringacc->num_rings]; in k3_dmaring_request_dual_ring()
411 ringacc->rings[fwd_id].use_count++; in k3_dmaring_request_dual_ring()
740 reverse_ring = &ringacc->rings[ring->ring_id + ringacc->num_rings]; in k3_dmaring_cfg()
[all …]
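
k3_dmaring_request_dual_ring() above pairs each forward ring with a completion ring stored num_rings entries further into the same array, so one id names the whole pair. A small sketch of that indexing scheme (types hypothetical):

    struct k3_ring_sketch { int use_count; };   /* hypothetical */

    struct ringacc_sketch {
            struct k3_ring_sketch *rings;       /* 2 * num_rings entries */
            unsigned int num_rings;
    };

    /* Forward ring fwd_id and its completion ring sit num_rings apart. */
    static void request_dual_ring(struct ringacc_sketch *ra, unsigned int fwd_id,
                                  struct k3_ring_sketch **fwd_ring,
                                  struct k3_ring_sketch **compl_ring)
    {
            *fwd_ring = &ra->rings[fwd_id];
            *compl_ring = &ra->rings[fwd_id + ra->num_rings];
            ra->rings[fwd_id].use_count++;
    }
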
/linux-6.1.9/drivers/crypto/qat/qat_common/
adf_transport.c
267 ring = &bank->rings[ring_num]; in adf_create_ring()
338 adf_handle_response(&bank->rings[i]); in adf_ring_response_handler()
406 bank->rings = kzalloc_node(size, GFP_KERNEL, in adf_init_bank()
408 if (!bank->rings) in adf_init_bank()
425 ring = &bank->rings[i]; in adf_init_bank()
439 tx_ring = &bank->rings[i - hw_data->tx_rx_gap]; in adf_init_bank()
456 ring = &bank->rings[i]; in adf_init_bank()
460 kfree(bank->rings); in adf_init_bank()
532 struct adf_etr_ring_data *ring = &bank->rings[i]; in cleanup_bank()
540 kfree(bank->rings); in cleanup_bank()
[all …]
/linux-6.1.9/tools/lib/bpf/
ringbuf.c
37 struct ring *rings; member
100 tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings)); in ring_buffer__add()
103 rb->rings = tmp; in ring_buffer__add()
110 r = &rb->rings[rb->ring_cnt]; in ring_buffer__add()
173 ringbuf_unmap_ring(rb, &rb->rings[i]); in ring_buffer__free()
178 free(rb->rings); in ring_buffer__free()
281 struct ring *ring = &rb->rings[i]; in ring_buffer__consume()
308 struct ring *ring = &rb->rings[ring_id]; in ring_buffer__poll()
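
ringbuf.c above is libbpf's user-space side of the BPF ring buffer, growing rb->rings by one entry per added map. Consuming such a buffer through the public API looks roughly like this (assuming map_fd refers to an already-loaded BPF_MAP_TYPE_RINGBUF map):

    #include <bpf/libbpf.h>
    #include <stdio.h>

    static int handle_event(void *ctx, void *data, size_t size)
    {
            printf("sample of %zu bytes\n", size);
            return 0;                   /* non-zero would stop consumption */
    }

    static int consume(int map_fd)
    {
            struct ring_buffer *rb;
            int err;

            rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
            if (!rb)
                    return -1;

            /*
             * Poll the epoll-backed rings; each ready ring is drained via
             * the callback, mirroring ring_buffer__poll() in the listing.
             */
            while ((err = ring_buffer__poll(rb, 100 /* ms */)) >= 0)
                    ;

            ring_buffer__free(rb);
            return err;
    }
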
/linux-6.1.9/io_uring/
io_uring.c
176 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head); in __io_cqring_events()
181 return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head); in __io_cqring_events_user()
348 struct io_rings *r = ctx->rings; in io_account_cq_overflow()
528 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED) in io_eventfd_signal()
636 atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags); in __io_cqring_overflow_flush()
714 atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags); in io_cqring_event_overflow()
746 struct io_rings *rings = ctx->rings; in __io_get_cqe() local
771 ctx->cqe_cached = &rings->cqes[off]; in __io_get_cqe()
778 return &rings->cqes[off]; in __io_get_cqe()
1005 atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); in ctx_flush_and_put()
[all …]
io_uring.h
207 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail); in io_commit_cqring()
236 struct io_rings *r = ctx->rings; in io_sqring_full()
243 struct io_rings *rings = ctx->rings; in io_sqring_entries() local
246 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head; in io_sqring_entries()
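
The io_uring helpers above pair smp_store_release() on one ring index with smp_load_acquire() on the other, and compute occupancy as tail - head so the free-running 32-bit counters can wrap harmlessly. A user-space analogue of that idea with C11 atomics (a sketch of the pattern, not io_uring's actual code):

    #include <stdatomic.h>

    struct sq {
            _Atomic unsigned int head;  /* consumer index */
            _Atomic unsigned int tail;  /* producer index */
    };

    /*
     * Unsigned subtraction yields the number of pending entries even
     * after the counters wrap around.
     */
    static unsigned int sq_entries(struct sq *sq)
    {
            unsigned int tail = atomic_load_explicit(&sq->tail,
                                                     memory_order_acquire);
            unsigned int head = atomic_load_explicit(&sq->head,
                                                     memory_order_relaxed);

            return tail - head;
    }

    static void sq_publish(struct sq *sq, unsigned int new_tail)
    {
            /*
             * Release: all writes filling the entries become visible
             * before the consumer observes the new tail.
             */
            atomic_store_explicit(&sq->tail, new_tail, memory_order_release);
    }
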
sqpoll.c
271 &ctx->rings->sq_flags); in io_sq_thread()
297 &ctx->rings->sq_flags); in io_sq_thread()
307 atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags); in io_sq_thread()
/linux-6.1.9/Documentation/networking/
af_xdp.rst
24 syscall. Associated with each XSK are two rings: the RX ring and the
26 packets on the TX ring. These rings are registered and sized with the
28 to have at least one of these rings for each socket. An RX or TX
37 one of the rings references a frame by referencing its addr. The addr
42 UMEM also has two rings: the FILL ring and the COMPLETION ring. The
50 TX ring. In summary, the RX and FILL rings are used for the RX path
51 and the TX and COMPLETION rings are used for the TX path.
59 corresponding two rings, sets the XDP_SHARED_UMEM flag in the bind
65 process has to create its own socket with associated RX and TX rings,
67 reason that there is only one set of FILL and COMPLETION rings per
[all …]
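
The AF_XDP text above describes four single-producer/single-consumer rings (RX, TX, FILL and COMPLETION) that carry frame addresses into and out of the UMEM. A simplified sketch of draining one such ring via its producer/consumer indices; the real rings are mmap()ed into the process, and production-quality code needs the acquire/release barriers omitted here:

    #include <stdint.h>

    struct desc { uint64_t addr; uint32_t len; };

    struct xsk_ring_sketch {
            volatile uint32_t *producer;        /* written by the kernel for RX */
            volatile uint32_t *consumer;        /* written by us                */
            struct desc *descs;
            uint32_t mask;                      /* size - 1, size a power of two */
    };

    /* Drain everything the producer has published. */
    static unsigned int drain(struct xsk_ring_sketch *r,
                              void (*handle)(const struct desc *))
    {
            uint32_t cons = *r->consumer, prod = *r->producer;
            unsigned int n = 0;

            while (cons != prod) {
                    handle(&r->descs[cons & r->mask]);
                    cons++;
                    n++;
            }
            *r->consumer = cons;                /* hand slots back to the kernel */
            return n;
    }
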
/linux-6.1.9/drivers/block/xen-blkback/
xenbus.c
84 if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev) in xen_update_blkif_status()
110 ring = &blkif->rings[i]; in xen_update_blkif_status()
124 ring = &blkif->rings[i]; in xen_update_blkif_status()
134 blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring), in xen_blkif_alloc_rings()
136 if (!blkif->rings) in xen_blkif_alloc_rings()
140 struct xen_blkif_ring *ring = &blkif->rings[r]; in xen_blkif_alloc_rings()
274 struct xen_blkif_ring *ring = &blkif->rings[r]; in xen_blkif_disconnect()
338 kfree(blkif->rings); in xen_blkif_disconnect()
339 blkif->rings = NULL; in xen_blkif_disconnect()
389 if (!blkif->rings) \
[all …]
/linux-6.1.9/include/linux/
ptr_ring.h
619 static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, in ptr_ring_resize_multiple() argument
639 spin_lock_irqsave(&(rings[i])->consumer_lock, flags); in ptr_ring_resize_multiple()
640 spin_lock(&(rings[i])->producer_lock); in ptr_ring_resize_multiple()
641 queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], in ptr_ring_resize_multiple()
643 spin_unlock(&(rings[i])->producer_lock); in ptr_ring_resize_multiple()
644 spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags); in ptr_ring_resize_multiple()
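
ptr_ring_resize_multiple() above takes each ring's consumer and producer locks while swapping in a new queue. Plain produce/consume usage of the same structure is much simpler; a minimal in-kernel sketch:

    #include <linux/ptr_ring.h>
    #include <linux/slab.h>

    static int ptr_ring_demo(void)
    {
            struct ptr_ring ring;
            void *item = (void *)0x1;   /* NULL is reserved to mean "empty" */
            void *out;
            int err;

            err = ptr_ring_init(&ring, 16, GFP_KERNEL);
            if (err)
                    return err;

            /* Returns -ENOSPC when the ring is full. */
            err = ptr_ring_produce(&ring, item);

            /* Returns NULL when the ring is empty. */
            out = ptr_ring_consume(&ring);

            ptr_ring_cleanup(&ring, NULL);      /* NULL: no per-item destructor */
            return err;
    }

skb_array.h, shown next, is a thin typed wrapper over this same API for sk_buff pointers, which is why its resize helper just casts down to ptr_ring_resize_multiple().
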
skb_array.h
201 static inline int skb_array_resize_multiple(struct skb_array **rings, in skb_array_resize_multiple() argument
206 return ptr_ring_resize_multiple((struct ptr_ring **)rings, in skb_array_resize_multiple()
/linux-6.1.9/Documentation/devicetree/bindings/soc/qcom/
qcom,wcnss.yaml
77 Should reference the tx-enable and tx-rings-empty SMEM states.
82 - const: tx-rings-empty
133 qcom,smem-state-names = "tx-enable", "tx-rings-empty";
/linux-6.1.9/drivers/crypto/inside-secure/
safexcel.c
51 for (i = 0; i < priv->config.rings; i++) { in eip197_trc_cache_setupvirt()
502 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_setup_cdesc_rings()
550 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_setup_rdesc_rings()
592 priv->config.pes, priv->config.rings); in safexcel_hw_init()
654 GENMASK(priv->config.rings - 1, 0), in safexcel_hw_init()
712 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_init()
738 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_init()
762 writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), in safexcel_hw_init()
766 writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), in safexcel_hw_init()
1336 priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings); in safexcel_configure()
[all …]
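
safexcel_hw_init() above enables every configured ring at once with GENMASK(priv->config.rings - 1, 0). A small sketch of what that mask is (GENMASK comes from linux/bits.h):

    #include <linux/bits.h>
    #include <linux/types.h>

    /*
     * One enable bit per ring: for nr_rings == 4, GENMASK(3, 0) == 0xf.
     * nr_rings must be at least 1.
     */
    static u32 ring_enable_mask(unsigned int nr_rings)
    {
            return GENMASK(nr_rings - 1, 0);
    }
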
/linux-6.1.9/drivers/net/ethernet/netronome/nfp/
Makefile
24 nfd3/rings.o \
27 nfdk/rings.o \
/linux-6.1.9/drivers/gpu/drm/amd/amdgpu/
amdgpu_fence.c
517 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_hw_fini()
545 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_isr_toggle()
562 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_sw_fini()
595 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_hw_init()
773 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_debugfs_fence_info_show()
/linux-6.1.9/Documentation/networking/device_drivers/ethernet/google/
gve.rst
116 The descriptor rings are power-of-two-sized ring buffers consisting of
127 gve maps the buffers for transmit rings into a FIFO and copies the packets
132 The buffers for receive rings are put into a data ring that is the same
134 the rings together.
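
The gve notes above say the descriptor rings are power-of-two sized; that choice lets a free-running index be reduced to an array slot with a mask instead of a modulo:

    #include <stdint.h>

    #define RING_SIZE 1024                      /* must be a power of two */

    /*
     * A free-running 32-bit index maps to a slot with a single AND;
     * wraparound needs no special casing.
     */
    static inline uint32_t ring_slot(uint32_t idx)
    {
            return idx & (RING_SIZE - 1);
    }
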
/linux-6.1.9/net/ethtool/
Makefile
8 linkstate.o debug.o wol.o features.o privflags.o rings.o \
/linux-6.1.9/Documentation/devicetree/bindings/net/
opencores-ethoc.txt
6 first region is for the device registers and descriptor rings,
/linux-6.1.9/Documentation/devicetree/bindings/crypto/
mediatek-crypto.txt
7 order. These are global system and four descriptor rings.
