/linux-6.1.9/drivers/media/usb/uvc/uvc_queue.c

  in uvc_queue_return_buffers():
    45   static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
    52       while (!list_empty(&queue->irqqueue)) {
    53           struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
    55                                                      queue);
    56           list_del(&buf->queue);

  in uvc_queue_setup():
    70       struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
    80       stream = uvc_queue_to_stream(queue);

  in uvc_buffer_prepare():
    101      struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
    106      uvc_dbg(uvc_queue_to_stream(queue)->dev, CAPTURE,
    111      if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
  [all …]

/linux-6.1.9/drivers/usb/gadget/function/uvc_queue.c

  in uvc_queue_setup():
    45       struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
    46       struct uvc_video *video = container_of(queue, struct uvc_video, queue);

  in uvc_buffer_prepare():
    73       struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
    83       if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
    87       if (queue->use_sg) {

  in uvc_buffer_queue():
    104      struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
    109      spin_lock_irqsave(&queue->irqlock, flags);
    111      if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
    112          list_add_tail(&buf->queue, &queue->irqqueue);
    122      spin_unlock_irqrestore(&queue->irqlock, flags);
  [all …]

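Both UVC queue implementations above share the same producer discipline: take the queue's irqlock, re-check UVC_QUEUE_DISCONNECTED under the lock, and only then link the buffer onto irqqueue. A minimal sketch of that pattern, using hypothetical my_queue/my_buffer types rather than the real UVC structures:

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    #define MY_QUEUE_DISCONNECTED	0x01	/* mirrors UVC_QUEUE_DISCONNECTED */

    struct my_queue {
    	spinlock_t irqlock;
    	unsigned int flags;
    	struct list_head irqqueue;
    };

    struct my_buffer {
    	struct list_head queue;		/* link into my_queue::irqqueue */
    };

    /* Returns 0 on success, -ENODEV if the device is already gone. */
    static int my_buffer_queue(struct my_queue *q, struct my_buffer *buf)
    {
    	unsigned long flags;
    	int ret = 0;

    	spin_lock_irqsave(&q->irqlock, flags);
    	/*
    	 * Re-check the flag under the lock: a disconnect can race with
    	 * buffer submission, and the lock makes flag and list agree.
    	 */
    	if (likely(!(q->flags & MY_QUEUE_DISCONNECTED)))
    		list_add_tail(&buf->queue, &q->irqqueue);
    	else
    		ret = -ENODEV;
    	spin_unlock_irqrestore(&q->irqlock, flags);

    	return ret;
    }
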
/linux-6.1.9/drivers/net/wireless/st/cw1200/queue.c

  in __cw1200_queue_lock():
    27   static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
    29       struct cw1200_queue_stats *stats = queue->stats;
    30       if (queue->tx_locked_cnt++ == 0) {
    32                   queue->queue_id);
    33           ieee80211_stop_queue(stats->priv->hw, queue->queue_id);

  in __cw1200_queue_unlock():
    37   static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
    39       struct cw1200_queue_stats *stats = queue->stats;
    40       BUG_ON(!queue->tx_locked_cnt);
    41       if (--queue->tx_locked_cnt == 0) {
    43                   queue->queue_id);
  [all …]

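__cw1200_queue_lock()/__cw1200_queue_unlock() implement a counted stop/wake: the mac80211 queue is stopped only on the 0→1 transition of tx_locked_cnt and woken only on 1→0, so callers can nest lock/unlock pairs freely. A standalone model of the counting rule (stop_hw_queue()/wake_hw_queue() are illustrative stand-ins for the ieee80211 calls):

    #include <assert.h>

    struct my_txq {
    	unsigned int tx_locked_cnt;
    };

    /* Hardware-specific stubs; the cw1200 driver calls
     * ieee80211_stop_queue()/ieee80211_wake_queue() here. */
    static void stop_hw_queue(struct my_txq *q) { (void)q; }
    static void wake_hw_queue(struct my_txq *q) { (void)q; }

    static void queue_lock(struct my_txq *q)
    {
    	if (q->tx_locked_cnt++ == 0)
    		stop_hw_queue(q);	/* 0 -> 1: really stop */
    }

    static void queue_unlock(struct my_txq *q)
    {
    	assert(q->tx_locked_cnt > 0);	/* unbalanced unlock is a bug */
    	if (--q->tx_locked_cnt == 0)
    		wake_hw_queue(q);	/* 1 -> 0: really wake */
    }

    int main(void)
    {
    	struct my_txq q = { 0 };

    	queue_lock(&q);		/* stops the queue */
    	queue_lock(&q);		/* nested: counted, no second stop */
    	queue_unlock(&q);	/* still held once */
    	queue_unlock(&q);	/* wakes the queue */
    	return 0;
    }
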
/linux-6.1.9/drivers/net/wireless/broadcom/b43legacy/pio.c

  in tx_start():
    22   static void tx_start(struct b43legacy_pioqueue *queue)
    24       b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,

  in tx_octet():
    28   static void tx_octet(struct b43legacy_pioqueue *queue,
    31       if (queue->need_workarounds) {
    32           b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
    33           b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
    36           b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
    38           b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);

  in tx_data():
    63   static void tx_data(struct b43legacy_pioqueue *queue,
    71       if (queue->need_workarounds) {
  [all …]

/linux-6.1.9/drivers/net/xen-netback/rx.c

  in xenvif_update_needed_slots():
    42   static void xenvif_update_needed_slots(struct xenvif_queue *queue,
    55       WRITE_ONCE(queue->rx_slots_needed, needed);

  in xenvif_rx_ring_slots_available():
    58   static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
    63       needed = READ_ONCE(queue->rx_slots_needed);
    68       prod = queue->rx.sring->req_prod;
    69       cons = queue->rx.req_cons;
    74       queue->rx.sring->req_event = prod + 1;
    80       } while (queue->rx.sring->req_prod != prod);

  in xenvif_rx_queue_tail():
    85   bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
    90       spin_lock_irqsave(&queue->rx_queue.lock, flags);
  [all …]

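xenvif_rx_ring_slots_available() is standard shared-ring arithmetic: req_prod and req_cons are free-running unsigned indices, so prod - cons counts outstanding requests correctly even across wraparound. A self-contained illustration of just that test:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Free-running 32-bit indices: prod - cons is correct modulo 2^32,
     * so the comparison survives wraparound without any masking.
     */
    static bool ring_slots_available(uint32_t prod, uint32_t cons,
    				 uint32_t needed)
    {
    	return prod - cons >= needed;
    }

    int main(void)
    {
    	/* prod wrapped past zero, cons did not: 7 requests outstanding. */
    	assert(ring_slots_available(5u, 0xFFFFFFFEu, 7u));
    	assert(!ring_slots_available(5u, 0xFFFFFFFEu, 8u));
    	return 0;
    }
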
/linux-6.1.9/drivers/net/xen-netback/netback.c

  forward declarations at file scope:
    106  static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
    109  static void make_tx_response(struct xenvif_queue *queue,
    113  static void push_tx_responses(struct xenvif_queue *queue);
    115  static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
    117  static inline int tx_work_todo(struct xenvif_queue *queue);

  in idx_to_pfn():
    119  static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
    122      return page_to_pfn(queue->mmap_pages[idx]);

  in idx_to_kaddr():
    125  static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
    128      return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));

  in xenvif_kick_thread():
    161  void xenvif_kick_thread(struct xenvif_queue *queue)
  [all …]

/linux-6.1.9/drivers/net/xen-netback/interface.c

  in xenvif_skb_zerocopy_prepare():
    54   void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
    58       atomic_inc(&queue->inflight_packets);

  in xenvif_skb_zerocopy_complete():
    61   void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
    63       atomic_dec(&queue->inflight_packets);
    69       wake_up(&queue->dealloc_wq);

  in xenvif_handle_tx_interrupt():
    79   static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
    83       rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
    85           napi_schedule(&queue->napi);

  in xenvif_tx_interrupt():
    91       struct xenvif_queue *queue = dev_id;
    94       old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
  [all …]

/linux-6.1.9/drivers/iio/buffer/industrialio-buffer-dma.c

  in iio_buffer_block_release():
    100      dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
    103      iio_buffer_put(&block->queue->buffer);

  in iio_dma_buffer_alloc_block():
    166          struct iio_dma_buffer_queue *queue, size_t size)
    174      block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
    183      block->queue = queue;
    187      iio_buffer_get(&queue->buffer);

  in _iio_dma_buffer_block_done():
    194      struct iio_dma_buffer_queue *queue = block->queue;
    202      list_add_tail(&block->head, &queue->outgoing);

  in iio_dma_buffer_block_done():
    215      struct iio_dma_buffer_queue *queue = block->queue;
    218      spin_lock_irqsave(&queue->list_lock, flags);
  [all …]

/linux-6.1.9/drivers/nvme/target/tcp.c

  struct member declaration:
    66       struct nvmet_tcp_queue *queue;

  in nvmet_tcp_cmd_tag():
    169  static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
    172      if (unlikely(!queue->nr_cmds)) {
    177      return cmd - queue->cmds;

  in nvmet_tcp_get_cmd():
    205  nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
    209      cmd = list_first_entry_or_null(&queue->free_list,

  in nvmet_tcp_put_cmd():
    225      if (unlikely(cmd == &cmd->queue->connect))
    228      list_add_tail(&cmd->entry, &cmd->queue->free_list);

  in queue_cpu():
    231  static inline int queue_cpu(struct nvmet_tcp_queue *queue)
    233      return queue->sock->sk->sk_incoming_cpu;
  [all …]

/linux-6.1.9/drivers/nvme/target/rdma.c

  struct member declarations:
    48       struct nvmet_rdma_queue *queue;
    63       struct nvmet_rdma_queue *queue;

  forward declaration at file scope:
    170  static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);

  in nvmet_rdma_get_rsp():
    210  nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
    215      spin_lock_irqsave(&queue->rsps_lock, flags);
    216      rsp = list_first_entry_or_null(&queue->free_rsps,
    220      spin_unlock_irqrestore(&queue->rsps_lock, flags);
    228          ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);

  in nvmet_rdma_put_rsp():
    246          nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
    251      spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
  [all …]

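nvmet_rdma_get_rsp() shows a common fast-path/slow-path split: pop a preallocated response off a spinlock-protected free list, and fall back to a fresh allocation only when the pool is empty (the put side then frees such one-off responses instead of returning them to the pool). A hedged sketch of that shape with illustrative types:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_rsp {
    	struct list_head free_list;
    	bool allocated;		/* true if not from the preallocated pool */
    };

    struct my_pool {
    	spinlock_t lock;
    	struct list_head free_rsps;
    };

    static struct my_rsp *my_get_rsp(struct my_pool *pool)
    {
    	struct my_rsp *rsp;
    	unsigned long flags;

    	/* Fast path: grab a preallocated entry under the lock. */
    	spin_lock_irqsave(&pool->lock, flags);
    	rsp = list_first_entry_or_null(&pool->free_rsps,
    				       struct my_rsp, free_list);
    	if (rsp)
    		list_del(&rsp->free_list);
    	spin_unlock_irqrestore(&pool->lock, flags);

    	if (!rsp) {		/* slow path: pool exhausted */
    		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
    		if (!rsp)
    			return NULL;
    		rsp->allocated = true;	/* free, don't recycle, on put */
    	}
    	return rsp;
    }
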
/linux-6.1.9/drivers/misc/genwqe/card_ddcb.c

  in queue_empty():
    82   static int queue_empty(struct ddcb_queue *queue)
    84       return queue->ddcb_next == queue->ddcb_act;

  in queue_enqueued_ddcbs():
    87   static int queue_enqueued_ddcbs(struct ddcb_queue *queue)
    89       if (queue->ddcb_next >= queue->ddcb_act)
    90           return queue->ddcb_next - queue->ddcb_act;
    92       return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next);

  in queue_free_ddcbs():
    95   static int queue_free_ddcbs(struct ddcb_queue *queue)
    97       int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1;

  in print_ddcb_info():
    163  static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue)
    174          cd->card_idx, queue->ddcb_act, queue->ddcb_next);
  [all …]

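queue_enqueued_ddcbs() and queue_free_ddcbs() are the occupancy math for a ring addressed by two wrapping cursors, with one slot deliberately kept unused so a full ring stays distinguishable from an empty one. The same arithmetic in standalone form, with a couple of worked cases:

    #include <assert.h>

    /* Occupancy of a ring of 'max' slots tracked by wrapping next/act
     * cursors, mirroring the genwqe helpers above. */
    static int enqueued(int next, int act, int max)
    {
    	if (next >= act)
    		return next - act;
    	return max - (act - next);	/* next wrapped around */
    }

    static int free_slots(int next, int act, int max)
    {
    	/* -1 keeps one slot unused so full != empty. */
    	return max - enqueued(next, act, max) - 1;
    }

    int main(void)
    {
    	assert(enqueued(5, 2, 8) == 3);	/* no wrap: slots 2,3,4 */
    	assert(enqueued(1, 6, 8) == 3);	/* wrapped: slots 6,7,0 */
    	assert(free_slots(5, 2, 8) == 4);
    	return 0;
    }
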
/linux-6.1.9/drivers/crypto/cavium/zip/zip_device.c

  in zip_cmd_queue_consumed():
    57   static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue)
    59       return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) *

  in zip_load_instr():
    81       u32 queue = 0;
    91           queue = 0;
    93           queue = 1;
    95       zip_dbg("CPU Core: %d Queue number:%d", raw_smp_processor_id(), queue);
    98       spin_lock(&zip_dev->iq[queue].lock);
    109      zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head);
    110      zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail);
    112      consumed = zip_cmd_queue_consumed(zip_dev, queue);
  [all …]

/linux-6.1.9/drivers/net/xen-netfront.c

  in xennet_get_rx_skb():
    218  static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
    222      struct sk_buff *skb = queue->rx_skbs[i];
    223      queue->rx_skbs[i] = NULL;

  in xennet_get_rx_ref():
    227  static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
    231      grant_ref_t ref = queue->grant_rx_ref[i];
    232      queue->grant_rx_ref[i] = INVALID_GRANT_REF;

  in rx_refill_timeout():
    248      struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
    249      napi_schedule(&queue->napi);

  in netfront_tx_slot_available():
    252  static int netfront_tx_slot_available(struct netfront_queue *queue)
    254      return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
  [all …]

/linux-6.1.9/drivers/nvme/host/tcp.c

  struct member declaration:
    81       struct nvme_tcp_queue *queue;

  forward declaration at file scope:
    179  static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);

  in nvme_tcp_queue_id():
    186  static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
    188      return queue - queue->ctrl->queues;

  in nvme_tcp_tagset():
    191  static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
    193      u32 queue_idx = nvme_tcp_queue_id(queue);
    196          return queue->ctrl->admin_tag_set.tags[queue_idx];
    197      return queue->ctrl->tag_set.tags[queue_idx - 1];

  in nvme_tcp_hdgst_len():
    200  static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
    202      return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
  [all …]

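nvme_tcp_queue_id() stores no index at all: subtracting the base of the controller's queue array from a pointer to one element yields that element's position, which nvme_tcp_tagset() then uses to pick the admin tag set (index 0) or an I/O tag set. A standalone illustration (array size and field names are arbitrary):

    #include <assert.h>
    #include <stddef.h>

    struct queue { int dummy; };

    struct ctrl {
    	struct queue queues[8];	/* queue 0 is the admin queue */
    };

    /* Element pointer minus array base gives the element's index. */
    static ptrdiff_t queue_id(const struct ctrl *ctrl, const struct queue *q)
    {
    	return q - ctrl->queues;
    }

    int main(void)
    {
    	struct ctrl c;

    	assert(queue_id(&c, &c.queues[0]) == 0);	/* admin queue */
    	assert(queue_id(&c, &c.queues[3]) == 3);	/* I/O queue */
    	return 0;
    }
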
/linux-6.1.9/drivers/nvme/host/rdma.c

  struct member declaration:
    74       struct nvme_rdma_queue *queue;

  in nvme_rdma_queue_idx():
    160  static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
    162      return queue - queue->ctrl->queues;

  in nvme_rdma_poll_queue():
    165  static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
    167      return nvme_rdma_queue_idx(queue) >
    168          queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
    169          queue->ctrl->io_queues[HCTX_TYPE_READ];

  in nvme_rdma_inline_data_size():
    172  static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
    174      return queue->cmnd_capsule_len - sizeof(struct nvme_command);

  in nvme_rdma_wait_for_cm():
    247  static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
  [all …]

/linux-6.1.9/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c

  in rt2x00queue_alloc_rxskb():
    25       struct data_queue *queue = entry->queue;
    26       struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
    37       frame_size = queue->data_size + queue->desc_size + queue->winfo_size;

  in rt2x00queue_map_txskb():
    95       struct device *dev = entry->queue->rt2x00dev->dev;

  in rt2x00queue_unmap_skb():
    112      struct device *dev = entry->queue->rt2x00dev->dev;

  in rt2x00queue_write_tx_data():
    491      struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
    503          entry->queue->qid, DRV_PROJECT);

  in rt2x00queue_write_tx_descriptor():
    532      struct data_queue *queue = entry->queue;
    534      queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);
    540      rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);
  [all …]

/linux-6.1.9/drivers/net/wireguard/queueing.c

  in wg_packet_queue_init():
    25   int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
    30       memset(queue, 0, sizeof(*queue));
    31       ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
    34       queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
    35       if (!queue->worker) {
    36           ptr_ring_cleanup(&queue->ring, NULL);

  in wg_packet_queue_free():
    42   void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
    44       free_percpu(queue->worker);
    45       WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
    46       ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
  [all …]

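wg_packet_queue_init() is a two-step constructor with unwind-on-error: if the worker allocation fails, the already-initialized ring is torn down before returning, so callers never observe a half-built queue. A generic sketch of the shape, with plain allocations standing in for ptr_ring_init() and the per-CPU worker:

    #include <errno.h>
    #include <stdlib.h>

    struct my_queue {
    	void *ring;
    	void *worker;
    };

    static int my_queue_init(struct my_queue *q, size_t ring_bytes)
    {
    	q->ring = calloc(1, ring_bytes);	/* step 1 */
    	if (!q->ring)
    		return -ENOMEM;

    	q->worker = calloc(1, 64);		/* step 2 */
    	if (!q->worker) {
    		free(q->ring);			/* unwind step 1 */
    		q->ring = NULL;
    		return -ENOMEM;
    	}
    	return 0;
    }

    static void my_queue_free(struct my_queue *q)
    {
    	free(q->worker);
    	free(q->ring);
    }
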
/linux-6.1.9/drivers/scsi/arm/queue.c

  in queue_initialise():
    59   int queue_initialise (Queue_t *queue)
    64       spin_lock_init(&queue->queue_lock);
    65       INIT_LIST_HEAD(&queue->head);
    66       INIT_LIST_HEAD(&queue->free);
    74       queue->alloc = q = kmalloc_array(nqueues, sizeof(QE_t), GFP_KERNEL);
    79           list_add(&q->list, &queue->free);
    83       return queue->alloc != NULL;

  in queue_free():
    91   void queue_free (Queue_t *queue)
    93       if (!list_empty(&queue->head))
    94           printk(KERN_WARNING "freeing non-empty queue %p\n", queue);
  [all …]

/linux-6.1.9/drivers/net/ethernet/ibm/ehea/ehea_qmr.h

  in hw_qeit_calc():
    196  static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
    200      if (q_offset >= queue->queue_length)
    201          q_offset -= queue->queue_length;
    202      current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];

  in hw_qeit_get():
    206  static inline void *hw_qeit_get(struct hw_queue *queue)
    208      return hw_qeit_calc(queue, queue->current_q_offset);

  in hw_qeit_inc():
    211  static inline void hw_qeit_inc(struct hw_queue *queue)
    213      queue->current_q_offset += queue->qe_size;
    214      if (queue->current_q_offset >= queue->queue_length) {
    215          queue->current_q_offset = 0;
  [all …]

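hw_qeit_calc() resolves a byte offset inside a queue built from scattered pages: the offset's high bits select an entry in queue_pages, the low bits index within that page, and a single conditional subtraction handles wraparound. A standalone model of the address split (a 4 KiB page size is assumed here purely for illustration):

    #include <assert.h>
    #include <stdint.h>

    #define MY_PAGESHIFT 12
    #define MY_PAGESIZE  (1u << MY_PAGESHIFT)	/* 4 KiB pages */

    /* Offset -> address in a queue of scattered pages: high bits pick
     * the page, low bits the position inside it. */
    static void *qeit_calc(void **pages, uint64_t queue_length, uint64_t off)
    {
    	if (off >= queue_length)	/* single conditional wrap */
    		off -= queue_length;
    	return (char *)pages[off >> MY_PAGESHIFT] +
    	       (off & (MY_PAGESIZE - 1));
    }

    int main(void)
    {
    	static char page0[MY_PAGESIZE], page1[MY_PAGESIZE];
    	void *pages[] = { page0, page1 };

    	assert(qeit_calc(pages, 2 * MY_PAGESIZE, 100) == page0 + 100);
    	assert(qeit_calc(pages, 2 * MY_PAGESIZE, MY_PAGESIZE + 8) == page1 + 8);
    	return 0;
    }
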
/linux-6.1.9/drivers/crypto/hisilicon/sec/sec_drv.c

  in sec_queue_map_io():
    227  static int sec_queue_map_io(struct sec_queue *queue)
    229      struct device *dev = queue->dev_info->dev;
    234              2 + queue->queue_id);
    237              queue->queue_id);
    240      queue->regs = ioremap(res->start, resource_size(res));
    241      if (!queue->regs)

  in sec_queue_unmap_io():
    247  static void sec_queue_unmap_io(struct sec_queue *queue)
    249      iounmap(queue->regs);

  in sec_queue_ar_pkgattr():
    252  static int sec_queue_ar_pkgattr(struct sec_queue *queue, u32 ar_pkg)
    254      void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
  [all …]

/linux-6.1.9/drivers/soc/ixp4xx/ixp4xx-qmgr.c

  in qmgr_put_entry():
    29   void qmgr_put_entry(unsigned int queue, u32 val)
    32       BUG_ON(!qmgr_queue_descs[queue]);	/* not yet requested */
    35           qmgr_queue_descs[queue], queue, val);
    37       __raw_writel(val, &qmgr_regs->acc[queue][0]);

  in qmgr_get_entry():
    40   u32 qmgr_get_entry(unsigned int queue)
    43       val = __raw_readl(&qmgr_regs->acc[queue][0]);
    45       BUG_ON(!qmgr_queue_descs[queue]);	/* not yet requested */
    48           qmgr_queue_descs[queue], queue, val);

  in __qmgr_get_stat1():
    53   static int __qmgr_get_stat1(unsigned int queue)
    55       return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
  [all …]

/linux-6.1.9/net/sunrpc/sched.c

  in __rpc_disable_timer():
    95   __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
    101      if (list_empty(&queue->timer_list.list))
    102          cancel_delayed_work(&queue->timer_list.dwork);

  in rpc_set_queue_timer():
    106  rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
    109      queue->timer_list.expires = expires;
    114      mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);

  in __rpc_add_timer():
    121  __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
    125      if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
    126          rpc_set_queue_timer(queue, timeout);
    127      list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
  [all …]

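__rpc_add_timer() coalesces many task timeouts onto one delayed work item: the work is reprogrammed only when the timer list was empty or the new deadline is earlier than the one already armed. A sketch of that rule, using the kernel's time_before() idiom of signed subtraction (program_timer() is a stand-in for mod_delayed_work()):

    #include <stdbool.h>

    /* One shared timer serving many queued tasks. */
    struct timer_state {
    	bool armed;
    	unsigned long expires;
    };

    static void program_timer(struct timer_state *t, unsigned long when)
    {
    	t->armed = true;
    	t->expires = when;
    }

    static void add_task_timeout(struct timer_state *t, unsigned long timeout)
    {
    	/*
    	 * (long)(a - b) < 0 is time_before(a, b): correct across jiffies
    	 * wraparound. Rearm only if nothing is armed or we are earlier.
    	 */
    	if (!t->armed || (long)(timeout - t->expires) < 0)
    		program_timer(t, timeout);	/* earlier deadline wins */
    }
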
/linux-6.1.9/include/drm/spsc_queue.h

  in spsc_queue_init():
    48   static inline void spsc_queue_init(struct spsc_queue *queue)
    50       queue->head = NULL;
    51       atomic_long_set(&queue->tail, (long)&queue->head);
    52       atomic_set(&queue->job_count, 0);

  in spsc_queue_peek():
    55   static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
    57       return queue->head;

  in spsc_queue_count():
    60   static inline int spsc_queue_count(struct spsc_queue *queue)
    62       return atomic_read(&queue->job_count);

  in spsc_queue_push():
    65   static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
    73       tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
  [all …]

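spsc_queue_push() is the classic xchg-based lock-free enqueue: atomically swing tail to the new node's next slot, then link the node through the slot tail previously pointed at. A sketch of the same idea in portable C11 atomics (a model of the technique, not the DRM header itself):

    #include <stdatomic.h>
    #include <stddef.h>

    struct node {
    	_Atomic(struct node *) next;
    };

    struct spsc {
    	_Atomic(struct node *) head;
    	/* Always points at the ->next slot of the last node, or at
    	 * head when the queue is empty. */
    	_Atomic(_Atomic(struct node *) *) tail;
    	atomic_int count;
    };

    static void spsc_init(struct spsc *q)
    {
    	atomic_store(&q->head, NULL);
    	atomic_store(&q->tail, &q->head);
    	atomic_store(&q->count, 0);
    }

    static void spsc_push(struct spsc *q, struct node *n)
    {
    	_Atomic(struct node *) *tail;

    	atomic_store_explicit(&n->next, NULL, memory_order_relaxed);
    	atomic_fetch_add(&q->count, 1);
    	/*
    	 * Swing tail to the new node's next slot; the old slot is now
    	 * exclusively ours, so linking through it cannot race.
    	 */
    	tail = atomic_exchange_explicit(&q->tail, &n->next,
    					memory_order_acq_rel);
    	atomic_store_explicit(tail, n, memory_order_release);
    }
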
/linux-6.1.9/drivers/net/wireless/ath/ath5k/qcu.c

  in ath5k_hw_num_tx_pending():
    63   ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
    66       AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
    69       if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
    76       pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
    82       if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))

  in ath5k_hw_release_tx_queue():
    94   ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
    96       if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
    100      ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
    102      AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);

  in ath5k_hw_get_tx_queueprops():
    138  ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
  [all …]

/linux-6.1.9/arch/mips/cavium-octeon/executive/cvmx-pko.c

  in __cvmx_pko_iport_config():
    70       int queue;
    76       for (queue = 0; queue < num_queues; queue++) {
    82           config.s.index = queue;
    83           config.s.qid = base_queue + queue;
    85           config.s.tail = (queue == (num_queues - 1));
    86           config.s.s_tail = (queue == static_priority_end);
    88           config.s.static_q = (queue <= static_priority_end);
    92               CVMX_CMD_QUEUE_PKO(base_queue + queue),
    101              num_queues, queue);
    104              CVMX_CMD_QUEUE_PKO(base_queue + queue));
  [all …]

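In __cvmx_pko_iport_config(), every per-queue flag falls out of the loop index: queues up to static_priority_end are strict-priority, the boundary queue sets s_tail, and the last queue of the port sets tail. A minimal model of that flag derivation with a few checked cases:

    #include <assert.h>
    #include <stdbool.h>

    struct qcfg {
    	int  qid;
    	bool tail;	/* last queue of the port */
    	bool s_tail;	/* last static-priority queue */
    	bool static_q;	/* served at strict priority */
    };

    static struct qcfg make_cfg(int base_queue, int queue, int num_queues,
    			    int static_priority_end)
    {
    	struct qcfg c = {
    		.qid      = base_queue + queue,
    		.tail     = queue == num_queues - 1,
    		.s_tail   = queue == static_priority_end,
    		.static_q = queue <= static_priority_end,
    	};
    	return c;
    }

    int main(void)
    {
    	/* 4 queues on the port: 0..1 static priority, 2..3 round-robin. */
    	assert(make_cfg(16, 1, 4, 1).s_tail);
    	assert(!make_cfg(16, 2, 4, 1).static_q);
    	assert(make_cfg(16, 3, 4, 1).tail);
    	return 0;
    }
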