Search for refs:bq (results 1 – 24 of 24), sorted by relevance

/linux-6.1.9/drivers/power/supply/
bq2415x_charger.c
187 static int bq2415x_i2c_read(struct bq2415x_device *bq, u8 reg) in bq2415x_i2c_read() argument
189 struct i2c_client *client = to_i2c_client(bq->dev); in bq2415x_i2c_read()
217 static int bq2415x_i2c_read_mask(struct bq2415x_device *bq, u8 reg, in bq2415x_i2c_read_mask() argument
225 ret = bq2415x_i2c_read(bq, reg); in bq2415x_i2c_read_mask()
232 static int bq2415x_i2c_read_bit(struct bq2415x_device *bq, u8 reg, u8 bit) in bq2415x_i2c_read_bit() argument
236 return bq2415x_i2c_read_mask(bq, reg, BIT(bit), bit); in bq2415x_i2c_read_bit()
242 static int bq2415x_i2c_write(struct bq2415x_device *bq, u8 reg, u8 val) in bq2415x_i2c_write() argument
244 struct i2c_client *client = to_i2c_client(bq->dev); in bq2415x_i2c_write()
271 static int bq2415x_i2c_write_mask(struct bq2415x_device *bq, u8 reg, u8 val, in bq2415x_i2c_write_mask() argument
279 ret = bq2415x_i2c_read(bq, reg); in bq2415x_i2c_write_mask()
[all …]
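The bq2415x helpers above are built around a read/modify/write cycle on single-byte I2C registers: read the register, clear the masked bits, merge the new value, write it back. A minimal sketch of that pattern using the generic SMBus helpers; the helper name rmw_reg_bits() and the use of i2c_smbus_*() calls are illustrative, not the driver's actual i2c_transfer()-based implementation.

#include <linux/i2c.h>

/* Illustrative: update only the bits selected by 'mask' in an 8-bit register. */
static int rmw_reg_bits(struct i2c_client *client, u8 reg, u8 mask, u8 val)
{
	s32 old;

	old = i2c_smbus_read_byte_data(client, reg);	/* current register value */
	if (old < 0)
		return old;				/* propagate the I2C error */

	return i2c_smbus_write_byte_data(client, reg,
					 ((u8)old & ~mask) | (val & mask));
}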
bq24257_charger.c
210 static int bq24257_field_read(struct bq24257_device *bq, in bq24257_field_read() argument
216 ret = regmap_field_read(bq->rmap_fields[field_id], &val); in bq24257_field_read()
223 static int bq24257_field_write(struct bq24257_device *bq, in bq24257_field_write() argument
226 return regmap_field_write(bq->rmap_fields[field_id], val); in bq24257_field_write()
261 static int bq24257_get_input_current_limit(struct bq24257_device *bq, in bq24257_get_input_current_limit() argument
266 ret = bq24257_field_read(bq, F_IILIMIT); in bq24257_get_input_current_limit()
285 static int bq24257_set_input_current_limit(struct bq24257_device *bq, in bq24257_set_input_current_limit() argument
293 if (bq->iilimit_autoset_enable) in bq24257_set_input_current_limit()
294 cancel_delayed_work_sync(&bq->iilimit_setup_work); in bq24257_set_input_current_limit()
296 return bq24257_field_write(bq, F_IILIMIT, in bq24257_set_input_current_limit()
[all …]
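bq24257_field_read()/bq24257_field_write() (and the near-identical bq25890 helpers further down) are thin wrappers over the regmap field API, which hides each setting's register address, shift and width behind a struct regmap_field. A minimal sketch of that pattern; the register layout in demo_iilimit_field and the function names are hypothetical, only the regmap calls themselves are real API.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

/* Hypothetical field: bits 6:4 of register 0x02 hold an input-current code. */
static const struct reg_field demo_iilimit_field = REG_FIELD(0x02, 4, 6);

static int demo_read_iilimit(struct device *dev, struct regmap *rmap)
{
	struct regmap_field *f;
	unsigned int code;
	int ret;

	f = devm_regmap_field_alloc(dev, rmap, demo_iilimit_field);
	if (IS_ERR(f))
		return PTR_ERR(f);

	ret = regmap_field_read(f, &code);	/* regmap applies shift and mask */
	return ret ? ret : code;
}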
bq25980_charger.c
275 static int bq25980_get_input_curr_lim(struct bq25980_device *bq) in bq25980_get_input_curr_lim() argument
280 ret = regmap_read(bq->regmap, BQ25980_BUSOCP, &busocp_reg_code); in bq25980_get_input_curr_lim()
287 static int bq25980_set_hiz(struct bq25980_device *bq, int setting) in bq25980_set_hiz() argument
289 return regmap_update_bits(bq->regmap, BQ25980_CHRGR_CTRL_2, in bq25980_set_hiz()
293 static int bq25980_set_input_curr_lim(struct bq25980_device *bq, int busocp) in bq25980_set_input_curr_lim() argument
299 return bq25980_set_hiz(bq, BQ25980_ENABLE_HIZ); in bq25980_set_input_curr_lim()
301 bq25980_set_hiz(bq, BQ25980_DISABLE_HIZ); in bq25980_set_input_curr_lim()
306 if (bq->state.bypass) in bq25980_set_input_curr_lim()
307 busocp = min(busocp, bq->chip_info->busocp_sc_max); in bq25980_set_input_curr_lim()
309 busocp = min(busocp, bq->chip_info->busocp_byp_max); in bq25980_set_input_curr_lim()
[all …]
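bq25980_set_input_curr_lim() combines two common idioms: flipping a single control bit with regmap_update_bits() (the HIZ enable) and clamping the requested limit against a per-chip maximum before converting it to a register code. A reduced sketch of that flow; the register addresses, bit position and current-per-step conversion are placeholders, not the real BQ25980 values.

#include <linux/bits.h>
#include <linux/minmax.h>
#include <linux/regmap.h>

#define DEMO_CTRL_REG	0x0f		/* placeholder control register */
#define DEMO_EN_HIZ	BIT(2)		/* placeholder high-impedance enable bit */
#define DEMO_OCP_REG	0x06		/* placeholder over-current limit register */

static int demo_set_input_limit(struct regmap *rmap, int ua, int chip_max_ua)
{
	/* A request of 0 means "block the input": enter high-impedance mode. */
	if (!ua)
		return regmap_update_bits(rmap, DEMO_CTRL_REG,
					  DEMO_EN_HIZ, DEMO_EN_HIZ);

	regmap_update_bits(rmap, DEMO_CTRL_REG, DEMO_EN_HIZ, 0);

	ua = min(ua, chip_max_ua);	/* never exceed the chip's maximum */

	return regmap_write(rmap, DEMO_OCP_REG, ua / 250000);
}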
bq25890_charger.c
351 static int bq25890_field_read(struct bq25890_device *bq, in bq25890_field_read() argument
357 ret = regmap_field_read(bq->rmap_fields[field_id], &val); in bq25890_field_read()
364 static int bq25890_field_write(struct bq25890_device *bq, in bq25890_field_write() argument
367 return regmap_field_write(bq->rmap_fields[field_id], val); in bq25890_field_write()
444 static irqreturn_t __bq25890_handle_irq(struct bq25890_device *bq);
446 static int bq25890_get_vbus_voltage(struct bq25890_device *bq) in bq25890_get_vbus_voltage() argument
450 ret = bq25890_field_read(bq, F_VBUSV); in bq25890_get_vbus_voltage()
461 struct bq25890_device *bq = power_supply_get_drvdata(psy); in bq25890_power_supply_get_property() local
466 mutex_lock(&bq->lock); in bq25890_power_supply_get_property()
468 __bq25890_handle_irq(bq); in bq25890_power_supply_get_property()
[all …]
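bq25890_power_supply_get_property() recovers the driver's private state with power_supply_get_drvdata() and takes a mutex so property reads do not race the IRQ handler that updates that state. A minimal sketch of the same get_property shape; struct demo_chg, its fields and demo_get_property() are hypothetical scaffolding around the real power-supply callback signature.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/power_supply.h>

struct demo_chg {
	struct mutex lock;	/* initialized at probe time (not shown) */
	int online;		/* cached state, refreshed from the IRQ path */
};

static int demo_get_property(struct power_supply *psy,
			     enum power_supply_property psp,
			     union power_supply_propval *val)
{
	struct demo_chg *chg = power_supply_get_drvdata(psy);
	int ret = 0;

	mutex_lock(&chg->lock);		/* state is shared with the IRQ handler */
	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = chg->online;
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&chg->lock);

	return ret;
}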
bq256xx_charger.c
280 int (*bq256xx_get_ichg)(struct bq256xx_device *bq);
281 int (*bq256xx_get_iindpm)(struct bq256xx_device *bq);
282 int (*bq256xx_get_vbatreg)(struct bq256xx_device *bq);
283 int (*bq256xx_get_iterm)(struct bq256xx_device *bq);
284 int (*bq256xx_get_iprechg)(struct bq256xx_device *bq);
285 int (*bq256xx_get_vindpm)(struct bq256xx_device *bq);
287 int (*bq256xx_set_ichg)(struct bq256xx_device *bq, int ichg);
288 int (*bq256xx_set_iindpm)(struct bq256xx_device *bq, int iindpm);
289 int (*bq256xx_set_vbatreg)(struct bq256xx_device *bq, int vbatreg);
290 int (*bq256xx_set_iterm)(struct bq256xx_device *bq, int iterm);
[all …]
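The bq256xx entries above are function-pointer members of a per-chip info structure: the driver selects one table at probe time so a single property path can serve several register-compatible variants without branching on the model everywhere. A stripped-down sketch of that dispatch pattern; all names and values here are made up.

/* Illustrative per-variant operations table. */
struct demo_chip_info {
	const char *model;
	int (*get_ichg)(void *priv);		/* read charge current, in uA */
	int (*set_ichg)(void *priv, int ichg);	/* program charge current */
};

static int demo_model_a_get_ichg(void *priv)
{
	return 1024000;		/* pretend the chip reports 1.024 A */
}

static int demo_model_a_set_ichg(void *priv, int ichg)
{
	return 0;		/* pretend the write succeeded */
}

static const struct demo_chip_info demo_model_a = {
	.model	  = "model-a",
	.get_ichg = demo_model_a_get_ichg,
	.set_ichg = demo_model_a_set_ichg,
};

/* Callers never test the model; they always go through the selected table. */
static int demo_report_ichg(const struct demo_chip_info *info, void *priv)
{
	return info->get_ichg(priv);
}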
/linux-6.1.9/drivers/isdn/mISDN/
dsp_biquad.h
26 static inline void biquad2_init(struct biquad2_state *bq, in biquad2_init() argument
29 bq->gain = gain; in biquad2_init()
30 bq->a1 = a1; in biquad2_init()
31 bq->a2 = a2; in biquad2_init()
32 bq->b1 = b1; in biquad2_init()
33 bq->b2 = b2; in biquad2_init()
35 bq->z1 = 0; in biquad2_init()
36 bq->z2 = 0; in biquad2_init()
39 static inline int16_t biquad2(struct biquad2_state *bq, int16_t sample) in biquad2() argument
44 z0 = sample * bq->gain + bq->z1 * bq->a1 + bq->z2 * bq->a2; in biquad2()
[all …]
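biquad2() is a fixed-point two-pole, two-zero IIR section in direct form II: the intermediate value z0 mixes the gain-scaled input with the two delayed states weighted by a1/a2, the output adds the b1/b2 feed-forward terms, and the delay line then shifts by one sample. A plain-C sketch of that structure; the Q15 scaling (the >> 15 shifts) is an assumption here and the mISDN header's exact arithmetic is not reproduced.

#include <stdint.h>

struct demo_biquad {
	int32_t gain, a1, a2, b1, b2;	/* fixed-point coefficients */
	int32_t z1, z2;			/* delay line, newest state first */
};

static int16_t demo_biquad_run(struct demo_biquad *bq, int16_t sample)
{
	int32_t z0, y;

	/* Feedback path: scaled input plus weighted history. */
	z0 = sample * bq->gain + bq->z1 * bq->a1 + bq->z2 * bq->a2;
	/* Feed-forward path: add the zeros. */
	y = z0 + bq->z1 * bq->b1 + bq->z2 * bq->b2;

	/* Shift the delay line; assume Q15 coefficients, so drop 15 bits. */
	bq->z2 = bq->z1;
	bq->z1 = z0 >> 15;

	return (int16_t)(y >> 15);
}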
/linux-6.1.9/net/core/
xdp.c
456 void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq) in xdp_flush_frame_bulk() argument
458 struct xdp_mem_allocator *xa = bq->xa; in xdp_flush_frame_bulk()
460 if (unlikely(!xa || !bq->count)) in xdp_flush_frame_bulk()
463 page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count); in xdp_flush_frame_bulk()
465 bq->count = 0; in xdp_flush_frame_bulk()
471 struct xdp_frame_bulk *bq) in xdp_return_frame_bulk() argument
481 xa = bq->xa; in xdp_return_frame_bulk()
484 bq->count = 0; in xdp_return_frame_bulk()
485 bq->xa = xa; in xdp_return_frame_bulk()
488 if (bq->count == XDP_BULK_QUEUE_SIZE) in xdp_return_frame_bulk()
[all …]
/linux-6.1.9/kernel/bpf/
cpumap.c
429 struct xdp_bulk_queue *bq; in __cpu_map_entry_alloc() local
445 bq = per_cpu_ptr(rcpu->bulkq, i); in __cpu_map_entry_alloc()
446 bq->obj = rcpu; in __cpu_map_entry_alloc()
691 static void bq_flush_to_queue(struct xdp_bulk_queue *bq) in bq_flush_to_queue() argument
693 struct bpf_cpu_map_entry *rcpu = bq->obj; in bq_flush_to_queue()
699 if (unlikely(!bq->count)) in bq_flush_to_queue()
705 for (i = 0; i < bq->count; i++) { in bq_flush_to_queue()
706 struct xdp_frame *xdpf = bq->q[i]; in bq_flush_to_queue()
716 bq->count = 0; in bq_flush_to_queue()
719 __list_del_clearprev(&bq->flush_node); in bq_flush_to_queue()
[all …]
devmap.c
365 static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags) in bq_xmit_all() argument
367 struct net_device *dev = bq->dev; in bq_xmit_all()
368 unsigned int cnt = bq->count; in bq_xmit_all()
377 struct xdp_frame *xdpf = bq->q[i]; in bq_xmit_all()
382 if (bq->xdp_prog) { in bq_xmit_all()
383 to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev); in bq_xmit_all()
388 sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags); in bq_xmit_all()
401 xdp_return_frame_rx_napi(bq->q[i]); in bq_xmit_all()
404 bq->count = 0; in bq_xmit_all()
405 trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err); in bq_xmit_all()
[all …]
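bq_xmit_all() is the flush side of the devmap bulk queue: hand the accumulated frames to the target device's ndo_xdp_xmit() in one call, then release whatever the driver did not accept. A condensed sketch of that "send a batch, free the remainder" flow; it omits the optional XDP program pass, the tracepoint and the error bookkeeping, and assumes the caller has already verified the device implements ndo_xdp_xmit.

#include <linux/netdevice.h>
#include <net/xdp.h>

/* 'q' holds 'count' frames destined for 'dev' (hypothetical caller state). */
static void demo_bq_xmit_all(struct net_device *dev, struct xdp_frame **q,
			     int count, u32 flags)
{
	int sent, i;

	if (!count)
		return;

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, count, q, flags);
	if (sent < 0)
		sent = 0;	/* treat a hard error as "nothing was sent" */

	/* Frames the driver did not take back must be freed by the caller. */
	for (i = sent; i < count; i++)
		xdp_return_frame_rx_napi(q[i]);
}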
/linux-6.1.9/drivers/net/
veth.c
521 static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq) in veth_xdp_flush_bq() argument
525 sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false); in veth_xdp_flush_bq()
531 for (i = sent; unlikely(i < bq->count); i++) in veth_xdp_flush_bq()
532 xdp_return_frame(bq->q[i]); in veth_xdp_flush_bq()
534 drops = bq->count - sent; in veth_xdp_flush_bq()
542 bq->count = 0; in veth_xdp_flush_bq()
545 static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq) in veth_xdp_flush() argument
552 veth_xdp_flush_bq(rq, bq); in veth_xdp_flush()
569 struct veth_xdp_tx_bq *bq) in veth_xdp_tx() argument
576 if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE)) in veth_xdp_tx()
[all …]
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/en/
xdp.c
481 struct xdp_frame_bulk *bq) in mlx5e_free_xdpsq_desc() argument
494 xdp_return_frame_bulk(xdpi.frame.xdpf, bq); in mlx5e_free_xdpsq_desc()
512 struct xdp_frame_bulk bq; in mlx5e_poll_xdpsq_cq() local
519 xdp_frame_bulk_init(&bq); in mlx5e_poll_xdpsq_cq()
552 mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true, &bq); in mlx5e_poll_xdpsq_cq()
565 xdp_flush_frame_bulk(&bq); in mlx5e_poll_xdpsq_cq()
583 struct xdp_frame_bulk bq; in mlx5e_free_xdpsq_descs() local
586 xdp_frame_bulk_init(&bq); in mlx5e_free_xdpsq_descs()
599 mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false, &bq); in mlx5e_free_xdpsq_descs()
602 xdp_flush_frame_bulk(&bq); in mlx5e_free_xdpsq_descs()
/linux-6.1.9/arch/arm/boot/dts/
mt6589-aquaris5.dts
12 model = "bq Aquaris5";
13 compatible = "mundoreader,bq-aquaris5", "mediatek,mt6589";
rk3066a-bqcurie2.dts
12 model = "bq Curie 2";
13 compatible = "mundoreader,bq-curie2", "rockchip,rk3066a";
rk3188-bqedison2qc.dts
4 * Author: Heiko Stuebner <heiko.stuebner@bq.com>
15 compatible = "mundoreader,bq-edison2qc", "rockchip,rk3188";
/linux-6.1.9/include/net/
xdp.h
194 static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq) in xdp_frame_bulk_init() argument
197 bq->xa = NULL; in xdp_frame_bulk_init()
313 void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
315 struct xdp_frame_bulk *bq);
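xdp.h declares the three-call bulk-free API used by the mlx5, mtk_eth_soc, netsec, mvneta and mvpp2 TX-cleanup paths elsewhere in these results: initialize a stack-allocated xdp_frame_bulk, feed completed frames into it, and flush once after the loop so pages go back to the page_pool in batches rather than one at a time. A minimal sketch of that calling pattern from a NAPI TX-completion routine; struct demo_tx_slot and demo_clean_tx_ring() are hypothetical driver scaffolding.

#include <net/xdp.h>

/* Hypothetical per-descriptor bookkeeping kept by a driver's TX ring. */
struct demo_tx_slot {
	struct xdp_frame *xdpf;		/* non-NULL if the slot carried XDP data */
};

static void demo_clean_tx_ring(struct demo_tx_slot *slots, int n)
{
	struct xdp_frame_bulk bq;
	int i;

	xdp_frame_bulk_init(&bq);	/* empty bulk, no allocator bound yet */

	for (i = 0; i < n; i++) {
		if (!slots[i].xdpf)
			continue;
		/* Queues the frame; flushes internally when the bulk fills up. */
		xdp_return_frame_bulk(slots[i].xdpf, &bq);
		slots[i].xdpf = NULL;
	}

	xdp_flush_frame_bulk(&bq);	/* release whatever is still queued */
}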
/linux-6.1.9/drivers/staging/qlge/
qlge_main.c
952 static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq) in qlge_get_curr_buf() argument
956 bq_desc = &bq->queue[bq->next_to_clean]; in qlge_get_curr_buf()
957 bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1); in qlge_get_curr_buf()
1083 static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp) in qlge_refill_bq() argument
1085 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq); in qlge_refill_bq()
1092 refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) - in qlge_refill_bq()
1093 bq->next_to_use); in qlge_refill_bq()
1097 i = bq->next_to_use; in qlge_refill_bq()
1098 bq_desc = &bq->queue[i]; in qlge_refill_bq()
1103 rx_ring->cq_id, bq_type_name[bq->type], i); in qlge_refill_bq()
[all …]
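The refill path above works out how many descriptors it may fill by wrapping index arithmetic over a power-of-two ring: the free space runs from next_to_use up to just behind next_to_clean. A small illustration of that style of index math; the 16-entry ring and the DEMO_* names are hypothetical, and the real QLGE_BQ_* macros additionally align the count to chunk boundaries.

#define DEMO_RING_LEN	16				/* must be a power of two */
#define DEMO_WRAP(idx)	((idx) & (DEMO_RING_LEN - 1))

/* Free slots between the producer (next_to_use) and consumer (next_to_clean). */
static unsigned int demo_ring_free(unsigned int next_to_use,
				   unsigned int next_to_clean)
{
	/* Stop one short of next_to_clean so "full" and "empty" stay distinct. */
	return DEMO_WRAP(next_to_clean - 1 - next_to_use);
}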
qlge.h
1437 #define QLGE_BQ_CONTAINER(bq) \ argument
1439 typeof(bq) _bq = bq; \
1452 #define QLGE_BQ_HW_OWNED(bq) \ argument
1454 typeof(bq) _bq = bq; \
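QLGE_BQ_CONTAINER() recovers the rx_ring that embeds a given buffer queue, which is the standard container_of() idiom: from a pointer to a member, step back by that member's offset to reach the enclosing structure. A generic illustration; struct demo_ring and struct demo_bq are made-up stand-ins for the qlge types.

#include <linux/container_of.h>

struct demo_bq {
	unsigned int next_to_use;
};

struct demo_ring {
	int cq_id;
	struct demo_bq sbq;		/* embedded buffer queue */
};

/* Given a pointer to the embedded queue, recover its enclosing ring. */
static struct demo_ring *demo_bq_to_ring(struct demo_bq *bq)
{
	return container_of(bq, struct demo_ring, sbq);
}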
/linux-6.1.9/Documentation/devicetree/bindings/display/panel/
innolux,ee101ia-01d.yaml
10 - Heiko Stuebner <heiko.stuebner@bq.com>
/linux-6.1.9/Documentation/devicetree/bindings/arm/
rockchip.yaml
58 - description: bq Curie 2 tablet
60 - const: mundoreader,bq-curie2
63 - description: bq Edison 2 Quad-Core tablet
65 - const: mundoreader,bq-edison2qc
mediatek.yaml
40 - mundoreader,bq-aquaris5
/linux-6.1.9/drivers/net/ethernet/mediatek/
mtk_eth_soc.c
1016 struct xdp_frame_bulk *bq, bool napi) in mtk_tx_unmap() argument
1059 else if (bq) in mtk_tx_unmap()
1060 xdp_return_frame_bulk(xdpf, bq); in mtk_tx_unmap()
1995 struct xdp_frame_bulk bq; in mtk_poll_tx_qdma() local
2003 xdp_frame_bulk_init(&bq); in mtk_poll_tx_qdma()
2030 mtk_tx_unmap(eth, tx_buf, &bq, true); in mtk_poll_tx_qdma()
2037 xdp_flush_frame_bulk(&bq); in mtk_poll_tx_qdma()
2050 struct xdp_frame_bulk bq; in mtk_poll_tx_pdma() local
2056 xdp_frame_bulk_init(&bq); in mtk_poll_tx_pdma()
2072 mtk_tx_unmap(eth, tx_buf, &bq, true); in mtk_poll_tx_pdma()
[all …]
/linux-6.1.9/drivers/net/ethernet/socionext/
netsec.c
638 struct xdp_frame_bulk bq; in netsec_clean_tx_dring() local
647 xdp_frame_bulk_init(&bq); in netsec_clean_tx_dring()
679 xdp_return_frame_bulk(desc->xdpf, &bq); in netsec_clean_tx_dring()
698 xdp_flush_frame_bulk(&bq); in netsec_clean_tx_dring()
/linux-6.1.9/drivers/net/ethernet/marvell/
mvneta.c
1867 struct xdp_frame_bulk bq; in mvneta_txq_bufs_free() local
1870 xdp_frame_bulk_init(&bq); in mvneta_txq_bufs_free()
1895 xdp_return_frame_bulk(buf->xdpf, &bq); in mvneta_txq_bufs_free()
1898 xdp_flush_frame_bulk(&bq); in mvneta_txq_bufs_free()
/linux-6.1.9/drivers/net/ethernet/marvell/mvpp2/
mvpp2_main.c
2789 struct xdp_frame_bulk bq; in mvpp2_txq_bufs_free() local
2792 xdp_frame_bulk_init(&bq); in mvpp2_txq_bufs_free()
2808 xdp_return_frame_bulk(tx_buf->xdpf, &bq); in mvpp2_txq_bufs_free()
2812 xdp_flush_frame_bulk(&bq); in mvpp2_txq_bufs_free()