
Searched for refs:rhp in linux-6.1.9 (Results 1 – 25 of 38), sorted by relevance.


/linux-6.1.9/include/trace/events/
rcu.h
514 TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen),
516 TP_ARGS(rcuname, rhp, qlen),
520 __field(void *, rhp)
527 __entry->rhp = rhp;
528 __entry->func = rhp->func;
533 __entry->rcuname, __entry->rhp, __entry->func,
573 TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
576 TP_ARGS(rcuname, rhp, offset, qlen),
580 __field(void *, rhp)
587 __entry->rhp = rhp;
[all …]
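
The fragments above sketch the shape of the RCU callback tracepoints: each event records the queue name, the rcu_head pointer itself, the callback it carries, and the callback-queue length. A minimal, hedged reconstruction of that shape (the event name rcu_callback_sketch is hypothetical, only the fields visible above are reproduced, and the usual trace/define_trace.h boilerplate is omitted):

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM rcu_sketch

    #include <linux/tracepoint.h>

    TRACE_EVENT(rcu_callback_sketch,

            TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen),

            TP_ARGS(rcuname, rhp, qlen),

            TP_STRUCT__entry(
                    __field(const char *, rcuname)
                    __field(void *, rhp)
                    __field(void *, func)
                    __field(long, qlen)
            ),

            TP_fast_assign(
                    __entry->rcuname = rcuname;
                    __entry->rhp = rhp;
                    __entry->func = rhp->func;  /* record the callback too */
                    __entry->qlen = qlen;
            ),

            TP_printk("%s rhp=%p func=%ps %ld",
                      __entry->rcuname, __entry->rhp, __entry->func,
                      __entry->qlen)
    );
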
/linux-6.1.9/drivers/infiniband/hw/cxgb4/
mem.c
388 return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL); in finish_mem_reg()
391 static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php, in register_mem() argument
397 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid, in register_mem()
410 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, in register_mem()
419 mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev, in alloc_pbl()
432 struct c4iw_dev *rhp; in c4iw_get_dma_mr() local
440 rhp = php->rhp; in c4iw_get_dma_mr()
458 mhp->rhp = rhp; in c4iw_get_dma_mr()
468 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid, in c4iw_get_dma_mr()
480 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, in c4iw_get_dma_mr()
[all …]
provider.c
65 struct c4iw_dev *rhp; in c4iw_dealloc_ucontext() local
69 rhp = to_c4iw_dev(ucontext->ibucontext.device); in c4iw_dealloc_ucontext()
73 c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx); in c4iw_dealloc_ucontext()
81 struct c4iw_dev *rhp = to_c4iw_dev(ibdev); in c4iw_alloc_ucontext() local
87 c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx); in c4iw_alloc_ucontext()
93 rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED; in c4iw_alloc_ucontext()
114 mm->addr = virt_to_phys(rhp->rdev.status_page); in c4iw_alloc_ucontext()
196 struct c4iw_dev *rhp; in c4iw_deallocate_pd() local
200 rhp = php->rhp; in c4iw_deallocate_pd()
202 c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid); in c4iw_deallocate_pd()
[all …]
qp.c
720 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]); in post_write_cmpl()
744 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]); in post_write_cmpl()
807 PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3)); in build_tpte_memreg()
916 xa_lock_irqsave(&qhp->rhp->qps, flags); in ring_kernel_sq_db()
918 if (qhp->rhp->db_state == NORMAL) in ring_kernel_sq_db()
921 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_sq_db()
925 xa_unlock_irqrestore(&qhp->rhp->qps, flags); in ring_kernel_sq_db()
933 xa_lock_irqsave(&qhp->rhp->qps, flags); in ring_kernel_rq_db()
935 if (qhp->rhp->db_state == NORMAL) in ring_kernel_rq_db()
938 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_rq_db()
[all …]
cq.c
351 qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe)); in c4iw_flush_hw_cq()
803 c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey); in __c4iw_poll_cq_one()
844 c4iw_invalidate_mr(qhp->rhp, in __c4iw_poll_cq_one()
933 qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe)); in c4iw_poll_cq_one()
984 xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid); in c4iw_destroy_cq()
990 destroy_cq(&chp->rhp->rdev, &chp->cq, in c4iw_destroy_cq()
1003 struct c4iw_dev *rhp = to_c4iw_dev(ibcq->device); in c4iw_create_cq() local
1020 if (vector >= rhp->rdev.lldi.nciq) in c4iw_create_cq()
1056 hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size); in c4iw_create_cq()
1078 ret = create_cq(&rhp->rdev, &chp->cq, in c4iw_create_cq()
[all …]
iw_cxgb4.h
343 static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid) in get_chp() argument
345 return xa_load(&rhp->cqs, cqid); in get_chp()
348 static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid) in get_qhp() argument
350 return xa_load(&rhp->qps, qpid); in get_qhp()
363 struct c4iw_dev *rhp; member
392 struct c4iw_dev *rhp; member
410 struct c4iw_dev *rhp; member
424 struct c4iw_dev *rhp; member
481 struct c4iw_dev *rhp; member
504 struct c4iw_dev *rhp; member
[all …]
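
Note that in the cxgb4 sections, rhp is not an rcu_head at all: it is the driver's conventional name for a back-pointer from each object (PD, MR, CQ, QP, ucontext) to its owning struct c4iw_dev, and get_chp()/get_qhp() above resolve IDs through per-device xarrays. A self-contained, hedged sketch of that pattern, under hypothetical names dev_sketch/obj_sketch, mirroring the xa_insert_irq()/xa_load()/xa_erase_irq() lifecycle seen in mem.c, iw_cxgb4.h, and cq.c:

    #include <linux/xarray.h>
    #include <linux/types.h>

    struct dev_sketch {
            struct xarray objs;             /* id -> object, like rhp->qps/cqs/mrs */
    };

    struct obj_sketch {
            struct dev_sketch *rhp;         /* back-pointer to the owning device */
            u32 id;
    };

    static int obj_register(struct dev_sketch *dev, struct obj_sketch *obj)
    {
            obj->rhp = dev;
            /* publish the object under its id, as finish_mem_reg() does */
            return xa_insert_irq(&dev->objs, obj->id, obj, GFP_KERNEL);
    }

    static struct obj_sketch *obj_lookup(struct dev_sketch *rhp, u32 id)
    {
            return xa_load(&rhp->objs, id); /* mirrors get_qhp()/get_chp() */
    }

    static void obj_unregister(struct obj_sketch *obj)
    {
            xa_erase_irq(&obj->rhp->objs, obj->id); /* as in c4iw_destroy_cq() */
    }
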
/linux-6.1.9/kernel/rcu/
rcu_segcblist.c
28 void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp) in rcu_cblist_enqueue() argument
30 *rclp->tail = rhp; in rcu_cblist_enqueue()
31 rclp->tail = &rhp->next; in rcu_cblist_enqueue()
45 struct rcu_head *rhp) in rcu_cblist_flush_enqueue() argument
53 if (!rhp) { in rcu_cblist_flush_enqueue()
56 rhp->next = NULL; in rcu_cblist_flush_enqueue()
57 srclp->head = rhp; in rcu_cblist_flush_enqueue()
58 srclp->tail = &rhp->next; in rcu_cblist_flush_enqueue()
69 struct rcu_head *rhp; in rcu_cblist_dequeue() local
71 rhp = rclp->head; in rcu_cblist_dequeue()
[all …]
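
rcu_cblist_enqueue() above is a textbook tail-pointer singly linked list: ->tail always holds the address of the slot that will receive the next link, so enqueue is O(1) with no empty-list special case. A hedged sketch of both directions (names are hypothetical; the real rcu_cblist also maintains a length field omitted here):

    #include <linux/types.h>

    struct cblist_sketch {
            struct rcu_head *head;
            struct rcu_head **tail;         /* == &head while the list is empty */
    };

    static void cblist_sketch_init(struct cblist_sketch *rclp)
    {
            rclp->head = NULL;
            rclp->tail = &rclp->head;
    }

    static void cblist_sketch_enqueue(struct cblist_sketch *rclp,
                                      struct rcu_head *rhp)
    {
            rhp->next = NULL;
            *rclp->tail = rhp;              /* correct for empty and non-empty lists */
            rclp->tail = &rhp->next;
    }

    static struct rcu_head *cblist_sketch_dequeue(struct cblist_sketch *rclp)
    {
            struct rcu_head *rhp = rclp->head;

            if (!rhp)
                    return NULL;
            rclp->head = rhp->next;
            if (!rclp->head)
                    rclp->tail = &rclp->head;  /* list just became empty */
            return rhp;
    }
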
srcutiny.c
116 struct rcu_head *rhp; in srcu_drive_gp() local
139 rhp = lh; in srcu_drive_gp()
142 rhp->func(rhp); in srcu_drive_gp()
178 void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, in call_srcu() argument
183 rhp->func = func; in call_srcu()
184 rhp->next = NULL; in call_srcu()
186 *ssp->srcu_cb_tail = rhp; in call_srcu()
187 ssp->srcu_cb_tail = &rhp->next; in call_srcu()
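
call_srcu() above is the registration side: the caller's rcu_head gets the callback installed in ->func and is appended at srcu_cb_tail. A hedged usage sketch, embedding the rcu_head in an enclosing object and recovering that object in the callback with container_of() (struct foo and its helpers are hypothetical):

    #include <linux/srcu.h>
    #include <linux/slab.h>

    struct foo {
            int payload;
            struct rcu_head rh;             /* handed to call_srcu() */
    };

    static void foo_free_cb(struct rcu_head *rhp)
    {
            /* recover the enclosing object from the embedded rcu_head */
            kfree(container_of(rhp, struct foo, rh));
    }

    static void foo_release(struct srcu_struct *ssp, struct foo *p)
    {
            /* freed only after all current SRCU readers of ssp are done */
            call_srcu(ssp, &p->rh, foo_free_cb);
    }
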
update.c
476 void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp, in do_trace_rcu_torture_read() argument
480 trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c); in do_trace_rcu_torture_read()
484 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ argument
559 struct early_boot_kfree_rcu *rhp; in early_boot_test_call_rcu() local
566 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); in early_boot_test_call_rcu()
567 if (!WARN_ON_ONCE(!rhp)) in early_boot_test_call_rcu()
568 kfree_rcu(rhp, rh); in early_boot_test_call_rcu()
tasks.h
281 static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, in call_rcu_tasks_generic() argument
292 rhp->next = NULL; in call_rcu_tasks_generic()
293 rhp->func = func; in call_rcu_tasks_generic()
316 rcu_segcblist_enqueue(&rtpcp->cblist, rhp); in call_rcu_tasks_generic()
335 static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp) in rcu_barrier_tasks_generic_cb() argument
340 rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head); in rcu_barrier_tasks_generic_cb()
457 struct rcu_head *rhp; in rcu_tasks_invoke_cbs() local
480 for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) { in rcu_tasks_invoke_cbs()
482 rhp->func(rhp); in rcu_tasks_invoke_cbs()
906 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
[all …]
rcutorture.c
1661 static void rcu_torture_timer_cb(struct rcu_head *rhp) in rcu_torture_timer_cb() argument
1663 kfree(rhp); in rcu_torture_timer_cb()
2014 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); in rcu_torture_timer() local
2016 if (rhp) in rcu_torture_timer()
2017 cur_ops->call(rhp, rcu_torture_timer_cb); in rcu_torture_timer()
2255 struct rcu_head *rhp; in rcu_torture_mem_dump_obj() local
2262 rhp = kmem_cache_alloc(kcp, GFP_KERNEL); in rcu_torture_mem_dump_obj()
2263 if (WARN_ON_ONCE(!rhp)) { in rcu_torture_mem_dump_obj()
2267 …slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); in rcu_torture_mem_dump_obj()
2272 pr_alert("mem_dump_obj(%px):", &rhp); in rcu_torture_mem_dump_obj()
[all …]
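
rcu_torture_timer() above shows the degenerate case in which the rcu_head is the entire allocation: kmalloc() a bare head, post it, and kfree() it from its own callback. A hedged sketch, with call_rcu() standing in for the cur_ops->call() indirection of the torture operations table:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    static void bare_head_cb(struct rcu_head *rhp)
    {
            kfree(rhp);                     /* rhp was the whole allocation */
    }

    static void queue_bare_head(void)
    {
            struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

            if (rhp)                        /* GFP_NOWAIT may fail; just skip */
                    call_rcu(rhp, bare_head_cb);
    }
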
rcu_segcblist.h
22 void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp);
25 struct rcu_head *rhp);
137 struct rcu_head *rhp);
139 struct rcu_head *rhp);
rcuscale.c
398 static void rcu_scale_async_cb(struct rcu_head *rhp) in rcu_scale_async_cb() argument
401 kfree(rhp); in rcu_scale_async_cb()
413 struct rcu_head *rhp = NULL; in rcu_scale_writer() local
454 if (!rhp) in rcu_scale_writer()
455 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); in rcu_scale_writer()
456 if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) { in rcu_scale_writer()
458 cur_ops->async(rhp, rcu_scale_async_cb); in rcu_scale_writer()
459 rhp = NULL; in rcu_scale_writer()
464 kfree(rhp); /* Because we are stopping. */ in rcu_scale_writer()
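
rcu_scale_writer() above throttles asynchronous grace periods: a callback is posted only while the number in flight is below gp_async_max; otherwise the head is freed (or kept for reuse) and the writer falls back to a synchronous grace period. A hedged sketch of that throttle, simplified to a single global atomic where the original uses a per-CPU counter (counter and cap names are hypothetical):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/atomic.h>

    static atomic_t sketch_inflight = ATOMIC_INIT(0);
    #define SKETCH_INFLIGHT_MAX 1024        /* plays the role of gp_async_max */

    static void sketch_async_cb(struct rcu_head *rhp)
    {
            atomic_dec(&sketch_inflight);
            kfree(rhp);
    }

    static bool sketch_post_async(void)
    {
            struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);

            if (!rhp)
                    return false;
            if (atomic_read(&sketch_inflight) < SKETCH_INFLIGHT_MAX) {
                    atomic_inc(&sketch_inflight);
                    call_rcu(rhp, sketch_async_cb);
                    return true;
            }
            kfree(rhp);                     /* over the cap: caller falls back */
            return false;
    }
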
srcutree.c
1073 static void srcu_leak_callback(struct rcu_head *rhp) in srcu_leak_callback() argument
1081 struct rcu_head *rhp, bool do_norm) in srcu_gp_start_if_needed() argument
1100 if (rhp) in srcu_gp_start_if_needed()
1101 rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp); in srcu_gp_start_if_needed()
1158 static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, in __call_srcu() argument
1161 if (debug_rcu_head_queue(rhp)) { in __call_srcu()
1163 WRITE_ONCE(rhp->func, srcu_leak_callback); in __call_srcu()
1167 rhp->func = func; in __call_srcu()
1168 (void)srcu_gp_start_if_needed(ssp, rhp, do_norm); in __call_srcu()
1188 void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, in call_srcu() argument
[all …]
sync.c
43 static void rcu_sync_func(struct rcu_head *rhp);
73 static void rcu_sync_func(struct rcu_head *rhp) in rcu_sync_func() argument
75 struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head); in rcu_sync_func()
tree_nocb.h
298 static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, in rcu_nocb_do_flush_bypass() argument
306 if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) { in rcu_nocb_do_flush_bypass()
311 if (rhp) in rcu_nocb_do_flush_bypass()
313 rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp); in rcu_nocb_do_flush_bypass()
328 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, in rcu_nocb_flush_bypass() argument
335 return rcu_nocb_do_flush_bypass(rdp, rhp, j); in rcu_nocb_flush_bypass()
369 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, in rcu_nocb_try_bypass() argument
436 if (!rcu_nocb_flush_bypass(rdp, rhp, j)) { in rcu_nocb_try_bypass()
459 rcu_cblist_enqueue(&rdp->nocb_bypass, rhp); in rcu_nocb_try_bypass()
1567 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, in rcu_nocb_flush_bypass() argument
[all …]
rcu.h
481 struct rcu_head *rhp,
495 struct rcu_head *rhp,
500 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ argument
tree.h
442 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
444 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
tiny.c
161 static void tiny_rcu_leak_callback(struct rcu_head *rhp) in tiny_rcu_leak_callback() argument
tree.c
2193 struct rcu_head *rhp; in rcu_do_batch() local
2237 rhp = rcu_cblist_dequeue(&rcl); in rcu_do_batch()
2239 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) { in rcu_do_batch()
2243 debug_rcu_head_unqueue(rhp); in rcu_do_batch()
2246 trace_rcu_invoke_callback(rcu_state.name, rhp); in rcu_do_batch()
2248 f = rhp->func; in rcu_do_batch()
2249 WRITE_ONCE(rhp->func, (rcu_callback_t)0L); in rcu_do_batch()
2250 f(rhp); in rcu_do_batch()
2687 static void rcu_leak_callback(struct rcu_head *rhp) in rcu_leak_callback() argument
3880 static void rcu_barrier_callback(struct rcu_head *rhp) in rcu_barrier_callback() argument
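
rcu_do_batch() above is the consumption side: each head is dequeued, its ->func is cached and then poisoned with 0 so reuse after invocation is detectable, and the callback is invoked with the head as its only argument. A hedged sketch, reusing the hypothetical cblist_sketch helpers from the rcu_segcblist.c sketch above:

    static void cblist_sketch_invoke_all(struct cblist_sketch *rclp)
    {
            struct rcu_head *rhp;
            rcu_callback_t f;

            for (rhp = cblist_sketch_dequeue(rclp); rhp;
                 rhp = cblist_sketch_dequeue(rclp)) {
                    f = rhp->func;
                    /* poison ->func so a double invocation is recognizable */
                    WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
                    f(rhp);                 /* the callback may free rhp */
            }
    }
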
/linux-6.1.9/include/linux/
rcupdate_trace.h
87 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
95 static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) { BUG(); } in call_rcu_tasks_trace() argument
rcupdate.h
1026 static inline void rcu_head_init(struct rcu_head *rhp) in rcu_head_init() argument
1028 rhp->func = (rcu_callback_t)~0L; in rcu_head_init()
1045 rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) in rcu_head_after_call_rcu() argument
1047 rcu_callback_t func = READ_ONCE(rhp->func); in rcu_head_after_call_rcu()
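
rcu_head_init() and rcu_head_after_call_rcu() above form a small debugging protocol: init poisons ->func with ~0L, call_rcu() overwrites it with the real callback, and the check reads ->func back to report whether the head is currently queued with that callback. A hedged usage sketch (struct bar is hypothetical; the check is only meaningful between rcu_head_init() and the callback's invocation):

    #include <linux/rcupdate.h>

    struct bar {
            struct rcu_head rh;
            /* ... payload ... */
    };

    static void bar_cb(struct rcu_head *rhp)
    {
            /* reclaim the enclosing struct bar here */
    }

    static void bar_init(struct bar *b)
    {
            rcu_head_init(&b->rh);          /* ->func = ~0L sentinel */
    }

    static void bar_check_posted(struct bar *b)
    {
            /* true iff &b->rh was passed to call_rcu(..., bar_cb) and the
             * callback has not yet run; false without warning while still at
             * the ~0L sentinel; any other ->func value triggers a warning */
            WARN_ON_ONCE(!rcu_head_after_call_rcu(&b->rh, bar_cb));
    }
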
/linux-6.1.9/drivers/media/usb/pvrusb2/
pvrusb2-v4l2.c
42 struct pvr2_ioread *rhp; member
915 if (fhp->rhp) { in pvr2_v4l2_release()
918 sp = pvr2_ioread_get_stream(fhp->rhp); in pvr2_v4l2_release()
920 pvr2_ioread_destroy(fhp->rhp); in pvr2_v4l2_release()
921 fhp->rhp = NULL; in pvr2_v4l2_release()
1046 if (fh->rhp) return 0; in pvr2_v4l2_iosetup()
1062 fh->rhp = pvr2_channel_create_mpeg_stream(fh->pdi->stream); in pvr2_v4l2_iosetup()
1063 if (!fh->rhp) { in pvr2_v4l2_iosetup()
1073 return pvr2_ioread_set_enabled(fh->rhp,!0); in pvr2_v4l2_iosetup()
1116 if (!fh->rhp) { in pvr2_v4l2_read()
[all …]
/linux-6.1.9/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/
misc.h
33 #define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ argument
/linux-6.1.9/include/linux/sched/
mm.h
58 static inline void __mmdrop_delayed(struct rcu_head *rhp) in __mmdrop_delayed() argument
60 struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); in __mmdrop_delayed()
