Lines matching refs: port_priv (drivers/infiniband/core/mad.c)

63 struct ib_device *dev = qp_info->port_priv->device; in create_mad_addr_info()
64 u32 pnum = qp_info->port_priv->port_num; in create_mad_addr_info()
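
Taken together, the references in this listing pin down the layout of struct ib_mad_port_private (the authoritative definition lives in drivers/infiniband/core/mad_priv.h). A reconstructed sketch follows; field order and the two array bounds are assumed to match mad_priv.h, everything else is evidenced by the lines cited in the comments:

	/* Sketch of struct ib_mad_port_private, reconstructed from this listing. */
	struct ib_mad_port_private {
		struct list_head port_list;	/* entry on ib_mad_port_list (3003, 3053) */
		struct ib_device *device;	/* owning device (63) */
		u32 port_num;			/* physical port number (64) */
		struct ib_cq *cq;		/* one CQ shared by both MAD QPs (2978) */
		struct ib_pd *pd;		/* PD supplying the local DMA lkey (2684) */
		spinlock_t reg_lock;		/* guards the version[] agent tables (429) */
		struct ib_mad_mgmt_version_table version[MAX_MGMT_VERSION];
		struct workqueue_struct *wq;	/* ordered, WQ_MEM_RECLAIM (2996) */
		struct ib_mad_qp_info qp_info[IB_MAD_QPS_CORE];	/* [0] SMI, [1] GSI */
	};
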
100 struct ib_mad_port_private *port_priv,
112 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
233 struct ib_mad_port_private *port_priv; in ib_register_mad_agent() local
352 port_priv = ib_get_mad_port(device, port_num); in ib_register_mad_agent()
353 if (!port_priv) { in ib_register_mad_agent()
363 if (!port_priv->qp_info[qpn].qp) { in ib_register_mad_agent()
386 mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_agent()
393 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_agent()
429 spin_lock_irq(&port_priv->reg_lock); in ib_register_mad_agent()
433 class = port_priv->version[mad_reg_req-> in ib_register_mad_agent()
447 vendor = port_priv->version[mad_reg_req-> in ib_register_mad_agent()
466 spin_unlock_irq(&port_priv->reg_lock); in ib_register_mad_agent()
471 spin_unlock_irq(&port_priv->reg_lock); in ib_register_mad_agent()
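
The registration path above resolves the port, validates that the requested special QP exists, then consults the per-version class or vendor table under reg_lock. A condensed sketch of that sequence (duplicate-method checking and table insertion are elided; mgmt_class and qpn are computed earlier in the real function):

	port_priv = ib_get_mad_port(device, port_num);			/* 352 */
	if (!port_priv)
		return ERR_PTR(-ENODEV);
	if (!port_priv->qp_info[qpn].qp)	/* e.g. no SMI QP on this port (363) */
		return ERR_PTR(-EPROTONOSUPPORT);

	spin_lock_irq(&port_priv->reg_lock);	/* process context, so plain _irq (429) */
	if (mad_reg_req) {
		if (!is_vendor_class(mgmt_class))
			class = port_priv->version[mad_reg_req->mgmt_class_version].class;	/* 433 */
		else
			vendor = port_priv->version[mad_reg_req->mgmt_class_version].vendor;	/* 447 */
		/* reject overlapping registrations, otherwise insert the new agent */
	}
	spin_unlock_irq(&port_priv->reg_lock);	/* 466/471: error and success paths */
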
492 struct ib_mad_port_private *port_priv; in unregister_mad_agent() local
502 port_priv = mad_agent_priv->qp_info->port_priv; in unregister_mad_agent()
505 spin_lock_irq(&port_priv->reg_lock); in unregister_mad_agent()
507 spin_unlock_irq(&port_priv->reg_lock); in unregister_mad_agent()
510 flush_workqueue(port_priv->wq); in unregister_mad_agent()
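
Unregistration inverts that: the agent is pulled out of the tables first, so no newly arriving MAD can be routed to it, and only then is the port workqueue flushed to drain work already in flight. A sketch; the call between the lock/unlock pair does not appear in this listing because it never mentions port_priv, but in mainline it is remove_mad_reg_req():

	spin_lock_irq(&port_priv->reg_lock);		/* 505 */
	remove_mad_reg_req(mad_agent_priv);		/* line 506, inferred */
	spin_unlock_irq(&port_priv->reg_lock);		/* 507 */
	flush_workqueue(port_priv->wq);			/* 510: drain in-flight work */
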
583 static size_t port_mad_size(const struct ib_mad_port_private *port_priv) in port_mad_size() argument
585 return rdma_max_mad_size(port_priv->device, port_priv->port_num); in port_mad_size()
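
Lines 583 and 585 are the whole of port_mad_size() apart from the braces; filled in, it reads:

	static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
	{
		return rdma_max_mad_size(port_priv->device, port_priv->port_num);
	}
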
607 struct ib_mad_port_private *port_priv; in handle_outgoing_dr_smp() local
613 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); in handle_outgoing_dr_smp()
616 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, in handle_outgoing_dr_smp()
617 mad_agent_priv->qp_info->port_priv->port_num); in handle_outgoing_dr_smp()
728 port_priv = ib_get_mad_port(mad_agent_priv->agent.device, in handle_outgoing_dr_smp()
730 if (port_priv) { in handle_outgoing_dr_smp()
732 recv_mad_agent = find_mad_agent(port_priv, in handle_outgoing_dr_smp()
735 if (!port_priv || !recv_mad_agent) { in handle_outgoing_dr_smp()
764 queue_work(mad_agent_priv->qp_info->port_priv->wq, in handle_outgoing_dr_smp()
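
For a directed-route SMP that terminates at the local port, handle_outgoing_dr_smp() never posts a send; it looks up the would-be receiver through the same port structure and hands the MAD over via the port workqueue. A sketch of that loopback (error handling elided; mad_hdr points at the locally built MAD, and local_work/local_list are the agent-side fields per mad_priv.h):

	port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
				    mad_agent_priv->agent.port_num);	/* 728 */
	if (port_priv)
		recv_mad_agent = find_mad_agent(port_priv, mad_hdr);	/* 732 */
	if (!port_priv || !recv_mad_agent)				/* 735 */
		goto err;	/* nobody listening locally: fail the send */

	/* ... queue the MAD on mad_agent_priv->local_list, then: */
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);			/* 764 */
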
1284 struct ib_mad_port_private *port_priv; in add_nonoui_reg_req() local
1289 port_priv = agent_priv->qp_info->port_priv; in add_nonoui_reg_req()
1290 class = &port_priv->version[mad_reg_req->mgmt_class_version].class; in add_nonoui_reg_req()
1343 struct ib_mad_port_private *port_priv; in add_oui_reg_req() local
1353 port_priv = agent_priv->qp_info->port_priv; in add_oui_reg_req()
1354 vendor_table = &port_priv->version[ in add_oui_reg_req()
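
Both registration helpers index the same per-port array, port_priv->version[], keyed by the management class version; non-OUI classes land in its class table and OUI-qualified vendor classes in its vendor table. The element type, per mad_priv.h:

	struct ib_mad_mgmt_version_table {
		struct ib_mad_mgmt_class_table  *class;		/* filled by add_nonoui_reg_req() (1290) */
		struct ib_mad_mgmt_vendor_table *vendor;	/* filled by add_oui_reg_req() (1354) */
	};

remove_mad_reg_req() walks the same two tables in reverse to release a registration (1457-1520).
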
1440 struct ib_mad_port_private *port_priv; in remove_mad_reg_req() local
1455 port_priv = agent_priv->qp_info->port_priv; in remove_mad_reg_req()
1457 class = port_priv->version[ in remove_mad_reg_req()
1475 port_priv->version[ in remove_mad_reg_req()
1488 vendor = port_priv->version[ in remove_mad_reg_req()
1520 port_priv->version[ in remove_mad_reg_req()
1535 find_mad_agent(struct ib_mad_port_private *port_priv, in find_mad_agent() argument
1562 spin_lock_irqsave(&port_priv->reg_lock, flags); in find_mad_agent()
1570 class = port_priv->version[ in find_mad_agent()
1583 vendor = port_priv->version[ in find_mad_agent()
1605 spin_unlock_irqrestore(&port_priv->reg_lock, flags); in find_mad_agent()
1609 dev_notice(&port_priv->device->dev, in find_mad_agent()
1611 &mad_agent->agent, port_priv->port_num); in find_mad_agent()
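
find_mad_agent() is the receive-side mirror of the registration walk. Because it runs from the CQ completion handler rather than process context, it takes reg_lock with irqsave/irqrestore instead of the plain _irq variants used at registration time. A sketch; the lookup body is compressed to comments and the function name here is illustrative:

	static struct ib_mad_agent_private *
	find_mad_agent_sketch(struct ib_mad_port_private *port_priv,
			      const struct ib_mad_hdr *mad_hdr)
	{
		struct ib_mad_agent_private *mad_agent = NULL;
		unsigned long flags;

		/* responses are routed by the high 32 bits of the TID (not
		 * shown); requests walk the per-version tables under reg_lock */
		spin_lock_irqsave(&port_priv->reg_lock, flags);		/* 1562 */
		/* class  = port_priv->version[...].class   (1570)
		 * vendor = port_priv->version[...].vendor  (1583) */
		spin_unlock_irqrestore(&port_priv->reg_lock, flags);	/* 1605 */

		if (mad_agent && !mad_agent->agent.recv_handler) {
			dev_notice(&port_priv->device->dev,
				   "No receive handler for client %p on port %u\n",
				   &mad_agent->agent, port_priv->port_num); /* 1609-1611 */
			mad_agent = NULL;
		}
		return mad_agent;
	}
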
1861 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, in handle_ib_smi() argument
1874 rdma_cap_ib_switch(port_priv->device), in handle_ib_smi()
1876 port_priv->device->phys_port_cnt) == in handle_ib_smi()
1886 rdma_cap_ib_switch(port_priv->device), in handle_ib_smi()
1890 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) in handle_ib_smi()
1892 } else if (rdma_cap_ib_switch(port_priv->device)) { in handle_ib_smi()
1901 port_priv->device, in handle_ib_smi()
1948 handle_opa_smi(struct ib_mad_port_private *port_priv, in handle_opa_smi() argument
1961 rdma_cap_ib_switch(port_priv->device), in handle_opa_smi()
1963 port_priv->device->phys_port_cnt) == in handle_opa_smi()
1973 rdma_cap_ib_switch(port_priv->device), in handle_opa_smi()
1977 if (opa_smi_check_local_smp(smp, port_priv->device) == in handle_opa_smi()
1981 } else if (rdma_cap_ib_switch(port_priv->device)) { in handle_opa_smi()
1991 port_priv->device, in handle_opa_smi()
2004 handle_smi(struct ib_mad_port_private *port_priv, in handle_smi() argument
2016 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, in handle_smi()
2019 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); in handle_smi()
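
handle_smi() itself is a thin dispatcher: when the port is OPA-capable and the MAD is an OPA SMP, take the OPA path, otherwise the IB path. Reconstructed to match lines 2016 and 2019; treat the version test as a sketch of the mainline check:

	static enum smi_action
	handle_smi(struct ib_mad_port_private *port_priv,
		   struct ib_mad_qp_info *qp_info,
		   struct ib_wc *wc, u32 port_num,
		   struct ib_mad_private *recv,
		   struct ib_mad_private *response,
		   bool opa)
	{
		struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

		if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
		    mad_hdr->class_version == OPA_SM_CLASS_VERSION)
			return handle_opa_smi(port_priv, qp_info, wc, port_num,
					      recv, response);				/* 2016 */

		return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);	/* 2019 */
	}
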
2024 struct ib_mad_port_private *port_priv = cq->cq_context; in ib_mad_recv_done() local
2037 if (list_empty_careful(&port_priv->port_list)) in ib_mad_recv_done()
2051 opa = rdma_cap_opa_mad(qp_info->port_priv->device, in ib_mad_recv_done()
2052 qp_info->port_priv->port_num); in ib_mad_recv_done()
2057 ib_dma_unmap_single(port_priv->device, in ib_mad_recv_done()
2089 if (rdma_cap_ib_switch(port_priv->device)) in ib_mad_recv_done()
2092 port_num = port_priv->port_num; in ib_mad_recv_done()
2096 if (handle_smi(port_priv, qp_info, wc, port_num, recv, in ib_mad_recv_done()
2103 if (port_priv->device->ops.process_mad) { in ib_mad_recv_done()
2104 ret = port_priv->device->ops.process_mad( in ib_mad_recv_done()
2105 port_priv->device, 0, port_priv->port_num, wc, in ib_mad_recv_done()
2119 port_priv->device, in ib_mad_recv_done()
2128 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad); in ib_mad_recv_done()
2140 port_priv->device, port_num, in ib_mad_recv_done()
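
The receive completion handler threads port_priv through every stage. A condensed sketch of the flow (OPA handling, response allocation, and error paths elided; mad_priv_dma_size() is the helper mad.c uses for the mapped length):

	if (list_empty_careful(&port_priv->port_list))	/* port being torn down (2037) */
		return;

	ib_dma_unmap_single(port_priv->device, recv->header.mapping,
			    mad_priv_dma_size(recv), DMA_FROM_DEVICE);	/* 2057 */

	/* switches report the ingress port in the WC; HCAs use the port itself */
	port_num = rdma_cap_ib_switch(port_priv->device) ?
		   wc->port_num : port_priv->port_num;		/* 2089-2092 */

	/* SMPs may be consumed or retargeted by the SMI state machine (2096),
	 * then the driver gets a look via ops.process_mad (2103-2105) */

	mad_agent = find_mad_agent(port_priv,
				   (const struct ib_mad_hdr *)recv->mad);	/* 2128 */
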
2171 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in adjust_timeout()
2207 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in wait_for_response()
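
Both timeout helpers re-arm the same delayed work item on the port's ordered workqueue, which serializes timeout processing with the rest of the port's work. Sketched (timed_work is the agent's delayed work per mad_priv.h; delay is in jiffies):

	mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
			 &mad_agent_priv->timed_work, delay);	/* 2171, 2207 */
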
2273 struct ib_mad_port_private *port_priv = cq->cq_context; in ib_mad_send_done() local
2283 if (list_empty_careful(&port_priv->port_list)) in ib_mad_send_done()
2287 if (!ib_mad_send_error(port_priv, wc)) in ib_mad_send_done()
2331 dev_err(&port_priv->device->dev, in ib_mad_send_done()
2356 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv, in ib_mad_send_error() argument
2393 dev_err(&port_priv->device->dev, in ib_mad_send_error()
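
The send completion handler opens with the same two guards: bail out if the port is being torn down, and on a failed WC give ib_mad_send_error() a chance to absorb the error before treating the send as a failed completion. Sketched from lines 2283 and 2287:

	if (list_empty_careful(&port_priv->port_list))	/* 2283 */
		return;

	if (wc->status != IB_WC_SUCCESS &&
	    !ib_mad_send_error(port_priv, wc))		/* 2287 */
		return;	/* error absorbed, e.g. the send was requeued */
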
2512 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, in local_completions()
2513 mad_agent_priv->qp_info->port_priv->port_num); in local_completions()
2644 port_priv->wq, in timeout_sends()
2684 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; in ib_mad_post_receive_mads()
2697 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), in ib_mad_post_receive_mads()
2705 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2709 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2731 ib_dma_unmap_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2736 dev_err(&qp_info->port_priv->device->dev, in ib_mad_post_receive_mads()
2770 ib_dma_unmap_single(qp_info->port_priv->device, in cleanup_recv_queue()
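
The receive-posting loop shows why the port keeps its own PD: its local_dma_lkey covers every receive buffer the port maps, and cleanup_recv_queue() (2770) undoes exactly this mapping at teardown. A condensed sketch of one iteration (queue accounting and ib_post_recv() are compressed to comments; GFP_ATOMIC because this can run from completion context):

	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;		/* 2684 */

	mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
				     GFP_ATOMIC);			/* 2697 */
	if (!mad_priv)
		return -ENOMEM;

	sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
					 &mad_priv->grh,
					 mad_priv_dma_size(mad_priv),
					 DMA_FROM_DEVICE);		/* 2705 */
	if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
					  sg_list.addr))) {		/* 2709 */
		kfree(mad_priv);
		return -ENOMEM;
	}
	/* ib_post_recv(); on failure, unmap (2731) and dev_err (2736) */
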
2783 static int ib_mad_port_start(struct ib_mad_port_private *port_priv) in ib_mad_port_start() argument
2794 ret = ib_find_pkey(port_priv->device, port_priv->port_num, in ib_mad_port_start()
2800 qp = port_priv->qp_info[i].qp; in ib_mad_port_start()
2814 dev_err(&port_priv->device->dev, in ib_mad_port_start()
2823 dev_err(&port_priv->device->dev, in ib_mad_port_start()
2833 dev_err(&port_priv->device->dev, in ib_mad_port_start()
2840 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); in ib_mad_port_start()
2842 dev_err(&port_priv->device->dev, in ib_mad_port_start()
2849 if (!port_priv->qp_info[i].qp) in ib_mad_port_start()
2852 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); in ib_mad_port_start()
2854 dev_err(&port_priv->device->dev, in ib_mad_port_start()
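
ib_mad_port_start() brings both special QPs to RTS and only then posts receives. A skeleton under an illustrative name (the three ib_modify_qp() transitions are compressed to a comment; IB_MAD_QPS_CORE is the two-QP bound from mad_priv.h):

	static int port_start_sketch(struct ib_mad_port_private *port_priv)
	{
		u16 pkey_index;
		int ret, i;

		ret = ib_find_pkey(port_priv->device, port_priv->port_num,
				   IB_DEFAULT_PKEY_FULL, &pkey_index);	/* 2794 */
		if (ret)
			pkey_index = 0;

		for (i = 0; i < IB_MAD_QPS_CORE; i++) {
			struct ib_qp *qp = port_priv->qp_info[i].qp;	/* 2800 */

			if (!qp)
				continue;
			/* INIT -> RTR -> RTS; each ib_modify_qp() failure logs
			 * via dev_err on port_priv->device (2814, 2823, 2833) */
		}

		ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);	/* 2840 */
		if (ret)
			return ret;

		for (i = 0; i < IB_MAD_QPS_CORE; i++) {
			if (!port_priv->qp_info[i].qp)			/* 2849 */
				continue;
			ret = ib_mad_post_receive_mads(&port_priv->qp_info[i],
						       NULL);		/* 2852 */
			if (ret)					/* 2854 */
				return ret;
		}
		return 0;
	}
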
2869 dev_err(&qp_info->port_priv->device->dev, in qp_event_handler()
2883 static void init_mad_qp(struct ib_mad_port_private *port_priv, in init_mad_qp() argument
2886 qp_info->port_priv = port_priv; in init_mad_qp()
2899 qp_init_attr.send_cq = qp_info->port_priv->cq; in create_mad_qp()
2900 qp_init_attr.recv_cq = qp_info->port_priv->cq; in create_mad_qp()
2907 qp_init_attr.port_num = qp_info->port_priv->port_num; in create_mad_qp()
2910 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); in create_mad_qp()
2912 dev_err(&qp_info->port_priv->device->dev, in create_mad_qp()
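
create_mad_qp() wires both directions of each special QP to the port's single CQ, which is what lets one pair of completion handlers (ib_mad_send_done/ib_mad_recv_done) service SMI and GSI alike. The relevant attribute setup, sketched with the remaining qp_init_attr fields elided:

	qp_init_attr.send_cq = qp_info->port_priv->cq;		/* 2899 */
	qp_init_attr.recv_cq = qp_info->port_priv->cq;		/* 2900 */
	qp_init_attr.qp_type = qp_type;		/* IB_QPT_SMI or IB_QPT_GSI */
	qp_init_attr.port_num = qp_info->port_priv->port_num;	/* 2907 */

	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); /* 2910 */
	if (IS_ERR(qp_info->qp)) {
		/* dev_err() on the owning device (2912) */
		return PTR_ERR(qp_info->qp);
	}
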
2943 struct ib_mad_port_private *port_priv; in ib_mad_port_open() local
2956 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); in ib_mad_port_open()
2957 if (!port_priv) in ib_mad_port_open()
2960 port_priv->device = device; in ib_mad_port_open()
2961 port_priv->port_num = port_num; in ib_mad_port_open()
2962 spin_lock_init(&port_priv->reg_lock); in ib_mad_port_open()
2963 init_mad_qp(port_priv, &port_priv->qp_info[0]); in ib_mad_port_open()
2964 init_mad_qp(port_priv, &port_priv->qp_info[1]); in ib_mad_port_open()
2971 port_priv->pd = ib_alloc_pd(device, 0); in ib_mad_port_open()
2972 if (IS_ERR(port_priv->pd)) { in ib_mad_port_open()
2974 ret = PTR_ERR(port_priv->pd); in ib_mad_port_open()
2978 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, in ib_mad_port_open()
2980 if (IS_ERR(port_priv->cq)) { in ib_mad_port_open()
2982 ret = PTR_ERR(port_priv->cq); in ib_mad_port_open()
2987 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); in ib_mad_port_open()
2991 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); in ib_mad_port_open()
2996 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); in ib_mad_port_open()
2997 if (!port_priv->wq) { in ib_mad_port_open()
3003 list_add_tail(&port_priv->port_list, &ib_mad_port_list); in ib_mad_port_open()
3006 ret = ib_mad_port_start(port_priv); in ib_mad_port_open()
3016 list_del_init(&port_priv->port_list); in ib_mad_port_open()
3019 destroy_workqueue(port_priv->wq); in ib_mad_port_open()
3021 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_open()
3023 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_open()
3025 ib_free_cq(port_priv->cq); in ib_mad_port_open()
3026 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_open()
3027 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_open()
3029 ib_dealloc_pd(port_priv->pd); in ib_mad_port_open()
3031 kfree(port_priv); in ib_mad_port_open()
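
ib_mad_port_open() acquires resources in one order and the error path at lines 3016-3031 releases them in exactly the reverse: classic goto unwinding. A compressed skeleton under illustrative names (mainline derives cq_size from the module's send/receive queue parameters; a constant stands in here as an acknowledged assumption):

	static int port_open_sketch(struct ib_device *device, u32 port_num)
	{
		struct ib_mad_port_private *port_priv;
		int cq_size = 512;	/* assumption: see note above */
		int ret;

		port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL);	/* 2956 */
		if (!port_priv)
			return -ENOMEM;

		port_priv->device = device;				/* 2960 */
		port_priv->port_num = port_num;				/* 2961 */
		spin_lock_init(&port_priv->reg_lock);			/* 2962 */
		init_mad_qp(port_priv, &port_priv->qp_info[0]);		/* SMI */
		init_mad_qp(port_priv, &port_priv->qp_info[1]);		/* GSI */

		port_priv->pd = ib_alloc_pd(device, 0);			/* 2971 */
		if (IS_ERR(port_priv->pd)) {
			ret = PTR_ERR(port_priv->pd);
			goto err_free;
		}

		port_priv->cq = ib_alloc_cq(port_priv->device, port_priv,
					    cq_size, 0, IB_POLL_UNBOUND); /* 2978 */
		if (IS_ERR(port_priv->cq)) {
			ret = PTR_ERR(port_priv->cq);
			goto err_pd;
		}

		/* then: create the SMI/GSI QPs, alloc_ordered_workqueue, link
		 * onto ib_mad_port_list, ib_mad_port_start(); each later
		 * failure unwinds everything above it (3016-3031) */
		return 0;

	err_pd:
		ib_dealloc_pd(port_priv->pd);
	err_free:
		kfree(port_priv);
		return ret;
	}
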
3043 struct ib_mad_port_private *port_priv; in ib_mad_port_close() local
3047 port_priv = __ib_get_mad_port(device, port_num); in ib_mad_port_close()
3048 if (port_priv == NULL) { in ib_mad_port_close()
3053 list_del_init(&port_priv->port_list); in ib_mad_port_close()
3056 destroy_workqueue(port_priv->wq); in ib_mad_port_close()
3057 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_close()
3058 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_close()
3059 ib_free_cq(port_priv->cq); in ib_mad_port_close()
3060 ib_dealloc_pd(port_priv->pd); in ib_mad_port_close()
3061 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_close()
3062 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_close()
3065 kfree(port_priv); in ib_mad_port_close()
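
Note how ib_mad_port_close() replays ib_mad_port_open()'s error unwind almost verbatim (compare lines 3053-3065 with 3016-3031). The one divergence is ordering: close deallocates the PD before draining the receive queues, while the open error path drains them first. Either way cleanup_recv_queue() still unmaps buffers through port_priv->device (2770), which is why kfree(port_priv) must come last.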