
Searched refs:ppd (Results 1 – 25 of 25) sorted by relevance

/linux-3.4.99/drivers/infiniband/hw/qib/
qib_sdma.c
124 static void clear_sdma_activelist(struct qib_pportdata *ppd) in clear_sdma_activelist() argument
128 list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) { in clear_sdma_activelist()
135 unmap_desc(ppd, idx); in clear_sdma_activelist()
136 if (++idx == ppd->sdma_descq_cnt) in clear_sdma_activelist()
147 struct qib_pportdata *ppd = (struct qib_pportdata *) opaque; in sdma_sw_clean_up_task() local
150 spin_lock_irqsave(&ppd->sdma_lock, flags); in sdma_sw_clean_up_task()
162 qib_sdma_make_progress(ppd); in sdma_sw_clean_up_task()
164 clear_sdma_activelist(ppd); in sdma_sw_clean_up_task()
170 ppd->sdma_descq_removed = ppd->sdma_descq_added; in sdma_sw_clean_up_task()
177 ppd->sdma_descq_tail = 0; in sdma_sw_clean_up_task()
[all …]
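
In the qib driver, ppd is the per-port data (struct qib_pportdata), and clear_sdma_activelist() above walks a packet's descriptors by advancing an index around a fixed-size ring of sdma_descq_cnt slots. A minimal user-space sketch of that wraparound step, with an invented sdma_ring type standing in for the kernel structures:

#include <stdio.h>

/* Hypothetical stand-in for the ring geometry kept in qib_pportdata. */
struct sdma_ring {
    unsigned descq_cnt;   /* number of descriptor slots in the ring */
};

/* Advance a descriptor index by one slot, wrapping at the end of the
 * ring; the same shape as "if (++idx == ppd->sdma_descq_cnt)". */
static unsigned sdma_next_idx(const struct sdma_ring *ring, unsigned idx)
{
    if (++idx == ring->descq_cnt)
        idx = 0;
    return idx;
}

int main(void)
{
    struct sdma_ring ring = { .descq_cnt = 4 };
    unsigned idx = 0;

    /* Walk six descriptors to show the index wrapping 3 -> 0. */
    for (int i = 0; i < 6; i++) {
        printf("desc slot %u\n", idx);
        idx = sdma_next_idx(&ring, idx);
    }
    return 0;
}
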
qib_intr.c
72 static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev) in signal_ib_event() argument
75 struct qib_devdata *dd = ppd->dd; in signal_ib_event()
78 event.element.port_num = ppd->port; in signal_ib_event()
83 void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs) in qib_handle_e_ibstatuschanged() argument
85 struct qib_devdata *dd = ppd->dd; in qib_handle_e_ibstatuschanged()
104 (!ppd->lflags || (ppd->lflags & QIBL_LINKDOWN)) && in qib_handle_e_ibstatuschanged()
107 if (dd->f_ib_updown(ppd, 1, ibcs)) in qib_handle_e_ibstatuschanged()
109 } else if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | in qib_handle_e_ibstatuschanged()
113 dd->f_ib_updown(ppd, 0, ibcs)) in qib_handle_e_ibstatuschanged()
115 qib_set_uevent_bits(ppd, _QIB_EVENT_LINKDOWN_BIT); in qib_handle_e_ibstatuschanged()
[all …]
qib_iba7322.c
55 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
66 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
593 struct qib_pportdata *ppd; member
782 static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd, in qib_read_kreg_port() argument
785 if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT)) in qib_read_kreg_port()
787 return readq(&ppd->cpspec->kpregbase[regno]); in qib_read_kreg_port()
790 static inline void qib_write_kreg_port(const struct qib_pportdata *ppd, in qib_write_kreg_port() argument
793 if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase && in qib_write_kreg_port()
794 (ppd->dd->flags & QIB_PRESENT)) in qib_write_kreg_port()
795 writeq(value, &ppd->cpspec->kpregbase[regno]); in qib_write_kreg_port()
[all …]
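
qib_read_kreg_port() and qib_write_kreg_port() above only touch the chip when the per-port register mapping exists and the device is still flagged present (QIB_PRESENT). A hedged sketch of that guard pattern, with an invented struct port replacing the kernel types and plain memory accesses standing in for readq()/writeq() (the dummy value returned when the device is absent is an assumption here):

#include <stdint.h>
#include <stdio.h>

#define DEV_PRESENT 0x1   /* stand-in for the QIB_PRESENT flag */

/* Simplified stand-in for the qib_pportdata/chip-specific fields used. */
struct port {
    volatile uint64_t *kregbase;  /* mapped per-port register array, or NULL */
    unsigned flags;               /* device-wide flags */
};

/* Read a per-port register only if the mapping exists and the device is
 * still present; otherwise bail out early and return a dummy value. */
static uint64_t read_kreg_port(const struct port *p, unsigned regno)
{
    if (!p->kregbase || !(p->flags & DEV_PRESENT))
        return 0;
    return p->kregbase[regno];
}

/* Writes are silently dropped when the device cannot be reached. */
static void write_kreg_port(struct port *p, unsigned regno, uint64_t value)
{
    if (p->kregbase && (p->flags & DEV_PRESENT))
        p->kregbase[regno] = value;
}

int main(void)
{
    uint64_t regs[8] = { 0 };
    struct port p = { .kregbase = regs, .flags = DEV_PRESENT };

    write_kreg_port(&p, 3, 0xdeadbeefULL);
    printf("reg3 = 0x%llx\n", (unsigned long long)read_kreg_port(&p, 3));

    p.flags = 0;   /* device "went away": reads now return the dummy value */
    printf("reg3 = 0x%llx\n", (unsigned long long)read_kreg_port(&p, 3));
    return 0;
}
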
qib_driver.c
96 struct qib_pportdata *ppd; in qib_count_active_units() local
105 ppd = dd->pport + pidx; in qib_count_active_units()
106 if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT | in qib_count_active_units()
128 struct qib_pportdata *ppd; in qib_count_units() local
137 ppd = dd->pport + pidx; in qib_count_units()
138 if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT | in qib_count_units()
166 int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs) in qib_wait_linkstate() argument
171 spin_lock_irqsave(&ppd->lflags_lock, flags); in qib_wait_linkstate()
172 if (ppd->state_wanted) { in qib_wait_linkstate()
173 spin_unlock_irqrestore(&ppd->lflags_lock, flags); in qib_wait_linkstate()
[all …]
qib_qsfp.c
47 static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len) in qsfp_read() argument
49 struct qib_devdata *dd = ppd->dd; in qsfp_read()
73 if (ppd->hw_pidx) { in qsfp_read()
89 qib_dev_porterr(dd, ppd->port, in qsfp_read()
138 qib_dev_porterr(dd, ppd->port, "QSFP failed even retrying\n"); in qsfp_read()
140 qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass); in qsfp_read()
156 static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp, in qib_qsfp_write() argument
159 struct qib_devdata *dd = ppd->dd; in qib_qsfp_write()
182 if (ppd->hw_pidx) { in qib_qsfp_write()
197 qib_dev_porterr(dd, ppd->port, in qib_qsfp_write()
[all …]
qib_iba7220.c
51 static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op);
750 static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd) in qib_disarm_7220_senderrbufs() argument
753 struct qib_devdata *dd = ppd->dd; in qib_disarm_7220_senderrbufs()
778 static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op) in qib_7220_sdma_sendctrl() argument
780 struct qib_devdata *dd = ppd->dd; in qib_7220_sdma_sendctrl()
810 static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd, in qib_decode_7220_sdma_errs() argument
856 static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd) in qib_7220_sdma_hw_clean_up() argument
859 sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH | in qib_7220_sdma_hw_clean_up()
861 ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */ in qib_7220_sdma_hw_clean_up()
864 static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd) in qib_sdma_7220_setlengen() argument
[all …]
qib_mad.c
317 struct qib_pportdata *ppd = dd->pport + pidx; in subn_get_guidinfo() local
318 struct qib_ibport *ibp = &ppd->ibport_data; in subn_get_guidinfo()
319 __be64 g = ppd->guid; in subn_get_guidinfo()
337 static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w) in set_link_width_enabled() argument
339 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w); in set_link_width_enabled()
342 static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s) in set_link_speed_enabled() argument
344 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s); in set_link_speed_enabled()
347 static int get_overrunthreshold(struct qib_pportdata *ppd) in get_overrunthreshold() argument
349 return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH); in get_overrunthreshold()
359 static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n) in set_overrunthreshold() argument
[all …]
qib_tx.c
107 dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i)); in qib_disarm_piobufs_ifneeded()
116 struct qib_pportdata *ppd; in is_sdma_buf() local
120 ppd = dd->pport + pidx; in is_sdma_buf()
121 if (i >= ppd->sdma_state.first_sendbuf && in is_sdma_buf()
122 i < ppd->sdma_state.last_sendbuf) in is_sdma_buf()
123 return ppd; in is_sdma_buf()
174 struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS]; in qib_disarm_piobufs_set() local
189 ppd = is_sdma_buf(dd, i); in qib_disarm_piobufs_set()
190 if (ppd) { in qib_disarm_piobufs_set()
191 pppd[ppd->port] = ppd; in qib_disarm_piobufs_set()
[all …]
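
is_sdma_buf() above maps a send-buffer index back to the port whose SDMA engine owns it by testing a half-open [first_sendbuf, last_sendbuf) range for each port. A small sketch of the same lookup with invented types, returning a port index instead of a ppd pointer:

#include <stdio.h>

#define MAX_PORTS 2   /* stand-in for QIB_MAX_IB_PORTS */

/* Each port owns a contiguous, half-open range of send-buffer indices. */
struct port_bufs {
    unsigned first_sendbuf;
    unsigned last_sendbuf;    /* one past the last owned buffer */
};

/* Return the index of the port owning buffer i, or -1 if none does;
 * the same shape as is_sdma_buf() returning ppd or NULL. */
static int owning_port(const struct port_bufs *ports, unsigned nports,
                       unsigned i)
{
    for (unsigned pidx = 0; pidx < nports; pidx++) {
        if (i >= ports[pidx].first_sendbuf && i < ports[pidx].last_sendbuf)
            return (int)pidx;
    }
    return -1;
}

int main(void)
{
    struct port_bufs ports[MAX_PORTS] = {
        { .first_sendbuf = 0,  .last_sendbuf = 16 },
        { .first_sendbuf = 16, .last_sendbuf = 32 },
    };

    printf("buf 5  -> port %d\n", owning_port(ports, MAX_PORTS, 5));
    printf("buf 20 -> port %d\n", owning_port(ports, MAX_PORTS, 20));
    printf("buf 40 -> port %d\n", owning_port(ports, MAX_PORTS, 40));
    return 0;
}
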
qib_user_sdma.c
564 static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd, in qib_user_sdma_queue_clean() argument
567 struct qib_devdata *dd = ppd->dd; in qib_user_sdma_queue_clean()
576 s64 descd = ppd->sdma_descq_removed - pkt->added; in qib_user_sdma_queue_clean()
612 static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd) in qib_user_sdma_hwqueue_clean() argument
617 spin_lock_irqsave(&ppd->sdma_lock, flags); in qib_user_sdma_hwqueue_clean()
618 ret = qib_sdma_make_progress(ppd); in qib_user_sdma_hwqueue_clean()
619 spin_unlock_irqrestore(&ppd->sdma_lock, flags); in qib_user_sdma_hwqueue_clean()
625 void qib_user_sdma_queue_drain(struct qib_pportdata *ppd, in qib_user_sdma_queue_drain() argument
628 struct qib_devdata *dd = ppd->dd; in qib_user_sdma_queue_drain()
640 qib_user_sdma_hwqueue_clean(ppd); in qib_user_sdma_queue_drain()
[all …]
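
qib_user_sdma_queue_clean() above decides whether a queued packet has completed by comparing two counters that only ever grow: the position recorded when the packet was added (pkt->added) and the running count of descriptors the hardware has retired (sdma_descq_removed). A sketch of that signed comparison, assuming the counters behave as the snippet suggests:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the counters in the snippet: both only grow,
 * so a signed difference stays meaningful even after the underlying
 * ring index has wrapped many times. */
struct sdma_counters {
    uint64_t descq_added;     /* descriptors handed to hardware so far */
    uint64_t descq_removed;   /* descriptors hardware has retired so far */
};

/* A packet remembers the value of descq_added when it was queued. */
struct user_pkt {
    uint64_t added;
};

/* The packet is done once the removed counter has reached (or passed)
 * the point where the packet's last descriptor was added. */
static int pkt_completed(const struct sdma_counters *c,
                         const struct user_pkt *pkt)
{
    int64_t descd = (int64_t)(c->descq_removed - pkt->added);
    return descd >= 0;
}

int main(void)
{
    struct sdma_counters c = { .descq_added = 100, .descq_removed = 97 };
    struct user_pkt pkt = { .added = 100 };

    printf("completed: %d\n", pkt_completed(&c, &pkt));   /* 0: in flight */
    c.descq_removed = 100;
    printf("completed: %d\n", pkt_completed(&c, &pkt));   /* 1: retired */
    return 0;
}
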
qib_sysfs.c
76 static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf) in show_hrtbt_enb() argument
78 struct qib_devdata *dd = ppd->dd; in show_hrtbt_enb()
81 ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT); in show_hrtbt_enb()
86 static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf, in store_hrtbt_enb() argument
89 struct qib_devdata *dd = ppd->dd; in store_hrtbt_enb()
103 ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val); in store_hrtbt_enb()
109 static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf, in store_loopback() argument
112 struct qib_devdata *dd = ppd->dd; in store_loopback()
115 r = dd->f_set_ib_loopback(ppd, buf); in store_loopback()
122 static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf, in store_led_override() argument
[all …]
qib_init.c
129 struct qib_pportdata *ppd; in qib_create_ctxts() local
135 ppd = dd->pport + (i % dd->num_pports); in qib_create_ctxts()
136 rcd = qib_create_ctxtdata(ppd, i); in qib_create_ctxts()
154 struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt) in qib_create_ctxtdata() argument
156 struct qib_devdata *dd = ppd->dd; in qib_create_ctxtdata()
162 rcd->ppd = ppd; in qib_create_ctxtdata()
197 void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, in qib_init_pportdata() argument
200 ppd->dd = dd; in qib_init_pportdata()
201 ppd->hw_pidx = hw_pidx; in qib_init_pportdata()
202 ppd->port = port; /* IB port number, not index */ in qib_init_pportdata()
[all …]
qib_iba6120.c
48 static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op);
963 static void qib_disarm_6120_senderrbufs(struct qib_pportdata *ppd) in qib_disarm_6120_senderrbufs() argument
966 struct qib_devdata *dd = ppd->dd; in qib_disarm_6120_senderrbufs()
1004 struct qib_pportdata *ppd = dd->pport; in handle_6120_errors() local
1025 qib_disarm_6120_senderrbufs(ppd); in handle_6120_errors()
1027 !(ppd->lflags & QIBL_LINKACTIVE)) { in handle_6120_errors()
1038 !(ppd->lflags & QIBL_LINKACTIVE)) { in handle_6120_errors()
1088 qib_handle_e_ibstatuschanged(ppd, ibcs); in handle_6120_errors()
1101 qib_dev_porterr(dd, ppd->port, "%s error\n", msg); in handle_6120_errors()
1103 if (ppd->state_wanted & ppd->lflags) in handle_6120_errors()
[all …]
qib_verbs.c
553 struct qib_ibport *ibp = &rcd->ppd->ibport_data; in qib_qp_rcv()
601 struct qib_pportdata *ppd = rcd->ppd; in qib_ib_rcv() local
602 struct qib_ibport *ibp = &ppd->ibport_data; in qib_ib_rcv()
618 lid &= ~((1 << ppd->lmc) - 1); in qib_ib_rcv()
619 if (unlikely(lid != ppd->lid)) in qib_ib_rcv()
1023 void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail) in qib_verbs_sdma_desc_avail() argument
1031 dev = &ppd->dd->verbs_dev; in qib_verbs_sdma_desc_avail()
1036 if (qp->port_num != ppd->port) in qib_verbs_sdma_desc_avail()
1130 struct qib_pportdata *ppd = ppd_from_ibp(ibp); in qib_verbs_send_dma() local
1141 ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx); in qib_verbs_send_dma()
[all …]
qib_file_ops.c
96 struct qib_pportdata *ppd = rcd->ppd; in qib_get_base_info() local
152 kinfo->spi_port = ppd->port; in qib_get_base_info()
182 (char *) ppd->statusp - in qib_get_base_info()
232 kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */ in qib_get_base_info()
559 struct qib_pportdata *ppd = rcd->ppd; in qib_set_part_key() local
595 for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { in qib_set_part_key()
596 if (!ppd->pkeys[i]) { in qib_set_part_key()
600 if (ppd->pkeys[i] == key) { in qib_set_part_key()
601 atomic_t *pkrefs = &ppd->pkeyrefs[i]; in qib_set_part_key()
615 if ((ppd->pkeys[i] & 0x7FFF) == lkey) { in qib_set_part_key()
[all …]
qib_ud.c
52 struct qib_pportdata *ppd; in qib_ud_loopback() local
80 ppd = ppd_from_ibp(ibp); in qib_ud_loopback()
90 lid = ppd->lid | (ah_attr->src_path_bits & in qib_ud_loopback()
91 ((1 << ppd->lmc) - 1)); in qib_ud_loopback()
114 lid = ppd->lid | (ah_attr->src_path_bits & in qib_ud_loopback()
115 ((1 << ppd->lmc) - 1)); in qib_ud_loopback()
218 wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1)); in qib_ud_loopback()
220 wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1); in qib_ud_loopback()
243 struct qib_pportdata *ppd; in qib_make_ud_req() local
283 ppd = ppd_from_ibp(ibp); in qib_make_ud_req()
[all …]
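
Several lines above combine the port's base LID with the low bits of the source path bits, masked by the LID Mask Control value (lmc): a port with LMC = n answers to 2^n consecutive LIDs, and only the low n path bits are significant. A worked example of that masking:

#include <stdint.h>
#include <stdio.h>

/* With LMC = lmc, a port owns 2^lmc consecutive LIDs starting at its
 * base LID; only the low lmc bits of the path bits select which one. */
static uint16_t lid_with_path_bits(uint16_t base_lid, unsigned lmc,
                                   uint16_t path_bits)
{
    uint16_t mask = (uint16_t)((1u << lmc) - 1);
    return (uint16_t)(base_lid | (path_bits & mask));
}

int main(void)
{
    uint16_t base = 0x20;   /* example base LID (low lmc bits zero) */
    unsigned lmc = 2;       /* port answers to 4 LIDs: 0x20..0x23 */

    /* Path bits above the mask (4..7) fold back onto the same 4 LIDs. */
    for (uint16_t pb = 0; pb < 8; pb++)
        printf("path_bits %u -> slid 0x%x\n", (unsigned)pb,
               (unsigned)lid_with_path_bits(base, lmc, pb));
    return 0;
}
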
qib_qsfp.h
177 struct qib_pportdata *ppd; member
184 extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd,
186 extern int qib_qsfp_mod_present(struct qib_pportdata *ppd);
qib_user_sdma.h
45 int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
48 void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
qib.h
207 struct qib_pportdata *ppd; member
1141 static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd) in dd_from_ppd() argument
1143 return ppd->dd; in dd_from_ppd()
1251 void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val);
1264 static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd) in qib_sdma_descq_freecnt() argument
1266 return ppd->sdma_descq_cnt - in qib_sdma_descq_freecnt()
1267 (ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1; in qib_sdma_descq_freecnt()
1270 static inline int __qib_sdma_running(struct qib_pportdata *ppd) in __qib_sdma_running() argument
1272 return ppd->sdma_state.current_state == qib_sdma_state_s99_running; in __qib_sdma_running()
1354 extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
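
qib_sdma_descq_freecnt() above reports how many descriptor slots remain: the ring size minus the descriptors currently outstanding (added but not yet removed), minus one slot kept empty so a full ring can be told apart from an empty one. The same arithmetic in isolation:

#include <stdint.h>
#include <stdio.h>

/* Free slots in a ring tracked by two ever-increasing counters; one
 * slot is deliberately left unused to distinguish full from empty. */
static uint16_t descq_freecnt(uint16_t descq_cnt,
                              uint64_t added, uint64_t removed)
{
    return (uint16_t)(descq_cnt - (added - removed) - 1);
}

int main(void)
{
    uint16_t cnt = 256;

    printf("empty ring  : %u free\n", (unsigned)descq_freecnt(cnt, 0, 0));      /* 255 */
    printf("10 in flight: %u free\n", (unsigned)descq_freecnt(cnt, 110, 100));  /* 245 */
    printf("full ring   : %u free\n", (unsigned)descq_freecnt(cnt, 355, 100));  /* 0   */
    return 0;
}
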
qib_diag.c
550 struct qib_pportdata *ppd; in qib_diagpkt_write() local
588 ppd = &dd->pport[dp.port - 1]; in qib_diagpkt_write()
595 if ((plen + 4) > ppd->ibmaxlen) { in qib_diagpkt_write()
619 piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn); in qib_diagpkt_write()
qib_ruc.c
247 struct qib_pportdata *ppd = ppd_from_ibp(ibp); in get_sguid() local
249 return ppd->guid; in get_sguid()
724 struct qib_pportdata *ppd = ppd_from_ibp(ibp); in qib_do_send() local
730 (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) { in qib_do_send()
qib_sd7220.c
123 static int qib_ibsd_ucode_loaded(struct qib_pportdata *ppd, in qib_ibsd_ucode_loaded() argument
126 struct qib_devdata *dd = ppd->dd; in qib_ibsd_ucode_loaded()
1390 struct qib_pportdata *ppd = dd->pport; in qib_run_relock() local
1400 if ((dd->flags & QIB_INITTED) && !(ppd->lflags & in qib_run_relock()
1404 if (!(ppd->lflags & QIBL_IB_LINK_DISABLED)) in qib_run_relock()
qib_rc.c
652 struct qib_pportdata *ppd = ppd_from_ibp(ibp); in qib_send_rc_ack() local
699 hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits); in qib_send_rc_ack()
707 if (!(ppd->lflags & QIBL_LINKACTIVE)) in qib_send_rc_ack()
710 control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC, in qib_send_rc_ack()
715 piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn); in qib_send_rc_ack()
1406 struct qib_pportdata *ppd = ppd_from_ibp(ibp); in qib_rc_rcv_resp() local
1428 spin_lock_irqsave(&ppd->sdma_lock, flags); in qib_rc_rcv_resp()
1430 qib_sdma_make_progress(ppd); in qib_rc_rcv_resp()
1432 spin_unlock_irqrestore(&ppd->sdma_lock, flags); in qib_rc_rcv_resp()
1865 struct qib_ibport *ibp = &rcd->ppd->ibport_data; in qib_rc_rcv()
qib_verbs.h
856 int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
860 int qib_get_counters(struct qib_pportdata *ppd,
897 void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);
/linux-3.4.99/fs/exportfs/
expfs.c
143 struct dentry *ppd = ERR_PTR(-EACCES); in reconnect_path() local
148 ppd = mnt->mnt_sb->s_export_op->get_parent(pd); in reconnect_path()
151 if (IS_ERR(ppd)) { in reconnect_path()
152 err = PTR_ERR(ppd); in reconnect_path()
160 pd->d_inode->i_ino, ppd->d_inode->i_ino); in reconnect_path()
161 err = exportfs_get_name(mnt, ppd, nbuf, pd); in reconnect_path()
163 dput(ppd); in reconnect_path()
173 mutex_lock(&ppd->d_inode->i_mutex); in reconnect_path()
174 npd = lookup_one_len(nbuf, ppd, strlen(nbuf)); in reconnect_path()
175 mutex_unlock(&ppd->d_inode->i_mutex); in reconnect_path()
[all …]
/linux-3.4.99/net/packet/
af_packet.c
954 struct tpacket3_hdr *ppd) in prb_fill_rxhash() argument
956 ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb); in prb_fill_rxhash()
960 struct tpacket3_hdr *ppd) in prb_clear_rxhash() argument
962 ppd->hv1.tp_rxhash = 0; in prb_clear_rxhash()
966 struct tpacket3_hdr *ppd) in prb_fill_vlan_info() argument
969 ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb); in prb_fill_vlan_info()
970 ppd->tp_status = TP_STATUS_VLAN_VALID; in prb_fill_vlan_info()
972 ppd->hv1.tp_vlan_tci = ppd->tp_status = 0; in prb_fill_vlan_info()
977 struct tpacket3_hdr *ppd) in prb_run_all_ft_ops() argument
979 prb_fill_vlan_info(pkc, ppd); in prb_run_all_ft_ops()
[all …]
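
In af_packet.c, ppd is unrelated to the InfiniBand port data: it points at a per-packet TPACKET_V3 header (struct tpacket3_hdr). prb_fill_vlan_info() above records the VLAN TCI and marks it valid when the frame carried a tag, and zeroes both fields otherwise. A sketch of that fill with stand-in types (the real TP_STATUS_VLAN_VALID flag and tpacket3_hdr layout live in the packet-socket UAPI headers):

#include <stdint.h>
#include <stdio.h>

#define TP_STATUS_VLAN_VALID (1 << 4)   /* stand-in for the UAPI flag */

/* Minimal stand-in for the parts of struct tpacket3_hdr touched here. */
struct pkt_hdr {
    uint32_t tp_status;
    struct {
        uint16_t tp_vlan_tci;
    } hv1;
};

/* Mirror of the shape of prb_fill_vlan_info(): record the VLAN TCI and
 * mark it valid when the frame carried a tag, otherwise zero both fields
 * so a consumer cannot mistake stale data for a real tag. */
static void fill_vlan_info(struct pkt_hdr *hdr, int has_vlan, uint16_t tci)
{
    if (has_vlan) {
        hdr->hv1.tp_vlan_tci = tci;
        hdr->tp_status = TP_STATUS_VLAN_VALID;
    } else {
        hdr->hv1.tp_vlan_tci = 0;
        hdr->tp_status = 0;
    }
}

int main(void)
{
    struct pkt_hdr hdr;

    fill_vlan_info(&hdr, 1, 100);
    printf("tagged:   status=0x%x tci=%u\n", hdr.tp_status,
           (unsigned)hdr.hv1.tp_vlan_tci);

    fill_vlan_info(&hdr, 0, 0);
    printf("untagged: status=0x%x tci=%u\n", hdr.tp_status,
           (unsigned)hdr.hv1.tp_vlan_tci);
    return 0;
}
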