/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"

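/*
 * Turn the SMP into a GetResp() in place: the method becomes GET_RESP
 * and, for a directed-route SMP, the direction bit of the status field
 * is set so the response travels back along the return path (the MAD
 * layer handles the actual directed-route field updates).
 */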
static int reply(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	int ret;
	unsigned long flags;
	unsigned long timeout;

	agent = ibp->send_agent;
	if (!agent)
		return;

	/* o14-3.2.1 */
	if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
		return;

	/* o14-2 */
	if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
		return;

	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC);
	if (IS_ERR(send_buf))
		return;

	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_TRAP;
	ibp->tid++;
	smp->tid = cpu_to_be64(ibp->tid);
	smp->attr_id = IB_SMP_ATTR_NOTICE;
	/* o14-1: smp->mkey = 0; */
	memcpy(smp->data, data, len);

	spin_lock_irqsave(&ibp->lock, flags);
	if (!ibp->sm_ah) {
		if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
			struct ib_ah *ah;
			struct ib_ah_attr attr;

			memset(&attr, 0, sizeof attr);
			attr.dlid = ibp->sm_lid;
			attr.port_num = ppd_from_ibp(ibp)->port;
			ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
			if (IS_ERR(ah))
				ret = -EINVAL;
			else {
				send_buf->ah = ah;
				ibp->sm_ah = to_iah(ah);
				ret = 0;
			}
		} else
			ret = -EINVAL;
	} else {
		send_buf->ah = &ibp->sm_ah->ibah;
		ret = 0;
	}
	spin_unlock_irqrestore(&ibp->lock, flags);

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (!ret) {
		/*
		 * o14-2: wait at least 4.096 usec * 2^subnet_timeout
		 * before resending this trap (computed in usec here).
		 */
		timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
		ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
	} else {
		ib_free_send_mad(send_buf);
		ibp->trap_timeout = 0;
	}
}

/*
 * Send a bad [PQ]_Key trap (ch. 14.3.8).
 */
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
{
	struct ib_mad_notice_attr data;

	if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
		ibp->pkey_violations++;
	else
		ibp->qkey_violations++;
	ibp->n_pkt_drops++;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = trap_num;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_257_258.lid1 = lid1;
	data.details.ntc_257_258.lid2 = lid2;
	data.details.ntc_257_258.key = cpu_to_be32(key);
	data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
	data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);

	qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
{
	struct ib_mad_notice_attr data;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_256.lid = data.issuer_lid;
	data.details.ntc_256.method = smp->method;
	data.details.ntc_256.attr_id = smp->attr_id;
	data.details.ntc_256.attr_mod = smp->attr_mod;
	data.details.ntc_256.mkey = smp->mkey;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		u8 hop_cnt;

		data.details.ntc_256.dr_slid = smp->dr_slid;
		data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
		hop_cnt = smp->hop_cnt;
		if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
			data.details.ntc_256.dr_trunc_hop |=
				IB_NOTICE_TRAP_DR_TRUNC;
			hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
		}
		data.details.ntc_256.dr_trunc_hop |= hop_cnt;
		memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
		       hop_cnt);
	}

	qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void qib_cap_mask_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);

	qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void qib_sys_guid_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_145.lid = data.issuer_lid;
	data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;

	qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void qib_node_desc_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.local_changes = 1;
	data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;

	qib_send_trap(ibp, &data, sizeof data);
}

static int subn_get_nodedescription(struct ib_smp *smp,
				    struct ib_device *ibdev)
{
	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

	return reply(smp);
}

static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 vendor, majrev, minrev;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* GUID 0 is illegal */
	if (smp->attr_mod || pidx >= dd->num_pports ||
	    dd->pport[pidx].guid == 0)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		nip->port_guid = dd->pport[pidx].guid;

	nip->base_version = 1;
	nip->class_version = 1;
	nip->node_type = 1;     /* channel adapter */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = ib_qib_sys_image_guid;
	nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
	nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->deviceid);
	majrev = dd->majrev;
	minrev = dd->minrev;
	nip->revision = cpu_to_be32((majrev << 16) | minrev);
	nip->local_port_num = port;
	vendor = dd->vendorid;
	nip->vendor_id[0] = QIB_SRC_OUI_1;
	nip->vendor_id[1] = QIB_SRC_OUI_2;
	nip->vendor_id[2] = QIB_SRC_OUI_3;

	return reply(smp);
}

static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block */

	memset(smp->data, 0, sizeof(smp->data));

	if (startgx == 0 && pidx < dd->num_pports) {
		struct qib_pportdata *ppd = dd->pport + pidx;
		struct qib_ibport *ibp = &ppd->ibport_data;
		__be64 g = ppd->guid;
		unsigned i;

		/* GUID 0 is illegal */
		if (g == 0)
			smp->status |= IB_SMP_INVALID_FIELD;
		else {
			/* The first is a copy of the read-only HW GUID. */
			p[0] = g;
			for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
				p[i] = ibp->guids[i - 1];
		}
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
}

static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
}

static int get_overrunthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
}

/**
 * set_overrunthreshold - set the overrun threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
					 (u32)n);
	return 0;
}

static int get_phyerrthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
}

/**
 * set_phyerrthreshold - set the physical error threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
					 (u32)n);
	return 0;
}

/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @ppd: the physical port data
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
		IB_LINKINITCMD_SLEEP;
}

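/*
 * Note on M_Key protection (PortInfo:M_KeyProtectBits): at protection
 * levels 0 and 1 a SubnGet() is answered even when the M_Key does not
 * match (level 1 only hides the M_Key value in the PortInfo response,
 * see subn_get_portinfo()).  At levels 2 and 3 a mismatched M_Key also
 * rejects SubnGet().  SubnSet() and SubnTrapRepress() always require a
 * matching M_Key once one is configured; a violation starts the lease
 * timer and raises the Bad M_Key trap via qib_bad_mkey().
 */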
static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
{
	int ret = 0;

	/* Is the mkey in the process of expiring? */
	if (ibp->mkey_lease_timeout &&
	    time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		ibp->mkey_lease_timeout = 0;
		ibp->mkeyprot = 0;
	}

	/* M_Key checking depends on Portinfo:M_Key_protect_bits */
	if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 &&
	    ibp->mkey != smp->mkey &&
	    (smp->method == IB_MGMT_METHOD_SET ||
	     smp->method == IB_MGMT_METHOD_TRAP_REPRESS ||
	     (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) {
		if (ibp->mkey_violations != 0xFFFF)
			++ibp->mkey_violations;
		if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
			ibp->mkey_lease_timeout = jiffies +
				ibp->mkey_lease_period * HZ;
		/* Generate a trap notice. */
		qib_bad_mkey(ibp, smp);
		ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	} else if (ibp->mkey_lease_timeout)
		ibp->mkey_lease_timeout = 0;

	return ret;
}

static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	u16 lid;
	u8 mtu;
	int ret;
	u32 state;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt) {
			smp->status |= IB_SMP_INVALID_FIELD;
			ret = reply(smp);
			goto bail;
		}
		if (port_num != port) {
			ibp = to_iport(ibdev, port_num);
			ret = check_mkey(ibp, smp, 0);
			if (ret)
				goto bail;
		}
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;

	/* Clear all fields.  Only set the non-zero fields. */
	memset(smp->data, 0, sizeof(smp->data));

	/* Only return the mkey if the protection field allows it. */
	if (!(smp->method == IB_MGMT_METHOD_GET &&
	      ibp->mkey != smp->mkey &&
	      ibp->mkeyprot == 1))
		pip->mkey = ibp->mkey;
	pip->gid_prefix = ibp->gid_prefix;
	lid = ppd->lid;
	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
	pip->sm_lid = cpu_to_be16(ibp->sm_lid);
	pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
	/* pip->diag_code; */
	pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
	pip->local_port_num = port;
	pip->link_width_enabled = ppd->link_width_enabled;
	pip->link_width_supported = ppd->link_width_supported;
	pip->link_width_active = ppd->link_width_active;
	state = dd->f_iblink_state(ppd->lastibcstat);
	pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;

	pip->portphysstate_linkdown =
		(dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
		(get_linkdowndefaultstate(ppd) ? 1 : 2);
	pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
	pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
		ppd->link_speed_enabled;
	switch (ppd->ibmtu) {
	default: /* something is wrong; fall through */
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	}
	pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
	pip->vlcap_inittype = ppd->vls_supported << 4;  /* InitType = 0 */
	pip->vl_high_limit = ibp->vl_high_limit;
	pip->vl_arb_high_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
	pip->vl_arb_low_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
	/* InitTypeReply = 0 */
	pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	/* HCAs ignore VLStallCount and HOQLife */
	/* pip->vlstallcnt_hoqlife; */
	pip->operationalvl_pei_peo_fpi_fpo =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
	pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
	pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
	/* Only the hardware GUID is supported for now */
	pip->guid_cap = QIB_GUIDS_PER_PORT;
	pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
	/* 32.768 usec. response time (guessing) */
	pip->resv_resptimevalue = 3;
	pip->localphyerrors_overrunerrors =
		(get_phyerrthreshold(ppd) << 4) |
		get_overrunthreshold(ppd);
	/* pip->max_credit_hint; */
	if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
		u32 v;

		v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
		pip->link_roundtrip_latency[0] = v >> 16;
		pip->link_roundtrip_latency[1] = v >> 8;
		pip->link_roundtrip_latency[2] = v;
	}

	ret = reply(smp);

bail:
	return ret;
}

/**
 * get_pkeys - return the PKEY table
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd = dd->pport + port - 1;
	/*
	 * always a kernel context, no locking needed.
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];

	memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));

	return 0;
}

static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	u16 *p = (u16 *) smp->data;
	__be16 *q = (__be16 *) smp->data;

	/* 64 blocks of 32 16-bit P_Key entries */

	memset(smp->data, 0, sizeof(smp->data));
	if (startpx == 0) {
		struct qib_devdata *dd = dd_from_ibdev(ibdev);
		unsigned i, n = qib_get_npkeys(dd);

		get_pkeys(dd, port, p);

		for (i = 0; i < n; i++)
			q[i] = cpu_to_be16(p[i]);
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block */

	if (startgx == 0 && pidx < dd->num_pports) {
		struct qib_pportdata *ppd = dd->pport + pidx;
		struct qib_ibport *ibp = &ppd->ibport_data;
		unsigned i;

		/* The first entry is read-only. */
		for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
			ibp->guids[i - 1] = p[i];
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	/* The only GUID we support is the first read-only entry. */
	return subn_get_guidinfo(smp, ibdev, port);
}

/**
 * subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 */
static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	struct ib_event event;
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	char clientrereg = 0;
	unsigned long flags;
	u16 lid, smlid;
	u8 lwe;
	u8 lse;
	u8 state;
	u8 vls;
	u8 msl;
	u16 lstate;
	int ret, ore, mtu;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt)
			goto err;
		/* Port attributes can only be set on the receiving port */
		if (port_num != port)
			goto get_only;
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;
	event.device = ibdev;
	event.element.port_num = port;

	ibp->mkey = pip->mkey;
	ibp->gid_prefix = pip->gid_prefix;
	ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

	lid = be16_to_cpu(pip->lid);
	/* Must be a valid unicast LID address. */
	if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
		smp->status |= IB_SMP_INVALID_FIELD;
	else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
		if (ppd->lid != lid)
			qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
		if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
			qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
		qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	smlid = be16_to_cpu(pip->sm_lid);
	msl = pip->neighbormtu_mastersmsl & 0xF;
	/* Must be a valid unicast LID address. */
	if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
		smp->status |= IB_SMP_INVALID_FIELD;
	else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
		spin_lock_irqsave(&ibp->lock, flags);
		if (ibp->sm_ah) {
			if (smlid != ibp->sm_lid)
				ibp->sm_ah->attr.dlid = smlid;
			if (msl != ibp->sm_sl)
				ibp->sm_ah->attr.sl = msl;
		}
		spin_unlock_irqrestore(&ibp->lock, flags);
		if (smlid != ibp->sm_lid)
			ibp->sm_lid = smlid;
		if (msl != ibp->sm_sl)
			ibp->sm_sl = msl;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	/* Allow 1x or 4x to be set (see 14.2.6.6). */
	lwe = pip->link_width_enabled;
	if (lwe) {
		if (lwe == 0xFF)
			set_link_width_enabled(ppd, ppd->link_width_supported);
		else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
			smp->status |= IB_SMP_INVALID_FIELD;
		else if (lwe != ppd->link_width_enabled)
			set_link_width_enabled(ppd, lwe);
	}

	lse = pip->linkspeedactive_enabled & 0xF;
	if (lse) {
		/*
		 * The IB 1.2 spec. only allows link speed values
		 * 1, 3, 5, 7, 15.  1.2.1 extended to allow specific
		 * speeds.
		 */
		if (lse == 15)
			set_link_speed_enabled(ppd,
					       ppd->link_speed_supported);
		else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
			smp->status |= IB_SMP_INVALID_FIELD;
		else if (lse != ppd->link_speed_enabled)
			set_link_speed_enabled(ppd, lse);
	}

	/* Set link down default state. */
	switch (pip->portphysstate_linkdown & 0xF) {
	case 0: /* NOP */
		break;
	case 1: /* SLEEP */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_SLEEP);
		break;
	case 2: /* POLL */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_POLL);
		break;
	default:
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
	ibp->vl_high_limit = pip->vl_high_limit;
	(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
				    ibp->vl_high_limit);

	mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
	if (mtu == -1)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		qib_set_mtu(ppd, mtu);

	/* Set operational VLs */
	vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
	if (vls) {
		if (vls > ppd->vls_supported)
			smp->status |= IB_SMP_INVALID_FIELD;
		else
			(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
	}

	if (pip->mkey_violations == 0)
		ibp->mkey_violations = 0;

	if (pip->pkey_violations == 0)
		ibp->pkey_violations = 0;

	if (pip->qkey_violations == 0)
		ibp->qkey_violations = 0;

	ore = pip->localphyerrors_overrunerrors;
	if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
		smp->status |= IB_SMP_INVALID_FIELD;

	if (set_overrunthreshold(ppd, (ore & 0xF)))
		smp->status |= IB_SMP_INVALID_FIELD;

	ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

	if (pip->clientrereg_resv_subnetto & 0x80) {
		clientrereg = 1;
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */
	state = pip->linkspeed_portstate & 0xF;
	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
		smp->status |= IB_SMP_INVALID_FIELD;

	/*
	 * Only state changes of DOWN, ARM, and ACTIVE are valid
	 * and must be in the correct state to take effect (see 7.2.6).
	 */
	switch (state) {
	case IB_PORT_NOP:
		if (lstate == 0)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (lstate == 0)
			lstate = QIB_IB_LINKDOWN_ONLY;
		else if (lstate == 1)
			lstate = QIB_IB_LINKDOWN_SLEEP;
		else if (lstate == 2)
			lstate = QIB_IB_LINKDOWN;
		else if (lstate == 3)
			lstate = QIB_IB_LINKDOWN_DISABLE;
		else {
			smp->status |= IB_SMP_INVALID_FIELD;
			break;
		}
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		qib_set_linkstate(ppd, lstate);
		/*
		 * Don't send a reply if the response would be sent
		 * through the disabled port.
		 */
		if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
			goto done;
		}
		qib_wait_linkstate(ppd, QIBL_LINKV, 10);
		break;
	case IB_PORT_ARMED:
		qib_set_linkstate(ppd, QIB_IB_LINKARM);
		break;
	case IB_PORT_ACTIVE:
		qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
		break;
	default:
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	ret = subn_get_portinfo(smp, ibdev, port);

	if (clientrereg)
		pip->clientrereg_resv_subnetto |= 0x80;

	goto done;

err:
	smp->status |= IB_SMP_INVALID_FIELD;
get_only:
	ret = subn_get_portinfo(smp, ibdev, port);
done:
	return ret;
}

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @ppd: the qlogic_ib port data
 * @key: the PKEY
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (ppd->pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
			ppd->pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		break;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @ppd: the qlogic_ib port data
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

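	/*
	 * Bit 15 of a P_Key is the membership bit (full vs. limited);
	 * only the low 15 bits identify the partition, which is why the
	 * comparisons below mask with 0x7FFF.  The default P_Key
	 * (0x7FFF/0xFFFF) is always present, so adding it is a no-op.
	 */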
	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (ppd->pkeys[i] == key) {
			if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&ppd->pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i] &&
		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
			/* for qibstats, etc. */
			ppd->pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}

/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd;
	struct qib_ctxtdata *rcd;
	int i;
	int changed = 0;

	/*
	 * IB port one/two always maps to context zero/one,
	 * always a kernel context, no locking needed
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	ppd = dd->pport + (port - 1);
	rcd = dd->rcd[ppd->hw_pidx];

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = rcd->pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(ppd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(ppd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		rcd->pkeys[i] = key;
	}
	if (changed) {
		struct ib_event event;

		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);

		event.event = IB_EVENT_PKEY_CHANGE;
		event.device = &dd->verbs_dev.ibdev;
		event.element.port_num = 1;
		ib_dispatch_event(&event);
	}
	return 0;
}

static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	__be16 *p = (__be16 *) smp->data;
	u16 *q = (u16 *) smp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	unsigned i, n = qib_get_npkeys(dd);

	for (i = 0; i < n; i++)
		q[i] = be16_to_cpu(p[i]);

	if (startpx != 0 || set_pkeys(dd, port, q) != 0)
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_pkeytable(smp, ibdev, port);
}

static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	memset(smp->data, 0, sizeof(smp->data));

	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
		smp->status |= IB_SMP_UNSUP_METHOD;
	else
		for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
			*p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];

	return reply(smp);
}

static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
		smp->status |= IB_SMP_UNSUP_METHOD;
		return reply(smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
		ibp->sl_to_vl[i] = *p >> 4;
		ibp->sl_to_vl[i + 1] = *p & 0xF;
	}
	qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
			    _QIB_EVENT_SL2VL_CHANGE_BIT);

	return subn_get_sl_to_vl(smp, ibdev, port);
}

static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

	memset(smp->data, 0, sizeof(smp->data));

	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
						   smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
						   smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
						   smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
						   smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_vl_arb(smp, ibdev, port);
}

static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	/*
	 * For now, we only send the trap once so no need to process this.
	 * o13-6, o13-7,
	 * o14-3.a4 The SMA shall not send any message in response to a valid
	 * SubnTrapRepress() message.
	 */
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}

static int pma_get_classportinfo(struct ib_perf *pmp,
				 struct ib_device *ibdev)
{
	struct ib_pma_classportinfo *p =
		(struct ib_pma_classportinfo *)pmp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);

	memset(pmp->data, 0, sizeof(pmp->data));

	if (pmp->attr_mod != 0)
		pmp->status |= IB_SMP_INVALID_FIELD;

	/* Note that AllPortSelect is not valid */
	p->base_version = 1;
	p->class_version = 1;
	p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
	/*
	 * Set the most significant bit of CM2 to indicate support for
	 * congestion statistics
	 */
	p->reserved[0] = dd->psxmitwait_supported << 7;
	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
	 */
	p->resp_time_value = 18;

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portsamplescontrol(struct ib_perf *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->attr_mod != 0 || port_select != port) {
		pmp->status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}
	spin_lock_irqsave(&ibp->lock, flags);
	p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
	p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	p->counter_width = 4;   /* 32 bit counters */
	p->counter_mask0_9 = COUNTER_MASK0_9;
	p->sample_start = cpu_to_be32(ibp->pma_sample_start);
	p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
	p->tag = cpu_to_be16(ibp->pma_tag);
	p->counter_select[0] = ibp->pma_counter_select[0];
	p->counter_select[1] = ibp->pma_counter_select[1];
	p->counter_select[2] = ibp->pma_counter_select[2];
	p->counter_select[3] = ibp->pma_counter_select[3];
	p->counter_select[4] = ibp->pma_counter_select[4];
	spin_unlock_irqrestore(&ibp->lock, flags);

bail:
	return reply((struct ib_smp *) pmp);
}

static int pma_set_portsamplescontrol(struct ib_perf *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status, xmit_flags;
	int ret;

	if (pmp->attr_mod != 0 || p->port_select != port) {
		pmp->status |= IB_SMP_INVALID_FIELD;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

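	/*
	 * cong_stats.flags tracks which agent currently owns the PS*
	 * hardware counters: the congestion timer
	 * (IB_PMA_CONG_HW_CONTROL_TIMER) or explicit port sampling
	 * (IB_PMA_CONG_HW_CONTROL_SAMPLE).  Setting this attribute hands
	 * ownership to the sampling code; the previous flags are saved in
	 * xmit_flags so a new sample may still be started while a
	 * timer-owned sample is running.
	 */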
	spin_lock_irqsave(&ibp->lock, flags);

	/* Port Sampling code owns the PS* HW counters */
	xmit_flags = ppd->cong_stats.flags;
	ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
	status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	if (status == IB_PMA_SAMPLE_STATUS_DONE ||
	    (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
	     xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
		ibp->pma_sample_start = be32_to_cpu(p->sample_start);
		ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
		ibp->pma_tag = be16_to_cpu(p->tag);
		ibp->pma_counter_select[0] = p->counter_select[0];
		ibp->pma_counter_select[1] = p->counter_select[1];
		ibp->pma_counter_select[2] = p->counter_select[2];
		ibp->pma_counter_select[3] = p->counter_select[3];
		ibp->pma_counter_select[4] = p->counter_select[4];
		dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
				      ibp->pma_sample_start);
	}
	spin_unlock_irqrestore(&ibp->lock, flags);

	ret = pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
	return ret;
}

static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
		       __be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
		break;
	default:
		ret = 0;
	}

	return ret;
}

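/*
 * PortXmitWait accumulation: the PSXMITWAIT hardware counter only covers
 * the current sampling interval, so the running total is kept in software
 * in cong_stats.counter and the hardware value is added to it as a delta.
 * The total is folded back into cong_stats.counter each time a sample
 * completes (see pma_get_portsamplesresult()).
 */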
1270 /* This function assumes that the xmit_wait lock is already held */
xmit_wait_get_value_delta(struct qib_pportdata * ppd)1271 static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
1272 {
1273 	u32 delta;
1274 
1275 	delta = get_counter(&ppd->ibport_data, ppd,
1276 			    IB_PMA_PORT_XMIT_WAIT);
1277 	return ppd->cong_stats.counter + delta;
1278 }
1279 
cache_hw_sample_counters(struct qib_pportdata * ppd)1280 static void cache_hw_sample_counters(struct qib_pportdata *ppd)
1281 {
1282 	struct qib_ibport *ibp = &ppd->ibport_data;
1283 
1284 	ppd->cong_stats.counter_cache.psxmitdata =
1285 		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
1286 	ppd->cong_stats.counter_cache.psrcvdata =
1287 		get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
1288 	ppd->cong_stats.counter_cache.psxmitpkts =
1289 		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
1290 	ppd->cong_stats.counter_cache.psrcvpkts =
1291 		get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
1292 	ppd->cong_stats.counter_cache.psxmitwait =
1293 		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
1294 }
1295 
get_cache_hw_sample_counters(struct qib_pportdata * ppd,__be16 sel)1296 static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
1297 					__be16 sel)
1298 {
1299 	u64 ret;
1300 
1301 	switch (sel) {
1302 	case IB_PMA_PORT_XMIT_DATA:
1303 		ret = ppd->cong_stats.counter_cache.psxmitdata;
1304 		break;
1305 	case IB_PMA_PORT_RCV_DATA:
1306 		ret = ppd->cong_stats.counter_cache.psrcvdata;
1307 		break;
1308 	case IB_PMA_PORT_XMIT_PKTS:
1309 		ret = ppd->cong_stats.counter_cache.psxmitpkts;
1310 		break;
1311 	case IB_PMA_PORT_RCV_PKTS:
1312 		ret = ppd->cong_stats.counter_cache.psrcvpkts;
1313 		break;
1314 	case IB_PMA_PORT_XMIT_WAIT:
1315 		ret = ppd->cong_stats.counter_cache.psxmitwait;
1316 		break;
1317 	default:
1318 		ret = 0;
1319 	}
1320 
1321 	return ret;
1322 }
1323 
pma_get_portsamplesresult(struct ib_perf * pmp,struct ib_device * ibdev,u8 port)1324 static int pma_get_portsamplesresult(struct ib_perf *pmp,
1325 				     struct ib_device *ibdev, u8 port)
1326 {
1327 	struct ib_pma_portsamplesresult *p =
1328 		(struct ib_pma_portsamplesresult *)pmp->data;
1329 	struct qib_ibdev *dev = to_idev(ibdev);
1330 	struct qib_devdata *dd = dd_from_dev(dev);
1331 	struct qib_ibport *ibp = to_iport(ibdev, port);
1332 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1333 	unsigned long flags;
1334 	u8 status;
1335 	int i;
1336 
1337 	memset(pmp->data, 0, sizeof(pmp->data));
1338 	spin_lock_irqsave(&ibp->lock, flags);
1339 	p->tag = cpu_to_be16(ibp->pma_tag);
1340 	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
1341 		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
1342 	else {
1343 		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
1344 		p->sample_status = cpu_to_be16(status);
1345 		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
1346 			cache_hw_sample_counters(ppd);
1347 			ppd->cong_stats.counter =
1348 				xmit_wait_get_value_delta(ppd);
1349 			dd->f_set_cntr_sample(ppd,
1350 					      QIB_CONG_TIMER_PSINTERVAL, 0);
1351 			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
1352 		}
1353 	}
1354 	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
1355 		p->counter[i] = cpu_to_be32(
1356 			get_cache_hw_sample_counters(
1357 				ppd, ibp->pma_counter_select[i]));
1358 	spin_unlock_irqrestore(&ibp->lock, flags);
1359 
1360 	return reply((struct ib_smp *) pmp);
1361 }
1362 
pma_get_portsamplesresult_ext(struct ib_perf * pmp,struct ib_device * ibdev,u8 port)1363 static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
1364 					 struct ib_device *ibdev, u8 port)
1365 {
1366 	struct ib_pma_portsamplesresult_ext *p =
1367 		(struct ib_pma_portsamplesresult_ext *)pmp->data;
1368 	struct qib_ibdev *dev = to_idev(ibdev);
1369 	struct qib_devdata *dd = dd_from_dev(dev);
1370 	struct qib_ibport *ibp = to_iport(ibdev, port);
1371 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1372 	unsigned long flags;
1373 	u8 status;
1374 	int i;
1375 
1376 	/* Port Sampling code owns the PS* HW counters */
1377 	memset(pmp->data, 0, sizeof(pmp->data));
1378 	spin_lock_irqsave(&ibp->lock, flags);
1379 	p->tag = cpu_to_be16(ibp->pma_tag);
1380 	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
1381 		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
1382 	else {
1383 		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
1384 		p->sample_status = cpu_to_be16(status);
1385 		/* 64 bits */
1386 		p->extended_width = cpu_to_be32(0x80000000);
1387 		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
1388 			cache_hw_sample_counters(ppd);
1389 			ppd->cong_stats.counter =
1390 				xmit_wait_get_value_delta(ppd);
1391 			dd->f_set_cntr_sample(ppd,
1392 					      QIB_CONG_TIMER_PSINTERVAL, 0);
1393 			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
1394 		}
1395 	}
1396 	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
1397 		p->counter[i] = cpu_to_be64(
1398 			get_cache_hw_sample_counters(
1399 				ppd, ibp->pma_counter_select[i]));
1400 	spin_unlock_irqrestore(&ibp->lock, flags);
1401 
1402 	return reply((struct ib_smp *) pmp);
1403 }
1404 
pma_get_portcounters(struct ib_perf * pmp,struct ib_device * ibdev,u8 port)1405 static int pma_get_portcounters(struct ib_perf *pmp,
1406 				struct ib_device *ibdev, u8 port)
1407 {
1408 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1409 		pmp->data;
1410 	struct qib_ibport *ibp = to_iport(ibdev, port);
1411 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1412 	struct qib_verbs_counters cntrs;
1413 	u8 port_select = p->port_select;
1414 
1415 	qib_get_counters(ppd, &cntrs);
1416 
1417 	/* Adjust counters for any resets done. */
1418 	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
1419 	cntrs.link_error_recovery_counter -=
1420 		ibp->z_link_error_recovery_counter;
1421 	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
1422 	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
1423 	cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
1424 	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
1425 	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
1426 	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
1427 	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
1428 	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
1429 	cntrs.local_link_integrity_errors -=
1430 		ibp->z_local_link_integrity_errors;
1431 	cntrs.excessive_buffer_overrun_errors -=
1432 		ibp->z_excessive_buffer_overrun_errors;
1433 	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
1434 	cntrs.vl15_dropped += ibp->n_vl15_dropped;
1435 
1436 	memset(pmp->data, 0, sizeof(pmp->data));
1437 
1438 	p->port_select = port_select;
1439 	if (pmp->attr_mod != 0 || port_select != port)
1440 		pmp->status |= IB_SMP_INVALID_FIELD;
1441 
1442 	if (cntrs.symbol_error_counter > 0xFFFFUL)
1443 		p->symbol_error_counter = cpu_to_be16(0xFFFF);
1444 	else
1445 		p->symbol_error_counter =
1446 			cpu_to_be16((u16)cntrs.symbol_error_counter);
1447 	if (cntrs.link_error_recovery_counter > 0xFFUL)
1448 		p->link_error_recovery_counter = 0xFF;
1449 	else
1450 		p->link_error_recovery_counter =
1451 			(u8)cntrs.link_error_recovery_counter;
1452 	if (cntrs.link_downed_counter > 0xFFUL)
1453 		p->link_downed_counter = 0xFF;
1454 	else
1455 		p->link_downed_counter = (u8)cntrs.link_downed_counter;
1456 	if (cntrs.port_rcv_errors > 0xFFFFUL)
1457 		p->port_rcv_errors = cpu_to_be16(0xFFFF);
1458 	else
1459 		p->port_rcv_errors =
1460 			cpu_to_be16((u16) cntrs.port_rcv_errors);
1461 	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
1462 		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
1463 	else
1464 		p->port_rcv_remphys_errors =
1465 			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
1466 	if (cntrs.port_xmit_discards > 0xFFFFUL)
1467 		p->port_xmit_discards = cpu_to_be16(0xFFFF);
1468 	else
1469 		p->port_xmit_discards =
1470 			cpu_to_be16((u16)cntrs.port_xmit_discards);
1471 	if (cntrs.local_link_integrity_errors > 0xFUL)
1472 		cntrs.local_link_integrity_errors = 0xFUL;
1473 	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
1474 		cntrs.excessive_buffer_overrun_errors = 0xFUL;
1475 	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
1476 		cntrs.excessive_buffer_overrun_errors;
1477 	if (cntrs.vl15_dropped > 0xFFFFUL)
1478 		p->vl15_dropped = cpu_to_be16(0xFFFF);
1479 	else
1480 		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
1481 	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
1482 		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
1483 	else
1484 		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
1485 	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
1486 		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
1487 	else
1488 		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
1489 	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
1490 		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
1491 	else
1492 		p->port_xmit_packets =
1493 			cpu_to_be32((u32)cntrs.port_xmit_packets);
1494 	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
1495 		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
1496 	else
1497 		p->port_rcv_packets =
1498 			cpu_to_be32((u32) cntrs.port_rcv_packets);
1499 
1500 	return reply((struct ib_smp *) pmp);
1501 }
1502 
pma_get_portcounters_cong(struct ib_perf * pmp,struct ib_device * ibdev,u8 port)1503 static int pma_get_portcounters_cong(struct ib_perf *pmp,
1504 				     struct ib_device *ibdev, u8 port)
1505 {
1506 	/* Congestion PMA packets start at offset 24 not 64 */
1507 	struct ib_pma_portcounters_cong *p =
1508 		(struct ib_pma_portcounters_cong *)pmp->reserved;
1509 	struct qib_verbs_counters cntrs;
1510 	struct qib_ibport *ibp = to_iport(ibdev, port);
1511 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1512 	struct qib_devdata *dd = dd_from_ppd(ppd);
1513 	u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF;
1514 	u64 xmit_wait_counter;
1515 	unsigned long flags;
1516 
1517 	/*
1518 	 * This check is performed only in the GET method because the
1519 	 * SET method ends up calling this anyway.
1520 	 */
1521 	if (!dd->psxmitwait_supported)
1522 		pmp->status |= IB_SMP_UNSUP_METH_ATTR;
1523 	if (port_select != port)
1524 		pmp->status |= IB_SMP_INVALID_FIELD;
1525 
1526 	qib_get_counters(ppd, &cntrs);
1527 	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
1528 	xmit_wait_counter = xmit_wait_get_value_delta(ppd);
1529 	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
1530 
1531 	/* Adjust counters for any resets done. */
1532 	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
1533 	cntrs.link_error_recovery_counter -=
1534 		ibp->z_link_error_recovery_counter;
1535 	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
1536 	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
1537 	cntrs.port_rcv_remphys_errors -=
1538 		ibp->z_port_rcv_remphys_errors;
1539 	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
1540 	cntrs.local_link_integrity_errors -=
1541 		ibp->z_local_link_integrity_errors;
1542 	cntrs.excessive_buffer_overrun_errors -=
1543 		ibp->z_excessive_buffer_overrun_errors;
1544 	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
1545 	cntrs.vl15_dropped += ibp->n_vl15_dropped;
1546 	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
1547 	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
1548 	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
1549 	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
1550 
1551 	memset(pmp->reserved, 0, sizeof(pmp->reserved) +
1552 	       sizeof(pmp->data));
1553 
1554 	/*
1555 	 * Set top 3 bits to indicate interval in picoseconds in
1556 	 * remaining bits.
1557 	 */
1558 	p->port_check_rate =
1559 		cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
1560 			    (dd->psxmitwait_check_rate &
1561 			     ~(QIB_XMIT_RATE_PICO << 13)));
1562 	p->port_adr_events = cpu_to_be64(0);
1563 	p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
1564 	p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
1565 	p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
1566 	p->port_xmit_packets =
1567 		cpu_to_be64(cntrs.port_xmit_packets);
1568 	p->port_rcv_packets =
1569 		cpu_to_be64(cntrs.port_rcv_packets);
1570 	if (cntrs.symbol_error_counter > 0xFFFFUL)
1571 		p->symbol_error_counter = cpu_to_be16(0xFFFF);
1572 	else
1573 		p->symbol_error_counter =
1574 			cpu_to_be16(
1575 				(u16)cntrs.symbol_error_counter);
1576 	if (cntrs.link_error_recovery_counter > 0xFFUL)
1577 		p->link_error_recovery_counter = 0xFF;
1578 	else
1579 		p->link_error_recovery_counter =
1580 			(u8)cntrs.link_error_recovery_counter;
1581 	if (cntrs.link_downed_counter > 0xFFUL)
1582 		p->link_downed_counter = 0xFF;
1583 	else
1584 		p->link_downed_counter =
1585 			(u8)cntrs.link_downed_counter;
1586 	if (cntrs.port_rcv_errors > 0xFFFFUL)
1587 		p->port_rcv_errors = cpu_to_be16(0xFFFF);
1588 	else
1589 		p->port_rcv_errors =
1590 			cpu_to_be16((u16) cntrs.port_rcv_errors);
1591 	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
1592 		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
1593 	else
1594 		p->port_rcv_remphys_errors =
1595 			cpu_to_be16(
1596 				(u16)cntrs.port_rcv_remphys_errors);
1597 	if (cntrs.port_xmit_discards > 0xFFFFUL)
1598 		p->port_xmit_discards = cpu_to_be16(0xFFFF);
1599 	else
1600 		p->port_xmit_discards =
1601 			cpu_to_be16((u16)cntrs.port_xmit_discards);
1602 	if (cntrs.local_link_integrity_errors > 0xFUL)
1603 		cntrs.local_link_integrity_errors = 0xFUL;
1604 	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
1605 		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);

	return reply((struct ib_smp *)pmp);
}

static int pma_get_portcounters_ext(struct ib_perf *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters_ext *p =
		(struct ib_pma_portcounters_ext *)pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 swords, rwords, spkts, rpkts, xwait;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

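	/* The memset above cleared port_select; echo the request back. */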
	p->port_select = port_select;
	if (pmp->attr_mod != 0 || port_select != port) {
		pmp->status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}

	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

	/* Adjust counters for any resets done. */
	swords -= ibp->z_port_xmit_data;
	rwords -= ibp->z_port_rcv_data;
	spkts -= ibp->z_port_xmit_packets;
	rpkts -= ibp->z_port_rcv_packets;

	p->port_xmit_data = cpu_to_be64(swords);
	p->port_rcv_data = cpu_to_be64(rwords);
	p->port_xmit_packets = cpu_to_be64(spkts);
	p->port_rcv_packets = cpu_to_be64(rpkts);
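	/* The uni/multicast counts below are kept by the driver, not the HW. */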
	p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit);
	p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv);
	p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit);
	p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv);

bail:
	return reply((struct ib_smp *)pmp);
}

static int pma_set_portcounters(struct ib_perf *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;

	/*
	 * Since the HW doesn't support clearing counters, we save the
	 * current count and subtract it from future responses.
	 */
	qib_get_counters(ppd, &cntrs);

	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
		ibp->z_symbol_error_counter = cntrs.symbol_error_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
		ibp->z_link_downed_counter = cntrs.link_downed_counter;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
		ibp->z_port_xmit_discards = cntrs.port_xmit_discards;

	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;

	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;

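	/*
	 * vl15_dropped has a software component (n_vl15_dropped) on top
	 * of the HW count, so both must be cleared here.
	 */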
	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
		ibp->n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = cntrs.port_xmit_data;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
		ibp->z_port_rcv_data = cntrs.port_rcv_data;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;

	return pma_get_portcounters(pmp, ibdev, port);
}

static int pma_set_portcounters_cong(struct ib_perf *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	struct qib_verbs_counters cntrs;
	u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF;
	int ret = 0;
	unsigned long flags;

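	/*
	 * For PortCountersCongestion, CounterSelect is carried in the
	 * top byte of the attribute modifier (extracted above).
	 */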
	qib_get_counters(ppd, &cntrs);
	/* Generate the reply now, before any of the counters are cleared. */
	ret = pma_get_portcounters_cong(pmp, ibdev, port);

	if (counter_select & IB_PMA_SEL_CONG_XMIT) {
		spin_lock_irqsave(&ppd->ibport_data.lock, flags);
		ppd->cong_stats.counter = 0;
		dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
				      0x0);
		spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
	}
	if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
		ibp->z_port_xmit_data = cntrs.port_xmit_data;
		ibp->z_port_rcv_data = cntrs.port_rcv_data;
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	}
	if (counter_select & IB_PMA_SEL_CONG_ALL) {
		ibp->z_symbol_error_counter =
			cntrs.symbol_error_counter;
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;
		ibp->z_link_downed_counter =
			cntrs.link_downed_counter;
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;
		ibp->z_port_xmit_discards =
			cntrs.port_xmit_discards;
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;
		ibp->n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	return ret;
}

static int pma_set_portcounters_ext(struct ib_perf *pmp,
				    struct ib_device *ibdev, u8 port)
{
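	/*
	 * counter_select sits at the same offset in the PortCounters
	 * and PortCountersExtended layouts, so the smaller struct is
	 * reused here just to read it.
	 */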
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 swords, rwords, spkts, rpkts, xwait;

	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = swords;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
		ibp->z_port_rcv_data = rwords;

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = spkts;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = rpkts;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
		ibp->n_unicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
		ibp->n_unicast_rcv = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
		ibp->n_multicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
		ibp->n_multicast_rcv = 0;

	return pma_get_portcounters_ext(pmp, ibdev, port);
}

static int process_subn(struct ib_device *ibdev, int mad_flags,
			u8 port, struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_smp *smp = (struct ib_smp *)out_mad;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int ret;

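	/* Build the reply in place, starting from a copy of the request. */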
	*out_mad = *in_mad;
	if (smp->class_version != 1) {
		smp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply(smp);
		goto bail;
	}

	ret = check_mkey(ibp, smp, mad_flags);
	if (ret) {
		u32 port_num = be32_to_cpu(smp->attr_mod);

		/*
		 * For a Get/Set(PortInfo) addressed to another port, the
		 * M_Key has already been checked on the receiving port.
		 * Repeat the check against the target port so that its
		 * error counters are also incremented when the M_Key
		 * fails to match on *both* ports.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
		    (smp->method == IB_MGMT_METHOD_GET ||
		     smp->method == IB_MGMT_METHOD_SET) &&
		    port_num && port_num <= ibdev->phys_port_cnt &&
		    port != port_num)
			(void) check_mkey(to_iport(ibdev, port_num), smp, 0);
		goto bail;
	}

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_NODE_DESC:
			ret = subn_get_nodedescription(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_NODE_INFO:
			ret = subn_get_nodeinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_GUID_INFO:
			ret = subn_get_guidinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = subn_get_portinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = subn_get_pkeytable(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			ret = subn_get_sl_to_vl(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_VL_ARB_TABLE:
			ret = subn_get_vl_arb(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (ibp->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_GUID_INFO:
			ret = subn_set_guidinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = subn_set_portinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = subn_set_pkeytable(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			ret = subn_set_sl_to_vl(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_VL_ARB_TABLE:
			ret = subn_set_vl_arb(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (ibp->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (smp->attr_id == IB_SMP_ATTR_NOTICE)
			ret = subn_trap_repress(smp, ibdev, port);
		else {
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
		}
		goto bail;

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_REPORT_RESP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	case IB_MGMT_METHOD_SEND:
		if (ib_get_smp_direction(smp) &&
		    smp->attr_id == QIB_VENDOR_IPG) {
			ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
					      smp->data[0]);
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		} else
			ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	default:
		smp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply(smp);
	}

bail:
	return ret;
}

static int process_perf(struct ib_device *ibdev, u8 port,
			struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_perf *pmp = (struct ib_perf *)out_mad;
	int ret;

	*out_mad = *in_mad;
	if (pmp->class_version != 1) {
		pmp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *)pmp);
		goto bail;
	}

	switch (pmp->method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = pma_get_classportinfo(pmp, ibdev);
			goto bail;
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_get_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT:
			ret = pma_get_portsamplesresult(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
			ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_get_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_get_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_get_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *)pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (pmp->attr_id) {
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_set_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_set_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_set_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_set_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *)pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	default:
		pmp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *)pmp);
	}

bail:
	return ret;
}

/**
 * qib_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
		    struct ib_wc *in_wc, struct ib_grh *in_grh,
		    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	int ret;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
		goto bail;

	case IB_MGMT_CLASS_PERF_MGMT:
		ret = process_perf(ibdev, port, in_mad, out_mad);
		goto bail;

	default:
		ret = IB_MAD_RESULT_SUCCESS;
	}

bail:
	return ret;
}

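/* MAD send completion: just release the send buffer. */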
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}

static void xmit_wait_timer_func(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
	struct qib_devdata *dd = dd_from_ppd(ppd);
	unsigned long flags;
	u8 status;

	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			/* save counter cache */
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		} else
			goto done;
	}
	ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
	dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
done:
	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
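	/* Poll again in one second. */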
	mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
}

int qib_create_agents(struct qib_ibdev *dev)
{
	struct qib_devdata *dd = dd_from_dev(dev);
	struct ib_mad_agent *agent;
	struct qib_ibport *ibp;
	int p;
	int ret;

	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
					      NULL, 0, send_handler,
					      NULL, NULL);
		if (IS_ERR(agent)) {
			ret = PTR_ERR(agent);
			goto err;
		}

		/* Initialize xmit_wait structure */
		dd->pport[p].cong_stats.counter = 0;
		init_timer(&dd->pport[p].cong_stats.timer);
		dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
		dd->pport[p].cong_stats.timer.data =
			(unsigned long)(&dd->pport[p]);
		dd->pport[p].cong_stats.timer.expires = 0;
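		/*
		 * An expiry time in the past fires the timer on the next
		 * tick, kicking off the periodic xmit_wait sampling.
		 */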
		add_timer(&dd->pport[p].cong_stats.timer);

		ibp->send_agent = agent;
	}

	return 0;

err:
	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		if (ibp->send_agent) {
			agent = ibp->send_agent;
			ibp->send_agent = NULL;
			ib_unregister_mad_agent(agent);
		}
	}

	return ret;
}

void qib_free_agents(struct qib_ibdev *dev)
{
	struct qib_devdata *dd = dd_from_dev(dev);
	struct ib_mad_agent *agent;
	struct qib_ibport *ibp;
	int p;

	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		if (ibp->send_agent) {
			agent = ibp->send_agent;
			ibp->send_agent = NULL;
			ib_unregister_mad_agent(agent);
		}
		if (ibp->sm_ah) {
			ib_destroy_ah(&ibp->sm_ah->ibah);
			ibp->sm_ah = NULL;
		}
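		/* timer.data is nonzero only if the timer was set up. */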
		if (dd->pport[p].cong_stats.timer.data)
			del_timer_sync(&dd->pport[p].cong_stats.timer);
	}
}