/*
 * linux/drivers/net/ehea/ehea_phyp.h
 *
 * eHEA ethernet device driver for IBM eServer System p
 *
 * (C) Copyright IBM Corp. 2006
 *
 * Authors:
 *	Christoph Raisch <raisch@de.ibm.com>
 *	Jan-Bernd Themann <themann@de.ibm.com>
 *	Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_PHYP_H__
#define __EHEA_PHYP_H__

#include <linux/delay.h>
#include <asm/hvcall.h>
#include "ehea.h"
#include "ehea_hw.h"
/* Some abbreviations used here:
 *
 * hcp_* - structures, variables and functions related to Hypervisor Calls
 */

static inline u32 get_longbusy_msecs(int long_busy_ret_code)
{
	switch (long_busy_ret_code) {
	case H_LONG_BUSY_ORDER_1_MSEC:
		return 1;
	case H_LONG_BUSY_ORDER_10_MSEC:
		return 10;
	case H_LONG_BUSY_ORDER_100_MSEC:
		return 100;
	case H_LONG_BUSY_ORDER_1_SEC:
		return 1000;
	case H_LONG_BUSY_ORDER_10_SEC:
		return 10000;
	case H_LONG_BUSY_ORDER_100_SEC:
		return 100000;
	default:
		return 1;
	}
}
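
/*
 * Illustrative sketch, not lifted verbatim from the driver: code issuing
 * hcalls is expected to retry when the hypervisor returns one of the
 * H_LONG_BUSY_* hints, sleeping for the suggested interval, e.g.:
 *
 *	long hret;
 *
 *	do {
 *		hret = plpar_hcall_norets(opcode, arg1, ...);
 *		if (H_IS_LONG_BUSY(hret))
 *			msleep(get_longbusy_msecs(hret));
 *	} while (H_IS_LONG_BUSY(hret));
 *
 * The <linux/delay.h> and <asm/hvcall.h> includes above provide msleep()
 * and the H_* return codes / H_IS_LONG_BUSY() used in such a loop.
 */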

/* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */
#define EHEA_MAX_RPAGE 512

/* Notification Event Queue (NEQ) Entry bit masks */
#define NEQE_EVENT_CODE		EHEA_BMASK_IBM(2, 7)
#define NEQE_PORTNUM		EHEA_BMASK_IBM(32, 47)
#define NEQE_PORT_UP		EHEA_BMASK_IBM(16, 16)
#define NEQE_EXTSWITCH_PORT_UP	EHEA_BMASK_IBM(17, 17)
#define NEQE_EXTSWITCH_PRIMARY	EHEA_BMASK_IBM(18, 18)
#define NEQE_PLID		EHEA_BMASK_IBM(16, 47)

/* Notification Event Codes */
#define EHEA_EC_PORTSTATE_CHG	0x30
#define EHEA_EC_ADAPTER_MALFUNC	0x32
#define EHEA_EC_PORT_MALFUNC	0x33
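
/*
 * Illustrative sketch (assumed usage): notification event queue entries are
 * decoded with the EHEA_BMASK_GET() helper from ehea_hw.h, e.g. in an event
 * handler:
 *
 *	u8 ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
 *	u16 portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
 *
 *	if (ec == EHEA_EC_PORTSTATE_CHG)
 *		link_up = EHEA_BMASK_GET(NEQE_PORT_UP, eqe);
 */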

/* Notification Event Log Register (NELR) bit masks */
#define NELR_PORT_MALFUNC	EHEA_BMASK_IBM(61, 61)
#define NELR_ADAPTER_MALFUNC	EHEA_BMASK_IBM(62, 62)
#define NELR_PORTSTATE_CHG	EHEA_BMASK_IBM(63, 63)

static inline void hcp_epas_ctor(struct h_epas *epas, u64 paddr_kernel,
				 u64 paddr_user)
{
	/* To support 64k pages we must round to 64k page boundary */
	epas->kernel.addr = ioremap((paddr_kernel & PAGE_MASK), PAGE_SIZE) +
			    (paddr_kernel & ~PAGE_MASK);
	epas->user.addr = paddr_user;
}

static inline void hcp_epas_dtor(struct h_epas *epas)
{
	if (epas->kernel.addr)
		iounmap((void __iomem *)((u64)epas->kernel.addr & PAGE_MASK));

	epas->user.addr = 0;
	epas->kernel.addr = 0;
}

struct hcp_modify_qp_cb0 {
	u64 qp_ctl_reg;		/* 00 */
	u32 max_swqe;		/* 02 */
	u32 max_rwqe;		/* 03 */
	u32 port_nb;		/* 04 */
	u32 reserved0;		/* 05 */
	u64 qp_aer;		/* 06 */
	u64 qp_tenure;		/* 08 */
};

/* Hcall Query/Modify Queue Pair Control Block 0 Selection Mask Bits */
#define H_QPCB0_ALL		EHEA_BMASK_IBM(0, 5)
#define H_QPCB0_QP_CTL_REG	EHEA_BMASK_IBM(0, 0)
#define H_QPCB0_MAX_SWQE	EHEA_BMASK_IBM(1, 1)
#define H_QPCB0_MAX_RWQE	EHEA_BMASK_IBM(2, 2)
#define H_QPCB0_PORT_NB		EHEA_BMASK_IBM(3, 3)
#define H_QPCB0_QP_AER		EHEA_BMASK_IBM(4, 4)
#define H_QPCB0_QP_TENURE	EHEA_BMASK_IBM(5, 5)

/* Queue Pair Control Register Status Bits */
#define H_QP_CR_ENABLED			0x8000000000000000ULL /* QP enabled */
/* QP States: */
#define H_QP_CR_STATE_RESET		0x0000010000000000ULL /* Reset */
#define H_QP_CR_STATE_INITIALIZED	0x0000020000000000ULL /* Initialized */
#define H_QP_CR_STATE_RDY2RCV		0x0000030000000000ULL /* Ready to recv */
#define H_QP_CR_STATE_RDY2SND		0x0000050000000000ULL /* Ready to send */
#define H_QP_CR_STATE_ERROR		0x0000800000000000ULL /* Error */
#define H_QP_CR_RES_STATE		0x0000007F00000000ULL /* Resultant state */
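
/*
 * Illustrative sketch (assumed usage; cb0, qp_handle and the dummy output
 * variables are hypothetical locals): the selection mask bits choose which
 * control block fields a query/modify hcall acts on, and the H_QP_CR_* bits
 * are read from or written into qp_ctl_reg, e.g. to step a QP into the next
 * state:
 *
 *	hret = ehea_h_query_ehea_qp(adapter_handle, 0, qp_handle,
 *				    EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0);
 *	cb0->qp_ctl_reg |= H_QP_CR_STATE_INITIALIZED;
 *	hret = ehea_h_modify_ehea_qp(adapter_handle, 0, qp_handle,
 *				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
 *				     &dummy64, &dummy64, &dummy16, &dummy16);
 */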

struct hcp_modify_qp_cb1 {
	u32 qpn;		/* 00 */
	u32 qp_asyn_ev_eq_nb;	/* 01 */
	u64 sq_cq_handle;	/* 02 */
	u64 rq_cq_handle;	/* 04 */
	/* sgel = scatter gather element */
	u32 sgel_nb_sq;		/* 06 */
	u32 sgel_nb_rq1;	/* 07 */
	u32 sgel_nb_rq2;	/* 08 */
	u32 sgel_nb_rq3;	/* 09 */
};

/* Hcall Query/Modify Queue Pair Control Block 1 Selection Mask Bits */
#define H_QPCB1_ALL		EHEA_BMASK_IBM(0, 7)
#define H_QPCB1_QPN		EHEA_BMASK_IBM(0, 0)
#define H_QPCB1_ASYN_EV_EQ_NB	EHEA_BMASK_IBM(1, 1)
#define H_QPCB1_SQ_CQ_HANDLE	EHEA_BMASK_IBM(2, 2)
#define H_QPCB1_RQ_CQ_HANDLE	EHEA_BMASK_IBM(3, 3)
#define H_QPCB1_SGEL_NB_SQ	EHEA_BMASK_IBM(4, 4)
#define H_QPCB1_SGEL_NB_RQ1	EHEA_BMASK_IBM(5, 5)
#define H_QPCB1_SGEL_NB_RQ2	EHEA_BMASK_IBM(6, 6)
#define H_QPCB1_SGEL_NB_RQ3	EHEA_BMASK_IBM(7, 7)

struct hcp_query_ehea {
	u32 cur_num_qps;		/* 00 */
	u32 cur_num_cqs;		/* 01 */
	u32 cur_num_eqs;		/* 02 */
	u32 cur_num_mrs;		/* 03 */
	u32 auth_level;			/* 04 */
	u32 max_num_qps;		/* 05 */
	u32 max_num_cqs;		/* 06 */
	u32 max_num_eqs;		/* 07 */
	u32 max_num_mrs;		/* 08 */
	u32 reserved0;			/* 09 */
	u32 int_clock_freq;		/* 10 */
	u32 max_num_pds;		/* 11 */
	u32 max_num_addr_handles;	/* 12 */
	u32 max_num_cqes;		/* 13 */
	u32 max_num_wqes;		/* 14 */
	u32 max_num_sgel_rq1wqe;	/* 15 */
	u32 max_num_sgel_rq2wqe;	/* 16 */
	u32 max_num_sgel_rq3wqe;	/* 17 */
	u32 mr_page_size;		/* 18 */
	u32 reserved1;			/* 19 */
	u64 max_mr_size;		/* 20 */
	u64 reserved2;			/* 22 */
	u32 num_ports;			/* 24 */
	u32 reserved3;			/* 25 */
	u32 reserved4;			/* 26 */
	u32 reserved5;			/* 27 */
	u64 max_mc_mac;			/* 28 */
	u64 ehea_cap;			/* 30 */
	u32 max_isn_per_eq;		/* 32 */
	u32 max_num_neq;		/* 33 */
	u64 max_num_vlan_ids;		/* 34 */
	u32 max_num_port_group;		/* 36 */
	u32 max_num_phys_port;		/* 37 */
};

/* Hcall Query/Modify Port Control Block defines */
#define H_PORT_CB0	0
#define H_PORT_CB1	1
#define H_PORT_CB2	2
#define H_PORT_CB3	3
#define H_PORT_CB4	4
#define H_PORT_CB5	5
#define H_PORT_CB6	6
#define H_PORT_CB7	7

struct hcp_ehea_port_cb0 {
	u64 port_mac_addr;
	u64 port_rc;
	u64 reserved0;
	u32 port_op_state;
	u32 port_speed;
	u32 ext_swport_op_state;
	u32 neg_tpf_prpf;
	u32 num_default_qps;
	u32 reserved1;
	u64 default_qpn_arr[16];
};

/* Hcall Query/Modify Port Control Block 0 Selection Mask Bits */
#define H_PORT_CB0_ALL		EHEA_BMASK_IBM(0, 7)	/* Set all bits */
#define H_PORT_CB0_MAC		EHEA_BMASK_IBM(0, 0)	/* MAC address */
#define H_PORT_CB0_PRC		EHEA_BMASK_IBM(1, 1)	/* Port Recv Control */
#define H_PORT_CB0_DEFQPNARRAY	EHEA_BMASK_IBM(7, 7)	/* Default QPN Array */
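
/*
 * Illustrative sketch (assumed usage; logical_port_id and cb0 are
 * hypothetical locals): port attributes are read by querying control
 * block 0 with the desired fields selected, e.g.:
 *
 *	hret = ehea_h_query_ehea_port(adapter_handle, logical_port_id,
 *				      H_PORT_CB0,
 *				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
 *				      cb0);
 *	if (hret == H_SUCCESS) {
 *		mac_addr = cb0->port_mac_addr;
 *		speed = cb0->port_speed;	(one of the H_SPEED_* values below)
 *	}
 */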

/* Hcall Query Port: Returned port speed values */
#define H_SPEED_10M_H	1	/* 10 Mbps, Half Duplex */
#define H_SPEED_10M_F	2	/* 10 Mbps, Full Duplex */
#define H_SPEED_100M_H	3	/* 100 Mbps, Half Duplex */
#define H_SPEED_100M_F	4	/* 100 Mbps, Full Duplex */
#define H_SPEED_1G_F	6	/* 1 Gbps, Full Duplex */
#define H_SPEED_10G_F	8	/* 10 Gbps, Full Duplex */

/* Port Receive Control Status Bits */
#define PXLY_RC_VALID		EHEA_BMASK_IBM(49, 49)
#define PXLY_RC_VLAN_XTRACT	EHEA_BMASK_IBM(50, 50)
#define PXLY_RC_TCP_6_TUPLE	EHEA_BMASK_IBM(51, 51)
#define PXLY_RC_UDP_6_TUPLE	EHEA_BMASK_IBM(52, 52)
#define PXLY_RC_TCP_3_TUPLE	EHEA_BMASK_IBM(53, 53)
#define PXLY_RC_TCP_2_TUPLE	EHEA_BMASK_IBM(54, 54)
#define PXLY_RC_LLC_SNAP	EHEA_BMASK_IBM(55, 55)
#define PXLY_RC_JUMBO_FRAME	EHEA_BMASK_IBM(56, 56)
#define PXLY_RC_FRAG_IP_PKT	EHEA_BMASK_IBM(57, 57)
#define PXLY_RC_TCP_UDP_CHKSUM	EHEA_BMASK_IBM(58, 58)
#define PXLY_RC_IP_CHKSUM	EHEA_BMASK_IBM(59, 59)
#define PXLY_RC_MAC_FILTER	EHEA_BMASK_IBM(60, 60)
#define PXLY_RC_UNTAG_FILTER	EHEA_BMASK_IBM(61, 61)
#define PXLY_RC_VLAN_TAG_FILTER	EHEA_BMASK_IBM(62, 63)

#define PXLY_RC_VLAN_FILTER	2
#define PXLY_RC_VLAN_PERM	0

#define H_PORT_CB1_ALL		0x8000000000000000ULL

struct hcp_ehea_port_cb1 {
	u64 vlan_filter[64];
};

#define H_PORT_CB2_ALL		0xFFE0000000000000ULL

struct hcp_ehea_port_cb2 {
	u64 rxo;
	u64 rxucp;
	u64 rxufd;
	u64 rxuerr;
	u64 rxftl;
	u64 rxmcp;
	u64 rxbcp;
	u64 txo;
	u64 txucp;
	u64 txmcp;
	u64 txbcp;
};

struct hcp_ehea_port_cb3 {
	u64 vlan_bc_filter[64];
	u64 vlan_mc_filter[64];
	u64 vlan_un_filter[64];
	u64 port_mac_hash_array[64];
};

#define H_PORT_CB4_ALL		0xF000000000000000ULL
#define H_PORT_CB4_JUMBO	0x1000000000000000ULL
#define H_PORT_CB4_SPEED	0x8000000000000000ULL

struct hcp_ehea_port_cb4 {
	u32 port_speed;
	u32 pause_frame;
	u32 ens_port_op_state;
	u32 jumbo_frame;
	u32 ens_port_wrap;
};

/* Hcall Query/Modify Port Control Block 5 Selection Mask Bits */
#define H_PORT_CB5_RCU		0x0001000000000000ULL
#define PXS_RCU			EHEA_BMASK_IBM(61, 63)

struct hcp_ehea_port_cb5 {
	u64 prc;	/* 00 */
	u64 uaa;	/* 01 */
	u64 macvc;	/* 02 */
	u64 xpcsc;	/* 03 */
	u64 xpcsp;	/* 04 */
	u64 pcsid;	/* 05 */
	u64 xpcsst;	/* 06 */
	u64 pthlb;	/* 07 */
	u64 pthrb;	/* 08 */
	u64 pqu;	/* 09 */
	u64 pqd;	/* 10 */
	u64 prt;	/* 11 */
	u64 wsth;	/* 12 */
	u64 rcb;	/* 13 */
	u64 rcm;	/* 14 */
	u64 rcu;	/* 15 */
	u64 macc;	/* 16 */
	u64 pc;		/* 17 */
	u64 pst;	/* 18 */
	u64 ducqpn;	/* 19 */
	u64 mcqpn;	/* 20 */
	u64 mma;	/* 21 */
	u64 pmc0h;	/* 22 */
	u64 pmc0l;	/* 23 */
	u64 lbc;	/* 24 */
};

#define H_PORT_CB6_ALL		0xFFFFFE7FFFFF8000ULL

struct hcp_ehea_port_cb6 {
	u64 rxo;	/* 00 */
	u64 rx64;	/* 01 */
	u64 rx65;	/* 02 */
	u64 rx128;	/* 03 */
	u64 rx256;	/* 04 */
	u64 rx512;	/* 05 */
	u64 rx1024;	/* 06 */
	u64 rxbfcs;	/* 07 */
	u64 rxime;	/* 08 */
	u64 rxrle;	/* 09 */
	u64 rxorle;	/* 10 */
	u64 rxftl;	/* 11 */
	u64 rxjab;	/* 12 */
	u64 rxse;	/* 13 */
	u64 rxce;	/* 14 */
	u64 rxrf;	/* 15 */
	u64 rxfrag;	/* 16 */
	u64 rxuoc;	/* 17 */
	u64 rxcpf;	/* 18 */
	u64 rxsb;	/* 19 */
	u64 rxfd;	/* 20 */
	u64 rxoerr;	/* 21 */
	u64 rxaln;	/* 22 */
	u64 ducqpn;	/* 23 */
	u64 reserved0;	/* 24 */
	u64 rxmcp;	/* 25 */
	u64 rxbcp;	/* 26 */
	u64 txmcp;	/* 27 */
	u64 txbcp;	/* 28 */
	u64 txo;	/* 29 */
	u64 tx64;	/* 30 */
	u64 tx65;	/* 31 */
	u64 tx128;	/* 32 */
	u64 tx256;	/* 33 */
	u64 tx512;	/* 34 */
	u64 tx1024;	/* 35 */
	u64 txbfcs;	/* 36 */
	u64 txcpf;	/* 37 */
	u64 txlf;	/* 38 */
	u64 txrf;	/* 39 */
	u64 txime;	/* 40 */
	u64 txsc;	/* 41 */
	u64 txmc;	/* 42 */
	u64 txsqe;	/* 43 */
	u64 txdef;	/* 44 */
	u64 txlcol;	/* 45 */
	u64 txexcol;	/* 46 */
	u64 txcse;	/* 47 */
	u64 txbor;	/* 48 */
};

#define H_PORT_CB7_DUCQPN	0x8000000000000000ULL

struct hcp_ehea_port_cb7 {
	u64 def_uc_qpn;
};

u64 ehea_h_query_ehea_qp(const u64 adapter_handle,
			 const u8 qp_category,
			 const u64 qp_handle, const u64 sel_mask,
			 void *cb_addr);

u64 ehea_h_modify_ehea_qp(const u64 adapter_handle,
			  const u8 cat,
			  const u64 qp_handle,
			  const u64 sel_mask,
			  void *cb_addr,
			  u64 *inv_attr_id,
			  u64 *proc_mask, u16 *out_swr, u16 *out_rwr);

u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
			     struct ehea_eq_attr *eq_attr, u64 *eq_handle);

u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
			     struct ehea_cq_attr *cq_attr,
			     u64 *cq_handle, struct h_epas *epas);

u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
			     struct ehea_qp_init_attr *init_attr,
			     const u32 pd,
			     u64 *qp_handle, struct h_epas *h_epas);

#define H_REG_RPAGE_PAGE_SIZE	EHEA_BMASK_IBM(48, 55)
#define H_REG_RPAGE_QT		EHEA_BMASK_IBM(62, 63)

u64 ehea_h_register_rpage(const u64 adapter_handle,
			  const u8 pagesize,
			  const u8 queue_type,
			  const u64 resource_handle,
			  const u64 log_pageaddr, u64 count);
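
/*
 * Illustrative sketch (assumed usage; reg_control is a hypothetical local):
 * H_REG_RPAGE_PAGE_SIZE and H_REG_RPAGE_QT describe how the page size and
 * queue type are packed into a single argument of the underlying
 * H_REGISTER_HEA_RPAGES hcall, e.g.:
 *
 *	u64 reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize) |
 *			  EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type);
 */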

#define H_DISABLE_GET_EHEA_WQE_P	1
#define H_DISABLE_GET_SQ_WQE_P		2
#define H_DISABLE_GET_RQC		3

u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle);

#define FORCE_FREE	1
#define NORMAL_FREE	0

u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
			 u64 force_bit);

u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
			     const u64 length, const u32 access_ctrl,
			     const u32 pd, u64 *mr_handle, u32 *lkey);

u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
			     const u8 pagesize, const u8 queue_type,
			     const u64 log_pageaddr, const u64 count);

u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
			const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
			struct ehea_mr *mr);

u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr);

/* output param R5 */
#define H_MEHEAPORT_CAT		EHEA_BMASK_IBM(40, 47)
#define H_MEHEAPORT_PN		EHEA_BMASK_IBM(48, 63)

u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
			   const u8 cb_cat, const u64 select_mask,
			   void *cb_addr);

u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
			    const u8 cb_cat, const u64 select_mask,
			    void *cb_addr);

#define H_REGBCMC_PN		EHEA_BMASK_IBM(48, 63)
#define H_REGBCMC_REGTYPE	EHEA_BMASK_IBM(61, 63)
#define H_REGBCMC_MACADDR	EHEA_BMASK_IBM(16, 63)
#define H_REGBCMC_VLANID	EHEA_BMASK_IBM(52, 63)

u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
			  const u8 reg_type, const u64 mc_mac_addr,
			  const u16 vlan_id, const u32 hcall_id);

u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
			const u64 event_mask);

u64 ehea_h_error_data(const u64 adapter_handle, const u64 resource_handle,
		      void *rblock);

#endif /* __EHEA_PHYP_H__ */