/*
 *  linux/drivers/net/ehea/ehea.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_H__
#define __EHEA_H__

#include <linux/module.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/inet_lro.h>

#include <asm/ibmebus.h>
#include <asm/abs_addr.h>
#include <asm/io.h>

#define DRV_NAME	"ehea"
#define DRV_VERSION	"EHEA_0107"

/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
#define DLPAR_MEM_ADD      2
#define DLPAR_MEM_REM      4
#define EHEA_CAPABILITIES  (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD | DLPAR_MEM_REM)

#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EHEA_MAX_ENTRIES_RQ1 32767
#define EHEA_MAX_ENTRIES_RQ2 16383
#define EHEA_MAX_ENTRIES_RQ3 16383
#define EHEA_MAX_ENTRIES_SQ  32767
#define EHEA_MIN_ENTRIES_QP  127

#define EHEA_SMALL_QUEUES
#define EHEA_NUM_TX_QP 1
#define EHEA_LRO_MAX_AGGR 64

#ifdef EHEA_SMALL_QUEUES
#define EHEA_MAX_CQE_COUNT      1023
#define EHEA_DEF_ENTRIES_SQ     1023
#define EHEA_DEF_ENTRIES_RQ1    4095
#define EHEA_DEF_ENTRIES_RQ2    1023
#define EHEA_DEF_ENTRIES_RQ3    1023
#else
#define EHEA_MAX_CQE_COUNT      4080
#define EHEA_DEF_ENTRIES_SQ     4080
#define EHEA_DEF_ENTRIES_RQ1    8160
#define EHEA_DEF_ENTRIES_RQ2    2040
#define EHEA_DEF_ENTRIES_RQ3    2040
#endif

#define EHEA_MAX_ENTRIES_EQ 20

#define EHEA_SG_SQ  2
#define EHEA_SG_RQ1 1
#define EHEA_SG_RQ2 0
#define EHEA_SG_RQ3 0

#define EHEA_MAX_PACKET_SIZE    9022	/* for jumbo frames */
#define EHEA_RQ2_PKT_SIZE       1522
#define EHEA_L_PKT_SIZE         256	/* low latency */

#define MAX_LRO_DESCRIPTORS 8

/* Send completion signaling */

/* Protection Domain Identifier */
#define EHEA_PD_ID        0xaabcdeff

#define EHEA_RQ2_THRESHOLD	   1
#define EHEA_RQ3_THRESHOLD	   9	/* use RQ3 threshold of 1522 bytes */

#define EHEA_SPEED_10G         10000
#define EHEA_SPEED_1G           1000
#define EHEA_SPEED_100M          100
#define EHEA_SPEED_10M            10
#define EHEA_SPEED_AUTONEG         0

/* Broadcast/Multicast registration types */
#define EHEA_BCMC_SCOPE_ALL	0x08
#define EHEA_BCMC_SCOPE_SINGLE	0x00
#define EHEA_BCMC_MULTICAST	0x04
#define EHEA_BCMC_BROADCAST	0x00
#define EHEA_BCMC_UNTAGGED	0x02
#define EHEA_BCMC_TAGGED	0x00
#define EHEA_BCMC_VLANID_ALL	0x01
#define EHEA_BCMC_VLANID_SINGLE	0x00

#define EHEA_CACHE_LINE          128

/* Memory Regions */
#define EHEA_MR_ACC_CTRL       0x00800000

#define EHEA_BUSMAP_START      0x8000000000000000ULL
#define EHEA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
#define EHEA_DIR_INDEX_SHIFT 13                   /* 8k Entries in 64k block */
#define EHEA_TOP_INDEX_SHIFT (EHEA_DIR_INDEX_SHIFT * 2)
#define EHEA_MAP_ENTRIES (1 << EHEA_DIR_INDEX_SHIFT)
#define EHEA_MAP_SIZE (0x10000)                   /* currently fixed map size */
#define EHEA_INDEX_MASK (EHEA_MAP_ENTRIES - 1)


#define EHEA_WATCH_DOG_TIMEOUT (10 * HZ)

/* utility functions */

void ehea_dump(void *adr, int len, char *msg);
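
/*
 * Hexdump helper for debugging.  An illustrative call site (the pointer
 * and message here are only examples, not defined in this header) would
 * dump a hardware descriptor when an error is detected, e.g.:
 *
 *	ehea_dump(cqe, sizeof(*cqe), "CQE Error");
 */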

#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))

#define EHEA_BMASK_IBM(from, to) (((63 - (to)) << 16) + ((to) - (from) + 1))

#define EHEA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)

#define EHEA_BMASK_MASK(mask) \
	(0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))

#define EHEA_BMASK_SET(mask, value) \
	((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))

#define EHEA_BMASK_GET(mask, value) \
	(EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))
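
/*
 * Worked example (illustrative): EHEA_BMASK_IBM() uses IBM bit numbering,
 * where bit 0 is the most significant bit of a 64-bit word, so
 * EHEA_BMASK_IBM(0, 15) describes the upper 16 bits.  SET/GET then move a
 * value into and out of that field:
 *
 *	EHEA_BMASK_SET(EHEA_BMASK_IBM(0, 15), 0xabcd)
 *		== 0xabcd000000000000ULL
 *	EHEA_BMASK_GET(EHEA_BMASK_IBM(0, 15), 0xabcd000000000000ULL)
 *		== 0xabcd
 */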

/*
 * Generic ehea page
 */
struct ehea_page {
	u8 entries[PAGE_SIZE];
};

/*
 * Generic queue in linux kernel virtual memory
 */
struct hw_queue {
	u64 current_q_offset;		/* current queue entry */
	struct ehea_page **queue_pages;	/* array of pages belonging to queue */
	u32 qe_size;			/* queue entry size */
	u32 queue_length;		/* queue length allocated in bytes */
	u32 pagesize;
	u32 toggle_state;		/* toggle flag - per page */
	u32 reserved;			/* 64 bit alignment */
};

/*
 * For pSeries this is a 64bit memory address where
 * I/O memory is mapped into CPU address space
 */
struct h_epa {
	void __iomem *addr;
};

struct h_epa_user {
	u64 addr;
};

struct h_epas {
	struct h_epa kernel;	/* kernel space accessible resource,
				   set to 0 if unused */
	struct h_epa_user user;	/* user space accessible resource,
				   set to 0 if unused */
};

/*
 * Memory map data structures
 */
struct ehea_dir_bmap
{
	u64 ent[EHEA_MAP_ENTRIES];
};
struct ehea_top_bmap
{
	struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES];
};
struct ehea_bmap
{
	struct ehea_top_bmap *top[EHEA_MAP_ENTRIES];
};
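
/*
 * Minimal sketch of the intended lookup (an assumption based on the
 * three-level layout above): a memory-section index is split into three
 * EHEA_DIR_INDEX_SHIFT-bit wide indices, one per level of the map:
 *
 *	top = (sec_idx >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
 *	dir = (sec_idx >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
 *	idx =  sec_idx & EHEA_INDEX_MASK;
 *	busaddr = ehea_bmap->top[top]->dir[dir]->ent[idx];
 *
 * "sec_idx" and "busaddr" are illustrative names, not part of this header.
 */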

struct ehea_qp;
struct ehea_cq;
struct ehea_eq;
struct ehea_port;
struct ehea_av;

/*
 * Queue attributes passed to ehea_create_qp()
 */
struct ehea_qp_init_attr {
	/* input parameter */
	u32 qp_token;           /* queue token */
	u8 low_lat_rq1;
	u8 signalingtype;       /* cqe generation flag */
	u8 rq_count;            /* num of receive queues */
	u8 eqe_gen;             /* eqe generation flag */
	u16 max_nr_send_wqes;   /* max number of send wqes */
	u16 max_nr_rwqes_rq1;   /* max number of receive wqes */
	u16 max_nr_rwqes_rq2;
	u16 max_nr_rwqes_rq3;
	u8 wqe_size_enc_sq;
	u8 wqe_size_enc_rq1;
	u8 wqe_size_enc_rq2;
	u8 wqe_size_enc_rq3;
	u8 swqe_imm_data_len;   /* immediate data length for swqes */
	u16 port_nr;
	u16 rq2_threshold;
	u16 rq3_threshold;
	u64 send_cq_handle;
	u64 recv_cq_handle;
	u64 aff_eq_handle;

	/* output parameter */
	u32 qp_nr;
	u16 act_nr_send_wqes;
	u16 act_nr_rwqes_rq1;
	u16 act_nr_rwqes_rq2;
	u16 act_nr_rwqes_rq3;
	u8 act_wqe_size_enc_sq;
	u8 act_wqe_size_enc_rq1;
	u8 act_wqe_size_enc_rq2;
	u8 act_wqe_size_enc_rq3;
	u32 nr_sq_pages;
	u32 nr_rq1_pages;
	u32 nr_rq2_pages;
	u32 nr_rq3_pages;
	u32 liobn_sq;
	u32 liobn_rq1;
	u32 liobn_rq2;
	u32 liobn_rq3;
};

/*
 * Event Queue attributes, passed as parameter
 */
struct ehea_eq_attr {
	u32 type;
	u32 max_nr_of_eqes;
	u8 eqe_gen;        /* generate eqe flag */
	u64 eq_handle;
	u32 act_nr_of_eqes;
	u32 nr_pages;
	u32 ist1;          /* Interrupt service token */
	u32 ist2;
	u32 ist3;
	u32 ist4;
};


/*
 * Event Queue
 */
struct ehea_eq {
	struct ehea_adapter *adapter;
	struct hw_queue hw_queue;
	u64 fw_handle;
	struct h_epas epas;
	spinlock_t spinlock;
	struct ehea_eq_attr attr;
};

/*
 * HEA Queues
 */
struct ehea_qp {
	struct ehea_adapter *adapter;
	u64 fw_handle;			/* QP handle for firmware calls */
	struct hw_queue hw_squeue;
	struct hw_queue hw_rqueue1;
	struct hw_queue hw_rqueue2;
	struct hw_queue hw_rqueue3;
	struct h_epas epas;
	struct ehea_qp_init_attr init_attr;
};

/*
 * Completion Queue attributes
 */
struct ehea_cq_attr {
	/* input parameter */
	u32 max_nr_of_cqes;
	u32 cq_token;
	u64 eq_handle;

	/* output parameter */
	u32 act_nr_of_cqes;
	u32 nr_pages;
};

/*
 * Completion Queue
 */
struct ehea_cq {
	struct ehea_adapter *adapter;
	u64 fw_handle;
	struct hw_queue hw_queue;
	struct h_epas epas;
	struct ehea_cq_attr attr;
};

/*
 * Memory Region
 */
struct ehea_mr {
	struct ehea_adapter *adapter;
	u64 handle;
	u64 vaddr;
	u32 lkey;
};

/*
 * Port statistics (error and queue-stall counters)
 */
struct port_stats {
	int poll_receive_errors;
	int queue_stopped;
	int err_tcp_cksum;
	int err_ip_cksum;
	int err_frame_crc;
};

#define EHEA_IRQ_NAME_SIZE 20

/*
 * Queue SKB Array
 */
struct ehea_q_skb_arr {
	struct sk_buff **arr;		/* skb array for queue */
	int len;			/* array length */
	int index;			/* array index */
	int os_skbs;			/* rq2/rq3 only: outstanding skbs */
};

/*
 * Port resources
 */
struct ehea_port_res {
	struct napi_struct napi;
	struct port_stats p_stats;
	struct ehea_mr send_mr;		/* send memory region */
	struct ehea_mr recv_mr;		/* receive memory region */
	spinlock_t xmit_lock;
	struct ehea_port *port;
	char int_recv_name[EHEA_IRQ_NAME_SIZE];
	char int_send_name[EHEA_IRQ_NAME_SIZE];
	struct ehea_qp *qp;
	struct ehea_cq *send_cq;
	struct ehea_cq *recv_cq;
	struct ehea_eq *eq;
	struct ehea_q_skb_arr rq1_skba;
	struct ehea_q_skb_arr rq2_skba;
	struct ehea_q_skb_arr rq3_skba;
	struct ehea_q_skb_arr sq_skba;
	int sq_skba_size;
	spinlock_t netif_queue;
	int queue_stopped;
	int swqe_refill_th;
	atomic_t swqe_avail;
	int swqe_ll_count;
	u32 swqe_id_counter;
	u64 tx_packets;
	u64 tx_bytes;
	u64 rx_packets;
	u64 rx_bytes;
	u32 poll_counter;
	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
	int sq_restart_flag;
};


#define EHEA_MAX_PORTS 16

#define EHEA_NUM_PORTRES_FW_HANDLES    6  /* QP handle, SendCQ handle,
					     RecvCQ handle, EQ handle,
					     SendMR handle, RecvMR handle */
#define EHEA_NUM_PORT_FW_HANDLES       1  /* EQ handle */
#define EHEA_NUM_ADAPTER_FW_HANDLES    2  /* MR handle, NEQ handle */

struct ehea_adapter {
	u64 handle;
	struct platform_device *ofdev;
	struct ehea_port *port[EHEA_MAX_PORTS];
	struct ehea_eq *neq;       /* notification event queue */
	struct tasklet_struct neq_tasklet;
	struct ehea_mr mr;
	u32 pd;                    /* protection domain */
	u64 max_mc_mac;            /* max number of multicast mac addresses */
	int active_ports;
	struct list_head list;
};


struct ehea_mc_list {
	struct list_head list;
	u64 macaddr;
};

/* kdump support */
struct ehea_fw_handle_entry {
	u64 adh;               /* Adapter Handle */
	u64 fwh;               /* Firmware Handle */
};

struct ehea_fw_handle_array {
	struct ehea_fw_handle_entry *arr;
	int num_entries;
	struct mutex lock;
};

struct ehea_bcmc_reg_entry {
	u64 adh;               /* Adapter Handle */
	u32 port_id;           /* Logical Port Id */
	u8 reg_type;           /* Registration Type */
	u64 macaddr;
};

struct ehea_bcmc_reg_array {
	struct ehea_bcmc_reg_entry *arr;
	int num_entries;
	spinlock_t lock;
};

#define EHEA_PORT_UP 1
#define EHEA_PORT_DOWN 0
#define EHEA_PHY_LINK_UP 1
#define EHEA_PHY_LINK_DOWN 0
#define EHEA_MAX_PORT_RES 16
struct ehea_port {
	struct ehea_adapter *adapter;	 /* adapter that owns this port */
	struct net_device *netdev;
	struct net_device_stats stats;
	struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
	struct platform_device ofdev;	 /* Open Firmware Device */
	struct ehea_mc_list *mc_list;	 /* Multicast MAC addresses */
	struct vlan_group *vgrp;
	struct ehea_eq *qp_eq;
	struct work_struct reset_task;
	struct mutex port_lock;
	char int_aff_name[EHEA_IRQ_NAME_SIZE];
	int allmulti;			 /* Indicates IFF_ALLMULTI state */
	int promisc;			 /* Indicates IFF_PROMISC state */
	int num_tx_qps;
	int num_add_tx_qps;
	int num_mcs;
	int resets;
	unsigned long flags;
	u64 mac_addr;
	u32 logical_port_id;
	u32 port_speed;
	u32 msg_enable;
	u32 sig_comp_iv;
	u32 state;
	u32 lro_max_aggr;
	u8 phy_link;
	u8 full_duplex;
	u8 autoneg;
	u8 num_def_qps;
	wait_queue_head_t swqe_avail_wq;
	wait_queue_head_t restart_wq;
};

struct port_res_cfg {
	int max_entries_rcq;
	int max_entries_scq;
	int max_entries_sq;
	int max_entries_rq1;
	int max_entries_rq2;
	int max_entries_rq3;
};

enum ehea_flag_bits {
	__EHEA_STOP_XFER,
	__EHEA_DISABLE_PORT_RESET
};

void ehea_set_ethtool_ops(struct net_device *netdev);
int ehea_sense_port_attr(struct ehea_port *port);
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);

#endif	/* __EHEA_H__ */