/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/**
 *
 * Interface to the hardware Input Packet Data unit.
 */

#ifndef __CVMX_IPD_H__
#define __CVMX_IPD_H__

#include <asm/octeon/octeon-feature.h>

#include <asm/octeon/cvmx-ipd-defs.h>

enum cvmx_ipd_mode {
	CVMX_IPD_OPC_MODE_STT = 0LL,	/* All blocks DRAM, not cached in L2 */
	CVMX_IPD_OPC_MODE_STF = 1LL,	/* All blocks into L2 */
	CVMX_IPD_OPC_MODE_STF1_STT = 2LL,	/* 1st block L2, rest DRAM */
	CVMX_IPD_OPC_MODE_STF2_STT = 3LL	/* 1st, 2nd blocks L2, rest DRAM */
};

#ifndef CVMX_ENABLE_LEN_M8_FIX
#define CVMX_ENABLE_LEN_M8_FIX 0
#endif

/* CSR typedefs have been moved to cvmx-csr-*.h */
typedef union cvmx_ipd_1st_mbuff_skip cvmx_ipd_mbuff_first_skip_t;
typedef union cvmx_ipd_1st_next_ptr_back cvmx_ipd_first_next_ptr_back_t;

typedef cvmx_ipd_mbuff_first_skip_t cvmx_ipd_mbuff_not_first_skip_t;
typedef cvmx_ipd_first_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;

/**
 * Configure IPD
 *
 * @mbuff_size: Packet buffer size in 8 byte words
 * @first_mbuff_skip:
 *                   Number of 8 byte words to skip in the first buffer
 * @not_first_mbuff_skip:
 *                   Number of 8 byte words to skip in each following buffer
 * @first_back: Must be the same as first_mbuff_skip / 128
 * @second_back:
 *                   Must be the same as not_first_mbuff_skip / 128
 * @wqe_fpa_pool:
 *                   FPA pool to get work entries from
 * @cache_mode: Packet data caching mode (see enum cvmx_ipd_mode)
 * @back_pres_enable_flag:
 *                   Enable or disable port back pressure
 *
 * An illustrative call is sketched after the function body below.
 */
static inline void cvmx_ipd_config(uint64_t mbuff_size,
				   uint64_t first_mbuff_skip,
				   uint64_t not_first_mbuff_skip,
				   uint64_t first_back,
				   uint64_t second_back,
				   uint64_t wqe_fpa_pool,
				   enum cvmx_ipd_mode cache_mode,
				   uint64_t back_pres_enable_flag)
{
	cvmx_ipd_mbuff_first_skip_t first_skip;
	cvmx_ipd_mbuff_not_first_skip_t not_first_skip;
	union cvmx_ipd_packet_mbuff_size size;
	cvmx_ipd_first_next_ptr_back_t first_back_struct;
	cvmx_ipd_second_next_ptr_back_t second_back_struct;
	union cvmx_ipd_wqe_fpa_queue wqe_pool;
	union cvmx_ipd_ctl_status ipd_ctl_reg;

	first_skip.u64 = 0;
	first_skip.s.skip_sz = first_mbuff_skip;
	cvmx_write_csr(CVMX_IPD_1ST_MBUFF_SKIP, first_skip.u64);

	not_first_skip.u64 = 0;
	not_first_skip.s.skip_sz = not_first_mbuff_skip;
	cvmx_write_csr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, not_first_skip.u64);

	size.u64 = 0;
	size.s.mb_size = mbuff_size;
	cvmx_write_csr(CVMX_IPD_PACKET_MBUFF_SIZE, size.u64);

	first_back_struct.u64 = 0;
	first_back_struct.s.back = first_back;
	cvmx_write_csr(CVMX_IPD_1st_NEXT_PTR_BACK, first_back_struct.u64);

	second_back_struct.u64 = 0;
	second_back_struct.s.back = second_back;
	cvmx_write_csr(CVMX_IPD_2nd_NEXT_PTR_BACK, second_back_struct.u64);

	wqe_pool.u64 = 0;
	wqe_pool.s.wqe_pool = wqe_fpa_pool;
	cvmx_write_csr(CVMX_IPD_WQE_FPA_QUEUE, wqe_pool.u64);

	ipd_ctl_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
	ipd_ctl_reg.s.opc_mode = cache_mode;
	ipd_ctl_reg.s.pbp_en = back_pres_enable_flag;
	cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_reg.u64);

	/* Note: the example RED code that used to be here has been moved to
	   cvmx_helper_setup_red */
}
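
/*
 * Illustrative sketch only: a hypothetical helper (not part of the SDK
 * API) showing how the arguments documented above relate to each other.
 * The 2 KB buffer size and the use of CVMX_FPA_WQE_POOL and
 * CVMX_IPD_OPC_MODE_STT are assumptions for the example; real values are
 * normally derived from the FPA configuration by the cvmx-helper code.
 * cvmx_ipd_enable(), defined below, is called separately once packet
 * input is fully configured.
 */
static inline void cvmx_ipd_example_config(uint64_t first_skip_words,
					   uint64_t not_first_skip_words)
{
	cvmx_ipd_config(2048 / 8,	/* buffer size in 8 byte words (assumes 2 KB buffers) */
			first_skip_words,
			not_first_skip_words,
			first_skip_words / 128,	/* first_back, per the rule above */
			not_first_skip_words / 128,	/* second_back, per the rule above */
			CVMX_FPA_WQE_POOL,	/* FPA pool supplying work queue entries */
			CVMX_IPD_OPC_MODE_STT,	/* keep packet data out of the L2 cache */
			1);	/* enable port back pressure */
}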

/**
 * Enable IPD
 */
static inline void cvmx_ipd_enable(void)
{
	union cvmx_ipd_ctl_status ipd_reg;
	ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
	if (ipd_reg.s.ipd_en) {
		cvmx_dprintf
		    ("Warning: Enabling IPD when IPD already enabled.\n");
	}
	ipd_reg.s.ipd_en = 1;
#if CVMX_ENABLE_LEN_M8_FIX
	if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
		ipd_reg.s.len_m8 = TRUE;
#endif
	cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
}

/**
 * Disable IPD
 */
static inline void cvmx_ipd_disable(void)
{
	union cvmx_ipd_ctl_status ipd_reg;
	ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
	ipd_reg.s.ipd_en = 0;
	cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
}

/**
 * Support function for cvmx_fpa_shutdown_pool: returns the packet and
 * WQE buffers still held inside the IPD to their FPA pools.
 */
static inline void cvmx_ipd_free_ptr(void)
{
	/* Only CN38XX pass 1 and pass 2 cannot read pointers back out of
	   the IPD, so skip this on those models */
	if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1)
	    && !OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
		int no_wptr = 0;
		union cvmx_ipd_ptr_count ipd_ptr_count;
		ipd_ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);

		/* Handle Work Queue Entry in cn56xx and cn52xx */
		if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
			union cvmx_ipd_ctl_status ipd_ctl_status;
			ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
			if (ipd_ctl_status.s.no_wptr)
				no_wptr = 1;
		}

		/* Free the prefetched WQE */
		if (ipd_ptr_count.s.wqev_cnt) {
			union cvmx_ipd_wqe_ptr_valid ipd_wqe_ptr_valid;
			ipd_wqe_ptr_valid.u64 =
			    cvmx_read_csr(CVMX_IPD_WQE_PTR_VALID);
			/*
			 * The ptr field holds the buffer address divided by
			 * 128, so shift left by 7 to recover the physical
			 * address before freeing it.
			 */
			if (no_wptr)
				cvmx_fpa_free(cvmx_phys_to_ptr(
					(uint64_t)ipd_wqe_ptr_valid.s.ptr << 7),
					CVMX_FPA_PACKET_POOL, 0);
			else
				cvmx_fpa_free(cvmx_phys_to_ptr(
					(uint64_t)ipd_wqe_ptr_valid.s.ptr << 7),
					CVMX_FPA_WQE_POOL, 0);
		}

		/* Free all WQE in the fifo */
		if (ipd_ptr_count.s.wqe_pcnt) {
			int i;
			union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl;
			ipd_pwp_ptr_fifo_ctl.u64 =
			    cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
			for (i = 0; i < ipd_ptr_count.s.wqe_pcnt; i++) {
				ipd_pwp_ptr_fifo_ctl.s.cena = 0;
				ipd_pwp_ptr_fifo_ctl.s.raddr =
				    ipd_pwp_ptr_fifo_ctl.s.max_cnts +
				    (ipd_pwp_ptr_fifo_ctl.s.wraddr +
				     i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
				cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
					       ipd_pwp_ptr_fifo_ctl.u64);
				ipd_pwp_ptr_fifo_ctl.u64 =
				    cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
				if (no_wptr)
					cvmx_fpa_free(cvmx_phys_to_ptr(
						(uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr << 7),
						CVMX_FPA_PACKET_POOL, 0);
				else
					cvmx_fpa_free(cvmx_phys_to_ptr(
						(uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr << 7),
						CVMX_FPA_WQE_POOL, 0);
			}
			ipd_pwp_ptr_fifo_ctl.s.cena = 1;
			cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
				       ipd_pwp_ptr_fifo_ctl.u64);
		}

		/* Free the prefetched packet */
		if (ipd_ptr_count.s.pktv_cnt) {
			union cvmx_ipd_pkt_ptr_valid ipd_pkt_ptr_valid;
			ipd_pkt_ptr_valid.u64 =
			    cvmx_read_csr(CVMX_IPD_PKT_PTR_VALID);
			cvmx_fpa_free(cvmx_phys_to_ptr(
				      ipd_pkt_ptr_valid.s.ptr << 7),
				      CVMX_FPA_PACKET_POOL, 0);
		}

		/* Free the per port prefetched packets */
		if (1) {
			int i;
			union cvmx_ipd_prc_port_ptr_fifo_ctl
			    ipd_prc_port_ptr_fifo_ctl;
			ipd_prc_port_ptr_fifo_ctl.u64 =
			    cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);

			for (i = 0; i < ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
			     i++) {
				ipd_prc_port_ptr_fifo_ctl.s.cena = 0;
				ipd_prc_port_ptr_fifo_ctl.s.raddr =
				    i % ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
				cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
					       ipd_prc_port_ptr_fifo_ctl.u64);
				ipd_prc_port_ptr_fifo_ctl.u64 =
				    cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
				cvmx_fpa_free(cvmx_phys_to_ptr(
					(uint64_t)ipd_prc_port_ptr_fifo_ctl.s.ptr << 7),
					CVMX_FPA_PACKET_POOL, 0);
			}
			ipd_prc_port_ptr_fifo_ctl.s.cena = 1;
			cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
				       ipd_prc_port_ptr_fifo_ctl.u64);
		}

		/* Free all packets in the holding fifo */
		if (ipd_ptr_count.s.pfif_cnt) {
			int i;
			union cvmx_ipd_prc_hold_ptr_fifo_ctl
			    ipd_prc_hold_ptr_fifo_ctl;

			ipd_prc_hold_ptr_fifo_ctl.u64 =
			    cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);

			for (i = 0; i < ipd_ptr_count.s.pfif_cnt; i++) {
				ipd_prc_hold_ptr_fifo_ctl.s.cena = 0;
				ipd_prc_hold_ptr_fifo_ctl.s.raddr =
				    (ipd_prc_hold_ptr_fifo_ctl.s.praddr +
				     i) % ipd_prc_hold_ptr_fifo_ctl.s.max_pkt;
				cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
					       ipd_prc_hold_ptr_fifo_ctl.u64);
				ipd_prc_hold_ptr_fifo_ctl.u64 =
				    cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
				cvmx_fpa_free(cvmx_phys_to_ptr(
					(uint64_t)ipd_prc_hold_ptr_fifo_ctl.s.ptr << 7),
					CVMX_FPA_PACKET_POOL, 0);
			}
			ipd_prc_hold_ptr_fifo_ctl.s.cena = 1;
			cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
				       ipd_prc_hold_ptr_fifo_ctl.u64);
		}

		/* Free all packets in the fifo */
		if (ipd_ptr_count.s.pkt_pcnt) {
			int i;
			union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl;
			ipd_pwp_ptr_fifo_ctl.u64 =
			    cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);

			for (i = 0; i < ipd_ptr_count.s.pkt_pcnt; i++) {
				ipd_pwp_ptr_fifo_ctl.s.cena = 0;
				ipd_pwp_ptr_fifo_ctl.s.raddr =
				    (ipd_pwp_ptr_fifo_ctl.s.praddr +
				     i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
				cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
					       ipd_pwp_ptr_fifo_ctl.u64);
				ipd_pwp_ptr_fifo_ctl.u64 =
				    cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
				cvmx_fpa_free(cvmx_phys_to_ptr(
					(uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr << 7),
					CVMX_FPA_PACKET_POOL, 0);
			}
			ipd_pwp_ptr_fifo_ctl.s.cena = 1;
			cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
				       ipd_pwp_ptr_fifo_ctl.u64);
		}

		/* Reset the IPD to get all buffers out of it */
		{
			union cvmx_ipd_ctl_status ipd_ctl_status;
			ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
			ipd_ctl_status.s.reset = 1;
			cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
		}

		/* Reset the PIP */
		{
			union cvmx_pip_sft_rst pip_sft_rst;
			pip_sft_rst.u64 = cvmx_read_csr(CVMX_PIP_SFT_RST);
			pip_sft_rst.s.rst = 1;
			cvmx_write_csr(CVMX_PIP_SFT_RST, pip_sft_rst.u64);
		}
	}
}
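
/*
 * Illustrative teardown sketch (hypothetical helper, not part of the SDK
 * API): packet input is disabled before the buffers still held by the
 * IPD are returned to their FPA pools.  The FPA pool shutdown itself
 * (cvmx_fpa_shutdown_pool, mentioned in the comment above) would follow
 * and is outside the scope of this header.
 */
static inline void cvmx_ipd_example_shutdown(void)
{
	cvmx_ipd_disable();	/* stop accepting new packets */
	cvmx_ipd_free_ptr();	/* drain buffers the IPD is still holding */
	/* ... the caller would now shut down the FPA pools ... */
}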

#endif /* __CVMX_IPD_H__ */