// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/align.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

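/**
 * dw_pcie_version_detect() - Cache the DWC IP core version and type
 * @pci: DWC PCIe device
 *
 * Read the version and type CSRs over DBI and cache them in @pci,
 * warning if they disagree with values already set by the glue driver.
 * Cores older than v4.70a report zero and are left untouched.
 */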
void dw_pcie_version_detect(struct dw_pcie *pci)
{
	u32 ver;

	/* The content of the CSR is zero on DWC PCIe older than v4.70a */
	ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_NUMBER);
	if (!ver)
		return;

	if (pci->version && pci->version != ver)
		dev_warn(pci->dev, "Versions don't match (%08x != %08x)\n",
			 pci->version, ver);
	else
		pci->version = ver;

	ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_TYPE);

	if (pci->type && pci->type != ver)
		dev_warn(pci->dev, "Types don't match (%08x != %08x)\n",
			 pci->type, ver);
	else
		pci->type = ver;
}

/*
 * These interfaces resemble the pci_find_*capability() interfaces, but these
 * are for configuring host controllers, which are bridges *to* PCI devices but
 * are not PCI devices themselves.
 */
static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
				  u8 cap)
{
	u8 cap_id, next_cap_ptr;
	u16 reg;

	if (!cap_ptr)
		return 0;

	reg = dw_pcie_readw_dbi(pci, cap_ptr);
	cap_id = (reg & 0x00ff);

	if (cap_id > PCI_CAP_ID_MAX)
		return 0;

	if (cap_id == cap)
		return cap_ptr;

	next_cap_ptr = (reg & 0xff00) >> 8;
	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}

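/**
 * dw_pcie_find_capability() - Scan the standard capability list
 * @pci: DWC PCIe device
 * @cap: capability ID to search for (PCI_CAP_ID_*)
 *
 * Walk the conventional config space capability chain through DBI,
 * mirroring pci_find_capability() for the bridge itself.
 *
 * Return: offset of the capability, or 0 if not found.
 */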
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
{
	u8 next_cap_ptr;
	u16 reg;

	reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
	next_cap_ptr = (reg & 0x00ff);

	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_capability);

static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
					    u8 cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (start)
		pos = start;

	header = dw_pcie_readl_dbi(pci, pos);
	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		header = dw_pcie_readl_dbi(pci, pos);
	}

	return 0;
}

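/**
 * dw_pcie_find_ext_capability() - Scan the extended capability list
 * @pci: DWC PCIe device
 * @cap: extended capability ID to search for (PCI_EXT_CAP_ID_*)
 *
 * Walk the extended config space capability chain starting at offset
 * 0x100. The loop is bounded by the 8-byte minimum capability size so
 * a corrupted chain cannot spin forever.
 *
 * Return: offset of the capability, or 0 if not found.
 */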
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
{
	return dw_pcie_find_next_ext_capability(pci, 0, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);

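/**
 * dw_pcie_read() - Width- and alignment-checked MMIO read
 * @addr: address to read from
 * @size: access width in bytes (1, 2 or 4)
 * @val: where to store the value read; set to 0 on error
 *
 * Return: PCIBIOS_SUCCESSFUL on success, PCIBIOS_BAD_REGISTER_NUMBER
 * on a misaligned @addr or unsupported @size.
 */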
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4) {
		*val = readl(addr);
	} else if (size == 2) {
		*val = readw(addr);
	} else if (size == 1) {
		*val = readb(addr);
	} else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_read);

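/**
 * dw_pcie_write() - Width- and alignment-checked MMIO write
 * @addr: address to write to
 * @size: access width in bytes (1, 2 or 4)
 * @val: value to write
 *
 * Return: PCIBIOS_SUCCESSFUL on success, PCIBIOS_BAD_REGISTER_NUMBER
 * on a misaligned @addr or unsupported @size.
 */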
int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
	if (!IS_ALIGNED((uintptr_t)addr, size))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_write);

u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
{
	int ret;
	u32 val;

	if (pci->ops && pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);

	ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
	if (ret)
		dev_err(pci->dev, "Read DBI address failed\n");

	return val;
}
EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);

void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	if (pci->ops && pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
		return;
	}

	ret = dw_pcie_write(pci->dbi_base + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Write DBI address failed\n");
}
EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);

void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	if (pci->ops && pci->ops->write_dbi2) {
		pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
		return;
	}

	ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Write DBI2 address failed\n");
}

static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
					       u32 index)
{
	if (pci->iatu_unroll_enabled)
		return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index);

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index);
	return pci->atu_base;
}

static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 dir, u32 index, u32 reg)
{
	void __iomem *base;
	int ret;
	u32 val;

	base = dw_pcie_select_atu(pci, dir, index);

	if (pci->ops && pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, base, reg, 4);

	ret = dw_pcie_read(base + reg, 4, &val);
	if (ret)
		dev_err(pci->dev, "Read ATU address failed\n");

	return val;
}

static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 dir, u32 index,
			       u32 reg, u32 val)
{
	void __iomem *base;
	int ret;

	base = dw_pcie_select_atu(pci, dir, index);

	if (pci->ops && pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, base, reg, 4, val);
		return;
	}

	ret = dw_pcie_write(base + reg, 4, val);
	if (ret)
		dev_err(pci->dev, "Write ATU address failed\n");
}

static inline u32 dw_pcie_readl_atu_ob(struct dw_pcie *pci, u32 index, u32 reg)
{
	return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg);
}

static inline void dw_pcie_writel_atu_ob(struct dw_pcie *pci, u32 index, u32 reg,
					 u32 val)
{
	dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg, val);
}

static inline u32 dw_pcie_enable_ecrc(u32 val)
{
	/*
	 * DesignWare core version 4.90A has a design issue where the 'TD'
	 * bit in the Control register-1 of the ATU outbound region acts
	 * like an override for the ECRC setting, i.e., the presence of TLP
	 * Digest (ECRC) in the outgoing TLPs is solely determined by this
	 * bit. This is contrary to the PCIe spec, which says that the
	 * enablement of the ECRC is solely determined by the AER
	 * registers.
	 *
	 * Because of this, even when ECRC is enabled through the AER
	 * registers, the transactions going through the ATU won't have TLP
	 * Digest, as there is no way the PCI core AER code could program
	 * the TD bit, which is specific to the DesignWare core.
	 *
	 * The best way to handle this scenario is to always program the TD
	 * bit. It affects only the traffic from the Root Port to downstream
	 * devices.
	 *
	 * At this point:
	 * When ECRC is enabled in the AER registers, everything works
	 * normally. When ECRC is NOT enabled in the AER registers, then:
	 * on Root Port:- TLP Digest (DWord size) gets appended to each packet
	 *                even though it is not required. Since downstream
	 *                TLPs are mostly for configuration accesses and BAR
	 *                accesses, they are not in the critical path and
	 *                won't have much negative effect on the performance.
	 * on Endpoint:-  TLP Digest is received for some/all the packets
	 *                coming from the Root Port. TLP Digest is ignored
	 *                because, as per the PCIe Spec r5.0 v1.0 section
	 *                2.2.3 "TLP Digest Rules", when an endpoint receives
	 *                TLP Digest while its ECRC check functionality is
	 *                disabled in the AER registers, the received TLP
	 *                Digest is just ignored.
	 * Since there is no issue or error reported on either side, the best
	 * way to handle the scenario is to program the TD bit by default.
	 */

	return val | PCIE_ATU_TD;
}

static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
				       int index, int type, u64 cpu_addr,
				       u64 pci_addr, u64 size)
{
	u32 retries, val;
	u64 limit_addr;

	if (pci->ops && pci->ops->cpu_addr_fixup)
		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

	limit_addr = cpu_addr + size - 1;

	if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
	    !IS_ALIGNED(cpu_addr, pci->region_align) ||
	    !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
		return -EINVAL;
	}

	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
			      lower_32_bits(cpu_addr));
	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
			      upper_32_bits(cpu_addr));

	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
			      lower_32_bits(limit_addr));
	if (dw_pcie_ver_is_ge(pci, 460A))
		dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
				      upper_32_bits(limit_addr));

	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
			      lower_32_bits(pci_addr));
	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
			      upper_32_bits(pci_addr));

	val = type | PCIE_ATU_FUNC_NUM(func_no);
	if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
	    dw_pcie_ver_is_ge(pci, 460A))
		val |= PCIE_ATU_INCREASE_REGION_SIZE;
	if (dw_pcie_ver_is(pci, 490A))
		val = dw_pcie_enable_ecrc(val);
	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);

	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}

	dev_err(pci->dev, "Outbound iATU is not being enabled\n");

	return -ETIMEDOUT;
}

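/**
 * dw_pcie_prog_outbound_atu() - Program an outbound iATU region for function 0
 * @pci: DWC PCIe device
 * @index: outbound region index
 * @type: TLP type generated by the region (MEM, IO, CFG0/CFG1)
 * @cpu_addr: CPU-side base address of the window
 * @pci_addr: PCI bus address the window translates to
 * @size: window size in bytes
 *
 * Return: 0 on success, -EINVAL for a zero-sized, misaligned or
 * out-of-range window, -ETIMEDOUT if the region never enables.
 */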
int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
			      u64 cpu_addr, u64 pci_addr, u64 size)
{
	return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
					   cpu_addr, pci_addr, size);
}

int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
				 int type, u64 cpu_addr, u64 pci_addr,
				 u64 size)
{
	return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
					   cpu_addr, pci_addr, size);
}

static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
{
	return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
}

static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg,
					 u32 val)
{
	dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val);
}

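/**
 * dw_pcie_prog_inbound_atu() - Program an inbound iATU region in BAR-match mode
 * @pci: DWC PCIe device
 * @func_no: PCI function number matched by the region
 * @index: inbound region index
 * @type: TLP type matched by the region
 * @cpu_addr: CPU-side address that matching TLPs are translated to
 * @bar: BAR whose hits the region redirects to @cpu_addr
 *
 * Return: 0 on success, -EINVAL on a misaligned @cpu_addr,
 * -ETIMEDOUT if the region never enables.
 */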
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
			     int type, u64 cpu_addr, u8 bar)
{
	u32 retries, val;

	if (!IS_ALIGNED(cpu_addr, pci->region_align))
		return -EINVAL;

	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
			      lower_32_bits(cpu_addr));
	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
			      upper_32_bits(cpu_addr));

	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
			      PCIE_ATU_FUNC_NUM(func_no));
	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2,
			      PCIE_ATU_ENABLE | PCIE_ATU_FUNC_NUM_MATCH_EN |
			      PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}

	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -ETIMEDOUT;
}

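/**
 * dw_pcie_disable_atu() - Disable a single iATU region
 * @pci: DWC PCIe device
 * @dir: region direction (PCIE_ATU_REGION_DIR_IB or PCIE_ATU_REGION_DIR_OB)
 * @index: region index to disable
 */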
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index)
{
	dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0);
}

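/**
 * dw_pcie_wait_for_link() - Poll until the PHY reports link up
 * @pci: DWC PCIe device
 *
 * Sleep-poll dw_pcie_link_up() and, once the link has trained, log
 * the negotiated speed and width from the Link Status register.
 *
 * Return: 0 when the link comes up, -ETIMEDOUT otherwise.
 */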
int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
	u32 offset, val;
	int retries;

	/* Check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (dw_pcie_link_up(pci))
			break;

		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	if (retries >= LINK_WAIT_MAX_RETRIES) {
		dev_err(pci->dev, "PHY link never came up\n");
		return -ETIMEDOUT;
	}

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);

	dev_info(pci->dev, "PCIe Gen.%u x%u link up\n",
		 FIELD_GET(PCI_EXP_LNKSTA_CLS, val),
		 FIELD_GET(PCI_EXP_LNKSTA_NLW, val));

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);

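/**
 * dw_pcie_link_up() - Check whether the link is up and fully trained
 * @pci: DWC PCIe device
 *
 * Defer to the platform's link_up() callback if one is provided;
 * otherwise read the port debug register and require link-up with
 * training complete.
 *
 * Return: non-zero if the link is up, 0 otherwise.
 */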
int dw_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	if (pci->ops && pci->ops->link_up)
		return pci->ops->link_up(pci);

	val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1);
	return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
		(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}
EXPORT_SYMBOL_GPL(dw_pcie_link_up);

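/**
 * dw_pcie_upconfig_setup() - Advertise link-width upconfigure support
 * @pci: DWC PCIe device
 *
 * Set PORT_MLTI_UPCFG_SUPPORT so the LTSSM advertises that the link
 * width may be renegotiated upward at runtime.
 */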
void dw_pcie_upconfig_setup(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
	val |= PORT_MLTI_UPCFG_SUPPORT;
	dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);

static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
{
	u32 cap, ctrl2, link_speed;
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

	cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
	ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
	ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;

	switch (pcie_link_speed[link_gen]) {
	case PCIE_SPEED_2_5GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
		break;
	case PCIE_SPEED_5_0GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
		break;
	case PCIE_SPEED_8_0GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT;
		break;
	case PCIE_SPEED_16_0GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT;
		break;
	default:
		/* Use hardware capability */
		link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
		ctrl2 &= ~PCI_EXP_LNKCTL2_HASD;
		break;
	}

	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed);

	cap &= ~((u32)PCI_EXP_LNKCAP_SLS);
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed);
}

static bool dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return true;

	return false;
}

static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
{
	int max_region, ob, ib;
	u32 val, min, dir;
	u64 max;

	if (pci->iatu_unroll_enabled) {
		max_region = min((int)pci->atu_size / 512, 256);
	} else {
		dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
		max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
	}

	for (ob = 0; ob < max_region; ob++) {
		dw_pcie_writel_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET, 0x11110000);
		val = dw_pcie_readl_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET);
		if (val != 0x11110000)
			break;
	}

	for (ib = 0; ib < max_region; ib++) {
		dw_pcie_writel_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET, 0x11110000);
		val = dw_pcie_readl_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET);
		if (val != 0x11110000)
			break;
	}

	if (ob) {
		dir = PCIE_ATU_REGION_DIR_OB;
	} else if (ib) {
		dir = PCIE_ATU_REGION_DIR_IB;
	} else {
		dev_err(pci->dev, "No iATU regions found\n");
		return;
	}

	dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_LIMIT, 0x0);
	min = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_LIMIT);

	if (dw_pcie_ver_is_ge(pci, 460A)) {
		dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF);
		max = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT);
	} else {
		max = 0;
	}

	pci->num_ob_windows = ob;
	pci->num_ib_windows = ib;
	pci->region_align = 1 << fls(min);
	pci->region_limit = (max << 32) | (SZ_4G - 1);
}

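/**
 * dw_pcie_iatu_detect() - Detect the iATU register layout and geometry
 * @pci: DWC PCIe device
 *
 * Determine whether the core exposes the unrolled iATU register space
 * or the legacy viewport, map the corresponding registers, then probe
 * how many inbound and outbound windows exist along with their
 * alignment and address limit.
 */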
void dw_pcie_iatu_detect(struct dw_pcie *pci)
{
	struct platform_device *pdev = to_platform_device(pci->dev);

	pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
	if (pci->iatu_unroll_enabled) {
		if (!pci->atu_base) {
			struct resource *res =
				platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
			if (res) {
				pci->atu_size = resource_size(res);
				pci->atu_base = devm_ioremap_resource(pci->dev, res);
			}
			if (!pci->atu_base || IS_ERR(pci->atu_base))
				pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
		}

		if (!pci->atu_size)
			/* Pick a minimal default, enough for 8 in and 8 out windows */
			pci->atu_size = SZ_4K;
	} else {
		pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
		pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;
	}

	dw_pcie_iatu_detect_regions(pci);

	dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
		"enabled" : "disabled");

	dev_info(pci->dev, "iATU regions: %u ob, %u ib, align %uK, limit %lluG\n",
		 pci->num_ob_windows, pci->num_ib_windows,
		 pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
}

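/**
 * dw_pcie_setup() - One-time core setup shared by host and endpoint modes
 * @pci: DWC PCIe device
 *
 * Cap the target link speed, program the N_FTS values, optionally
 * enable the CDM register check, disable fast link (simulation) mode,
 * and set the link width from the "num-lanes" DT property.
 */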
void dw_pcie_setup(struct dw_pcie *pci)
{
	struct device_node *np = pci->dev->of_node;
	u32 val;

	if (pci->link_gen > 0)
		dw_pcie_link_set_max_speed(pci, pci->link_gen);

	/* Configure Gen1 N_FTS */
	if (pci->n_fts[0]) {
		val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
		val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
		val |= PORT_AFR_N_FTS(pci->n_fts[0]);
		val |= PORT_AFR_CC_N_FTS(pci->n_fts[0]);
		dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
	}

	/* Configure Gen2+ N_FTS */
	if (pci->n_fts[1]) {
		val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		val &= ~PORT_LOGIC_N_FTS_MASK;
		val |= pci->n_fts[1];
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
	}

	if (of_property_read_bool(np, "snps,enable-cdm-check")) {
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
		       PCIE_PL_CHK_REG_CHK_REG_START;
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
	}

	/*
	 * Read PCIE_PORT_LINK_CONTROL after the CDM-check block so that
	 * "val" still holds its contents when the lane mode is set below.
	 */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_FAST_LINK_MODE;
	val |= PORT_LINK_DLL_LINK_EN;
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	of_property_read_u32(np, "num-lanes", &pci->num_lanes);
	if (!pci->num_lanes) {
		dev_dbg(pci->dev, "Using h/w default number of lanes\n");
		return;
	}

	/* Set the number of lanes */
	val &= ~PORT_LINK_FAST_LINK_MODE;
	val &= ~PORT_LINK_MODE_MASK;
	switch (pci->num_lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);
		return;
	}
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	/* Set link width speed control register */
	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (pci->num_lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}