// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/kernel.h>

#include "pcie-cadence.h"

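/**
 * cdns_pcie_detect_quiet_min_delay_set() - Set the LTSSM Detect Quiet state
 *                                          minimum delay
 * @pcie: the Cadence PCIe controller
 *
 * Program the Detect Quiet minimum delay field of the LTSSM_CONTROL_CAP
 * register; the value written here corresponds to a 2 ms delay.
 */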
void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
{
	u32 delay = 0x3;
	u32 ltssm_control_cap;

	/*
	 * Set the LTSSM Detect Quiet state min. delay to 2ms.
	 */
	ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
	ltssm_control_cap = ((ltssm_control_cap &
			    ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
			    CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));

	cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
}

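/**
 * cdns_pcie_set_outbound_region() - Program an outbound address translation
 *                                   region
 * @pcie: the Cadence PCIe controller
 * @busnr: bus number, used only in Root Complex mode
 * @fn: PCI function number, used only in Endpoint mode
 * @r: index of the outbound region to program
 * @is_io: %true to generate I/O requests, %false to generate memory requests
 * @cpu_addr: CPU address of the region
 * @pci_addr: PCI address the region maps to
 * @size: size of the region; it is rounded up to a power of two, with a
 *        minimum of 256 bytes
 */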
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
				   u32 r, bool is_io,
				   u64 cpu_addr, u64 pci_addr, size_t size)
{
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	u64 sz = 1ULL << fls64(size - 1);
	int nbits = ilog2(sz);
	u32 addr0, addr1, desc0, desc1;

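	/*
	 * The region size is programmed as a number of address bits
	 * (2^nbits bytes); never use fewer than 8 bits (a 256-byte region).
	 */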
	if (nbits < 8)
		nbits = 8;

	/* Set the PCI address */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
		(lower_32_bits(pci_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(pci_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);

	/* Set the PCIe header descriptor */
	if (is_io)
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO;
	else
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
	desc1 = 0;

	/*
	 * Whether or not Bit [23] is set in the DESC0 register of the outbound
	 * PCIe descriptor, the PCI function number must always be set in
	 * Bits [26:24] of DESC0.
	 *
	 * In Root Complex mode, the function number is always 0, but in
	 * Endpoint mode the PCIe controller may support more than one
	 * function. This function number needs to be set properly in the
	 * outbound PCIe descriptor.
	 *
	 * Besides, setting Bit [23] is mandatory in Root Complex mode: the
	 * driver must then provide the bus number in Bits [7:0] of DESC1 and
	 * the device number in Bits [31:27] of DESC0. Like the function
	 * number, the device number is always 0 in Root Complex mode.
	 *
	 * In Endpoint mode, however, Bit [23] of DESC0 can be cleared, in
	 * which case the PCIe controller uses the captured values for the bus
	 * and device numbers.
	 */
	if (pcie->is_rc) {
		/* The device and function numbers are always 0. */
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	} else {
		/*
		 * Use captured values for bus and device numbers but still
		 * need to set the function number.
		 */
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
	}

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);

	/* Set the CPU address */
	if (pcie->ops->cpu_addr_fixup)
		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}

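/**
 * cdns_pcie_set_outbound_region_for_normal_msg() - Program an outbound region
 *                                                  that generates normal
 *                                                  message TLPs
 * @pcie: the Cadence PCIe controller
 * @busnr: bus number, used only in Root Complex mode
 * @fn: PCI function number, used only in Endpoint mode
 * @r: index of the outbound region to program
 * @cpu_addr: CPU address of the region
 *
 * Unlike cdns_pcie_set_outbound_region(), no PCI address translation is set
 * up: the PCI address registers of the region are simply cleared.
 */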
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
						  u8 busnr, u8 fn,
						  u32 r, u64 cpu_addr)
{
	u32 addr0, addr1, desc0, desc1;

	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
	desc1 = 0;

	/* See cdns_pcie_set_outbound_region() comments above. */
	if (pcie->is_rc) {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	} else {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
	}

	/* Set the CPU address */
	if (pcie->ops->cpu_addr_fixup)
		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}

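/**
 * cdns_pcie_reset_outbound_region() - Clear the PCI address, descriptor and
 *                                     CPU address registers of an outbound
 *                                     region
 * @pcie: the Cadence PCIe controller
 * @r: index of the outbound region to reset
 */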
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
{
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}

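/**
 * cdns_pcie_disable_phy() - Power off and exit all PHYs attached to the
 *                           controller, in reverse order
 * @pcie: the Cadence PCIe controller
 */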
void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
{
	int i = pcie->phy_count;

	while (i--) {
		phy_power_off(pcie->phy[i]);
		phy_exit(pcie->phy[i]);
	}
}

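/**
 * cdns_pcie_enable_phy() - Initialize and power on every PHY attached to the
 *                          controller
 * @pcie: the Cadence PCIe controller
 *
 * On failure, the PHYs that were already enabled are powered off and exited
 * again.
 *
 * Return: 0 on success, a negative error code otherwise.
 */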
int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
{
	int ret;
	int i;

	for (i = 0; i < pcie->phy_count; i++) {
		ret = phy_init(pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(pcie->phy[i]);
		if (ret < 0) {
			phy_exit(pcie->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(pcie->phy[i]);
		phy_exit(pcie->phy[i]);
	}

	return ret;
}

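/**
 * cdns_pcie_init_phy() - Get the PHYs listed in the "phy-names" DT property,
 *                        link them to @dev and enable them
 * @dev: the PCIe controller device
 * @pcie: the Cadence PCIe controller
 *
 * If the "phy-names" property is absent, the PHY setup is skipped and 0 is
 * returned.
 *
 * Return: 0 on success, a negative error code otherwise.
 */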
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
{
	struct device_node *np = dev->of_node;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	int i;
	int ret;
	const char *name;

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 1) {
		dev_err(dev, "no phy-names.  PHY will not be initialized\n");
		pcie->phy_count = 0;
		return 0;
	}

	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < phy_count; i++) {
		of_property_read_string_index(np, "phy-names", i, &name);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i])) {
			ret = PTR_ERR(phy[i]);
			goto err_phy;
		}
		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			devm_phy_put(dev, phy[i]);
			ret = -EINVAL;
			goto err_phy;
		}
	}

	pcie->phy_count = phy_count;
	pcie->phy = phy;
	pcie->link = link;

	ret = cdns_pcie_enable_phy(pcie);
	if (ret)
		goto err_phy;

	return 0;

err_phy:
	while (--i >= 0) {
		device_link_del(link[i]);
		devm_phy_put(dev, phy[i]);
	}

	return ret;
}

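/*
 * System sleep (noirq) callbacks: power the PHYs off on suspend and back on
 * during resume.
 */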
static int cdns_pcie_suspend_noirq(struct device *dev)
{
	struct cdns_pcie *pcie = dev_get_drvdata(dev);

	cdns_pcie_disable_phy(pcie);

	return 0;
}

static int cdns_pcie_resume_noirq(struct device *dev)
{
	struct cdns_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	ret = cdns_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	return 0;
}

const struct dev_pm_ops cdns_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
				  cdns_pcie_resume_noirq)
};