// SPDX-License-Identifier: GPL-2.0-only
/*
 * Rockchip PCIe PHY driver
 *
 * Copyright (C) 2016 Shawn Lin <shawn.lin@rock-chips.com>
 * Copyright (C) 2016 ROCKCHIP, Inc.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>

/*
 * The higher 16 bits of this register are used for write protection:
 * only if BIT(x + 16) is set to 1 can BIT(x) be written.
 */
#define HIWORD_UPDATE(val, mask, shift) \
		((val) << (shift) | (mask) << ((shift) + 16))

#define PHY_MAX_LANE_NUM      4
#define PHY_CFG_DATA_SHIFT    7
#define PHY_CFG_ADDR_SHIFT    1
#define PHY_CFG_DATA_MASK     0xf
#define PHY_CFG_ADDR_MASK     0x3f
#define PHY_CFG_RD_MASK       0x3ff
#define PHY_CFG_WR_ENABLE     1
#define PHY_CFG_WR_DISABLE    1
#define PHY_CFG_WR_SHIFT      0
#define PHY_CFG_WR_MASK       1
#define PHY_CFG_PLL_LOCK      0x10
#define PHY_CFG_CLK_TEST      0x10
#define PHY_CFG_CLK_SCC       0x12
#define PHY_CFG_SEPE_RATE     BIT(3)
#define PHY_CFG_PLL_100M      BIT(3)
#define PHY_PLL_LOCKED        BIT(9)
#define PHY_PLL_OUTPUT        BIT(10)
#define PHY_LANE_A_STATUS     0x30
#define PHY_LANE_B_STATUS     0x31
#define PHY_LANE_C_STATUS     0x32
#define PHY_LANE_D_STATUS     0x33
#define PHY_LANE_RX_DET_SHIFT 11
#define PHY_LANE_RX_DET_TH    0x1
#define PHY_LANE_IDLE_OFF     0x1
#define PHY_LANE_IDLE_MASK    0x1
#define PHY_LANE_IDLE_A_SHIFT 3
#define PHY_LANE_IDLE_B_SHIFT 4
#define PHY_LANE_IDLE_C_SHIFT 5
#define PHY_LANE_IDLE_D_SHIFT 6

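/* SoC-specific offsets of the PCIe PHY registers inside the GRF syscon. */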
struct rockchip_pcie_data {
        unsigned int pcie_conf;
        unsigned int pcie_status;
        unsigned int pcie_laneoff;
};

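/*
 * Driver state shared by all lane PHYs of one controller; pwr_cnt and
 * init_cnt refcount power_on/power_off and init/exit across the lanes.
 */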
struct rockchip_pcie_phy {
        struct rockchip_pcie_data *phy_data;
        struct regmap *reg_base;
        struct phy_pcie_instance {
                struct phy *phy;
                u32 index;
        } phys[PHY_MAX_LANE_NUM];
        struct mutex pcie_mutex;
        struct reset_control *phy_rst;
        struct clk *clk_pciephy_ref;
        int pwr_cnt;
        int init_cnt;
};

static struct rockchip_pcie_phy *to_pcie_phy(struct phy_pcie_instance *inst)
{
        return container_of(inst, struct rockchip_pcie_phy,
                            phys[inst->index]);
}

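/*
 * With the legacy binding (#phy-cells = 0) a single PHY covers all lanes,
 * so requests without arguments map to phys[0]; otherwise the cell selects
 * the per-lane PHY instance.
 */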
static struct phy *rockchip_pcie_phy_of_xlate(struct device *dev,
                                              struct of_phandle_args *args)
{
        struct rockchip_pcie_phy *rk_phy = dev_get_drvdata(dev);

        if (args->args_count == 0)
                return rk_phy->phys[0].phy;

        if (WARN_ON(args->args[0] >= PHY_MAX_LANE_NUM))
                return ERR_PTR(-ENODEV);

        return rk_phy->phys[args->args[0]].phy;
}

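/*
 * Indirect PHY config write: latch the data and address into the GRF conf
 * register, then toggle the write strobe to commit the value.
 */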
static inline void phy_wr_cfg(struct rockchip_pcie_phy *rk_phy,
                              u32 addr, u32 data)
{
        regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
                     HIWORD_UPDATE(data,
                                   PHY_CFG_DATA_MASK,
                                   PHY_CFG_DATA_SHIFT) |
                     HIWORD_UPDATE(addr,
                                   PHY_CFG_ADDR_MASK,
                                   PHY_CFG_ADDR_SHIFT));
        udelay(1);
        regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
                     HIWORD_UPDATE(PHY_CFG_WR_ENABLE,
                                   PHY_CFG_WR_MASK,
                                   PHY_CFG_WR_SHIFT));
        udelay(1);
        regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
                     HIWORD_UPDATE(PHY_CFG_WR_DISABLE,
                                   PHY_CFG_WR_MASK,
                                   PHY_CFG_WR_SHIFT));
}

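/*
 * Indirect PHY config read: place the address on the conf register and
 * return the value reported in the GRF status register.
 */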
static inline u32 phy_rd_cfg(struct rockchip_pcie_phy *rk_phy,
                             u32 addr)
{
        u32 val;

        regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
                     HIWORD_UPDATE(addr,
                                   PHY_CFG_RD_MASK,
                                   PHY_CFG_ADDR_SHIFT));
        regmap_read(rk_phy->reg_base,
                    rk_phy->phy_data->pcie_status,
                    &val);
        return val;
}

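/*
 * Power off one lane PHY: force this lane idle, and only assert the PHY
 * reset once the last powered-on lane is gone.
 */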
static int rockchip_pcie_phy_power_off(struct phy *phy)
{
        struct phy_pcie_instance *inst = phy_get_drvdata(phy);
        struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
        int err = 0;

        mutex_lock(&rk_phy->pcie_mutex);

        regmap_write(rk_phy->reg_base,
                     rk_phy->phy_data->pcie_laneoff,
                     HIWORD_UPDATE(PHY_LANE_IDLE_OFF,
                                   PHY_LANE_IDLE_MASK,
                                   PHY_LANE_IDLE_A_SHIFT + inst->index));

        if (--rk_phy->pwr_cnt)
                goto err_out;

        err = reset_control_assert(rk_phy->phy_rst);
        if (err) {
                dev_err(&phy->dev, "assert phy_rst err %d\n", err);
                goto err_restore;
        }

err_out:
        mutex_unlock(&rk_phy->pcie_mutex);
        return 0;

err_restore:
        rk_phy->pwr_cnt++;
        regmap_write(rk_phy->reg_base,
                     rk_phy->phy_data->pcie_laneoff,
                     HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
                                   PHY_LANE_IDLE_MASK,
                                   PHY_LANE_IDLE_A_SHIFT + inst->index));
        mutex_unlock(&rk_phy->pcie_mutex);
        return err;
}

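/*
 * Power on one lane PHY. For the first user this deasserts the PHY reset,
 * takes the lane out of idle, waits for PLL lock, programs the separate-rate
 * and 100 MHz PLL settings through the config interface, and then waits for
 * the PLL to settle and relock.
 */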
static int rockchip_pcie_phy_power_on(struct phy *phy)
{
        struct phy_pcie_instance *inst = phy_get_drvdata(phy);
        struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
        int err = 0;
        u32 status;
        unsigned long timeout;

        mutex_lock(&rk_phy->pcie_mutex);

        if (rk_phy->pwr_cnt++)
                goto err_out;

        err = reset_control_deassert(rk_phy->phy_rst);
        if (err) {
                dev_err(&phy->dev, "deassert phy_rst err %d\n", err);
                goto err_pwr_cnt;
        }

        regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
                     HIWORD_UPDATE(PHY_CFG_PLL_LOCK,
                                   PHY_CFG_ADDR_MASK,
                                   PHY_CFG_ADDR_SHIFT));

        regmap_write(rk_phy->reg_base,
                     rk_phy->phy_data->pcie_laneoff,
                     HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
                                   PHY_LANE_IDLE_MASK,
                                   PHY_LANE_IDLE_A_SHIFT + inst->index));

        /*
         * There is no documented timeout value for the PHY operations below,
         * so make the deadline generously large and poll with a loop-break,
         * which should not be harmful.
         */
        timeout = jiffies + msecs_to_jiffies(1000);

        err = -EINVAL;
        while (time_before(jiffies, timeout)) {
                regmap_read(rk_phy->reg_base,
                            rk_phy->phy_data->pcie_status,
                            &status);
                if (status & PHY_PLL_LOCKED) {
                        dev_dbg(&phy->dev, "pll locked!\n");
                        err = 0;
                        break;
                }
                msleep(20);
        }

        if (err) {
                dev_err(&phy->dev, "pll lock timeout!\n");
                goto err_pll_lock;
        }

        phy_wr_cfg(rk_phy, PHY_CFG_CLK_TEST, PHY_CFG_SEPE_RATE);
        phy_wr_cfg(rk_phy, PHY_CFG_CLK_SCC, PHY_CFG_PLL_100M);

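        /*
         * Wait for the PHY_PLL_OUTPUT status bit to clear after the clock
         * reconfiguration above, then check that the PLL locks again.
         */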
        err = -ETIMEDOUT;
        while (time_before(jiffies, timeout)) {
                regmap_read(rk_phy->reg_base,
                            rk_phy->phy_data->pcie_status,
                            &status);
                if (!(status & PHY_PLL_OUTPUT)) {
                        dev_dbg(&phy->dev, "pll output enable done!\n");
                        err = 0;
                        break;
                }
                msleep(20);
        }

        if (err) {
                dev_err(&phy->dev, "pll output enable timeout!\n");
                goto err_pll_lock;
        }

        regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
                     HIWORD_UPDATE(PHY_CFG_PLL_LOCK,
                                   PHY_CFG_ADDR_MASK,
                                   PHY_CFG_ADDR_SHIFT));
        err = -EINVAL;
        while (time_before(jiffies, timeout)) {
                regmap_read(rk_phy->reg_base,
                            rk_phy->phy_data->pcie_status,
                            &status);
                if (status & PHY_PLL_LOCKED) {
                        dev_dbg(&phy->dev, "pll relocked!\n");
                        err = 0;
                        break;
                }
                msleep(20);
        }

        if (err) {
                dev_err(&phy->dev, "pll relock timeout!\n");
                goto err_pll_lock;
        }

err_out:
        mutex_unlock(&rk_phy->pcie_mutex);
        return 0;

err_pll_lock:
        reset_control_assert(rk_phy->phy_rst);
err_pwr_cnt:
        rk_phy->pwr_cnt--;
        mutex_unlock(&rk_phy->pcie_mutex);
        return err;
}

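/*
 * First init enables the PHY reference clock and holds the PHY in reset;
 * the reset is only released later from rockchip_pcie_phy_power_on().
 */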
static int rockchip_pcie_phy_init(struct phy *phy)
{
        struct phy_pcie_instance *inst = phy_get_drvdata(phy);
        struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
        int err = 0;

        mutex_lock(&rk_phy->pcie_mutex);

        if (rk_phy->init_cnt++)
                goto err_out;

        err = clk_prepare_enable(rk_phy->clk_pciephy_ref);
        if (err) {
                dev_err(&phy->dev, "Fail to enable pcie ref clock.\n");
                goto err_refclk;
        }

        err = reset_control_assert(rk_phy->phy_rst);
        if (err) {
                dev_err(&phy->dev, "assert phy_rst err %d\n", err);
                goto err_reset;
        }

err_out:
        mutex_unlock(&rk_phy->pcie_mutex);
        return 0;

err_reset:
        clk_disable_unprepare(rk_phy->clk_pciephy_ref);
err_refclk:
        rk_phy->init_cnt--;
        mutex_unlock(&rk_phy->pcie_mutex);
        return err;
}

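/* Disable the PHY reference clock once the last user has exited. */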
static int rockchip_pcie_phy_exit(struct phy *phy)
{
        struct phy_pcie_instance *inst = phy_get_drvdata(phy);
        struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);

        mutex_lock(&rk_phy->pcie_mutex);

        if (--rk_phy->init_cnt)
                goto err_init_cnt;

        clk_disable_unprepare(rk_phy->clk_pciephy_ref);

err_init_cnt:
        mutex_unlock(&rk_phy->pcie_mutex);
        return 0;
}

static const struct phy_ops ops = {
        .init           = rockchip_pcie_phy_init,
        .exit           = rockchip_pcie_phy_exit,
        .power_on       = rockchip_pcie_phy_power_on,
        .power_off      = rockchip_pcie_phy_power_off,
        .owner          = THIS_MODULE,
};

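/* Offsets of the PCIe PHY registers within the RK3399 GRF. */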
static const struct rockchip_pcie_data rk3399_pcie_data = {
        .pcie_conf = 0xe220,
        .pcie_status = 0xe2a4,
        .pcie_laneoff = 0xe214,
};

static const struct of_device_id rockchip_pcie_phy_dt_ids[] = {
        {
                .compatible = "rockchip,rk3399-pcie-phy",
                .data = &rk3399_pcie_data,
        },
        {}
};

MODULE_DEVICE_TABLE(of, rockchip_pcie_phy_dt_ids);

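/*
 * Probe: look up the parent GRF regmap, the "phy" reset and the "refclk"
 * clock, then register either one legacy PHY (#phy-cells = 0) or one PHY
 * per lane.
 */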
static int rockchip_pcie_phy_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct rockchip_pcie_phy *rk_phy;
        struct phy_provider *phy_provider;
        struct regmap *grf;
        const struct of_device_id *of_id;
        int i;
        u32 phy_num;

        grf = syscon_node_to_regmap(dev->parent->of_node);
        if (IS_ERR(grf)) {
                dev_err(dev, "Cannot find GRF syscon\n");
                return PTR_ERR(grf);
        }

        rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL);
        if (!rk_phy)
                return -ENOMEM;

        of_id = of_match_device(rockchip_pcie_phy_dt_ids, &pdev->dev);
        if (!of_id)
                return -EINVAL;

        rk_phy->phy_data = (struct rockchip_pcie_data *)of_id->data;
        rk_phy->reg_base = grf;

        mutex_init(&rk_phy->pcie_mutex);

        rk_phy->phy_rst = devm_reset_control_get(dev, "phy");
        if (IS_ERR(rk_phy->phy_rst)) {
                if (PTR_ERR(rk_phy->phy_rst) != -EPROBE_DEFER)
                        dev_err(dev,
                                "missing phy property for reset controller\n");
                return PTR_ERR(rk_phy->phy_rst);
        }

        rk_phy->clk_pciephy_ref = devm_clk_get(dev, "refclk");
        if (IS_ERR(rk_phy->clk_pciephy_ref)) {
                dev_err(dev, "refclk not found.\n");
                return PTR_ERR(rk_phy->clk_pciephy_ref);
        }

        /* Parse #phy-cells to see whether the legacy single-PHY model is used. */
        if (of_property_read_u32(dev->of_node, "#phy-cells", &phy_num))
                return -ENOENT;

        phy_num = (phy_num == 0) ? 1 : PHY_MAX_LANE_NUM;
        dev_dbg(dev, "phy number is %d\n", phy_num);

        for (i = 0; i < phy_num; i++) {
                rk_phy->phys[i].phy = devm_phy_create(dev, dev->of_node, &ops);
                if (IS_ERR(rk_phy->phys[i].phy)) {
                        dev_err(dev, "failed to create PHY%d\n", i);
                        return PTR_ERR(rk_phy->phys[i].phy);
                }
                rk_phy->phys[i].index = i;
                phy_set_drvdata(rk_phy->phys[i].phy, &rk_phy->phys[i]);
        }

        platform_set_drvdata(pdev, rk_phy);
        phy_provider = devm_of_phy_provider_register(dev,
                                                     rockchip_pcie_phy_of_xlate);

        return PTR_ERR_OR_ZERO(phy_provider);
}

static struct platform_driver rockchip_pcie_driver = {
        .probe = rockchip_pcie_phy_probe,
        .driver = {
                .name = "rockchip-pcie-phy",
                .of_match_table = rockchip_pcie_phy_dt_ids,
        },
};

module_platform_driver(rockchip_pcie_driver);

MODULE_AUTHOR("Shawn Lin <shawn.lin@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip PCIe PHY driver");
MODULE_LICENSE("GPL v2");