1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
4 */
5
6 #include <linux/acpi.h>
7 #include <linux/time.h>
8 #include <linux/clk.h>
9 #include <linux/delay.h>
10 #include <linux/module.h>
11 #include <linux/of.h>
12 #include <linux/platform_device.h>
13 #include <linux/phy/phy.h>
14 #include <linux/gpio/consumer.h>
15 #include <linux/reset-controller.h>
16 #include <linux/devfreq.h>
17
18 #include <ufs/ufshcd.h>
19 #include "ufshcd-pltfrm.h"
20 #include <ufs/unipro.h>
21 #include "ufs-qcom.h"
22 #include <ufs/ufshci.h>
23 #include <ufs/ufs_quirks.h>
24
25 #define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
26 (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
27
28 enum {
29 TSTBUS_UAWM,
30 TSTBUS_UARM,
31 TSTBUS_TXUC,
32 TSTBUS_RXUC,
33 TSTBUS_DFC,
34 TSTBUS_TRLUT,
35 TSTBUS_TMRLUT,
36 TSTBUS_OCSC,
37 TSTBUS_UTP_HCI,
38 TSTBUS_COMBINED,
39 TSTBUS_WRAPPER,
40 TSTBUS_UNIPRO,
41 TSTBUS_MAX,
42 };
43
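/* Per-controller host contexts, indexed by platform device id (filled in by ufs_qcom_init()) */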
44 static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
45
46 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
47 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
48 u32 clk_cycles);
49
50 static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
51 {
52 return container_of(rcd, struct ufs_qcom_host, rcdev);
53 }
54
55 static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
56 const char *prefix, void *priv)
57 {
58 ufshcd_dump_regs(hba, offset, len * 4, prefix);
59 }
60
61 static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
62 {
63 int err = 0;
64
65 err = ufshcd_dme_get(hba,
66 UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
67 if (err)
68 dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
69 __func__, err);
70
71 return err;
72 }
73
74 static int ufs_qcom_host_clk_get(struct device *dev,
75 const char *name, struct clk **clk_out, bool optional)
76 {
77 struct clk *clk;
78 int err = 0;
79
80 clk = devm_clk_get(dev, name);
81 if (!IS_ERR(clk)) {
82 *clk_out = clk;
83 return 0;
84 }
85
86 err = PTR_ERR(clk);
87
88 if (optional && err == -ENOENT) {
89 *clk_out = NULL;
90 return 0;
91 }
92
93 if (err != -EPROBE_DEFER)
94 dev_err(dev, "failed to get %s err %d\n", name, err);
95
96 return err;
97 }
98
99 static int ufs_qcom_host_clk_enable(struct device *dev,
100 const char *name, struct clk *clk)
101 {
102 int err = 0;
103
104 err = clk_prepare_enable(clk);
105 if (err)
106 dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
107
108 return err;
109 }
110
111 static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
112 {
113 if (!host->is_lane_clks_enabled)
114 return;
115
116 clk_disable_unprepare(host->tx_l1_sync_clk);
117 clk_disable_unprepare(host->tx_l0_sync_clk);
118 clk_disable_unprepare(host->rx_l1_sync_clk);
119 clk_disable_unprepare(host->rx_l0_sync_clk);
120
121 host->is_lane_clks_enabled = false;
122 }
123
124 static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
125 {
126 int err = 0;
127 struct device *dev = host->hba->dev;
128
129 if (host->is_lane_clks_enabled)
130 return 0;
131
132 err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
133 host->rx_l0_sync_clk);
134 if (err)
135 goto out;
136
137 err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
138 host->tx_l0_sync_clk);
139 if (err)
140 goto disable_rx_l0;
141
142 err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
143 host->rx_l1_sync_clk);
144 if (err)
145 goto disable_tx_l0;
146
147 err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
148 host->tx_l1_sync_clk);
149 if (err)
150 goto disable_rx_l1;
151
152 host->is_lane_clks_enabled = true;
153 goto out;
154
155 disable_rx_l1:
156 clk_disable_unprepare(host->rx_l1_sync_clk);
157 disable_tx_l0:
158 clk_disable_unprepare(host->tx_l0_sync_clk);
159 disable_rx_l0:
160 clk_disable_unprepare(host->rx_l0_sync_clk);
161 out:
162 return err;
163 }
164
165 static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
166 {
167 int err = 0;
168 struct device *dev = host->hba->dev;
169
170 if (has_acpi_companion(dev))
171 return 0;
172
173 err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
174 &host->rx_l0_sync_clk, false);
175 if (err)
176 goto out;
177
178 err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
179 &host->tx_l0_sync_clk, false);
180 if (err)
181 goto out;
182
183 /* In case of single lane per direction, don't read lane1 clocks */
184 if (host->hba->lanes_per_direction > 1) {
185 err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
186 &host->rx_l1_sync_clk, false);
187 if (err)
188 goto out;
189
190 err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
191 &host->tx_l1_sync_clk, true);
192 }
193 out:
194 return err;
195 }
196
197 static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
198 {
199 u32 tx_lanes;
200
201 return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
202 }
203
204 static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
205 {
206 int err;
207 u32 tx_fsm_val = 0;
208 unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
209
210 do {
211 err = ufshcd_dme_get(hba,
212 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
213 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
214 &tx_fsm_val);
215 if (err || tx_fsm_val == TX_FSM_HIBERN8)
216 break;
217
218 /* sleep for max. 200us */
219 usleep_range(100, 200);
220 } while (time_before(jiffies, timeout));
221
222 /*
223 * We might have been scheduled out for a long time during polling,
224 * so check the state again.
225 */
226 if (time_after(jiffies, timeout))
227 err = ufshcd_dme_get(hba,
228 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
229 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
230 &tx_fsm_val);
231
232 if (err) {
233 dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
234 __func__, err);
235 } else if (tx_fsm_val != TX_FSM_HIBERN8) {
236 err = tx_fsm_val;
237 dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
238 __func__, err);
239 }
240
241 return err;
242 }
243
244 static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
245 {
246 ufshcd_rmwl(host->hba, QUNIPRO_SEL,
247 ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
248 REG_UFS_CFG1);
249 /* make sure above configuration is applied before we return */
250 mb();
251 }
252
253 /*
254 * ufs_qcom_host_reset - reset host controller and PHY
255 */
256 static int ufs_qcom_host_reset(struct ufs_hba *hba)
257 {
258 int ret = 0;
259 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
260 bool reenable_intr = false;
261
262 if (!host->core_reset) {
263 dev_warn(hba->dev, "%s: reset control not set\n", __func__);
264 goto out;
265 }
266
267 reenable_intr = hba->is_irq_enabled;
268 disable_irq(hba->irq);
269 hba->is_irq_enabled = false;
270
271 ret = reset_control_assert(host->core_reset);
272 if (ret) {
273 dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
274 __func__, ret);
275 goto out;
276 }
277
278 /*
279 * The hardware requires a delay between reset assert/deassert of at
280 * least 3-4 sleep clock (32.768 kHz) cycles, which comes to
281 * ~125us (4/32768). To be on the safe side, add a 200us delay.
282 */
283 usleep_range(200, 210);
284
285 ret = reset_control_deassert(host->core_reset);
286 if (ret)
287 dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
288 __func__, ret);
289
290 usleep_range(1000, 1100);
291
292 if (reenable_intr) {
293 enable_irq(hba->irq);
294 hba->is_irq_enabled = true;
295 }
296
297 out:
298 return ret;
299 }
300
301 static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
302 {
303 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
304 struct phy *phy = host->generic_phy;
305 int ret = 0;
306 bool is_rate_B = UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B;
307
308 /* Reset UFS Host Controller and PHY */
309 ret = ufs_qcom_host_reset(hba);
310 if (ret)
311 dev_warn(hba->dev, "%s: host reset returned %d\n",
312 __func__, ret);
313
314 if (is_rate_B)
315 phy_set_mode(phy, PHY_MODE_UFS_HS_B);
316
317 /* phy initialization - calibrate the phy */
318 ret = phy_init(phy);
319 if (ret) {
320 dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
321 __func__, ret);
322 goto out;
323 }
324
325 /* power on phy - start serdes and phy's power and clocks */
326 ret = phy_power_on(phy);
327 if (ret) {
328 dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
329 __func__, ret);
330 goto out_disable_phy;
331 }
332
333 ufs_qcom_select_unipro_mode(host);
334
335 return 0;
336
337 out_disable_phy:
338 phy_exit(phy);
339 out:
340 return ret;
341 }
342
343 /*
344 * The UTP controller has a number of internal clock gating cells (CGCs).
345 * Internal hardware sub-modules within the UTP controller control the CGCs.
346 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
347 * in a specific operation. UTP controller CGCs are disabled by default, and
348 * this function enables them (after every UFS link startup) to reduce power
349 * leakage.
350 */
351 static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
352 {
353 ufshcd_writel(hba,
354 ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
355 REG_UFS_CFG2);
356
357 /* Ensure that HW clock gating is enabled before next operations */
358 mb();
359 }
360
361 static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
362 enum ufs_notify_change_status status)
363 {
364 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
365 int err = 0;
366
367 switch (status) {
368 case PRE_CHANGE:
369 ufs_qcom_power_up_sequence(hba);
370 /*
371 * The PHY PLL output is the source of tx/rx lane symbol
372 * clocks, hence, enable the lane clocks only after PHY
373 * is initialized.
374 */
375 err = ufs_qcom_enable_lane_clks(host);
376 break;
377 case POST_CHANGE:
378 /* check if UFS PHY moved from DISABLED to HIBERN8 */
379 err = ufs_qcom_check_hibern8(hba);
380 ufs_qcom_enable_hw_clk_gating(hba);
381 ufs_qcom_ice_enable(host);
382 break;
383 default:
384 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
385 err = -EINVAL;
386 break;
387 }
388 return err;
389 }
390
391 /*
392 * Returns zero for success and non-zero in case of a failure
393 */
394 static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
395 u32 hs, u32 rate, bool update_link_startup_timer)
396 {
397 int ret = 0;
398 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
399 struct ufs_clk_info *clki;
400 u32 core_clk_period_in_ns;
401 u32 tx_clk_cycles_per_us = 0;
402 unsigned long core_clk_rate = 0;
403 u32 core_clk_cycles_per_us = 0;
404
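/* Per-gear {gear, TX symbol clock cycles per us} lookup tables for PWM and HS rate A/B modes */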
405 static u32 pwm_fr_table[][2] = {
406 {UFS_PWM_G1, 0x1},
407 {UFS_PWM_G2, 0x1},
408 {UFS_PWM_G3, 0x1},
409 {UFS_PWM_G4, 0x1},
410 };
411
412 static u32 hs_fr_table_rA[][2] = {
413 {UFS_HS_G1, 0x1F},
414 {UFS_HS_G2, 0x3e},
415 {UFS_HS_G3, 0x7D},
416 };
417
418 static u32 hs_fr_table_rB[][2] = {
419 {UFS_HS_G1, 0x24},
420 {UFS_HS_G2, 0x49},
421 {UFS_HS_G3, 0x92},
422 };
423
424 /*
425 * The Qunipro controller does not use the following registers:
426 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
427 * UFS_REG_PA_LINK_STARTUP_TIMER
428 * But the UTP controller uses the SYS1CLK_1US_REG register for its
429 * interrupt aggregation logic.
430 */
431 if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
432 goto out;
433
434 if (gear == 0) {
435 dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
436 goto out_error;
437 }
438
439 list_for_each_entry(clki, &hba->clk_list_head, list) {
440 if (!strcmp(clki->name, "core_clk"))
441 core_clk_rate = clk_get_rate(clki->clk);
442 }
443
444 /* If frequency is smaller than 1MHz, set to 1MHz */
445 if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
446 core_clk_rate = DEFAULT_CLK_RATE_HZ;
447
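/* Program the number of core clock cycles per microsecond into SYS1CLK_1US */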
448 core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
449 if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
450 ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
451 /*
452 * make sure above write gets applied before we return from
453 * this function.
454 */
455 mb();
456 }
457
458 if (ufs_qcom_cap_qunipro(host))
459 goto out;
460
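/* Compute the core clock period in ns and place it in the CLK_NS field written to REG_UFS_TX_SYMBOL_CLK_NS_US below */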
461 core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
462 core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
463 core_clk_period_in_ns &= MASK_CLK_NS_REG;
464
465 switch (hs) {
466 case FASTAUTO_MODE:
467 case FAST_MODE:
468 if (rate == PA_HS_MODE_A) {
469 if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
470 dev_err(hba->dev,
471 "%s: index %d exceeds table size %zu\n",
472 __func__, gear,
473 ARRAY_SIZE(hs_fr_table_rA));
474 goto out_error;
475 }
476 tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
477 } else if (rate == PA_HS_MODE_B) {
478 if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
479 dev_err(hba->dev,
480 "%s: index %d exceeds table size %zu\n",
481 __func__, gear,
482 ARRAY_SIZE(hs_fr_table_rB));
483 goto out_error;
484 }
485 tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
486 } else {
487 dev_err(hba->dev, "%s: invalid rate = %d\n",
488 __func__, rate);
489 goto out_error;
490 }
491 break;
492 case SLOWAUTO_MODE:
493 case SLOW_MODE:
494 if (gear > ARRAY_SIZE(pwm_fr_table)) {
495 dev_err(hba->dev,
496 "%s: index %d exceeds table size %zu\n",
497 __func__, gear,
498 ARRAY_SIZE(pwm_fr_table));
499 goto out_error;
500 }
501 tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
502 break;
503 case UNCHANGED:
504 default:
505 dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
506 goto out_error;
507 }
508
509 if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
510 (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
511 /* both fields of this register must be written in a single access */
512 ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
513 REG_UFS_TX_SYMBOL_CLK_NS_US);
514 /*
515 * make sure above write gets applied before we return from
516 * this function.
517 */
518 mb();
519 }
520
521 if (update_link_startup_timer) {
522 ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
523 REG_UFS_PA_LINK_STARTUP_TIMER);
524 /*
525 * make sure that this configuration is applied before
526 * we return
527 */
528 mb();
529 }
530 goto out;
531
532 out_error:
533 ret = -EINVAL;
534 out:
535 return ret;
536 }
537
538 static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
539 enum ufs_notify_change_status status)
540 {
541 int err = 0;
542 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
543
544 switch (status) {
545 case PRE_CHANGE:
546 if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
547 0, true)) {
548 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
549 __func__);
550 err = -EINVAL;
551 goto out;
552 }
553
554 if (ufs_qcom_cap_qunipro(host))
555 /*
556 * set unipro core clock cycles to 150 & clear clock
557 * divider
558 */
559 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
560 150);
561
562 /*
563 * Some UFS devices (and possibly the host) have issues if LCC is
564 * enabled. So set PA_Local_TX_LCC_Enable to 0
565 * before link startup, which makes sure that both host
566 * and device TX LCC are disabled once link startup is
567 * completed.
568 */
569 if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
570 err = ufshcd_disable_host_tx_lcc(hba);
571
572 break;
573 case POST_CHANGE:
574 ufs_qcom_link_startup_post_change(hba);
575 break;
576 default:
577 break;
578 }
579
580 out:
581 return err;
582 }
583
584 static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted)
585 {
586 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
587
588 /* reset gpio is optional */
589 if (!host->device_reset)
590 return;
591
592 gpiod_set_value_cansleep(host->device_reset, asserted);
593 }
594
595 static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
596 enum ufs_notify_change_status status)
597 {
598 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
599 struct phy *phy = host->generic_phy;
600
601 if (status == PRE_CHANGE)
602 return 0;
603
604 if (ufs_qcom_is_link_off(hba)) {
605 /*
606 * Disable the tx/rx lane symbol clocks before PHY is
607 * powered down as the PLL source should be disabled
608 * after downstream clocks are disabled.
609 */
610 ufs_qcom_disable_lane_clks(host);
611 phy_power_off(phy);
612
613 /* reset the connected UFS device during power down */
614 ufs_qcom_device_reset_ctrl(hba, true);
615
616 } else if (!ufs_qcom_is_link_active(hba)) {
617 ufs_qcom_disable_lane_clks(host);
618 }
619
620 return 0;
621 }
622
623 static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
624 {
625 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
626 struct phy *phy = host->generic_phy;
627 int err;
628
629 if (ufs_qcom_is_link_off(hba)) {
630 err = phy_power_on(phy);
631 if (err) {
632 dev_err(hba->dev, "%s: failed PHY power on: %d\n",
633 __func__, err);
634 return err;
635 }
636
637 err = ufs_qcom_enable_lane_clks(host);
638 if (err)
639 return err;
640
641 } else if (!ufs_qcom_is_link_active(hba)) {
642 err = ufs_qcom_enable_lane_clks(host);
643 if (err)
644 return err;
645 }
646
647 return ufs_qcom_ice_resume(host);
648 }
649
650 static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
651 {
652 if (host->dev_ref_clk_ctrl_mmio &&
653 (enable ^ host->is_dev_ref_clk_enabled)) {
654 u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);
655
656 if (enable)
657 temp |= host->dev_ref_clk_en_mask;
658 else
659 temp &= ~host->dev_ref_clk_en_mask;
660
661 /*
662 * If we are here to disable this clock, it might be immediately
663 * after entering hibern8, in which case we need to make
664 * sure that the device ref_clk stays active for a specific time
665 * after hibern8 entry.
666 */
667 if (!enable) {
668 unsigned long gating_wait;
669
670 gating_wait = host->hba->dev_info.clk_gating_wait_us;
671 if (!gating_wait) {
672 udelay(1);
673 } else {
674 /*
675 * bRefClkGatingWaitTime defines the minimum
676 * time for which the reference clock is
677 * required by device during transition from
678 * HS-MODE to LS-MODE or HIBERN8 state. Give it
679 * more delay to be on the safe side.
680 */
681 gating_wait += 10;
682 usleep_range(gating_wait, gating_wait + 10);
683 }
684 }
685
686 writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
687
688 /*
689 * Make sure the write to ref_clk reaches the destination and is
690 * not stored in a Write Buffer (WB).
691 */
692 readl(host->dev_ref_clk_ctrl_mmio);
693
694 /*
695 * If we call hibern8 exit after this, we need to make sure that
696 * device ref_clk is stable for at least 1us before the hibern8
697 * exit command.
698 */
699 if (enable)
700 udelay(1);
701
702 host->is_dev_ref_clk_enabled = enable;
703 }
704 }
705
706 static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
707 enum ufs_notify_change_status status,
708 struct ufs_pa_layer_attr *dev_max_params,
709 struct ufs_pa_layer_attr *dev_req_params)
710 {
711 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
712 struct ufs_dev_params ufs_qcom_cap;
713 int ret = 0;
714
715 if (!dev_req_params) {
716 pr_err("%s: incoming dev_req_params is NULL\n", __func__);
717 ret = -EINVAL;
718 goto out;
719 }
720
721 switch (status) {
722 case PRE_CHANGE:
723 ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
724 ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
725
726 if (host->hw_ver.major == 0x1) {
727 /*
728 * HS-G3 operations may not reliably work on legacy QCOM
729 * UFS host controller hardware even though capability
730 * exchange during link startup phase may end up
731 * negotiating maximum supported gear as G3.
732 * Hence downgrade the maximum supported gear to HS-G2.
733 */
734 if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
735 ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
736 if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
737 ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
738 }
739
740 ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
741 dev_max_params,
742 dev_req_params);
743 if (ret) {
744 pr_err("%s: failed to determine capabilities\n",
745 __func__);
746 goto out;
747 }
748
749 /* enable the device ref clock before changing to HS mode */
750 if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
751 ufshcd_is_hs_mode(dev_req_params))
752 ufs_qcom_dev_ref_clk_ctrl(host, true);
753
754 if (host->hw_ver.major >= 0x4) {
755 ufshcd_dme_configure_adapt(hba,
756 dev_req_params->gear_tx,
757 PA_INITIAL_ADAPT);
758 }
759 break;
760 case POST_CHANGE:
761 if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
762 dev_req_params->pwr_rx,
763 dev_req_params->hs_rate, false)) {
764 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
765 __func__);
766 /*
767 * we return error code at the end of the routine,
768 * but continue to configure UFS_PHY_TX_LANE_ENABLE
769 * and bus voting as usual
770 */
771 ret = -EINVAL;
772 }
773
774 /* cache the power mode parameters to use internally */
775 memcpy(&host->dev_req_params,
776 dev_req_params, sizeof(*dev_req_params));
777
778 /* disable the device ref clock if entered PWM mode */
779 if (ufshcd_is_hs_mode(&hba->pwr_info) &&
780 !ufshcd_is_hs_mode(dev_req_params))
781 ufs_qcom_dev_ref_clk_ctrl(host, false);
782 break;
783 default:
784 ret = -EINVAL;
785 break;
786 }
787 out:
788 return ret;
789 }
790
791 static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
792 {
793 int err;
794 u32 pa_vs_config_reg1;
795
796 err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
797 &pa_vs_config_reg1);
798 if (err)
799 goto out;
800
801 /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
802 err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
803 (pa_vs_config_reg1 | (1 << 12)));
804
805 out:
806 return err;
807 }
808
809 static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
810 {
811 int err = 0;
812
813 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
814 err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
815
816 if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC)
817 hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
818
819 return err;
820 }
821
822 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
823 {
824 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
825
826 if (host->hw_ver.major == 0x1)
827 return ufshci_version(1, 1);
828 else
829 return ufshci_version(2, 0);
830 }
831
832 /**
833 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
834 * @hba: host controller instance
835 *
836 * The QCOM UFS host controller may have some non-standard behaviours (quirks)
837 * compared to what the UFSHCI specification requires. Advertise all such
838 * quirks to the standard UFS host controller driver so that it takes
839 * them into account.
840 */
841 static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
842 {
843 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
844
845 if (host->hw_ver.major == 0x01) {
846 hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
847 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
848 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
849
850 if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
851 hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
852
853 hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
854 }
855
856 if (host->hw_ver.major == 0x2) {
857 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
858
859 if (!ufs_qcom_cap_qunipro(host))
860 /* Legacy UniPro mode still needs the following quirks */
861 hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
862 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
863 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
864 }
865 }
866
867 static void ufs_qcom_set_caps(struct ufs_hba *hba)
868 {
869 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
870
871 hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
872 hba->caps |= UFSHCD_CAP_CLK_SCALING;
873 hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
874 hba->caps |= UFSHCD_CAP_WB_EN;
875 hba->caps |= UFSHCD_CAP_CRYPTO;
876 hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
877 hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
878
879 if (host->hw_ver.major >= 0x2) {
880 host->caps = UFS_QCOM_CAP_QUNIPRO |
881 UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
882 }
883 }
884
885 /**
886 * ufs_qcom_setup_clocks - enable/disable clocks
887 * @hba: host controller instance
888 * @on: If true, enable clocks else disable them.
889 * @status: PRE_CHANGE or POST_CHANGE notify
890 *
891 * Returns 0 on success, non-zero on failure.
892 */
893 static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
894 enum ufs_notify_change_status status)
895 {
896 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
897
898 /*
899 * In case ufs_qcom_init() is not yet done, simply ignore.
900 * ufs_qcom_setup_clocks() will be called again from
901 * ufs_qcom_init() once init is done.
902 */
903 if (!host)
904 return 0;
905
906 switch (status) {
907 case PRE_CHANGE:
908 if (!on) {
909 if (!ufs_qcom_is_link_active(hba)) {
910 /* disable device ref_clk */
911 ufs_qcom_dev_ref_clk_ctrl(host, false);
912 }
913 }
914 break;
915 case POST_CHANGE:
916 if (on) {
917 /* enable the device ref clock for HS mode */
918 if (ufshcd_is_hs_mode(&hba->pwr_info))
919 ufs_qcom_dev_ref_clk_ctrl(host, true);
920 }
921 break;
922 }
923
924 return 0;
925 }
926
927 static int
928 ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
929 {
930 struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
931
932 /* Currently this code only knows about a single reset. */
933 WARN_ON(id);
934 ufs_qcom_assert_reset(host->hba);
935 /* provide 1ms delay to let the reset pulse propagate. */
936 usleep_range(1000, 1100);
937 return 0;
938 }
939
940 static int
941 ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
942 {
943 struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
944
945 /* Currently this code only knows about a single reset. */
946 WARN_ON(id);
947 ufs_qcom_deassert_reset(host->hba);
948
949 /*
950 * after reset deassertion, phy will need all ref clocks,
951 * voltage, current to settle down before starting serdes.
952 */
953 usleep_range(1000, 1100);
954 return 0;
955 }
956
957 static const struct reset_control_ops ufs_qcom_reset_ops = {
958 .assert = ufs_qcom_reset_assert,
959 .deassert = ufs_qcom_reset_deassert,
960 };
961
962 /**
963 * ufs_qcom_init - bind phy with controller
964 * @hba: host controller instance
965 *
966 * Binds the PHY with the controller and powers up the PHY, enabling
967 * its clocks and regulators.
968 *
969 * Returns -EPROBE_DEFER if binding fails, returns negative error
970 * on phy power up failure and returns zero on success.
971 */
972 static int ufs_qcom_init(struct ufs_hba *hba)
973 {
974 int err;
975 struct device *dev = hba->dev;
976 struct platform_device *pdev = to_platform_device(dev);
977 struct ufs_qcom_host *host;
978 struct resource *res;
979 struct ufs_clk_info *clki;
980
981 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
982 if (!host) {
983 err = -ENOMEM;
984 dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
985 goto out;
986 }
987
988 /* Make a two way bind between the qcom host and the hba */
989 host->hba = hba;
990 ufshcd_set_variant(hba, host);
991
992 /* Setup the optional reset control of HCI */
993 host->core_reset = devm_reset_control_get_optional(hba->dev, "rst");
994 if (IS_ERR(host->core_reset)) {
995 err = dev_err_probe(dev, PTR_ERR(host->core_reset),
996 "Failed to get reset control\n");
997 goto out_variant_clear;
998 }
999
1000 /* Fire up the reset controller. Failure here is non-fatal. */
1001 host->rcdev.of_node = dev->of_node;
1002 host->rcdev.ops = &ufs_qcom_reset_ops;
1003 host->rcdev.owner = dev->driver->owner;
1004 host->rcdev.nr_resets = 1;
1005 err = devm_reset_controller_register(dev, &host->rcdev);
1006 if (err) {
1007 dev_warn(dev, "Failed to register reset controller\n");
1008 err = 0;
1009 }
1010
1011 if (!has_acpi_companion(dev)) {
1012 host->generic_phy = devm_phy_get(dev, "ufsphy");
1013 if (IS_ERR(host->generic_phy)) {
1014 err = dev_err_probe(dev, PTR_ERR(host->generic_phy), "Failed to get PHY\n");
1015 goto out_variant_clear;
1016 }
1017 }
1018
1019 host->device_reset = devm_gpiod_get_optional(dev, "reset",
1020 GPIOD_OUT_HIGH);
1021 if (IS_ERR(host->device_reset)) {
1022 err = PTR_ERR(host->device_reset);
1023 if (err != -EPROBE_DEFER)
1024 dev_err(dev, "failed to acquire reset gpio: %d\n", err);
1025 goto out_variant_clear;
1026 }
1027
1028 ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
1029 &host->hw_ver.minor, &host->hw_ver.step);
1030
1031 /*
1032 * For newer controllers, the device reference clock control bit has
1033 * moved into the UFS controller's own register address space.
1034 */
1035 if (host->hw_ver.major >= 0x02) {
1036 host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
1037 host->dev_ref_clk_en_mask = BIT(26);
1038 } else {
1039 /* "dev_ref_clk_ctrl_mem" is optional resource */
1040 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1041 "dev_ref_clk_ctrl_mem");
1042 if (res) {
1043 host->dev_ref_clk_ctrl_mmio =
1044 devm_ioremap_resource(dev, res);
1045 if (IS_ERR(host->dev_ref_clk_ctrl_mmio))
1046 host->dev_ref_clk_ctrl_mmio = NULL;
1047 host->dev_ref_clk_en_mask = BIT(5);
1048 }
1049 }
1050
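/* core_clk_unipro must be kept running as long as the link is active */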
1051 list_for_each_entry(clki, &hba->clk_list_head, list) {
1052 if (!strcmp(clki->name, "core_clk_unipro"))
1053 clki->keep_link_active = true;
1054 }
1055
1056 err = ufs_qcom_init_lane_clks(host);
1057 if (err)
1058 goto out_variant_clear;
1059
1060 ufs_qcom_set_caps(hba);
1061 ufs_qcom_advertise_quirks(hba);
1062
1063 err = ufs_qcom_ice_init(host);
1064 if (err)
1065 goto out_variant_clear;
1066
1067 ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
1068
1069 if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
1070 ufs_qcom_hosts[hba->dev->id] = host;
1071
1072 host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
1073 ufs_qcom_get_default_testbus_cfg(host);
1074 err = ufs_qcom_testbus_config(host);
1075 if (err) {
1076 dev_warn(dev, "%s: failed to configure the testbus %d\n",
1077 __func__, err);
1078 err = 0;
1079 }
1080
1081 goto out;
1082
1083 out_variant_clear:
1084 ufshcd_set_variant(hba, NULL);
1085 out:
1086 return err;
1087 }
1088
1089 static void ufs_qcom_exit(struct ufs_hba *hba)
1090 {
1091 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1092
1093 ufs_qcom_disable_lane_clks(host);
1094 phy_power_off(host->generic_phy);
1095 phy_exit(host->generic_phy);
1096 }
1097
1098 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
1099 u32 clk_cycles)
1100 {
1101 int err;
1102 u32 core_clk_ctrl_reg;
1103
1104 if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
1105 return -EINVAL;
1106
1107 err = ufshcd_dme_get(hba,
1108 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1109 &core_clk_ctrl_reg);
1110 if (err)
1111 goto out;
1112
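/* Update the MAX_CORE_CLK_1US_CYCLES field with the requested cycle count */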
1113 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
1114 core_clk_ctrl_reg |= clk_cycles;
1115
1116 /* Clear CORE_CLK_DIV_EN */
1117 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
1118
1119 err = ufshcd_dme_set(hba,
1120 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1121 core_clk_ctrl_reg);
1122 out:
1123 return err;
1124 }
1125
1126 static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
1127 {
1128 /* nothing to do as of now */
1129 return 0;
1130 }
1131
1132 static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
1133 {
1134 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1135
1136 if (!ufs_qcom_cap_qunipro(host))
1137 return 0;
1138
1139 /* set unipro core clock cycles to 150 and clear clock divider */
1140 return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
1141 }
1142
1143 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
1144 {
1145 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1146 int err;
1147 u32 core_clk_ctrl_reg;
1148
1149 if (!ufs_qcom_cap_qunipro(host))
1150 return 0;
1151
1152 err = ufshcd_dme_get(hba,
1153 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1154 &core_clk_ctrl_reg);
1155
1156 /* make sure CORE_CLK_DIV_EN is cleared */
1157 if (!err &&
1158 (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
1159 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
1160 err = ufshcd_dme_set(hba,
1161 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1162 core_clk_ctrl_reg);
1163 }
1164
1165 return err;
1166 }
1167
1168 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
1169 {
1170 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1171
1172 if (!ufs_qcom_cap_qunipro(host))
1173 return 0;
1174
1175 /* set unipro core clock cycles to 75 and clear clock divider */
1176 return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
1177 }
1178
1179 static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
1180 bool scale_up, enum ufs_notify_change_status status)
1181 {
1182 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1183 struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
1184 int err = 0;
1185
1186 if (status == PRE_CHANGE) {
1187 err = ufshcd_uic_hibern8_enter(hba);
1188 if (err)
1189 return err;
1190 if (scale_up)
1191 err = ufs_qcom_clk_scale_up_pre_change(hba);
1192 else
1193 err = ufs_qcom_clk_scale_down_pre_change(hba);
1194 if (err)
1195 ufshcd_uic_hibern8_exit(hba);
1196
1197 } else {
1198 if (scale_up)
1199 err = ufs_qcom_clk_scale_up_post_change(hba);
1200 else
1201 err = ufs_qcom_clk_scale_down_post_change(hba);
1202
1203
1204 if (err || !dev_req_params) {
1205 ufshcd_uic_hibern8_exit(hba);
1206 goto out;
1207 }
1208
1209 ufs_qcom_cfg_timers(hba,
1210 dev_req_params->gear_rx,
1211 dev_req_params->pwr_rx,
1212 dev_req_params->hs_rate,
1213 false);
1214 ufshcd_uic_hibern8_exit(hba);
1215 }
1216
1217 out:
1218 return err;
1219 }
1220
1221 static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
1222 void *priv, void (*print_fn)(struct ufs_hba *hba,
1223 int offset, int num_regs, const char *str, void *priv))
1224 {
1225 u32 reg;
1226 struct ufs_qcom_host *host;
1227
1228 if (unlikely(!hba)) {
1229 pr_err("%s: hba is NULL\n", __func__);
1230 return;
1231 }
1232 if (unlikely(!print_fn)) {
1233 dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
1234 return;
1235 }
1236
1237 host = ufshcd_get_variant(hba);
1238 if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
1239 return;
1240
1241 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
1242 print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
1243
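/* Enable access to the UTP debug RAMs before dumping them */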
1244 reg = ufshcd_readl(hba, REG_UFS_CFG1);
1245 reg |= UTP_DBG_RAMS_EN;
1246 ufshcd_writel(hba, reg, REG_UFS_CFG1);
1247
1248 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
1249 print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
1250
1251 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
1252 print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
1253
1254 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
1255 print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
1256
1257 /* clear bit 17 - UTP_DBG_RAMS_EN */
1258 ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
1259
1260 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
1261 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
1262
1263 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
1264 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
1265
1266 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
1267 print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
1268
1269 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
1270 print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
1271
1272 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
1273 print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
1274
1275 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
1276 print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
1277
1278 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
1279 print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
1280 }
1281
1282 static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
1283 {
1284 if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
1285 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
1286 UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
1287 ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
1288 } else {
1289 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
1290 ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
1291 }
1292 }
1293
1294 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
1295 {
1296 /* provide a legal default configuration */
1297 host->testbus.select_major = TSTBUS_UNIPRO;
1298 host->testbus.select_minor = 37;
1299 }
1300
1301 static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
1302 {
1303 if (host->testbus.select_major >= TSTBUS_MAX) {
1304 dev_err(host->hba->dev,
1305 "%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n",
1306 __func__, host->testbus.select_major);
1307 return false;
1308 }
1309
1310 return true;
1311 }
1312
1313 int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
1314 {
1315 int reg;
1316 int offset;
1317 u32 mask = TEST_BUS_SUB_SEL_MASK;
1318
1319 if (!host)
1320 return -EINVAL;
1321
1322 if (!ufs_qcom_testbus_cfg_is_ok(host))
1323 return -EPERM;
1324
1325 switch (host->testbus.select_major) {
1326 case TSTBUS_UAWM:
1327 reg = UFS_TEST_BUS_CTRL_0;
1328 offset = 24;
1329 break;
1330 case TSTBUS_UARM:
1331 reg = UFS_TEST_BUS_CTRL_0;
1332 offset = 16;
1333 break;
1334 case TSTBUS_TXUC:
1335 reg = UFS_TEST_BUS_CTRL_0;
1336 offset = 8;
1337 break;
1338 case TSTBUS_RXUC:
1339 reg = UFS_TEST_BUS_CTRL_0;
1340 offset = 0;
1341 break;
1342 case TSTBUS_DFC:
1343 reg = UFS_TEST_BUS_CTRL_1;
1344 offset = 24;
1345 break;
1346 case TSTBUS_TRLUT:
1347 reg = UFS_TEST_BUS_CTRL_1;
1348 offset = 16;
1349 break;
1350 case TSTBUS_TMRLUT:
1351 reg = UFS_TEST_BUS_CTRL_1;
1352 offset = 8;
1353 break;
1354 case TSTBUS_OCSC:
1355 reg = UFS_TEST_BUS_CTRL_1;
1356 offset = 0;
1357 break;
1358 case TSTBUS_WRAPPER:
1359 reg = UFS_TEST_BUS_CTRL_2;
1360 offset = 16;
1361 break;
1362 case TSTBUS_COMBINED:
1363 reg = UFS_TEST_BUS_CTRL_2;
1364 offset = 8;
1365 break;
1366 case TSTBUS_UTP_HCI:
1367 reg = UFS_TEST_BUS_CTRL_2;
1368 offset = 0;
1369 break;
1370 case TSTBUS_UNIPRO:
1371 reg = UFS_UNIPRO_CFG;
1372 offset = 20;
1373 mask = 0xFFF;
1374 break;
1375 /*
1376 * No need for a default case, since
1377 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
1378 * is legal
1379 */
1380 }
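/* Route select_major to the TEST_BUS_SEL field of REG_UFS_CFG1 and select_minor to the sub-select field of the chosen test bus control register */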
1381 mask <<= offset;
1382 ufshcd_rmwl(host->hba, TEST_BUS_SEL,
1383 (u32)host->testbus.select_major << 19,
1384 REG_UFS_CFG1);
1385 ufshcd_rmwl(host->hba, mask,
1386 (u32)host->testbus.select_minor << offset,
1387 reg);
1388 ufs_qcom_enable_test_bus(host);
1389 /*
1390 * Make sure the test bus configuration is
1391 * committed before returning.
1392 */
1393 mb();
1394
1395 return 0;
1396 }
1397
1398 static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
1399 {
1400 ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
1401 "HCI Vendor Specific Registers ");
1402
1403 ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
1404 }
1405
1406 /**
1407 * ufs_qcom_device_reset() - toggle the (optional) device reset line
1408 * @hba: per-adapter instance
1409 *
1410 * Toggles the (optional) reset line to reset the attached device.
1411 */
1412 static int ufs_qcom_device_reset(struct ufs_hba *hba)
1413 {
1414 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1415
1416 /* reset gpio is optional */
1417 if (!host->device_reset)
1418 return -EOPNOTSUPP;
1419
1420 /*
1421 * The UFS device shall detect reset pulses of 1us; sleep for 10us to
1422 * be on the safe side.
1423 */
1424 ufs_qcom_device_reset_ctrl(hba, true);
1425 usleep_range(10, 15);
1426
1427 ufs_qcom_device_reset_ctrl(hba, false);
1428 usleep_range(10, 15);
1429
1430 return 0;
1431 }
1432
1433 #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
1434 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
1435 struct devfreq_dev_profile *p,
1436 struct devfreq_simple_ondemand_data *d)
1437 {
1438 p->polling_ms = 60;
1439 d->upthreshold = 70;
1440 d->downdifferential = 5;
1441 }
1442 #else
1443 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
1444 struct devfreq_dev_profile *p,
1445 struct devfreq_simple_ondemand_data *data)
1446 {
1447 }
1448 #endif
1449
1450 /*
1451 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
1452 *
1453 * The variant operations configure the necessary controller and PHY
1454 * handshake during initialization.
1455 */
1456 static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
1457 .name = "qcom",
1458 .init = ufs_qcom_init,
1459 .exit = ufs_qcom_exit,
1460 .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
1461 .clk_scale_notify = ufs_qcom_clk_scale_notify,
1462 .setup_clocks = ufs_qcom_setup_clocks,
1463 .hce_enable_notify = ufs_qcom_hce_enable_notify,
1464 .link_startup_notify = ufs_qcom_link_startup_notify,
1465 .pwr_change_notify = ufs_qcom_pwr_change_notify,
1466 .apply_dev_quirks = ufs_qcom_apply_dev_quirks,
1467 .suspend = ufs_qcom_suspend,
1468 .resume = ufs_qcom_resume,
1469 .dbg_register_dump = ufs_qcom_dump_dbg_regs,
1470 .device_reset = ufs_qcom_device_reset,
1471 .config_scaling_param = ufs_qcom_config_scaling_param,
1472 .program_key = ufs_qcom_ice_program_key,
1473 };
1474
1475 /**
1476 * ufs_qcom_probe - probe routine of the driver
1477 * @pdev: pointer to Platform device handle
1478 *
1479 * Return zero for success and non-zero for failure
1480 */
1481 static int ufs_qcom_probe(struct platform_device *pdev)
1482 {
1483 int err;
1484 struct device *dev = &pdev->dev;
1485
1486 /* Perform generic probe */
1487 err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
1488 if (err)
1489 dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
1490
1491 return err;
1492 }
1493
1494 /**
1495 * ufs_qcom_remove - set driver_data of the device to NULL
1496 * @pdev: pointer to platform device handle
1497 *
1498 * Always returns 0
1499 */
1500 static int ufs_qcom_remove(struct platform_device *pdev)
1501 {
1502 struct ufs_hba *hba = platform_get_drvdata(pdev);
1503
1504 pm_runtime_get_sync(&(pdev)->dev);
1505 ufshcd_remove(hba);
1506 return 0;
1507 }
1508
1509 static const struct of_device_id ufs_qcom_of_match[] = {
1510 { .compatible = "qcom,ufshc"},
1511 {},
1512 };
1513 MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
1514
1515 #ifdef CONFIG_ACPI
1516 static const struct acpi_device_id ufs_qcom_acpi_match[] = {
1517 { "QCOM24A5" },
1518 { },
1519 };
1520 MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
1521 #endif
1522
1523 static const struct dev_pm_ops ufs_qcom_pm_ops = {
1524 SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
1525 SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
1526 .prepare = ufshcd_suspend_prepare,
1527 .complete = ufshcd_resume_complete,
1528 };
1529
1530 static struct platform_driver ufs_qcom_pltform = {
1531 .probe = ufs_qcom_probe,
1532 .remove = ufs_qcom_remove,
1533 .shutdown = ufshcd_pltfrm_shutdown,
1534 .driver = {
1535 .name = "ufshcd-qcom",
1536 .pm = &ufs_qcom_pm_ops,
1537 .of_match_table = of_match_ptr(ufs_qcom_of_match),
1538 .acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
1539 },
1540 };
1541 module_platform_driver(ufs_qcom_pltform);
1542
1543 MODULE_LICENSE("GPL v2");
1544