/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low level PM code for TI EMIF
 *
 * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
 *	Dave Gerlach
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/memory.h>

#include "emif.h"
#include "ti-emif-asm-offsets.h"

#define EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES	0x00a0
#define EMIF_POWER_MGMT_SR_TIMER_MASK			0x00f0
#define EMIF_POWER_MGMT_SELF_REFRESH_MODE		0x0200
#define EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK		0x0700

#define EMIF_SDCFG_TYPE_DDR2				0x2 << SDRAM_TYPE_SHIFT
#define EMIF_SDCFG_TYPE_DDR3				0x3 << SDRAM_TYPE_SHIFT
#define EMIF_STATUS_READY				0x4

#define AM43XX_EMIF_PHY_CTRL_REG_COUNT			0x120

#define EMIF_AM437X_REGISTERS				0x1

	.arm
	.align 3

ENTRY(ti_emif_sram)

/*
 * void ti_emif_save_context(void)
 *
 * Used during suspend to save the context of all required EMIF registers
 * to local memory if the EMIF is going to lose context during the sleep
 * transition. Operates on the VIRTUAL address of the EMIF.
 */
ENTRY(ti_emif_save_context)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack

	adr	r4, ti_emif_pm_sram_data
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]
	ldr	r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]

	/* Save EMIF configuration */
	ldr	r1, [r0, #EMIF_SDRAM_CONFIG]
	str	r1, [r2, #EMIF_SDCFG_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_SDRAM_REFRESH_CONTROL]
	str	r1, [r2, #EMIF_REF_CTRL_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_SDRAM_TIMING_1]
	str	r1, [r2, #EMIF_TIMING1_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_SDRAM_TIMING_2]
	str	r1, [r2, #EMIF_TIMING2_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_SDRAM_TIMING_3]
	str	r1, [r2, #EMIF_TIMING3_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
	str	r1, [r2, #EMIF_PMCR_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_POWER_MANAGEMENT_CTRL_SHDW]
	str	r1, [r2, #EMIF_PMCR_SHDW_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]
	str	r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_DDR_PHY_CTRL_1]
	str	r1, [r2, #EMIF_DDR_PHY_CTLR_1_OFFSET]

	ldr	r1, [r0, #EMIF_COS_CONFIG]
	str	r1, [r2, #EMIF_COS_CONFIG_OFFSET]

	ldr	r1, [r0, #EMIF_PRIORITY_TO_CLASS_OF_SERVICE_MAPPING]
	str	r1, [r2, #EMIF_PRIORITY_TO_COS_MAPPING_OFFSET]

	ldr	r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_1_MAPPING]
	str	r1, [r2, #EMIF_CONNECT_ID_SERV_1_MAP_OFFSET]

	ldr	r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_2_MAPPING]
	str	r1, [r2, #EMIF_CONNECT_ID_SERV_2_MAP_OFFSET]

	ldr	r1, [r0, #EMIF_OCP_CONFIG]
	str	r1, [r2, #EMIF_OCP_CONFIG_VAL_OFFSET]

	ldr	r5, [r4, #EMIF_PM_CONFIG_OFFSET]
	cmp	r5, #EMIF_SRAM_AM43_REG_LAYOUT
	bne	emif_skip_save_extra_regs

	ldr	r1, [r0, #EMIF_READ_WRITE_LEVELING_RAMP_CONTROL]
	str	r1, [r2, #EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET]

	ldr	r1, [r0, #EMIF_READ_WRITE_EXECUTION_THRESHOLD]
	str	r1, [r2, #EMIF_RD_WR_EXEC_THRESH_OFFSET]

	ldr	r1, [r0, #EMIF_LPDDR2_NVM_TIMING]
	str	r1, [r2, #EMIF_LPDDR2_NVM_TIM_OFFSET]

	ldr	r1, [r0, #EMIF_LPDDR2_NVM_TIMING_SHDW]
	str	r1, [r2, #EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET]

	ldr	r1, [r0, #EMIF_DLL_CALIB_CTRL]
	str	r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_DLL_CALIB_CTRL_SHDW]
	str	r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET]

	/* Loop and save entire block of emif phy regs */
	mov	r5, #0x0
	add	r4, r2, #EMIF_EXT_PHY_CTRL_VALS_OFFSET
	add	r3, r0, #EMIF_EXT_PHY_CTRL_1
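	/* r3 = first EXT_PHY_CTRL register, r4 = save area, r5 = byte offset */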
ddr_phy_ctrl_save:
	ldr	r1, [r3, r5]
	str	r1, [r4, r5]
	add	r5, r5, #0x4
	cmp	r5, #AM43XX_EMIF_PHY_CTRL_REG_COUNT
	bne	ddr_phy_ctrl_save

emif_skip_save_extra_regs:
	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(ti_emif_save_context)

/*
 * void ti_emif_restore_context(void)
 *
 * Used during resume to restore the context of all required EMIF registers
 * from local memory after the EMIF has lost context during a sleep transition.
 * Operates on the PHYSICAL address of the EMIF.
 */
ENTRY(ti_emif_restore_context)
	adr	r4, ti_emif_pm_sram_data
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]
	ldr	r2, [r4, #EMIF_PM_REGS_PHYS_OFFSET]

	/* Config EMIF Timings */
	ldr	r1, [r2, #EMIF_DDR_PHY_CTLR_1_OFFSET]
	str	r1, [r0, #EMIF_DDR_PHY_CTRL_1]
	str	r1, [r0, #EMIF_DDR_PHY_CTRL_1_SHDW]

	ldr	r1, [r2, #EMIF_TIMING1_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_TIMING_1]
	str	r1, [r0, #EMIF_SDRAM_TIMING_1_SHDW]

	ldr	r1, [r2, #EMIF_TIMING2_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_TIMING_2]
	str	r1, [r0, #EMIF_SDRAM_TIMING_2_SHDW]

	ldr	r1, [r2, #EMIF_TIMING3_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_TIMING_3]
	str	r1, [r0, #EMIF_SDRAM_TIMING_3_SHDW]

	ldr	r1, [r2, #EMIF_REF_CTRL_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_REFRESH_CONTROL]
	str	r1, [r0, #EMIF_SDRAM_REFRESH_CTRL_SHDW]

	ldr	r1, [r2, #EMIF_PMCR_VAL_OFFSET]
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]

	ldr	r1, [r2, #EMIF_PMCR_SHDW_VAL_OFFSET]
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CTRL_SHDW]

	ldr	r1, [r2, #EMIF_COS_CONFIG_OFFSET]
	str	r1, [r0, #EMIF_COS_CONFIG]

	ldr	r1, [r2, #EMIF_PRIORITY_TO_COS_MAPPING_OFFSET]
	str	r1, [r0, #EMIF_PRIORITY_TO_CLASS_OF_SERVICE_MAPPING]

	ldr	r1, [r2, #EMIF_CONNECT_ID_SERV_1_MAP_OFFSET]
	str	r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_1_MAPPING]

	ldr	r1, [r2, #EMIF_CONNECT_ID_SERV_2_MAP_OFFSET]
	str	r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_2_MAPPING]

	ldr	r1, [r2, #EMIF_OCP_CONFIG_VAL_OFFSET]
	str	r1, [r0, #EMIF_OCP_CONFIG]

	ldr	r5, [r4, #EMIF_PM_CONFIG_OFFSET]
	cmp	r5, #EMIF_SRAM_AM43_REG_LAYOUT
	bne	emif_skip_restore_extra_regs

	ldr	r1, [r2, #EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET]
	str	r1, [r0, #EMIF_READ_WRITE_LEVELING_RAMP_CONTROL]

	ldr	r1, [r2, #EMIF_RD_WR_EXEC_THRESH_OFFSET]
	str	r1, [r0, #EMIF_READ_WRITE_EXECUTION_THRESHOLD]

	ldr	r1, [r2, #EMIF_LPDDR2_NVM_TIM_OFFSET]
	str	r1, [r0, #EMIF_LPDDR2_NVM_TIMING]

	ldr	r1, [r2, #EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET]
	str	r1, [r0, #EMIF_LPDDR2_NVM_TIMING_SHDW]

	ldr	r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_OFFSET]
	str	r1, [r0, #EMIF_DLL_CALIB_CTRL]

	ldr	r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET]
	str	r1, [r0, #EMIF_DLL_CALIB_CTRL_SHDW]

	ldr	r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]

	/* Loop and restore entire block of emif phy regs */
	mov	r5, #0x0
	/* Load ti_emif_regs_amx3 + EMIF_EXT_PHY_CTRL_VALS_OFFSET for address
	 * to phy register save space
	 */
	add	r3, r2, #EMIF_EXT_PHY_CTRL_VALS_OFFSET
	add	r4, r0, #EMIF_EXT_PHY_CTRL_1
ddr_phy_ctrl_restore:
	ldr	r1, [r3, r5]
	str	r1, [r4, r5]
	add	r5, r5, #0x4
	cmp	r5, #AM43XX_EMIF_PHY_CTRL_REG_COUNT
	bne	ddr_phy_ctrl_restore

emif_skip_restore_extra_regs:
	/*
	 * Output impedance calibration is needed only for DDR3, but since
	 * the initial state of this register is disabled for DDR2, there is
	 * no harm in restoring the old configuration.
	 */
	ldr	r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]

	/* Write to sdcfg last, for DDR2 only */
	ldr	r1, [r2, #EMIF_SDCFG_VAL_OFFSET]
	and	r2, r1, #SDRAM_TYPE_MASK
	cmp	r2, #EMIF_SDCFG_TYPE_DDR2
	streq	r1, [r0, #EMIF_SDRAM_CONFIG]

	mov	pc, lr
ENDPROC(ti_emif_restore_context)

/*
 * void ti_emif_run_hw_leveling(void)
 *
 * Used during resume to run hardware leveling again and restore the
 * configuration of the EMIF PHY, for DDR3 only.
 */
ENTRY(ti_emif_run_hw_leveling)
	adr	r4, ti_emif_pm_sram_data
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]

	ldr	r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
	orr	r3, r3, #RDWRLVLFULL_START
	ldr	r2, [r0, #EMIF_SDRAM_CONFIG]
	and	r2, r2, #SDRAM_TYPE_MASK
	cmp	r2, #EMIF_SDCFG_TYPE_DDR3
	bne	skip_hwlvl

	str	r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]

	/*
	 * If the EMIF registers are touched during the initial stage of the
	 * HW leveling sequence, an L3 NOC timeout error is raised because
	 * the EMIF does not respond. This is not fatal, but it is avoidable.
	 * This small wait loop gives that condition enough time to clear,
	 * even in the worst case of the CPU running at its maximum speed
	 * of 1 GHz.
	 */
	mov	r2, #0x2000
1:
	subs	r2, r2, #0x1
	bne	1b

	/* Bit clears when the operation is complete */
2:	ldr	r1, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
	tst	r1, #RDWRLVLFULL_START
	bne	2b

skip_hwlvl:
	mov	pc, lr
ENDPROC(ti_emif_run_hw_leveling)

/*
 * void ti_emif_enter_sr(void)
 *
 * Programs the EMIF to tell the SDRAM to enter self-refresh mode
 * during a sleep transition. Operates on the VIRTUAL address
 * of the EMIF.
 */
ENTRY(ti_emif_enter_sr)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack

	adr	r4, ti_emif_pm_sram_data
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]
	ldr	r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]

	ldr	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
	bic	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
	orr	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]

	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(ti_emif_enter_sr)

/*
 * void ti_emif_exit_sr(void)
 *
 * Programs the EMIF to tell the SDRAM to exit self-refresh mode
 * after a sleep transition. Operates on the PHYSICAL address of
 * the EMIF.
 */
ENTRY(ti_emif_exit_sr)
	adr	r4, ti_emif_pm_sram_data
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]
	ldr	r2, [r4, #EMIF_PM_REGS_PHYS_OFFSET]

	/*
	 * Toggle the EMIF to exit self-refresh mode:
	 * if the EMIF lost context, PWR_MGT_CTRL is currently 0, so writing
	 * disable (0x0) alone has no effect; instead toggle from self-refresh
	 * (0x2) to disable (0x0) here.
	 * If the EMIF did not lose context, nothing is broken, as we write
	 * the same value (0x2) to the register before we write the
	 * disable (0x0).
	 */
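	/* Write the saved PMCR with self-refresh forced on, then clear it */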
	ldr	r1, [r2, #EMIF_PMCR_VAL_OFFSET]
	bic	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
	orr	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
	bic	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]

	/* Wait for EMIF to become ready */
1:	ldr	r1, [r0, #EMIF_STATUS]
	tst	r1, #EMIF_STATUS_READY
	beq	1b

	mov	pc, lr
ENDPROC(ti_emif_exit_sr)

/*
 * void ti_emif_abort_sr(void)
 *
 * Disables self-refresh after a failed transition to a low-power
 * state so the kernel can jump back to DDR and follow the abort path.
 * Operates on the VIRTUAL address of the EMIF.
 */
ENTRY(ti_emif_abort_sr)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack

	adr	r4, ti_emif_pm_sram_data
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]
	ldr	r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]

	ldr	r1, [r2, #EMIF_PMCR_VAL_OFFSET]
	bic	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]

	/* Wait for EMIF to become ready */
1:	ldr	r1, [r0, #EMIF_STATUS]
	tst	r1, #EMIF_STATUS_READY
	beq	1b

	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(ti_emif_abort_sr)

	.align 3
ENTRY(ti_emif_pm_sram_data)
	.space EMIF_PM_DATA_SIZE
ENTRY(ti_emif_sram_sz)
	.word	. - ti_emif_save_context
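
/*
 * Note: ti_emif_pm_sram_data reserves space for the EMIF base addresses,
 * the register save-area pointers and the configuration word that the
 * routines above load from it, and ti_emif_sram_sz records the size of
 * everything from ti_emif_save_context to this point (code plus the data
 * area), presumably so a caller can size the copy of this block into SRAM.
 */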