// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
 * transition to a new state only if we're allowed to.
 *
 * Priority increases as we go down. For instance, from any state in L0, the
 * transition can be made to states in L1, L2 and L3. A notable exception to
 * this rule is state DISABLE. From DISABLE state we can only transition to
 * POR state. Also, while in L2 state, the user cannot jump back to previous
 * L1 or L0 states.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR <--> POR
 *     POR -> M0 -> M2 -> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT -> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS -> POR
 * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
 *     SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> DISABLE
 */
static const struct mhi_pm_transitions dev_state_transitions[] = {
        /* L0 States */
        {
                MHI_PM_DISABLE,
                MHI_PM_POR
        },
        {
                MHI_PM_POR,
                MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
                MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
                MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
        },
        {
                MHI_PM_M0,
                MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
                MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
                MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
        },
        {
                MHI_PM_M2,
                MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
                MHI_PM_LD_ERR_FATAL_DETECT
        },
        {
                MHI_PM_M3_ENTER,
                MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
                MHI_PM_LD_ERR_FATAL_DETECT
        },
        {
                MHI_PM_M3,
                MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
                MHI_PM_LD_ERR_FATAL_DETECT
        },
        {
                MHI_PM_M3_EXIT,
                MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
                MHI_PM_LD_ERR_FATAL_DETECT
        },
        {
                MHI_PM_FW_DL_ERR,
                MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
                MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
        },
        /* L1 States */
        {
                MHI_PM_SYS_ERR_DETECT,
                MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
                MHI_PM_LD_ERR_FATAL_DETECT
        },
        {
                MHI_PM_SYS_ERR_PROCESS,
                MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
                MHI_PM_LD_ERR_FATAL_DETECT
        },
        /* L2 States */
        {
                MHI_PM_SHUTDOWN_PROCESS,
                MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
        },
        /* L3 States */
        {
                MHI_PM_LD_ERR_FATAL_DETECT,
                MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
        },
};
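
/*
 * Each MHI_PM_* value above is a single-bit flag, so checking whether a
 * transition is legal reduces to a table lookup plus a bitwise AND against
 * the to_states mask. For example, M0 -> M3_ENTER is allowed because the
 * MHI_PM_M0 entry includes MHI_PM_M3_ENTER in its to_states, whereas
 * M2 -> M3_ENTER is rejected since the MHI_PM_M2 entry omits it.
 */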

enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
                                                   enum mhi_pm_state state)
{
        unsigned long cur_state = mhi_cntrl->pm_state;
        int index = find_last_bit(&cur_state, 32);

        if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
                return cur_state;

        if (unlikely(dev_state_transitions[index].from_state != cur_state))
                return cur_state;

        if (unlikely(!(dev_state_transitions[index].to_states & state)))
                return cur_state;

        mhi_cntrl->pm_state = state;
        return mhi_cntrl->pm_state;
}
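
/*
 * Usage sketch (this mirrors the callers later in this file): attempt the
 * transition with pm_lock held in write mode, then verify the returned
 * state, since the request may be denied:
 *
 *        write_lock_irq(&mhi_cntrl->pm_lock);
 *        cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
 *        write_unlock_irq(&mhi_cntrl->pm_lock);
 *        if (cur_state != MHI_PM_M0)
 *                return -EIO;        /* transition was rejected */
 */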

void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        int ret;

        if (state == MHI_STATE_RESET) {
                ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
                                          MHICTRL_RESET_MASK, 1);
        } else {
                ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
                                          MHICTRL_MHISTATE_MASK, state);
        }

        if (ret)
                dev_err(dev, "Failed to set MHI state to: %s\n",
                        mhi_state_str(state));
}

/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
        mhi_cntrl->wake_get(mhi_cntrl, false);
        mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
        struct mhi_event *mhi_event;
        enum mhi_pm_state cur_state;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        u32 interval_us = 25000; /* poll register field every 25 milliseconds */
        int ret, i;

        /* Check if device entered error state */
        if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
                dev_err(dev, "Device link is not accessible\n");
                return -EIO;
        }

        /* Wait for RESET to be cleared and READY bit to be set by the device */
        ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
                                 MHICTRL_RESET_MASK, 0, interval_us);
        if (ret) {
                dev_err(dev, "Device failed to clear MHI Reset\n");
                return ret;
        }

        ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
                                 MHISTATUS_READY_MASK, 1, interval_us);
        if (ret) {
                dev_err(dev, "Device failed to enter MHI Ready\n");
                return ret;
        }

        dev_dbg(dev, "Device in READY State\n");
        write_lock_irq(&mhi_cntrl->pm_lock);
        cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
        mhi_cntrl->dev_state = MHI_STATE_READY;
        write_unlock_irq(&mhi_cntrl->pm_lock);

        if (cur_state != MHI_PM_POR) {
                dev_err(dev, "Error moving to state %s from %s\n",
                        to_mhi_pm_state_str(MHI_PM_POR),
                        to_mhi_pm_state_str(cur_state));
                return -EIO;
        }

        read_lock_bh(&mhi_cntrl->pm_lock);
        if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
                dev_err(dev, "Device registers not accessible\n");
                goto error_mmio;
        }

        /* Configure MMIO registers */
        ret = mhi_init_mmio(mhi_cntrl);
        if (ret) {
                dev_err(dev, "Error configuring MMIO registers\n");
                goto error_mmio;
        }

        /* Add elements to all SW event rings */
        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                struct mhi_ring *ring = &mhi_event->ring;

                /* Skip if this is an offload or HW event */
                if (mhi_event->offload_ev || mhi_event->hw_ring)
                        continue;

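                /*
                 * Point the write pointer at the last element so that the
                 * entire ring is presented to the device as free event
                 * elements.
                 */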
                ring->wp = ring->base + ring->len - ring->el_size;
                *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
                /* Update all cores */
                smp_wmb();

                /* Ring the event ring db */
                spin_lock_irq(&mhi_event->lock);
                mhi_ring_er_db(mhi_event);
                spin_unlock_irq(&mhi_event->lock);
        }

        /* Set MHI to M0 state */
        mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
        read_unlock_bh(&mhi_cntrl->pm_lock);

        return 0;

error_mmio:
        read_unlock_bh(&mhi_cntrl->pm_lock);

        return -EIO;
}

int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
        enum mhi_pm_state cur_state;
        struct mhi_chan *mhi_chan;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        int i;

        write_lock_irq(&mhi_cntrl->pm_lock);
        mhi_cntrl->dev_state = MHI_STATE_M0;
        cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
        write_unlock_irq(&mhi_cntrl->pm_lock);
        if (unlikely(cur_state != MHI_PM_M0)) {
                dev_err(dev, "Unable to transition to M0 state\n");
                return -EIO;
        }
        mhi_cntrl->M0++;

        /* Wake up the device */
        read_lock_bh(&mhi_cntrl->pm_lock);
        mhi_cntrl->wake_get(mhi_cntrl, true);

        /* Ring all event rings and CMD ring only if we're in mission mode */
        if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
                struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
                struct mhi_cmd *mhi_cmd =
                        &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

                for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                        if (mhi_event->offload_ev)
                                continue;

                        spin_lock_irq(&mhi_event->lock);
                        mhi_ring_er_db(mhi_event);
                        spin_unlock_irq(&mhi_event->lock);
                }

                /* Only ring primary cmd ring if ring is not empty */
                spin_lock_irq(&mhi_cmd->lock);
                if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
                        mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
                spin_unlock_irq(&mhi_cmd->lock);
        }

        /* Ring channel DB registers */
        mhi_chan = mhi_cntrl->mhi_chan;
        for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
                struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

                if (mhi_chan->db_cfg.reset_req) {
                        write_lock_irq(&mhi_chan->lock);
                        mhi_chan->db_cfg.db_mode = true;
                        write_unlock_irq(&mhi_chan->lock);
                }

                read_lock_irq(&mhi_chan->lock);

                /* Only ring DB if ring is not empty */
                if (tre_ring->base && tre_ring->wp != tre_ring->rp &&
                    mhi_chan->ch_state == MHI_CH_STATE_ENABLED)
                        mhi_ring_chan_db(mhi_cntrl, mhi_chan);
                read_unlock_irq(&mhi_chan->lock);
        }

        mhi_cntrl->wake_put(mhi_cntrl, false);
        read_unlock_bh(&mhi_cntrl->pm_lock);
        wake_up_all(&mhi_cntrl->state_event);

        return 0;
}

/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1 state, the host can transition the device to M2 state
 * to keep it in a low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
        enum mhi_pm_state state;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;

        write_lock_irq(&mhi_cntrl->pm_lock);
        state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
        if (state == MHI_PM_M2) {
                mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
                mhi_cntrl->dev_state = MHI_STATE_M2;

                write_unlock_irq(&mhi_cntrl->pm_lock);

                mhi_cntrl->M2++;
                wake_up_all(&mhi_cntrl->state_event);

                /* If there are any pending resources, exit M2 immediately */
                if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
                             atomic_read(&mhi_cntrl->dev_wake))) {
                        dev_dbg(dev,
                                "Exiting M2, pending_pkts: %d dev_wake: %d\n",
                                atomic_read(&mhi_cntrl->pending_pkts),
                                atomic_read(&mhi_cntrl->dev_wake));
                        read_lock_bh(&mhi_cntrl->pm_lock);
                        mhi_cntrl->wake_get(mhi_cntrl, true);
                        mhi_cntrl->wake_put(mhi_cntrl, true);
                        read_unlock_bh(&mhi_cntrl->pm_lock);
                } else {
                        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
                }
        } else {
                write_unlock_irq(&mhi_cntrl->pm_lock);
        }
}

/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
        enum mhi_pm_state state;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;

        write_lock_irq(&mhi_cntrl->pm_lock);
        mhi_cntrl->dev_state = MHI_STATE_M3;
        state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
        write_unlock_irq(&mhi_cntrl->pm_lock);
        if (state != MHI_PM_M3) {
                dev_err(dev, "Unable to transition to M3 state\n");
                return -EIO;
        }

        mhi_cntrl->M3++;
        wake_up_all(&mhi_cntrl->state_event);

        return 0;
}

/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
        struct mhi_event *mhi_event;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
        int i, ret;

        dev_dbg(dev, "Processing Mission Mode transition\n");

        write_lock_irq(&mhi_cntrl->pm_lock);
        if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
                ee = mhi_get_exec_env(mhi_cntrl);

        if (!MHI_IN_MISSION_MODE(ee)) {
                mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
                write_unlock_irq(&mhi_cntrl->pm_lock);
                wake_up_all(&mhi_cntrl->state_event);
                return -EIO;
        }
        mhi_cntrl->ee = ee;
        write_unlock_irq(&mhi_cntrl->pm_lock);

        wake_up_all(&mhi_cntrl->state_event);

        device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
                              mhi_destroy_device);
        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

        /* Force MHI to be in M0 state before continuing */
        ret = __mhi_device_get_sync(mhi_cntrl);
        if (ret)
                return ret;

        read_lock_bh(&mhi_cntrl->pm_lock);

        if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
                ret = -EIO;
                goto error_mission_mode;
        }

        /* Add elements to all HW event rings */
        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                struct mhi_ring *ring = &mhi_event->ring;

                if (mhi_event->offload_ev || !mhi_event->hw_ring)
                        continue;

                ring->wp = ring->base + ring->len - ring->el_size;
                *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
                /* Update to all cores */
                smp_wmb();

                spin_lock_irq(&mhi_event->lock);
                if (MHI_DB_ACCESS_VALID(mhi_cntrl))
                        mhi_ring_er_db(mhi_event);
                spin_unlock_irq(&mhi_event->lock);
        }

        read_unlock_bh(&mhi_cntrl->pm_lock);

        /*
         * The MHI devices are only created when the client device switches its
         * Execution Environment (EE) to either SBL or AMSS states
         */
        mhi_create_devices(mhi_cntrl);

        read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
        mhi_cntrl->wake_put(mhi_cntrl, false);
        read_unlock_bh(&mhi_cntrl->pm_lock);

        return ret;
}

/* Handle shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
{
        enum mhi_pm_state cur_state;
        struct mhi_event *mhi_event;
        struct mhi_cmd_ctxt *cmd_ctxt;
        struct mhi_cmd *mhi_cmd;
        struct mhi_event_ctxt *er_ctxt;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        int ret, i;

        dev_dbg(dev, "Processing disable transition with PM state: %s\n",
                to_mhi_pm_state_str(mhi_cntrl->pm_state));

        mutex_lock(&mhi_cntrl->pm_mutex);

        /* Trigger MHI RESET so that the device will not access host memory */
        if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
                /* Skip MHI RESET if in RDDM state */
                if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
                        goto skip_mhi_reset;

                dev_dbg(dev, "Triggering MHI Reset in device\n");
                mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

                /* Wait for the reset bit to be cleared by the device */
                ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
                                         MHICTRL_RESET_MASK, 0, 25000);
                if (ret)
                        dev_err(dev, "Device failed to clear MHI Reset\n");

                /*
                 * Device will clear BHI_INTVEC as a part of RESET processing,
                 * hence re-program it
                 */
                mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);

                if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
                        /* wait for ready to be set */
                        ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
                                                 MHISTATUS,
                                                 MHISTATUS_READY_MASK, 1, 25000);
                        if (ret)
                                dev_err(dev, "Device failed to enter READY state\n");
                }
        }

skip_mhi_reset:
        dev_dbg(dev,
                "Waiting for all pending event ring processing to complete\n");
        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                if (mhi_event->offload_ev)
                        continue;
                disable_irq(mhi_cntrl->irq[mhi_event->irq]);
                tasklet_kill(&mhi_event->task);
        }

        /* Release lock and wait for all pending threads to complete */
        mutex_unlock(&mhi_cntrl->pm_mutex);
        dev_dbg(dev, "Waiting for all pending threads to complete\n");
        wake_up_all(&mhi_cntrl->state_event);

        dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
        device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

        mutex_lock(&mhi_cntrl->pm_mutex);

        WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
        WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

        /* Reset the ev rings and cmd rings */
        dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
        mhi_cmd = mhi_cntrl->mhi_cmd;
        cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
        for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
                struct mhi_ring *ring = &mhi_cmd->ring;

                ring->rp = ring->base;
                ring->wp = ring->base;
                cmd_ctxt->rp = cmd_ctxt->rbase;
                cmd_ctxt->wp = cmd_ctxt->rbase;
        }

        mhi_event = mhi_cntrl->mhi_event;
        er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
             mhi_event++) {
                struct mhi_ring *ring = &mhi_event->ring;

                /* Skip offload events */
                if (mhi_event->offload_ev)
                        continue;

                ring->rp = ring->base;
                ring->wp = ring->base;
                er_ctxt->rp = er_ctxt->rbase;
                er_ctxt->wp = er_ctxt->rbase;
        }

        /* Move to disable state */
        write_lock_irq(&mhi_cntrl->pm_lock);
        cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
        write_unlock_irq(&mhi_cntrl->pm_lock);
        if (unlikely(cur_state != MHI_PM_DISABLE))
                dev_err(dev, "Error moving from PM state: %s to: %s\n",
                        to_mhi_pm_state_str(cur_state),
                        to_mhi_pm_state_str(MHI_PM_DISABLE));

        dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
                to_mhi_pm_state_str(mhi_cntrl->pm_state),
                mhi_state_str(mhi_cntrl->dev_state));

        mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Handle system error transitions */
static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
{
        enum mhi_pm_state cur_state, prev_state;
        enum dev_st_transition next_state;
        struct mhi_event *mhi_event;
        struct mhi_cmd_ctxt *cmd_ctxt;
        struct mhi_cmd *mhi_cmd;
        struct mhi_event_ctxt *er_ctxt;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        int ret, i;

        dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
                to_mhi_pm_state_str(mhi_cntrl->pm_state),
                to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));

        /* We must notify the MHI controller driver so it can clean up first */
        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);

        mutex_lock(&mhi_cntrl->pm_mutex);
        write_lock_irq(&mhi_cntrl->pm_lock);
        prev_state = mhi_cntrl->pm_state;
        cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
        write_unlock_irq(&mhi_cntrl->pm_lock);

        if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
                dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
                        to_mhi_pm_state_str(cur_state),
                        to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
                goto exit_sys_error_transition;
        }

        mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
        mhi_cntrl->dev_state = MHI_STATE_RESET;

        /* Wake up threads waiting for state transition */
        wake_up_all(&mhi_cntrl->state_event);

        /* Trigger MHI RESET so that the device will not access host memory */
        if (MHI_REG_ACCESS_VALID(prev_state)) {
                u32 in_reset = -1;
                unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

                dev_dbg(dev, "Triggering MHI Reset in device\n");
                mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

                /* Wait for the reset bit to be cleared by the device */
                ret = wait_event_timeout(mhi_cntrl->state_event,
                                         mhi_read_reg_field(mhi_cntrl,
                                                            mhi_cntrl->regs,
                                                            MHICTRL,
                                                            MHICTRL_RESET_MASK,
                                                            &in_reset) ||
                                         !in_reset, timeout);
                if (!ret || in_reset) {
                        dev_err(dev, "Device failed to exit MHI Reset state\n");
                        goto exit_sys_error_transition;
                }

                /*
                 * Device will clear BHI_INTVEC as a part of RESET processing,
                 * hence re-program it
                 */
                mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
        }

        dev_dbg(dev,
                "Waiting for all pending event ring processing to complete\n");
        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                if (mhi_event->offload_ev)
                        continue;
                tasklet_kill(&mhi_event->task);
        }

        /* Release lock and wait for all pending threads to complete */
        mutex_unlock(&mhi_cntrl->pm_mutex);
        dev_dbg(dev, "Waiting for all pending threads to complete\n");
        wake_up_all(&mhi_cntrl->state_event);

        dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
        device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

        mutex_lock(&mhi_cntrl->pm_mutex);

        WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
        WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

        /* Reset the ev rings and cmd rings */
        dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
        mhi_cmd = mhi_cntrl->mhi_cmd;
        cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
        for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
                struct mhi_ring *ring = &mhi_cmd->ring;

                ring->rp = ring->base;
                ring->wp = ring->base;
                cmd_ctxt->rp = cmd_ctxt->rbase;
                cmd_ctxt->wp = cmd_ctxt->rbase;
        }

        mhi_event = mhi_cntrl->mhi_event;
        er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
             mhi_event++) {
                struct mhi_ring *ring = &mhi_event->ring;

                /* Skip offload events */
                if (mhi_event->offload_ev)
                        continue;

                ring->rp = ring->base;
                ring->wp = ring->base;
                er_ctxt->rp = er_ctxt->rbase;
                er_ctxt->wp = er_ctxt->rbase;
        }

        /* Transition to next state */
        if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
                write_lock_irq(&mhi_cntrl->pm_lock);
                cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
                write_unlock_irq(&mhi_cntrl->pm_lock);
                if (cur_state != MHI_PM_POR) {
                        dev_err(dev, "Error moving to state %s from %s\n",
                                to_mhi_pm_state_str(MHI_PM_POR),
                                to_mhi_pm_state_str(cur_state));
                        goto exit_sys_error_transition;
                }
                next_state = DEV_ST_TRANSITION_PBL;
        } else {
                next_state = DEV_ST_TRANSITION_READY;
        }

        mhi_queue_state_transition(mhi_cntrl, next_state);

exit_sys_error_transition:
        dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
                to_mhi_pm_state_str(mhi_cntrl->pm_state),
                mhi_state_str(mhi_cntrl->dev_state));

        mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Queue a new work item and schedule work */
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
                               enum dev_st_transition state)
{
        struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
        unsigned long flags;

        if (!item)
                return -ENOMEM;

        item->state = state;
        spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
        list_add_tail(&item->node, &mhi_cntrl->transition_list);
        spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

        queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);

        return 0;
}
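
/*
 * The items queued here are drained asynchronously by mhi_pm_st_worker()
 * below. A typical producer, as seen elsewhere in this file:
 *
 *        mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
 */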

/* SYS_ERR worker */
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
        struct device *dev = &mhi_cntrl->mhi_dev->dev;

        /* skip if controller supports RDDM */
        if (mhi_cntrl->rddm_image) {
                dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
                return;
        }

        mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}

/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
        struct state_transition *itr, *tmp;
        LIST_HEAD(head);
        struct mhi_controller *mhi_cntrl = container_of(work,
                                                        struct mhi_controller,
                                                        st_worker);
        struct device *dev = &mhi_cntrl->mhi_dev->dev;

        spin_lock_irq(&mhi_cntrl->transition_lock);
        list_splice_tail_init(&mhi_cntrl->transition_list, &head);
        spin_unlock_irq(&mhi_cntrl->transition_lock);

        list_for_each_entry_safe(itr, tmp, &head, node) {
                list_del(&itr->node);
                dev_dbg(dev, "Handling state transition: %s\n",
                        TO_DEV_STATE_TRANS_STR(itr->state));

                switch (itr->state) {
                case DEV_ST_TRANSITION_PBL:
                        write_lock_irq(&mhi_cntrl->pm_lock);
                        if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
                                mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
                        write_unlock_irq(&mhi_cntrl->pm_lock);
                        mhi_fw_load_handler(mhi_cntrl);
                        break;
                case DEV_ST_TRANSITION_SBL:
                        write_lock_irq(&mhi_cntrl->pm_lock);
                        mhi_cntrl->ee = MHI_EE_SBL;
                        write_unlock_irq(&mhi_cntrl->pm_lock);
                        /*
                         * The MHI devices are only created when the client
                         * device switches its Execution Environment (EE) to
                         * either SBL or AMSS states
                         */
                        mhi_create_devices(mhi_cntrl);
                        if (mhi_cntrl->fbc_download)
                                mhi_download_amss_image(mhi_cntrl);
                        break;
                case DEV_ST_TRANSITION_MISSION_MODE:
                        mhi_pm_mission_mode_transition(mhi_cntrl);
                        break;
                case DEV_ST_TRANSITION_FP:
                        write_lock_irq(&mhi_cntrl->pm_lock);
                        mhi_cntrl->ee = MHI_EE_FP;
                        write_unlock_irq(&mhi_cntrl->pm_lock);
                        mhi_create_devices(mhi_cntrl);
                        break;
                case DEV_ST_TRANSITION_READY:
                        mhi_ready_state_transition(mhi_cntrl);
                        break;
                case DEV_ST_TRANSITION_SYS_ERR:
                        mhi_pm_sys_error_transition(mhi_cntrl);
                        break;
                case DEV_ST_TRANSITION_DISABLE:
                        mhi_pm_disable_transition(mhi_cntrl);
                        break;
                default:
                        break;
                }
                kfree(itr);
        }
}

int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
        struct mhi_chan *itr, *tmp;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        enum mhi_pm_state new_state;
        int ret;

        if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
                return -EINVAL;

        if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
                return -EIO;

        /* Return busy if there are any pending resources */
        if (atomic_read(&mhi_cntrl->dev_wake) ||
            atomic_read(&mhi_cntrl->pending_pkts))
                return -EBUSY;

        /* Take MHI out of M2 state */
        read_lock_bh(&mhi_cntrl->pm_lock);
        mhi_cntrl->wake_get(mhi_cntrl, false);
        read_unlock_bh(&mhi_cntrl->pm_lock);

        ret = wait_event_timeout(mhi_cntrl->state_event,
                                 mhi_cntrl->dev_state == MHI_STATE_M0 ||
                                 mhi_cntrl->dev_state == MHI_STATE_M1 ||
                                 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));

        read_lock_bh(&mhi_cntrl->pm_lock);
        mhi_cntrl->wake_put(mhi_cntrl, false);
        read_unlock_bh(&mhi_cntrl->pm_lock);

        if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
                dev_err(dev, "Could not enter M0/M1 state");
                return -EIO;
        }

        write_lock_irq(&mhi_cntrl->pm_lock);

        if (atomic_read(&mhi_cntrl->dev_wake) ||
            atomic_read(&mhi_cntrl->pending_pkts)) {
                write_unlock_irq(&mhi_cntrl->pm_lock);
                return -EBUSY;
        }

        dev_dbg(dev, "Allowing M3 transition\n");
        new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
        if (new_state != MHI_PM_M3_ENTER) {
                write_unlock_irq(&mhi_cntrl->pm_lock);
                dev_err(dev,
                        "Error setting to PM state: %s from: %s\n",
                        to_mhi_pm_state_str(MHI_PM_M3_ENTER),
                        to_mhi_pm_state_str(mhi_cntrl->pm_state));
                return -EIO;
        }

        /* Set MHI to M3 and wait for completion */
        mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
        write_unlock_irq(&mhi_cntrl->pm_lock);
        dev_dbg(dev, "Waiting for M3 completion\n");

        ret = wait_event_timeout(mhi_cntrl->state_event,
                                 mhi_cntrl->dev_state == MHI_STATE_M3 ||
                                 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));

        if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
                dev_err(dev,
                        "Did not enter M3 state, MHI state: %s, PM state: %s\n",
                        mhi_state_str(mhi_cntrl->dev_state),
                        to_mhi_pm_state_str(mhi_cntrl->pm_state));
                return -EIO;
        }

        /* Notify clients about entering LPM */
        list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
                mutex_lock(&itr->mutex);
                if (itr->mhi_dev)
                        mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
                mutex_unlock(&itr->mutex);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);
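
/*
 * A minimal usage sketch for the suspend/resume pair, assuming a
 * hypothetical controller driver that keeps its struct mhi_controller in
 * driver data (the foo_* names are illustrative only):
 *
 *        static int __maybe_unused foo_suspend(struct device *dev)
 *        {
 *                struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
 *
 *                return mhi_pm_suspend(mhi_cntrl);
 *        }
 *
 *        static int __maybe_unused foo_resume(struct device *dev)
 *        {
 *                struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
 *
 *                return mhi_pm_resume(mhi_cntrl);
 *        }
 */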

static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
{
        struct mhi_chan *itr, *tmp;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        enum mhi_pm_state cur_state;
        int ret;

        dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
                to_mhi_pm_state_str(mhi_cntrl->pm_state),
                mhi_state_str(mhi_cntrl->dev_state));

        if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
                return 0;

        if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
                return -EIO;

        if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
                dev_warn(dev, "Resuming from non M3 state (%s)\n",
                         mhi_state_str(mhi_get_mhi_state(mhi_cntrl)));
                if (!force)
                        return -EINVAL;
        }

        /* Notify clients about exiting LPM */
        list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
                mutex_lock(&itr->mutex);
                if (itr->mhi_dev)
                        mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
                mutex_unlock(&itr->mutex);
        }

        write_lock_irq(&mhi_cntrl->pm_lock);
        cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
        if (cur_state != MHI_PM_M3_EXIT) {
                write_unlock_irq(&mhi_cntrl->pm_lock);
                dev_info(dev,
                         "Error setting to PM state: %s from: %s\n",
                         to_mhi_pm_state_str(MHI_PM_M3_EXIT),
                         to_mhi_pm_state_str(mhi_cntrl->pm_state));
                return -EIO;
        }

        /* Set MHI to M0 and wait for completion */
        mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
        write_unlock_irq(&mhi_cntrl->pm_lock);

        ret = wait_event_timeout(mhi_cntrl->state_event,
                                 mhi_cntrl->dev_state == MHI_STATE_M0 ||
                                 mhi_cntrl->dev_state == MHI_STATE_M2 ||
                                 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));

        if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
                dev_err(dev,
                        "Did not enter M0 state, MHI state: %s, PM state: %s\n",
                        mhi_state_str(mhi_cntrl->dev_state),
                        to_mhi_pm_state_str(mhi_cntrl->pm_state));
                return -EIO;
        }

        return 0;
}

int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
        return __mhi_pm_resume(mhi_cntrl, false);
}
EXPORT_SYMBOL_GPL(mhi_pm_resume);

int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
{
        return __mhi_pm_resume(mhi_cntrl, true);
}
EXPORT_SYMBOL_GPL(mhi_pm_resume_force);

int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
        int ret;

        /* Wake up the device */
        read_lock_bh(&mhi_cntrl->pm_lock);
        if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
                read_unlock_bh(&mhi_cntrl->pm_lock);
                return -EIO;
        }
        mhi_cntrl->wake_get(mhi_cntrl, true);
        if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
                mhi_trigger_resume(mhi_cntrl);
        read_unlock_bh(&mhi_cntrl->pm_lock);

        ret = wait_event_timeout(mhi_cntrl->state_event,
                                 mhi_cntrl->pm_state == MHI_PM_M0 ||
                                 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));

        if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
                read_lock_bh(&mhi_cntrl->pm_lock);
                mhi_cntrl->wake_put(mhi_cntrl, false);
                read_unlock_bh(&mhi_cntrl->pm_lock);
                return -EIO;
        }

        return 0;
}
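
/*
 * On success the device is in M0 and one wake reference is held on the
 * caller's behalf; the caller must drop it with mhi_cntrl->wake_put() when
 * done (see mhi_pm_mission_mode_transition() above for a matching put).
 */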

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
        unsigned long flags;

        /*
         * If force flag is set, then increment the wake count value and
         * ring wake db
         */
        if (unlikely(force)) {
                spin_lock_irqsave(&mhi_cntrl->wlock, flags);
                atomic_inc(&mhi_cntrl->dev_wake);
                if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
                    !mhi_cntrl->wake_set) {
                        mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
                        mhi_cntrl->wake_set = true;
                }
                spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
        } else {
                /*
                 * If resources are already requested, then just increment
                 * the wake count value and return
                 */
                if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
                        return;

                spin_lock_irqsave(&mhi_cntrl->wlock, flags);
                if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
                    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
                    !mhi_cntrl->wake_set) {
                        mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
                        mhi_cntrl->wake_set = true;
                }
                spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
        }
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
                                  bool override)
{
        unsigned long flags;

        /*
         * Only continue if there is a single resource, else just decrement
         * and return
         */
        if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
                return;

        spin_lock_irqsave(&mhi_cntrl->wlock, flags);
        if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
            MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
            mhi_cntrl->wake_set) {
                mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
                mhi_cntrl->wake_set = false;
        }
        spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}
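
/*
 * The two helpers above implement a reference-counted wake protocol: the
 * wake DB is only written on the 0 -> 1 and 1 -> 0 transitions of dev_wake.
 * A balanced pair, as used throughout this file, looks like:
 *
 *        read_lock_bh(&mhi_cntrl->pm_lock);
 *        mhi_cntrl->wake_get(mhi_cntrl, false);
 *        ... access the device ...
 *        mhi_cntrl->wake_put(mhi_cntrl, false);
 *        read_unlock_bh(&mhi_cntrl->pm_lock);
 */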

int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
        struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
        enum mhi_state state;
        enum mhi_ee_type current_ee;
        enum dev_st_transition next_state;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        u32 interval_us = 25000; /* poll register field every 25 milliseconds */
        int ret, i;

        dev_info(dev, "Requested to power ON\n");

        /* Supply default wake routines if not provided by controller driver */
        if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
            !mhi_cntrl->wake_toggle) {
                mhi_cntrl->wake_get = mhi_assert_dev_wake;
                mhi_cntrl->wake_put = mhi_deassert_dev_wake;
                mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
                        mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
        }

        mutex_lock(&mhi_cntrl->pm_mutex);
        mhi_cntrl->pm_state = MHI_PM_DISABLE;

        /* Setup BHI INTVEC */
        write_lock_irq(&mhi_cntrl->pm_lock);
        mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
        mhi_cntrl->pm_state = MHI_PM_POR;
        mhi_cntrl->ee = MHI_EE_MAX;
        current_ee = mhi_get_exec_env(mhi_cntrl);
        write_unlock_irq(&mhi_cntrl->pm_lock);

        /* Confirm that the device is in valid exec env */
        if (!MHI_POWER_UP_CAPABLE(current_ee)) {
                dev_err(dev, "%s is not a valid EE for power on\n",
                        TO_MHI_EXEC_STR(current_ee));
                ret = -EIO;
                goto error_exit;
        }

        state = mhi_get_mhi_state(mhi_cntrl);
        dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n",
                TO_MHI_EXEC_STR(current_ee), mhi_state_str(state));

        if (state == MHI_STATE_SYS_ERR) {
                mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
                ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
                                         MHICTRL_RESET_MASK, 0, interval_us);
                if (ret) {
                        dev_info(dev, "Failed to reset MHI due to syserr state\n");
                        goto error_exit;
                }

                /*
                 * Device clears BHI_INTVEC as a part of RESET processing,
                 * hence re-program it
                 */
                mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
        }

        /* IRQs have been requested during probe, so we just need to enable them. */
        enable_irq(mhi_cntrl->irq[0]);

        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                if (mhi_event->offload_ev)
                        continue;

                enable_irq(mhi_cntrl->irq[mhi_event->irq]);
        }

        /* Transition to next state */
        next_state = MHI_IN_PBL(current_ee) ?
                DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

        mhi_queue_state_transition(mhi_cntrl, next_state);

        mutex_unlock(&mhi_cntrl->pm_mutex);

        dev_info(dev, "Power on setup success\n");

        return 0;

error_exit:
        mhi_cntrl->pm_state = MHI_PM_DISABLE;
        mutex_unlock(&mhi_cntrl->pm_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);
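
/*
 * Note that mhi_async_power_up() only kicks off the boot sequence; callers
 * that need the device fully in mission mode should use mhi_sync_power_up()
 * below, which layers a bounded wait on top of this function.
 */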

void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
        enum mhi_pm_state cur_state, transition_state;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;

        mutex_lock(&mhi_cntrl->pm_mutex);
        write_lock_irq(&mhi_cntrl->pm_lock);
        cur_state = mhi_cntrl->pm_state;
        if (cur_state == MHI_PM_DISABLE) {
                write_unlock_irq(&mhi_cntrl->pm_lock);
                mutex_unlock(&mhi_cntrl->pm_mutex);
                return; /* Already powered down */
        }

        /* If it's not a graceful shutdown, force MHI to linkdown state */
        transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
                           MHI_PM_LD_ERR_FATAL_DETECT;

        cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
        if (cur_state != transition_state) {
                dev_err(dev, "Failed to move to state: %s from: %s\n",
                        to_mhi_pm_state_str(transition_state),
                        to_mhi_pm_state_str(mhi_cntrl->pm_state));
                /* Force link down or error fatal detected state */
                mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
        }

        /* Mark device inactive to avoid any further host processing */
        mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
        mhi_cntrl->dev_state = MHI_STATE_RESET;

        wake_up_all(&mhi_cntrl->state_event);

        write_unlock_irq(&mhi_cntrl->pm_lock);
        mutex_unlock(&mhi_cntrl->pm_mutex);

        mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);

        /* Wait for shutdown to complete */
        flush_work(&mhi_cntrl->st_worker);

        disable_irq(mhi_cntrl->irq[0]);
}
EXPORT_SYMBOL_GPL(mhi_power_down);

int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
        int ret = mhi_async_power_up(mhi_cntrl);

        if (ret)
                return ret;

        wait_event_timeout(mhi_cntrl->state_event,
                           MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
                           MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
                           msecs_to_jiffies(mhi_cntrl->timeout_ms));

        ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
        if (ret)
                mhi_power_down(mhi_cntrl, false);

        return ret;
}
EXPORT_SYMBOL(mhi_sync_power_up);
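
/*
 * A minimal power-up/power-down sketch for a controller driver's probe and
 * remove paths (assuming the controller has already been registered; error
 * handling elided):
 *
 *        ret = mhi_sync_power_up(mhi_cntrl);
 *        if (ret)
 *                return ret;
 *        ...
 *        mhi_power_down(mhi_cntrl, true);
 */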

int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        int ret;

        /* Check if device is already in RDDM */
        if (mhi_cntrl->ee == MHI_EE_RDDM)
                return 0;

        dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
        mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

        /* Wait for RDDM event */
        ret = wait_event_timeout(mhi_cntrl->state_event,
                                 mhi_cntrl->ee == MHI_EE_RDDM,
                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
        ret = ret ? 0 : -EIO;

        return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);

void mhi_device_get(struct mhi_device *mhi_dev)
{
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

        mhi_dev->dev_wake++;
        read_lock_bh(&mhi_cntrl->pm_lock);
        if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
                mhi_trigger_resume(mhi_cntrl);

        mhi_cntrl->wake_get(mhi_cntrl, true);
        read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);

int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
        int ret;

        ret = __mhi_device_get_sync(mhi_cntrl);
        if (!ret)
                mhi_dev->dev_wake++;

        return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

void mhi_device_put(struct mhi_device *mhi_dev)
{
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

        mhi_dev->dev_wake--;
        read_lock_bh(&mhi_cntrl->pm_lock);
        if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
                mhi_trigger_resume(mhi_cntrl);

        mhi_cntrl->wake_put(mhi_cntrl, false);
        read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);
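
/*
 * Client drivers pair the helpers above around device access, e.g. (a
 * minimal sketch; error handling elided):
 *
 *        ret = mhi_device_get_sync(mhi_dev);
 *        if (ret)
 *                return ret;
 *        ... perform I/O while the device is held in M0 ...
 *        mhi_device_put(mhi_dev);
 */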