1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4 *
5 */
6
7 #include <linux/delay.h>
8 #include <linux/device.h>
9 #include <linux/dma-direction.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/firmware.h>
12 #include <linux/interrupt.h>
13 #include <linux/list.h>
14 #include <linux/mhi.h>
15 #include <linux/module.h>
16 #include <linux/random.h>
17 #include <linux/slab.h>
18 #include <linux/wait.h>
19 #include "internal.h"
20
21 /* Setup RDDM vector table for RDDM transfer and program RXVEC */
mhi_rddm_prepare(struct mhi_controller * mhi_cntrl,struct image_info * img_info)22 int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
23 struct image_info *img_info)
24 {
25 struct mhi_buf *mhi_buf = img_info->mhi_buf;
26 struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
27 void __iomem *base = mhi_cntrl->bhie;
28 struct device *dev = &mhi_cntrl->mhi_dev->dev;
29 u32 sequence_id;
30 unsigned int i;
31 int ret;
32
33 for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
34 bhi_vec->dma_addr = mhi_buf->dma_addr;
35 bhi_vec->size = mhi_buf->len;
36 }
37
38 dev_dbg(dev, "BHIe programming for RDDM\n");
39
40 mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
41 upper_32_bits(mhi_buf->dma_addr));
42
43 mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
44 lower_32_bits(mhi_buf->dma_addr));
45
46 mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
47 sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK);
48
49 ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
50 BHIE_RXVECDB_SEQNUM_BMSK, sequence_id);
51 if (ret) {
52 dev_err(dev, "Failed to write sequence ID for BHIE_RXVECDB\n");
53 return ret;
54 }
55
56 dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n",
57 &mhi_buf->dma_addr, mhi_buf->len, sequence_id);
58
59 return 0;
60 }
61
62 /* Collect RDDM buffer during kernel panic */
__mhi_download_rddm_in_panic(struct mhi_controller * mhi_cntrl)63 static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
64 {
65 int ret;
66 u32 rx_status;
67 enum mhi_ee_type ee;
68 const u32 delayus = 2000;
69 u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
70 const u32 rddm_timeout_us = 200000;
71 int rddm_retry = rddm_timeout_us / delayus;
72 void __iomem *base = mhi_cntrl->bhie;
73 struct device *dev = &mhi_cntrl->mhi_dev->dev;
74
75 dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n",
76 to_mhi_pm_state_str(mhi_cntrl->pm_state),
77 mhi_state_str(mhi_cntrl->dev_state),
78 TO_MHI_EXEC_STR(mhi_cntrl->ee));
79
80 /*
81 * This should only be executing during a kernel panic, we expect all
82 * other cores to shutdown while we're collecting RDDM buffer. After
83 * returning from this function, we expect the device to reset.
84 *
85 * Normaly, we read/write pm_state only after grabbing the
86 * pm_lock, since we're in a panic, skipping it. Also there is no
87 * gurantee that this state change would take effect since
88 * we're setting it w/o grabbing pm_lock
89 */
90 mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
91 /* update should take the effect immediately */
92 smp_wmb();
93
94 /*
95 * Make sure device is not already in RDDM. In case the device asserts
96 * and a kernel panic follows, device will already be in RDDM.
97 * Do not trigger SYS ERR again and proceed with waiting for
98 * image download completion.
99 */
100 ee = mhi_get_exec_env(mhi_cntrl);
101 if (ee == MHI_EE_MAX)
102 goto error_exit_rddm;
103
104 if (ee != MHI_EE_RDDM) {
105 dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n");
106 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
107
108 dev_dbg(dev, "Waiting for device to enter RDDM\n");
109 while (rddm_retry--) {
110 ee = mhi_get_exec_env(mhi_cntrl);
111 if (ee == MHI_EE_RDDM)
112 break;
113
114 udelay(delayus);
115 }
116
117 if (rddm_retry <= 0) {
118 /* Hardware reset so force device to enter RDDM */
119 dev_dbg(dev,
120 "Did not enter RDDM, do a host req reset\n");
121 mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
122 MHI_SOC_RESET_REQ_OFFSET,
123 MHI_SOC_RESET_REQ);
124 udelay(delayus);
125 }
126
127 ee = mhi_get_exec_env(mhi_cntrl);
128 }
129
130 dev_dbg(dev,
131 "Waiting for RDDM image download via BHIe, current EE:%s\n",
132 TO_MHI_EXEC_STR(ee));
133
134 while (retry--) {
135 ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
136 BHIE_RXVECSTATUS_STATUS_BMSK, &rx_status);
137 if (ret)
138 return -EIO;
139
140 if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL)
141 return 0;
142
143 udelay(delayus);
144 }
145
146 ee = mhi_get_exec_env(mhi_cntrl);
147 ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
148
149 dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status);
150
151 error_exit_rddm:
152 dev_err(dev, "RDDM transfer failed. Current EE: %s\n",
153 TO_MHI_EXEC_STR(ee));
154
155 return -EIO;
156 }
157
158 /* Download RDDM image from device */
/*
 * Wait for the device to finish the RDDM dump over BHIe. In the panic
 * path this busy-polls; otherwise it sleeps on state_event until the
 * RXVECSTATUS register reports completion or timeout_ms elapses.
 */
int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic)
{
	void __iomem *base = mhi_cntrl->bhie;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	/*
	 * Must be initialized: if mhi_read_reg_field() fails on the first
	 * evaluation of the wait condition, rx_status is never written and
	 * the "|| rx_status" term (and the final comparison) would read an
	 * uninitialized value.
	 */
	u32 rx_status = 0;

	if (in_panic)
		return __mhi_download_rddm_in_panic(mhi_cntrl);

	dev_dbg(dev, "Waiting for RDDM image download via BHIe\n");

	/* Wait for the image download to complete */
	wait_event_timeout(mhi_cntrl->state_event,
			   mhi_read_reg_field(mhi_cntrl, base,
					      BHIE_RXVECSTATUS_OFFS,
					      BHIE_RXVECSTATUS_STATUS_BMSK,
					      &rx_status) || rx_status,
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
}
180 EXPORT_SYMBOL_GPL(mhi_download_rddm_image);
181
mhi_fw_load_bhie(struct mhi_controller * mhi_cntrl,const struct mhi_buf * mhi_buf)182 static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
183 const struct mhi_buf *mhi_buf)
184 {
185 void __iomem *base = mhi_cntrl->bhie;
186 struct device *dev = &mhi_cntrl->mhi_dev->dev;
187 rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
188 u32 tx_status, sequence_id;
189 int ret;
190
191 read_lock_bh(pm_lock);
192 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
193 read_unlock_bh(pm_lock);
194 return -EIO;
195 }
196
197 sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK);
198 dev_dbg(dev, "Starting image download via BHIe. Sequence ID: %u\n",
199 sequence_id);
200 mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
201 upper_32_bits(mhi_buf->dma_addr));
202
203 mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
204 lower_32_bits(mhi_buf->dma_addr));
205
206 mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
207
208 ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
209 BHIE_TXVECDB_SEQNUM_BMSK, sequence_id);
210 read_unlock_bh(pm_lock);
211
212 if (ret)
213 return ret;
214
215 /* Wait for the image download to complete */
216 ret = wait_event_timeout(mhi_cntrl->state_event,
217 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
218 mhi_read_reg_field(mhi_cntrl, base,
219 BHIE_TXVECSTATUS_OFFS,
220 BHIE_TXVECSTATUS_STATUS_BMSK,
221 &tx_status) || tx_status,
222 msecs_to_jiffies(mhi_cntrl->timeout_ms));
223 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
224 tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL)
225 return -EIO;
226
227 return (!ret) ? -ETIMEDOUT : 0;
228 }
229
/*
 * Download an image over basic BHI: program the image address/size,
 * ring IMGTXDB with a random session ID, and wait for BHI_STATUS to
 * report completion. On a device-reported error, dump the BHI error
 * registers. Returns 0 on success, -EIO on error, -ETIMEDOUT on timeout.
 */
static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
			   dma_addr_t dma_addr,
			   size_t size)
{
	/*
	 * tx_status must be initialized: if the BHI_STATUS read in the wait
	 * condition fails, tx_status is never written yet is still compared
	 * against BHI_STATUS_ERROR below — an uninitialized read otherwise.
	 */
	u32 tx_status = 0, val, session_id;
	int i, ret;
	void __iomem *base = mhi_cntrl->bhi;
	rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	/* NULL-name sentinel terminates the table */
	struct {
		char *name;
		u32 offset;
	} error_reg[] = {
		{ "ERROR_CODE", BHI_ERRCODE },
		{ "ERROR_DBG1", BHI_ERRDBG1 },
		{ "ERROR_DBG2", BHI_ERRDBG2 },
		{ "ERROR_DBG3", BHI_ERRDBG3 },
		{ NULL },
	};

	/* Register access is only valid while not in an error/reset state */
	read_lock_bh(pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		read_unlock_bh(pm_lock);
		goto invalid_pm_state;
	}

	session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK);
	dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n",
		session_id);
	mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
	mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
		      upper_32_bits(dma_addr));
	mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
		      lower_32_bits(dma_addr));
	mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
	/* Doorbell write starts the transfer */
	mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
	read_unlock_bh(pm_lock);

	/* Wait for the image download to complete */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
				 mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
						    BHI_STATUS_MASK, &tx_status) || tx_status,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		goto invalid_pm_state;

	if (tx_status == BHI_STATUS_ERROR) {
		dev_err(dev, "Image transfer failed\n");
		/* Best-effort dump of the BHI error registers */
		read_lock_bh(pm_lock);
		if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
			for (i = 0; error_reg[i].name; i++) {
				ret = mhi_read_reg(mhi_cntrl, base,
						   error_reg[i].offset, &val);
				if (ret)
					break;
				dev_err(dev, "Reg: %s value: 0x%x\n",
					error_reg[i].name, val);
			}
		}
		read_unlock_bh(pm_lock);
		goto invalid_pm_state;
	}

	/* ret == 0 from wait_event_timeout means the wait timed out */
	return (!ret) ? -ETIMEDOUT : 0;

invalid_pm_state:

	return -EIO;
}
300
mhi_free_bhie_table(struct mhi_controller * mhi_cntrl,struct image_info * image_info)301 void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
302 struct image_info *image_info)
303 {
304 int i;
305 struct mhi_buf *mhi_buf = image_info->mhi_buf;
306
307 for (i = 0; i < image_info->entries; i++, mhi_buf++)
308 dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
309 mhi_buf->buf, mhi_buf->dma_addr);
310
311 kfree(image_info->mhi_buf);
312 kfree(image_info);
313 }
314
/*
 * Allocate a BHIe image table: one DMA-coherent buffer per seg_len-sized
 * segment of alloc_size, plus one extra trailing buffer that holds the
 * vector table describing the segments. On success *image_info owns the
 * allocations (freed via mhi_free_bhie_table()); returns -ENOMEM otherwise.
 */
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info **image_info,
			 size_t alloc_size)
{
	size_t seg_size = mhi_cntrl->seg_len;
	/* +1 entry for the vector table itself */
	int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
	int i;
	struct image_info *img_info;
	struct mhi_buf *mhi_buf;

	img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
	if (!img_info)
		return -ENOMEM;

	/* Allocate memory for entries */
	img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
				    GFP_KERNEL);
	if (!img_info->mhi_buf)
		goto error_alloc_mhi_buf;

	/* Allocate and populate vector table */
	mhi_buf = img_info->mhi_buf;
	for (i = 0; i < segments; i++, mhi_buf++) {
		size_t vec_size = seg_size;

		/* Vector table is the last entry */
		if (i == segments - 1)
			vec_size = sizeof(struct bhi_vec_entry) * i;

		mhi_buf->len = vec_size;
		mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						  vec_size, &mhi_buf->dma_addr,
						  GFP_KERNEL);
		if (!mhi_buf->buf)
			goto error_alloc_segment;
	}

	img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
	img_info->entries = segments;
	*image_info = img_info;

	return 0;

error_alloc_segment:
	/* Unwind only the segments allocated so far (indices 0..i-1) */
	for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
		dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
				  mhi_buf->buf, mhi_buf->dma_addr);

	/* Fix leak: the entry array itself must be freed on this path too */
	kfree(img_info->mhi_buf);

error_alloc_mhi_buf:
	kfree(img_info);

	return -ENOMEM;
}
368
mhi_firmware_copy(struct mhi_controller * mhi_cntrl,const struct firmware * firmware,struct image_info * img_info)369 static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
370 const struct firmware *firmware,
371 struct image_info *img_info)
372 {
373 size_t remainder = firmware->size;
374 size_t to_cpy;
375 const u8 *buf = firmware->data;
376 struct mhi_buf *mhi_buf = img_info->mhi_buf;
377 struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
378
379 while (remainder) {
380 to_cpy = min(remainder, mhi_buf->len);
381 memcpy(mhi_buf->buf, buf, to_cpy);
382 bhi_vec->dma_addr = mhi_buf->dma_addr;
383 bhi_vec->size = to_cpy;
384
385 buf += to_cpy;
386 remainder -= to_cpy;
387 bhi_vec++;
388 mhi_buf++;
389 }
390 }
391
/*
 * Top-level firmware load state machine, run once the device powers up.
 * Captures BHI hardware info, then (depending on execution environment)
 * downloads the SBL/EDL image over BHI, optionally stages the full boot
 * chain (FBC) image for a later BHIe transfer, and transitions the device
 * toward MHI READY. On any failure, pm_state is set to MHI_PM_FW_DL_ERR
 * and waiters on state_event are woken. Note: each error path releases
 * the firmware before its goto — order matters here.
 */
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
	const struct firmware *firmware = NULL;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	const char *fw_name;
	void *buf;
	dma_addr_t dma_addr;
	size_t size;
	int i, ret;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device MHI is not in valid state\n");
		return;
	}

	/* save hardware info from BHI */
	/* Failures here are logged but non-fatal — info is advisory only */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_SERIALNU,
			   &mhi_cntrl->serial_number);
	if (ret)
		dev_err(dev, "Could not capture serial number via BHI\n");

	for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i),
				   &mhi_cntrl->oem_pk_hash[i]);
		if (ret) {
			dev_err(dev, "Could not capture OEM PK HASH via BHI\n");
			break;
		}
	}

	/* wait for ready on pass through or any other execution environment */
	if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee))
		goto fw_load_ready_state;

	/* EDL (emergency download) mode uses a dedicated image */
	fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
		mhi_cntrl->edl_image : mhi_cntrl->fw_image;

	/* FBC downloads additionally require sbl_size and seg_len be set */
	if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
						     !mhi_cntrl->seg_len))) {
		dev_err(dev,
			"No firmware image defined or !sbl_size || !seg_len\n");
		goto error_fw_load;
	}

	ret = request_firmware(&firmware, fw_name, dev);
	if (ret) {
		dev_err(dev, "Error loading firmware: %d\n", ret);
		goto error_fw_load;
	}

	/* For FBC only the SBL portion goes over BHI; otherwise the whole blob */
	size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;

	/* SBL size provided is maximum size, not necessarily the image size */
	if (size > firmware->size)
		size = firmware->size;

	buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr,
				 GFP_KERNEL);
	if (!buf) {
		release_firmware(firmware);
		goto error_fw_load;
	}

	/* Download image using BHI */
	memcpy(buf, firmware->data, size);
	ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size);
	/* Bounce buffer is no longer needed regardless of outcome */
	dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr);

	/* Error or in EDL mode, we're done */
	if (ret) {
		dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret);
		release_firmware(firmware);
		goto error_fw_load;
	}

	/* Wait for ready since EDL image was loaded */
	/* Pointer comparison is intentional: fw_name was assigned from edl_image */
	if (fw_name == mhi_cntrl->edl_image) {
		release_firmware(firmware);
		goto fw_load_ready_state;
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_RESET;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/*
	 * If we're doing fbc, populate vector tables while
	 * device transitioning into MHI READY state
	 */
	if (mhi_cntrl->fbc_download) {
		ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
					   firmware->size);
		if (ret) {
			release_firmware(firmware);
			goto error_fw_load;
		}

		/* Load the firmware into BHIE vec table */
		mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
	}

	release_firmware(firmware);

fw_load_ready_state:
	/* Transitioning into MHI RESET->READY state */
	ret = mhi_ready_state_transition(mhi_cntrl);
	if (ret) {
		dev_err(dev, "MHI did not enter READY state\n");
		goto error_ready_state;
	}

	dev_info(dev, "Wait for device to enter SBL or Mission mode\n");
	return;

error_ready_state:
	/* Drop the staged FBC image; it will not be transferred now */
	if (mhi_cntrl->fbc_download) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

error_fw_load:
	/* Flag the failure and wake anyone waiting on the state change */
	mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
	wake_up_all(&mhi_cntrl->state_event);
}
516
mhi_download_amss_image(struct mhi_controller * mhi_cntrl)517 int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
518 {
519 struct image_info *image_info = mhi_cntrl->fbc_image;
520 struct device *dev = &mhi_cntrl->mhi_dev->dev;
521 int ret;
522
523 if (!image_info)
524 return -EIO;
525
526 ret = mhi_fw_load_bhie(mhi_cntrl,
527 /* Vector table is the last entry */
528 &image_info->mhi_buf[image_info->entries - 1]);
529 if (ret) {
530 dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
531 mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
532 wake_up_all(&mhi_cntrl->state_event);
533 }
534
535 return ret;
536 }
537