/*
 * io_sm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * IO dispatcher for a shared memory channel driver.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/*
 * Channel Invariant:
 * There is an important invariant condition which must be maintained per
 * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
 * which may cause timeouts and/or failure of the sync_wait_on_event
 * function.
 */
#include <linux/types.h>
#include <linux/list.h>

/* Host OS */
#include <dspbridge/host_os.h>
#include <linux/workqueue.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* Services Layer */
#include <dspbridge/ntfy.h>
#include <dspbridge/sync.h>

/* Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>

/* Bridge Driver */
#include <dspbridge/dspdeh.h>
#include <dspbridge/dspio.h>
#include <dspbridge/dspioctl.h>
#include <dspbridge/wdt.h>
#include <_tiomap.h>
#include <tiomap_io.h>
#include <_tiomap_pwr.h>

/* Platform Manager */
#include <dspbridge/cod.h>
#include <dspbridge/node.h>
#include <dspbridge/dev.h>

/* Others */
#include <dspbridge/rms_sh.h>
#include <dspbridge/mgr.h>
#include <dspbridge/drv.h>
#include "_cmm.h"
#include "module_list.h"

/* This */
#include <dspbridge/io_sm.h>
#include "_msg_sm.h"

/* Defines, Data Structures, Typedefs */
#define OUTPUTNOTREADY 0xffff
#define NOTENABLED 0xffff	/* Channel(s) not enabled */

#define EXTEND "_EXT_END"

#define SWAP_WORD(x) (x)
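/*
 * SWAP_WORD is an identity here; presumably a placeholder kept for hosts
 * that would need byte swapping.
 */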
#define UL_PAGE_ALIGN_SIZE 0x10000	/* Page Align Size */

#define MAX_PM_REQS 32

#define MMU_FAULT_HEAD1 0xa5a5a5a5
#define MMU_FAULT_HEAD2 0x96969696
#define POLL_MAX 1000
#define MAX_MMU_DBGBUFF 10240

/* IO Manager: only one created per board */
struct io_mgr {
	/* These fields must be the first fields in an io_mgr struct */
	/* Bridge device context */
	struct bridge_dev_context *bridge_context;
	/* Function interface to Bridge driver */
	struct bridge_drv_interface *intf_fxns;
	struct dev_object *dev_obj;	/* Device this board represents */

	/* These fields initialized in bridge_io_create() */
	struct chnl_mgr *chnl_mgr;
	struct shm *shared_mem;	/* Shared Memory control */
	u8 *input;		/* Address of input channel */
	u8 *output;		/* Address of output channel */
	struct msg_mgr *msg_mgr;	/* Message manager */
	/* Msg control for messages from the DSP */
	struct msg_ctrl *msg_input_ctrl;
	/* Msg control for messages to the DSP */
	struct msg_ctrl *msg_output_ctrl;
	u8 *msg_input;		/* Address of input messages */
	u8 *msg_output;		/* Address of output messages */
	u32 sm_buf_size;	/* Size of a shared memory I/O channel */
	bool shared_irq;	/* Is this IRQ shared? */
	u32 word_size;		/* Size in bytes of DSP word */
	u16 intr_val;		/* Interrupt value */
	/* Private extended proc info; MMU setup */
	struct mgr_processorextinfo ext_proc_info;
	struct cmm_object *cmm_mgr;	/* Shared Mem Mngr */
	struct work_struct io_workq;	/* workqueue */
#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
	u32 trace_buffer_begin;	/* Trace message start address */
	u32 trace_buffer_end;	/* Trace message end address */
	u32 trace_buffer_current;	/* Trace message current address */
	u32 gpp_read_pointer;	/* GPP Read pointer to Trace buffer */
	u8 *msg;
	u32 gpp_va;
	u32 dsp_va;
#endif
	/* IO Dpc */
	u32 dpc_req;		/* Number of requested DPCs */
	u32 dpc_sched;		/* Number of executed DPCs */
	struct tasklet_struct dpc_tasklet;
	spinlock_t dpc_lock;

};

/* Function Prototypes */
static void io_dispatch_pm(struct io_mgr *pio_mgr);
static void notify_chnl_complete(struct chnl_object *pchnl,
				 struct chnl_irp *chnl_packet_obj);
static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
		       u8 io_mode);
static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
			u8 io_mode);
static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
			     struct chnl_object *pchnl, u32 mask);

/* Bus Addr (cached kernel) */
static int register_shm_segs(struct io_mgr *hio_mgr,
			     struct cod_manager *cod_man,
			     u32 dw_gpp_base_pa);

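/*
 * Note on the mask helpers below: shm->host_free_mask is a per-channel
 * bitmask shared with the DSP. Judging from the call sites
 * (io_request_chnl(), io_cancel_chnl(), input_chnl()), a set bit means
 * "the host has a buffer queued on this input channel"; set_chnl_busy()
 * advertises a buffer and set_chnl_free() retracts the advertisement.
 */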
static inline void set_chnl_free(struct shm *sm, u32 chnl)
{
	sm->host_free_mask &= ~(1 << chnl);
}

static inline void set_chnl_busy(struct shm *sm, u32 chnl)
{
	sm->host_free_mask |= 1 << chnl;
}

/*
 * ======== bridge_io_create ========
 * Create an IO manager object.
 */
int bridge_io_create(struct io_mgr **io_man,
		     struct dev_object *hdev_obj,
		     const struct io_attrs *mgr_attrts)
{
	struct io_mgr *pio_mgr = NULL;
	struct bridge_dev_context *hbridge_context = NULL;
	struct cfg_devnode *dev_node_obj;
	struct chnl_mgr *hchnl_mgr;
	u8 dev_type;

	/* Check requirements */
	if (!io_man || !mgr_attrts || mgr_attrts->word_size == 0)
		return -EFAULT;

	*io_man = NULL;

	dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
	if (!hchnl_mgr || hchnl_mgr->iomgr)
		return -EFAULT;

	/*
	 * Message manager will be created when a file is loaded, since
	 * size of message buffer in shared memory is configurable in
	 * the base image.
	 */
	dev_get_bridge_context(hdev_obj, &hbridge_context);
	if (!hbridge_context)
		return -EFAULT;

	dev_get_dev_type(hdev_obj, &dev_type);

	/* Allocate IO manager object */
	pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL);
	if (!pio_mgr)
		return -ENOMEM;

	/* Initialize chnl_mgr object */
	pio_mgr->chnl_mgr = hchnl_mgr;
	pio_mgr->word_size = mgr_attrts->word_size;

	if (dev_type == DSP_UNIT) {
		/* Create an IO DPC */
		tasklet_init(&pio_mgr->dpc_tasklet, io_dpc, (u32) pio_mgr);

		/* Initialize DPC counters */
		pio_mgr->dpc_req = 0;
		pio_mgr->dpc_sched = 0;

		spin_lock_init(&pio_mgr->dpc_lock);

		if (dev_get_dev_node(hdev_obj, &dev_node_obj)) {
			bridge_io_destroy(pio_mgr);
			return -EIO;
		}
	}

	pio_mgr->bridge_context = hbridge_context;
	pio_mgr->shared_irq = mgr_attrts->irq_shared;
	if (dsp_wdt_init()) {
		bridge_io_destroy(pio_mgr);
		return -EPERM;
	}

	/* Return IO manager object to caller... */
	hchnl_mgr->iomgr = pio_mgr;
	*io_man = pio_mgr;

	return 0;
}

/*
 * ======== bridge_io_destroy ========
 * Purpose:
 *      Disable interrupts, destroy the IO manager.
 */
int bridge_io_destroy(struct io_mgr *hio_mgr)
{
	int status = 0;
	if (hio_mgr) {
		/* Free IO DPC object */
		tasklet_kill(&hio_mgr->dpc_tasklet);

#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
		kfree(hio_mgr->msg);
#endif
		dsp_wdt_exit();
		/* Free this IO manager object */
		kfree(hio_mgr);
	} else {
		status = -EFAULT;
	}

	return status;
}

/*
 * ======== bridge_io_on_loaded ========
 * Purpose:
 *      Called when a new program is loaded to get shared memory buffer
 *      parameters from COFF file. ul_shm_base and ul_shm_limit are in
 *      DSP address units.
 */
int bridge_io_on_loaded(struct io_mgr *hio_mgr)
{
	struct cod_manager *cod_man;
	struct chnl_mgr *hchnl_mgr;
	struct msg_mgr *hmsg_mgr;
	u32 ul_shm_base;
	u32 ul_shm_base_offset;
	u32 ul_shm_limit;
	u32 ul_shm_length = -1;
	u32 ul_mem_length = -1;
	u32 ul_msg_base;
	u32 ul_msg_limit;
	u32 ul_msg_length = -1;
	u32 ul_ext_end;
	u32 ul_gpp_pa = 0;
	u32 ul_gpp_va = 0;
	u32 ul_dsp_va = 0;
	u32 ul_seg_size = 0;
	u32 ul_pad_size = 0;
	u32 i;
	int status = 0;
	u8 num_procs = 0;
	s32 ndx = 0;
	/* DSP MMU setup table */
	struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
	struct cfg_hostres *host_res;
	struct bridge_dev_context *pbridge_context;
	u32 map_attrs;
	u32 shm0_end;
	u32 ul_dyn_ext_base;
	u32 ul_seg1_size = 0;
	u32 pa_curr = 0;
	u32 va_curr = 0;
	u32 gpp_va_curr = 0;
	u32 num_bytes = 0;
	u32 all_bits = 0;
	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
	};
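	/*
	 * Supported MMU page sizes, largest first: the mapping loops below
	 * always pick the biggest page that the current PA/VA alignment and
	 * remaining byte count allow.
	 */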

	status = dev_get_bridge_context(hio_mgr->dev_obj, &pbridge_context);
	if (!pbridge_context) {
		status = -EFAULT;
		goto func_end;
	}

	host_res = pbridge_context->resources;
	if (!host_res) {
		status = -EFAULT;
		goto func_end;
	}
	status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man);
	if (!cod_man) {
		status = -EFAULT;
		goto func_end;
	}
	hchnl_mgr = hio_mgr->chnl_mgr;
	/* The message manager is destroyed when the board is stopped. */
	dev_get_msg_mgr(hio_mgr->dev_obj, &hio_mgr->msg_mgr);
	hmsg_mgr = hio_mgr->msg_mgr;
	if (!hchnl_mgr || !hmsg_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	if (hio_mgr->shared_mem)
		hio_mgr->shared_mem = NULL;

	/* Get start and length of channel part of shared memory */
	status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
				   &ul_shm_base);
	if (status) {
		status = -EFAULT;
		goto func_end;
	}
	status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
				   &ul_shm_limit);
	if (status) {
		status = -EFAULT;
		goto func_end;
	}
	if (ul_shm_limit <= ul_shm_base) {
		status = -EINVAL;
		goto func_end;
	}
	/* Get total length in bytes */
	ul_shm_length = (ul_shm_limit - ul_shm_base + 1) * hio_mgr->word_size;
	/* Calculate size of a PROCCOPY shared memory region */
	dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
		__func__, (ul_shm_length - sizeof(struct shm)));

	/* Get start and length of message part of shared memory */
	status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
				   &ul_msg_base);
	if (!status) {
		status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
					   &ul_msg_limit);
		if (!status) {
			if (ul_msg_limit <= ul_msg_base) {
				status = -EINVAL;
			} else {
				/*
				 * Length (bytes) of messaging part of shared
				 * memory.
				 */
				ul_msg_length =
				    (ul_msg_limit - ul_msg_base +
				     1) * hio_mgr->word_size;
				/*
				 * Total length (bytes) of shared memory:
				 * chnl + msg.
				 */
				ul_mem_length = ul_shm_length + ul_msg_length;
			}
		} else {
			status = -EFAULT;
		}
	} else {
		status = -EFAULT;
	}
	if (!status) {
#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
		status =
		    cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
#else
		status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
					   &shm0_end);
#endif
		if (status)
			status = -EFAULT;
	}
	if (!status) {
		status =
		    cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base);
		if (status)
			status = -EFAULT;
	}
	if (!status) {
		status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end);
		if (status)
			status = -EFAULT;
	}
	if (!status) {
		/* Get memory reserved in host resources */
		(void)mgr_enum_processor_info(0, (struct dsp_processorinfo *)
					      &hio_mgr->ext_proc_info,
					      sizeof(struct
						     mgr_processorextinfo),
					      &num_procs);

		/* The first MMU TLB entry (TLB_0) in DCD is ShmBase. */
		ndx = 0;
		ul_gpp_pa = host_res->mem_phys[1];
		ul_gpp_va = host_res->mem_base[1];
		/* This is the virtual uncached ioremapped address!!! */
		/* Why can't we directly take the DSPVA from the symbols? */
		ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt;
		ul_seg_size = (shm0_end - ul_dsp_va) * hio_mgr->word_size;
		ul_seg1_size =
		    (ul_ext_end - ul_dyn_ext_base) * hio_mgr->word_size;
		/* 4K align */
		ul_seg1_size = (ul_seg1_size + 0xFFF) & (~0xFFFUL);
		/* 64K align */
		ul_seg_size = (ul_seg_size + 0xFFFF) & (~0xFFFFUL);
		ul_pad_size = UL_PAGE_ALIGN_SIZE - ((ul_gpp_pa + ul_seg1_size) %
						    UL_PAGE_ALIGN_SIZE);
		if (ul_pad_size == UL_PAGE_ALIGN_SIZE)
			ul_pad_size = 0x0;

		dev_dbg(bridge, "%s: ul_gpp_pa %x, ul_gpp_va %x, ul_dsp_va %x, "
			"shm0_end %x, ul_dyn_ext_base %x, ul_ext_end %x, "
			"ul_seg_size %x ul_seg1_size %x\n", __func__,
			ul_gpp_pa, ul_gpp_va, ul_dsp_va, shm0_end,
			ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size);

		if ((ul_seg_size + ul_seg1_size + ul_pad_size) >
		    host_res->mem_length[1]) {
			pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
			       __func__, host_res->mem_length[1],
			       ul_seg_size + ul_seg1_size + ul_pad_size);
			status = -ENOMEM;
		}
	}
	if (status)
		goto func_end;

	pa_curr = ul_gpp_pa;
	va_curr = ul_dyn_ext_base * hio_mgr->word_size;
	gpp_va_curr = ul_gpp_va;
	num_bytes = ul_seg1_size;

	/*
	 * Try to fit into TLB entries. If not possible, push them to page
	 * tables. It is quite possible that if sections are not on
	 * bigger page boundary, we may end up making several small pages.
	 * So, push them onto page tables, if that is the case.
	 */
	map_attrs = DSP_MAPLITTLEENDIAN;
	map_attrs |= DSP_MAPPHYSICALADDR;
	map_attrs |= DSP_MAPELEMSIZE32;
	map_attrs |= DSP_MAPDONOTLOCK;

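	/*
	 * Illustration (hypothetical values): with pa_curr = 0x87000000 and
	 * va_curr = 0x20000000, all_bits = 0xa7000000 has its low 24 bits
	 * clear, so a 16 MB page is usable when num_bytes >= 16 MB; otherwise
	 * the loop falls back to 1 MB, 64 KB and finally 4 KB pages.
	 */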
	while (num_bytes) {
		/*
		 * To find the max. page size with which both PA & VA are
		 * aligned.
		 */
		all_bits = pa_curr | va_curr;
		dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
			"num_bytes %x\n", all_bits, pa_curr, va_curr,
			num_bytes);
		for (i = 0; i < 4; i++) {
			if ((num_bytes >= page_size[i]) && ((all_bits &
							     (page_size[i] -
							      1)) == 0)) {
				status =
				    hio_mgr->intf_fxns->
				    brd_mem_map(hio_mgr->bridge_context,
						pa_curr, va_curr,
						page_size[i], map_attrs,
						NULL);
				if (status)
					goto func_end;
				pa_curr += page_size[i];
				va_curr += page_size[i];
				gpp_va_curr += page_size[i];
				num_bytes -= page_size[i];
				/*
				 * Don't try smaller sizes. Hopefully we have
				 * reached an address aligned to a bigger page
				 * size.
				 */
				break;
			}
		}
	}
	pa_curr += ul_pad_size;
	va_curr += ul_pad_size;
	gpp_va_curr += ul_pad_size;

	/* Configure the TLB entries for the next cacheable segment */
	num_bytes = ul_seg_size;
	va_curr = ul_dsp_va * hio_mgr->word_size;
	while (num_bytes) {
		/*
		 * To find the max. page size with which both PA & VA are
		 * aligned.
		 */
		all_bits = pa_curr | va_curr;
		dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
			"va_curr %x, num_bytes %x\n", all_bits, pa_curr,
			va_curr, num_bytes);
		for (i = 0; i < 4; i++) {
			if (!(num_bytes >= page_size[i]) ||
			    !((all_bits & (page_size[i] - 1)) == 0))
				continue;
			if (ndx < MAX_LOCK_TLB_ENTRIES) {
				/*
				 * This is the physical address written to
				 * DSP MMU.
				 */
				ae_proc[ndx].gpp_pa = pa_curr;
				/*
				 * This is the virtual uncached ioremapped
				 * address!!!
				 */
				ae_proc[ndx].gpp_va = gpp_va_curr;
				ae_proc[ndx].dsp_va =
				    va_curr / hio_mgr->word_size;
				ae_proc[ndx].size = page_size[i];
				ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
				ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
				ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
				dev_dbg(bridge, "shm MMU TLB entry PA %x"
					" VA %x DSP_VA %x Size %x\n",
					ae_proc[ndx].gpp_pa,
					ae_proc[ndx].gpp_va,
					ae_proc[ndx].dsp_va *
					hio_mgr->word_size, page_size[i]);
				ndx++;
			} else {
				status =
				    hio_mgr->intf_fxns->
				    brd_mem_map(hio_mgr->bridge_context,
						pa_curr, va_curr,
						page_size[i], map_attrs,
						NULL);
				/*
				 * Log the current mapping values; ae_proc[ndx]
				 * is not filled in on this path.
				 */
				dev_dbg(bridge,
					"shm MMU PTE entry PA %x"
					" VA %x DSP_VA %x Size %x\n",
					pa_curr, gpp_va_curr, va_curr,
					page_size[i]);
				if (status)
					goto func_end;
			}
			pa_curr += page_size[i];
			va_curr += page_size[i];
			gpp_va_curr += page_size[i];
			num_bytes -= page_size[i];
			/*
			 * Don't try smaller sizes. Hopefully we have reached
			 * an address aligned to a bigger page size.
			 */
			break;
		}
	}

	/*
	 * Copy remaining entries from CDB. All entries are 1 MB and
	 * should not conflict with shm entries on MPU or DSP side.
	 */
	for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
		if (hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys == 0)
			continue;

		if ((hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys >
		     ul_gpp_pa - 0x100000
		     && hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys <=
		     ul_gpp_pa + ul_seg_size)
		    || (hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt >
			ul_dsp_va - 0x100000 / hio_mgr->word_size
			&& hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt <=
			ul_dsp_va + ul_seg_size / hio_mgr->word_size)) {
			dev_dbg(bridge,
				"CDB MMU entry %d conflicts with "
				"shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
				"GppPa %x, DspVa %x, Bytes %x.\n", i,
				hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys,
				hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt,
				ul_gpp_pa, ul_dsp_va, ul_seg_size);
			status = -EPERM;
		} else {
			if (ndx < MAX_LOCK_TLB_ENTRIES) {
				ae_proc[ndx].dsp_va =
				    hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt;
				ae_proc[ndx].gpp_pa =
				    hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys;
				ae_proc[ndx].gpp_va = 0;
				/* 1 MB */
				ae_proc[ndx].size = 0x100000;
				dev_dbg(bridge, "shm MMU entry PA %x "
					"DSP_VA 0x%x\n", ae_proc[ndx].gpp_pa,
					ae_proc[ndx].dsp_va);
				ndx++;
			} else {
				status = hio_mgr->intf_fxns->brd_mem_map
				    (hio_mgr->bridge_context,
				     hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys,
				     hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt,
				     0x100000, map_attrs, NULL);
			}
		}
		if (status)
			goto func_end;
	}

	map_attrs = DSP_MAPLITTLEENDIAN;
	map_attrs |= DSP_MAPPHYSICALADDR;
	map_attrs |= DSP_MAPELEMSIZE32;
	map_attrs |= DSP_MAPDONOTLOCK;

	/* Map the L4 peripherals */
	i = 0;
	while (l4_peripheral_table[i].phys_addr) {
		status = hio_mgr->intf_fxns->brd_mem_map
		    (hio_mgr->bridge_context, l4_peripheral_table[i].phys_addr,
		     l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
		     map_attrs, NULL);
		if (status)
			goto func_end;
		i++;
	}

	for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
		ae_proc[i].dsp_va = 0;
		ae_proc[i].gpp_pa = 0;
		ae_proc[i].gpp_va = 0;
		ae_proc[i].size = 0;
	}
	/*
	 * Set the shm physical address entry (grayed out in CDB file)
	 * to the virtual uncached ioremapped address of shm reserved
	 * on MPU.
	 */
	hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys =
	    (ul_gpp_va + ul_seg1_size + ul_pad_size);

	/*
	 * Need shm Phys addr. IO supports only one DSP for now:
	 * num_procs = 1.
	 */
	if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys || num_procs != 1) {
		status = -EFAULT;
		goto func_end;
	} else {
		if (ae_proc[0].dsp_va > ul_shm_base) {
			status = -EPERM;
			goto func_end;
		}
		/* ul_shm_base may not be at ul_dsp_va address */
		ul_shm_base_offset = (ul_shm_base - ae_proc[0].dsp_va) *
		    hio_mgr->word_size;
		/*
		 * bridge_dev_ctrl() will set dev context dsp-mmu info. In
		 * bridge_brd_start() the MMU will be re-programmed with MMU
		 * DSPVa-GPPPa pair info while DSP is in a known
		 * (reset) state.
		 */
		status =
		    hio_mgr->intf_fxns->dev_cntrl(hio_mgr->bridge_context,
						  BRDIOCTL_SETMMUCONFIG,
						  ae_proc);
		if (status)
			goto func_end;
		ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
		ul_shm_base += ul_shm_base_offset;
		ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base,
						       ul_mem_length);
		if (ul_shm_base == 0) {
			status = -EFAULT;
			goto func_end;
		}
		/* Register SM */
		status =
		    register_shm_segs(hio_mgr, cod_man, ae_proc[0].gpp_pa);
	}

	hio_mgr->shared_mem = (struct shm *)ul_shm_base;
	hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm);
	hio_mgr->output = hio_mgr->input + (ul_shm_length -
					    sizeof(struct shm)) / 2;
	hio_mgr->sm_buf_size = hio_mgr->output - hio_mgr->input;

	/* Set up Shared memory addresses for messaging. */
	hio_mgr->msg_input_ctrl = (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem
						      + ul_shm_length);
	hio_mgr->msg_input =
	    (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
	hio_mgr->msg_output_ctrl =
	    (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
				ul_msg_length / 2);
	hio_mgr->msg_output =
	    (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
	hmsg_mgr->max_msgs =
	    ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input)
	    / sizeof(struct msg_dspmsg);
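	/*
	 * Resulting layout of the shared segment, as set up above (a sketch):
	 *
	 *   [struct shm][input buf][output buf][msg_input_ctrl][input msgs]
	 *   [msg_output_ctrl][output msgs]
	 *
	 * input/output split what remains of ul_shm_length evenly, and the
	 * messaging area splits ul_msg_length evenly around its two controls.
	 */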
	dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, "
		"output %p, msg_input_ctrl %p, msg_input %p, "
		"msg_output_ctrl %p, msg_output %p\n",
		(u8 *) hio_mgr->shared_mem, hio_mgr->input,
		hio_mgr->output, (u8 *) hio_mgr->msg_input_ctrl,
		hio_mgr->msg_input, (u8 *) hio_mgr->msg_output_ctrl,
		hio_mgr->msg_output);
	dev_dbg(bridge, "(proc) Max msgs in shared memory: 0x%x\n",
		hmsg_mgr->max_msgs);
	memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));

#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
	/* Get the start address of trace buffer */
	status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
				   &hio_mgr->trace_buffer_begin);
	if (status) {
		status = -EFAULT;
		goto func_end;
	}

	hio_mgr->gpp_read_pointer = hio_mgr->trace_buffer_begin =
	    (ul_gpp_va + ul_seg1_size + ul_pad_size) +
	    (hio_mgr->trace_buffer_begin - ul_dsp_va);
	/* Get the end address of trace buffer */
	status = cod_get_sym_value(cod_man, SYS_PUTCEND,
				   &hio_mgr->trace_buffer_end);
	if (status) {
		status = -EFAULT;
		goto func_end;
	}
	hio_mgr->trace_buffer_end =
	    (ul_gpp_va + ul_seg1_size + ul_pad_size) +
	    (hio_mgr->trace_buffer_end - ul_dsp_va);
	/* Get the current address of DSP write pointer */
	status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
				   &hio_mgr->trace_buffer_current);
	if (status) {
		status = -EFAULT;
		goto func_end;
	}
	hio_mgr->trace_buffer_current =
	    (ul_gpp_va + ul_seg1_size + ul_pad_size) +
	    (hio_mgr->trace_buffer_current - ul_dsp_va);
	/* Calculate the size of trace buffer */
	kfree(hio_mgr->msg);
	hio_mgr->msg = kmalloc(((hio_mgr->trace_buffer_end -
				 hio_mgr->trace_buffer_begin) *
				hio_mgr->word_size) + 2, GFP_KERNEL);
	if (!hio_mgr->msg)
		status = -ENOMEM;

	hio_mgr->dsp_va = ul_dsp_va;
	hio_mgr->gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);

#endif
func_end:
	return status;
}

/*
 * ======== io_buf_size ========
 *      Size of shared memory I/O channel.
 */
u32 io_buf_size(struct io_mgr *hio_mgr)
{
	if (hio_mgr)
		return hio_mgr->sm_buf_size;
	else
		return 0;
}

/*
 * ======== io_cancel_chnl ========
 *      Cancel IO on a given PCPY channel.
 */
void io_cancel_chnl(struct io_mgr *hio_mgr, u32 chnl)
{
	struct io_mgr *pio_mgr = (struct io_mgr *)hio_mgr;
	struct shm *sm;

	if (!hio_mgr)
		goto func_end;
	sm = hio_mgr->shared_mem;

	/* Inform DSP that we have no more buffers on this channel */
	set_chnl_free(sm, chnl);

	sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
func_end:
	return;
}

/*
 * ======== io_dispatch_pm ========
 *      Performs I/O dispatch on PM related messages from DSP
 */
static void io_dispatch_pm(struct io_mgr *pio_mgr)
{
	int status;
	u32 parg[2];

	/* Perform Power message processing here */
	parg[0] = pio_mgr->intr_val;

	/* Send the command to the Bridge clk/pwr manager to handle */
	if (parg[0] == MBX_PM_HIBERNATE_EN) {
		dev_dbg(bridge, "PM: Hibernate command\n");
		status = pio_mgr->intf_fxns->
				dev_cntrl(pio_mgr->bridge_context,
					  BRDIOCTL_PWR_HIBERNATE, parg);
		if (status)
			pr_err("%s: hibernate cmd failed 0x%x\n",
			       __func__, status);
	} else if (parg[0] == MBX_PM_OPP_REQ) {
		parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt;
		dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]);
		status = pio_mgr->intf_fxns->
				dev_cntrl(pio_mgr->bridge_context,
					  BRDIOCTL_CONSTRAINT_REQUEST, parg);
		if (status)
			dev_dbg(bridge, "PM: Failed to set constraint "
				"= 0x%x\n", parg[1]);
	} else {
		dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n",
			parg[0]);
		status = pio_mgr->intf_fxns->
				dev_cntrl(pio_mgr->bridge_context,
					  BRDIOCTL_CLK_CTRL, parg);
		if (status)
			dev_dbg(bridge, "PM: Failed to ctrl the DSP clk "
				"= 0x%x\n", *parg);
	}
}

/*
 * ======== io_dpc ========
 *      Deferred procedure call for shared memory channel driver ISR. Carries
 *      out the dispatch of I/O as a non-preemptible event. It can only be
 *      preempted by an ISR.
 */
void io_dpc(unsigned long ref_data)
{
	struct io_mgr *pio_mgr = (struct io_mgr *)ref_data;
	struct chnl_mgr *chnl_mgr_obj;
	struct msg_mgr *msg_mgr_obj;
	struct deh_mgr *hdeh_mgr;
	u32 requested;
	u32 serviced;

	if (!pio_mgr)
		goto func_end;
	chnl_mgr_obj = pio_mgr->chnl_mgr;
	dev_get_msg_mgr(pio_mgr->dev_obj, &msg_mgr_obj);
	dev_get_deh_mgr(pio_mgr->dev_obj, &hdeh_mgr);
	if (!chnl_mgr_obj)
		goto func_end;

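	/*
	 * Snapshot the two counters once: io_mbox_msg() bumps dpc_req under
	 * dpc_lock and re-schedules this tasklet, so events that arrive while
	 * the loop below runs are not lost, only deferred to the next run.
	 */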
	requested = pio_mgr->dpc_req;
	serviced = pio_mgr->dpc_sched;

	if (serviced == requested)
		goto func_end;

	/* Process pending DPCs */
	do {
		/* Check value of interrupt reg to ensure it's a valid error */
		if ((pio_mgr->intr_val > DEH_BASE) &&
		    (pio_mgr->intr_val < DEH_LIMIT)) {
			/* Notify DSP/BIOS exception */
			if (hdeh_mgr) {
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
				print_dsp_debug_trace(pio_mgr);
#endif
				bridge_deh_notify(hdeh_mgr, DSP_SYSERROR,
						  pio_mgr->intr_val);
			}
		}
		/* Proc-copy channel dispatch */
		input_chnl(pio_mgr, NULL, IO_SERVICE);
		output_chnl(pio_mgr, NULL, IO_SERVICE);

#ifdef CHNL_MESSAGES
		if (msg_mgr_obj) {
			/* Perform I/O dispatch on message queues */
			input_msg(pio_mgr, msg_mgr_obj);
			output_msg(pio_mgr, msg_mgr_obj);
		}

#endif
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
		if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) {
			/* Notify DSP Trace message */
			print_dsp_debug_trace(pio_mgr);
		}
#endif
		serviced++;
	} while (serviced != requested);
	pio_mgr->dpc_sched = requested;
func_end:
	return;
}

/*
 * ======== io_mbox_msg ========
 *      Main interrupt handler for the shared memory IO manager.
 *      Calls the Bridge's CHNL_ISR to determine if this interrupt is ours,
 *      then schedules a DPC to dispatch I/O.
 */
int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg)
{
	struct io_mgr *pio_mgr;
	struct dev_object *dev_obj;
	unsigned long flags;

	dev_obj = dev_get_first();
	dev_get_io_mgr(dev_obj, &pio_mgr);

	if (!pio_mgr)
		return NOTIFY_BAD;

	pio_mgr->intr_val = (u16)((u32)msg);
	if (pio_mgr->intr_val & MBX_PM_CLASS)
		io_dispatch_pm(pio_mgr);

	if (pio_mgr->intr_val == MBX_DEH_RESET) {
		pio_mgr->intr_val = 0;
	} else {
		spin_lock_irqsave(&pio_mgr->dpc_lock, flags);
		pio_mgr->dpc_req++;
		spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
		tasklet_schedule(&pio_mgr->dpc_tasklet);
	}
	return NOTIFY_OK;
}

/*
 * ======== io_request_chnl ========
 * Purpose:
 *      Request channel I/O from the DSP. Sets flags in shared memory, then
 *      interrupts the DSP.
 */
void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
		     u8 io_mode, u16 *mbx_val)
{
	struct chnl_mgr *chnl_mgr_obj;
	struct shm *sm;

	if (!pchnl || !mbx_val)
		goto func_end;
	chnl_mgr_obj = io_manager->chnl_mgr;
	sm = io_manager->shared_mem;
	if (io_mode == IO_INPUT) {
		/* Indicate to the DSP we have a buffer available for input */
		set_chnl_busy(sm, pchnl->chnl_id);
		*mbx_val = MBX_PCPY_CLASS;
	} else if (io_mode == IO_OUTPUT) {
		/*
		 * Record the fact that we have a buffer available for
		 * output.
		 */
		chnl_mgr_obj->output_mask |= (1 << pchnl->chnl_id);
	}
func_end:
	return;
}

/*
 * ======== iosm_schedule ========
 *      Schedule DPC for IO.
 */
void iosm_schedule(struct io_mgr *io_manager)
{
	unsigned long flags;

	if (!io_manager)
		return;

	/* Increment count of DPCs pending */
	spin_lock_irqsave(&io_manager->dpc_lock, flags);
	io_manager->dpc_req++;
	spin_unlock_irqrestore(&io_manager->dpc_lock, flags);

	/* Schedule DPC */
	tasklet_schedule(&io_manager->dpc_tasklet);
}

/*
 * ======== find_ready_output ========
 *      Search for a host output channel which is ready to send. If this is
 *      called as a result of servicing the DPC, then implement a round
 *      robin search; otherwise, this was called by a client thread (via
 *      IO_Dispatch()), so just start searching from the current channel id.
 */
static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
			     struct chnl_object *pchnl, u32 mask)
{
	u32 ret = OUTPUTNOTREADY;
	u32 id, start_id;
	u32 shift;

	id = (pchnl !=
	      NULL ? pchnl->chnl_id : (chnl_mgr_obj->last_output + 1));
	id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
	if (id >= CHNL_MAXCHANNELS)
		goto func_end;
	if (mask) {
		shift = (1 << id);
		start_id = id;
		do {
			if (mask & shift) {
				ret = id;
				if (pchnl == NULL)
					chnl_mgr_obj->last_output = id;
				break;
			}
			id = id + 1;
			id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
			shift = (1 << id);
		} while (id != start_id);
	}
func_end:
	return ret;
}

/*
 * ======== input_chnl ========
 *      Dispatch a buffer on an input channel.
 */
static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
		       u8 io_mode)
{
	struct chnl_mgr *chnl_mgr_obj;
	struct shm *sm;
	u32 chnl_id;
	u32 bytes;
	struct chnl_irp *chnl_packet_obj = NULL;
	u32 dw_arg;
	bool clear_chnl = false;
	bool notify_client = false;

	sm = pio_mgr->shared_mem;
	chnl_mgr_obj = pio_mgr->chnl_mgr;

	/* Attempt to perform input */
	if (!sm->input_full)
		goto func_end;

	bytes = sm->input_size * chnl_mgr_obj->word_size;
	chnl_id = sm->input_id;
	dw_arg = sm->arg;
	if (chnl_id >= CHNL_MAXCHANNELS) {
		/* Shouldn't be here: would indicate corrupted shm. */
		goto func_end;
	}
	pchnl = chnl_mgr_obj->channels[chnl_id];
	if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) {
		if ((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY) {
			/* Get the I/O request, and attempt a transfer */
			if (!list_empty(&pchnl->io_requests)) {
				if (!pchnl->cio_reqs)
					goto func_end;

				chnl_packet_obj = list_first_entry(
						&pchnl->io_requests,
						struct chnl_irp, link);
				list_del(&chnl_packet_obj->link);
				pchnl->cio_reqs--;

				/*
				 * Ensure we don't overflow the client's
				 * buffer.
				 */
				bytes = min(bytes, chnl_packet_obj->byte_size);
				memcpy(chnl_packet_obj->host_sys_buf,
				       pio_mgr->input, bytes);
				pchnl->bytes_moved += bytes;
				chnl_packet_obj->byte_size = bytes;
				chnl_packet_obj->arg = dw_arg;
				chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE;

				if (bytes == 0) {
					/*
					 * This assertion fails if the DSP
					 * sends EOS more than once on this
					 * channel.
					 */
					if (pchnl->state & CHNL_STATEEOS)
						goto func_end;
					/*
					 * Zero bytes indicates EOS. Update
					 * IOC status for this chirp, and also
					 * the channel state.
					 */
					chnl_packet_obj->status |=
						CHNL_IOCSTATEOS;
					pchnl->state |= CHNL_STATEEOS;
					/*
					 * Notify that end of stream has
					 * occurred.
					 */
					ntfy_notify(pchnl->ntfy_obj,
						    DSP_STREAMDONE);
				}
				/* Tell DSP if no more I/O buffers available */
				if (list_empty(&pchnl->io_requests))
					set_chnl_free(sm, pchnl->chnl_id);
				clear_chnl = true;
				notify_client = true;
			} else {
				/*
				 * Input full for this channel, but we have no
				 * buffers available. The channel must be
				 * "idling". Clear out the physical input
				 * channel.
				 */
				clear_chnl = true;
			}
		} else {
			/* Input channel cancelled: clear input channel */
			clear_chnl = true;
		}
	} else {
		/* DPC fired after host closed channel: clear input channel */
		clear_chnl = true;
	}
	if (clear_chnl) {
		/* Indicate to the DSP we have read the input */
		sm->input_full = 0;
		sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
	}
	if (notify_client) {
		/* Notify client with IO completion record */
		notify_chnl_complete(pchnl, chnl_packet_obj);
	}
func_end:
	return;
}

/*
 * ======== input_msg ========
 *      Copies messages from shared memory to the message queues.
 */
static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
{
	u32 num_msgs;
	u32 i;
	u8 *msg_input;
	struct msg_queue *msg_queue_obj;
	struct msg_frame *pmsg;
	struct msg_dspmsg msg;
	struct msg_ctrl *msg_ctr_obj;
	u32 input_empty;
	u32 addr;

	msg_ctr_obj = pio_mgr->msg_input_ctrl;
	/* Get the number of input messages to be read */
	input_empty = msg_ctr_obj->buf_empty;
	num_msgs = msg_ctr_obj->size;
	if (input_empty)
		return;

	msg_input = pio_mgr->msg_input;
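	/*
	 * Each field is fetched with read_ext32_bit_dsp_data() rather than a
	 * bulk memcpy, presumably so every access goes through the Bridge's
	 * external DSP-memory accessor with the proper word size.
	 */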
	for (i = 0; i < num_msgs; i++) {
		/* Read the next message */
		addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.cmd);
		msg.msg.cmd =
		    read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
		addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg1);
		msg.msg.arg1 =
		    read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
		addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg2);
		msg.msg.arg2 =
		    read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
		addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id);
		msg.msgq_id =
		    read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
		msg_input += sizeof(struct msg_dspmsg);

		/* Determine which queue to put the message in */
		dev_dbg(bridge, "input msg: cmd=0x%x arg1=0x%x "
			"arg2=0x%x msgq_id=0x%x\n", msg.msg.cmd,
			msg.msg.arg1, msg.msg.arg2, msg.msgq_id);
		/*
		 * Interrupt may occur before shared memory and message
		 * input locations have been set up. If all nodes were
		 * cleaned up, hmsg_mgr->max_msgs should be 0.
		 */
		list_for_each_entry(msg_queue_obj, &hmsg_mgr->queue_list,
				    list_elem) {
			if (msg.msgq_id != msg_queue_obj->msgq_id)
				continue;
			/* Found it */
			if (msg.msg.cmd == RMS_EXITACK) {
				/*
				 * Call the node exit notification.
				 * The exit message does not get
				 * queued.
				 */
				(*hmsg_mgr->on_exit)(msg_queue_obj->arg,
						     msg.msg.arg1);
				break;
			}
			/*
			 * Not an exit acknowledgement, queue
			 * the message.
			 */
			if (list_empty(&msg_queue_obj->msg_free_list)) {
				/*
				 * No free frame to copy the
				 * message into.
				 */
				pr_err("%s: no free msg frames,"
				       " discarding msg\n", __func__);
				break;
			}

			pmsg = list_first_entry(&msg_queue_obj->msg_free_list,
						struct msg_frame, list_elem);
			list_del(&pmsg->list_elem);
			pmsg->msg_data = msg;
			list_add_tail(&pmsg->list_elem,
				      &msg_queue_obj->msg_used_list);
			ntfy_notify(msg_queue_obj->ntfy_obj,
				    DSP_NODEMESSAGEREADY);
			sync_set_event(msg_queue_obj->sync_event);
		}
	}
	/* Set the post SWI flag */
	if (num_msgs > 0) {
		/* Tell the DSP we've read the messages */
		msg_ctr_obj->buf_empty = true;
		msg_ctr_obj->post_swi = true;
		sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
	}
}

/*
 * ======== notify_chnl_complete ========
 * Purpose:
 *      Signal the channel event, notifying the client that I/O has completed.
 */
static void notify_chnl_complete(struct chnl_object *pchnl,
				 struct chnl_irp *chnl_packet_obj)
{
	bool signal_event;

	if (!pchnl || !pchnl->sync_event || !chnl_packet_obj)
		goto func_end;

	/*
	 * Note: we signal the channel event only if the queue of IO
	 * completions is empty. If it is not empty, the event is sure to be
	 * signalled by the only IO completion list consumer:
	 * bridge_chnl_get_ioc().
	 */
	signal_event = list_empty(&pchnl->io_completions);
	/* Enqueue the IO completion info for the client */
	list_add_tail(&chnl_packet_obj->link, &pchnl->io_completions);
	pchnl->cio_cs++;

	if (pchnl->cio_cs > pchnl->chnl_packets)
		goto func_end;
	/* Signal the channel event (if not already set) that IO is complete */
	if (signal_event)
		sync_set_event(pchnl->sync_event);

	/* Notify that IO is complete */
	ntfy_notify(pchnl->ntfy_obj, DSP_STREAMIOCOMPLETION);
func_end:
	return;
}

/*
 * ======== output_chnl ========
 * Purpose:
 *      Dispatch a buffer on an output channel.
 */
static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
			u8 io_mode)
{
	struct chnl_mgr *chnl_mgr_obj;
	struct shm *sm;
	u32 chnl_id;
	struct chnl_irp *chnl_packet_obj;
	u32 dw_dsp_f_mask;

	chnl_mgr_obj = pio_mgr->chnl_mgr;
	sm = pio_mgr->shared_mem;
	/* Attempt to perform output */
	if (sm->output_full)
		goto func_end;

	if (pchnl && !((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY))
		goto func_end;

	/* Look to see if both a PC and DSP output channel are ready */
	dw_dsp_f_mask = sm->dsp_free_mask;
	chnl_id =
	    find_ready_output(chnl_mgr_obj, pchnl,
			      (chnl_mgr_obj->output_mask & dw_dsp_f_mask));
	if (chnl_id == OUTPUTNOTREADY)
		goto func_end;

	pchnl = chnl_mgr_obj->channels[chnl_id];
	if (!pchnl || list_empty(&pchnl->io_requests)) {
		/* Shouldn't get here */
		goto func_end;
	}

	if (!pchnl->cio_reqs)
		goto func_end;

	/* Get the I/O request, and attempt a transfer */
	chnl_packet_obj = list_first_entry(&pchnl->io_requests,
					   struct chnl_irp, link);
	list_del(&chnl_packet_obj->link);

	pchnl->cio_reqs--;

	/* Record fact that no more I/O buffers available */
	if (list_empty(&pchnl->io_requests))
		chnl_mgr_obj->output_mask &= ~(1 << chnl_id);

	/* Transfer buffer to DSP side */
	chnl_packet_obj->byte_size = min(pio_mgr->sm_buf_size,
					 chnl_packet_obj->byte_size);
	memcpy(pio_mgr->output, chnl_packet_obj->host_sys_buf,
	       chnl_packet_obj->byte_size);
	pchnl->bytes_moved += chnl_packet_obj->byte_size;
	/* Write all 32 bits of arg */
	sm->arg = chnl_packet_obj->arg;
#if _CHNL_WORDSIZE == 2
	/* Access can be different SM access word size (e.g. 16/32 bit words) */
	sm->output_id = (u16) chnl_id;
	sm->output_size = (u16) (chnl_packet_obj->byte_size +
				 chnl_mgr_obj->word_size - 1) /
				 (u16) chnl_mgr_obj->word_size;
#else
	sm->output_id = chnl_id;
	sm->output_size = (chnl_packet_obj->byte_size +
			   chnl_mgr_obj->word_size - 1) /
			   chnl_mgr_obj->word_size;
#endif
	sm->output_full = 1;
	/* Indicate to the DSP we have written the output */
	sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
	/* Notify client with IO completion record (keep EOS) */
	chnl_packet_obj->status &= CHNL_IOCSTATEOS;
	notify_chnl_complete(pchnl, chnl_packet_obj);
	/* Notify if stream is done. */
	if (chnl_packet_obj->status & CHNL_IOCSTATEOS)
		ntfy_notify(pchnl->ntfy_obj, DSP_STREAMDONE);

func_end:
	return;
}

/*
 * ======== output_msg ========
 *      Copies messages from the message queues to the shared memory.
 */
static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
{
	u32 num_msgs = 0;
	u32 i;
	struct msg_dspmsg *msg_output;
	struct msg_frame *pmsg;
	struct msg_ctrl *msg_ctr_obj;
	u32 val;
	u32 addr;

	msg_ctr_obj = pio_mgr->msg_output_ctrl;

	/* Check if output has been cleared */
	if (!msg_ctr_obj->buf_empty)
		return;

	num_msgs = (hmsg_mgr->msgs_pending > hmsg_mgr->max_msgs) ?
	    hmsg_mgr->max_msgs : hmsg_mgr->msgs_pending;
	msg_output = (struct msg_dspmsg *) pio_mgr->msg_output;

	/* Copy num_msgs messages into shared memory */
	for (i = 0; i < num_msgs; i++) {
		if (list_empty(&hmsg_mgr->msg_used_list))
			continue;

		pmsg = list_first_entry(&hmsg_mgr->msg_used_list,
					struct msg_frame, list_elem);
		list_del(&pmsg->list_elem);

		val = (pmsg->msg_data).msgq_id;
		addr = (u32) &msg_output->msgq_id;
		write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);

		val = (pmsg->msg_data).msg.cmd;
		addr = (u32) &msg_output->msg.cmd;
		write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);

		val = (pmsg->msg_data).msg.arg1;
		addr = (u32) &msg_output->msg.arg1;
		write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);

		val = (pmsg->msg_data).msg.arg2;
		addr = (u32) &msg_output->msg.arg2;
		write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);

		msg_output++;
		list_add_tail(&pmsg->list_elem, &hmsg_mgr->msg_free_list);
		sync_set_event(hmsg_mgr->sync_event);
	}

	if (num_msgs > 0) {
		hmsg_mgr->msgs_pending -= num_msgs;
#if _CHNL_WORDSIZE == 2
		/*
		 * Access can be different SM access word size
		 * (e.g. 16/32 bit words)
		 */
		msg_ctr_obj->size = (u16) num_msgs;
#else
		msg_ctr_obj->size = num_msgs;
#endif
		msg_ctr_obj->buf_empty = false;
		/* Set the post SWI flag */
		msg_ctr_obj->post_swi = true;
		/* Tell the DSP we have written the output. */
		sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
	}
}

/*
 * ======== register_shm_segs ========
 * Purpose:
 *      Registers GPP SM segment with CMM.
 */
static int register_shm_segs(struct io_mgr *hio_mgr,
			     struct cod_manager *cod_man,
			     u32 dw_gpp_base_pa)
{
	int status = 0;
	u32 ul_shm0_base = 0;
	u32 shm0_end = 0;
	u32 ul_shm0_rsrvd_start = 0;
	u32 ul_rsrvd_size = 0;
	u32 ul_gpp_phys;
	u32 ul_dsp_virt;
	u32 ul_shm_seg_id0 = 0;
	u32 dw_offset, dw_gpp_base_va, ul_dsp_size;

	/*
	 * Read address and size info for first SM region.
	 * Get start of 1st SM Heap region.
	 */
	status =
	    cod_get_sym_value(cod_man, SHM0_SHARED_BASE_SYM, &ul_shm0_base);
	if (ul_shm0_base == 0) {
		status = -EPERM;
		goto func_end;
	}
	/* Get end of 1st SM Heap region */
	if (!status) {
		/* Get start and length of message part of shared memory */
		status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
					   &shm0_end);
		if (shm0_end == 0) {
			status = -EPERM;
			goto func_end;
		}
	}
	/* Start of Gpp reserved region */
	if (!status) {
		/* Get start and length of message part of shared memory */
		status =
		    cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM,
				      &ul_shm0_rsrvd_start);
		if (ul_shm0_rsrvd_start == 0) {
			status = -EPERM;
			goto func_end;
		}
	}
	/* Register with CMM */
	if (!status) {
		status = dev_get_cmm_mgr(hio_mgr->dev_obj, &hio_mgr->cmm_mgr);
		if (!status) {
			status = cmm_un_register_gppsm_seg(hio_mgr->cmm_mgr,
							   CMM_ALLSEGMENTS);
		}
	}
	/* Register new SM region(s) */
	if (!status && (shm0_end - ul_shm0_base) > 0) {
		/* Calc size (bytes) of SM the GPP can alloc from */
		ul_rsrvd_size =
		    (shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size;
		if (ul_rsrvd_size <= 0) {
			status = -EPERM;
			goto func_end;
		}
		/* Calc size of SM DSP can alloc from */
		ul_dsp_size =
		    (ul_shm0_rsrvd_start - ul_shm0_base) * hio_mgr->word_size;
		if (ul_dsp_size <= 0) {
			status = -EPERM;
			goto func_end;
		}
		/* First TLB entry reserved for Bridge SM use. */
		ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
		/* Get size in bytes */
		ul_dsp_virt =
		    hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt *
		    hio_mgr->word_size;
		/*
		 * Calc byte offset used to convert GPP phys <-> DSP byte
		 * address.
		 */
		if (dw_gpp_base_pa > ul_dsp_virt)
			dw_offset = dw_gpp_base_pa - ul_dsp_virt;
		else
			dw_offset = ul_dsp_virt - dw_gpp_base_pa;
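		/*
		 * Example (hypothetical values): with dw_gpp_base_pa =
		 * 0x87000000 and ul_dsp_virt = 0x20000000, dw_offset =
		 * 0x67000000; CMM later adds or subtracts this offset (per
		 * the CMM_ADDTODSPPA/CMM_SUBFROMDSPPA choice below) to
		 * translate between GPP physical and DSP addresses.
		 */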

		if (ul_shm0_rsrvd_start * hio_mgr->word_size < ul_dsp_virt) {
			status = -EPERM;
			goto func_end;
		}
		/*
		 * Calc Gpp phys base of SM region.
		 * This is actually uncached kernel virtual address.
		 */
		dw_gpp_base_va =
		    ul_gpp_phys + ul_shm0_rsrvd_start * hio_mgr->word_size -
		    ul_dsp_virt;
		/*
		 * Calc Gpp phys base of SM region.
		 * This is the physical address.
		 */
		dw_gpp_base_pa =
		    dw_gpp_base_pa + ul_shm0_rsrvd_start * hio_mgr->word_size -
		    ul_dsp_virt;
		/* Register SM Segment 0. */
		status =
		    cmm_register_gppsm_seg(hio_mgr->cmm_mgr, dw_gpp_base_pa,
					   ul_rsrvd_size, dw_offset,
					   (dw_gpp_base_pa >
					    ul_dsp_virt) ? CMM_ADDTODSPPA :
					   CMM_SUBFROMDSPPA,
					   (u32) (ul_shm0_base *
						  hio_mgr->word_size),
					   ul_dsp_size, &ul_shm_seg_id0,
					   dw_gpp_base_va);
		/* First SM region is seg_id = 1 */
		if (ul_shm_seg_id0 != 1)
			status = -EPERM;
	}
func_end:
	return status;
}

/* ZCPY IO routines. */
/*
 * ======== io_sh_msetting ========
 *      Sets the requested shm setting.
 */
int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs)
{
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	u32 i;
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;

	switch (desc) {
	case SHM_CURROPP:
		/* Update the shared memory with requested OPP information */
		if (pargs != NULL)
			hio_mgr->shared_mem->opp_table_struct.curr_opp_pt =
			    *(u32 *) pargs;
		else
			return -EPERM;
		break;
	case SHM_OPPINFO:
		/*
		 * Update the shared memory with the voltage, frequency,
		 * min and max frequency values for an OPP.
		 */
		for (i = 0; i <= dsp_max_opps; i++) {
			hio_mgr->shared_mem->opp_table_struct.opp_point[i].
			    voltage = vdd1_dsp_freq[i][0];
			dev_dbg(bridge, "OPP-shm: voltage: %d\n",
				vdd1_dsp_freq[i][0]);
			hio_mgr->shared_mem->opp_table_struct.
			    opp_point[i].frequency = vdd1_dsp_freq[i][1];
			dev_dbg(bridge, "OPP-shm: frequency: %d\n",
				vdd1_dsp_freq[i][1]);
			hio_mgr->shared_mem->opp_table_struct.opp_point[i].
			    min_freq = vdd1_dsp_freq[i][2];
			dev_dbg(bridge, "OPP-shm: min freq: %d\n",
				vdd1_dsp_freq[i][2]);
			hio_mgr->shared_mem->opp_table_struct.opp_point[i].
			    max_freq = vdd1_dsp_freq[i][3];
			dev_dbg(bridge, "OPP-shm: max freq: %d\n",
				vdd1_dsp_freq[i][3]);
		}
		hio_mgr->shared_mem->opp_table_struct.num_opp_pts =
		    dsp_max_opps;
		dev_dbg(bridge, "OPP-shm: max OPP number: %d\n", dsp_max_opps);
		/* Update the current OPP number */
		if (pdata->dsp_get_opp)
			i = (*pdata->dsp_get_opp) ();
		hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = i;
		dev_dbg(bridge, "OPP-shm: value programmed = %d\n", i);
		break;
	case SHM_GETOPP:
		/* Get the OPP that DSP has requested */
		*(u32 *) pargs = hio_mgr->shared_mem->opp_request.rqst_opp_pt;
		break;
	default:
		break;
	}
#endif
	return 0;
}

/*
 * ======== bridge_io_get_proc_load ========
 *      Gets the Processor's Load information
 */
int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
			    struct dsp_procloadstat *proc_lstat)
{
	if (!hio_mgr->shared_mem)
		return -EFAULT;

	proc_lstat->curr_load =
	    hio_mgr->shared_mem->load_mon_info.curr_dsp_load;
	proc_lstat->predicted_load =
	    hio_mgr->shared_mem->load_mon_info.pred_dsp_load;
	proc_lstat->curr_dsp_freq =
	    hio_mgr->shared_mem->load_mon_info.curr_dsp_freq;
	proc_lstat->predicted_freq =
	    hio_mgr->shared_mem->load_mon_info.pred_dsp_freq;

	dev_dbg(bridge, "Curr Load = %d, Pred Load = %d, Curr Freq = %d, "
		"Pred Freq = %d\n", proc_lstat->curr_load,
		proc_lstat->predicted_load, proc_lstat->curr_dsp_freq,
		proc_lstat->predicted_freq);
	return 0;
}

#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
void print_dsp_debug_trace(struct io_mgr *hio_mgr)
{
	u32 ul_new_message_length = 0, ul_gpp_cur_pointer;

	while (true) {
		/* Get the DSP current pointer */
		ul_gpp_cur_pointer =
		    *(u32 *) (hio_mgr->trace_buffer_current);
		ul_gpp_cur_pointer =
		    hio_mgr->gpp_va + (ul_gpp_cur_pointer -
				       hio_mgr->dsp_va);
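		/*
		 * ul_gpp_cur_pointer now holds the DSP's write pointer rebased
		 * into the GPP's view of the trace buffer; comparing it with
		 * gpp_read_pointer below tells us how much is unread and
		 * whether the ring has wrapped.
		 */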

		/* No new debug messages available yet */
		if (ul_gpp_cur_pointer == hio_mgr->gpp_read_pointer) {
			break;
		} else if (ul_gpp_cur_pointer > hio_mgr->gpp_read_pointer) {
			/* Continuous data */
			ul_new_message_length =
			    ul_gpp_cur_pointer - hio_mgr->gpp_read_pointer;

			memcpy(hio_mgr->msg,
			       (char *)hio_mgr->gpp_read_pointer,
			       ul_new_message_length);
			hio_mgr->msg[ul_new_message_length] = '\0';
			/*
			 * Advance the GPP trace pointer to DSP current
			 * pointer.
			 */
			hio_mgr->gpp_read_pointer += ul_new_message_length;
			/* Print the trace messages */
			pr_info("DSPTrace: %s\n", hio_mgr->msg);
		} else if (ul_gpp_cur_pointer < hio_mgr->gpp_read_pointer) {
			/* Handle trace buffer wraparound */
			memcpy(hio_mgr->msg,
			       (char *)hio_mgr->gpp_read_pointer,
			       hio_mgr->trace_buffer_end -
			       hio_mgr->gpp_read_pointer);
			ul_new_message_length =
			    ul_gpp_cur_pointer - hio_mgr->trace_buffer_begin;
			memcpy(&hio_mgr->msg[hio_mgr->trace_buffer_end -
					     hio_mgr->gpp_read_pointer],
			       (char *)hio_mgr->trace_buffer_begin,
			       ul_new_message_length);
			hio_mgr->msg[hio_mgr->trace_buffer_end -
				     hio_mgr->gpp_read_pointer +
				     ul_new_message_length] = '\0';
			/*
			 * Advance the GPP trace pointer to DSP current
			 * pointer.
			 */
			hio_mgr->gpp_read_pointer =
			    hio_mgr->trace_buffer_begin +
			    ul_new_message_length;
			/* Print the trace messages */
			pr_info("DSPTrace: %s\n", hio_mgr->msg);
		}
	}
}
#endif

#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
/*
 * ======== print_dsp_trace_buffer ========
 *      Prints the trace buffer returned from the DSP (if DBG_Trace is enabled).
 * Parameters:
 *      hbridge_context:        Handle to the Bridge driver's device context.
 * Returns:
 *      0:              Success.
 *      -ENOMEM:        Unable to allocate memory.
 * Requires:
 *      hbridge_context must be valid. Checked in bridge_deh_notify.
 */
int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
{
	int status = 0;
	struct cod_manager *cod_mgr;
	u32 ul_trace_end;
	u32 ul_trace_begin;
	u32 trace_cur_pos;
	u32 ul_num_bytes = 0;
	u32 ul_num_words = 0;
	u32 ul_word_size = 2;
	char *psz_buf;
	char *str_beg;
	char *trace_end;
	char *buf_end;
	char *new_line;

	struct bridge_dev_context *pbridge_context = hbridge_context;
	struct bridge_drv_interface *intf_fxns;
	struct dev_object *dev_obj = (struct dev_object *)
	    pbridge_context->dev_obj;

	status = dev_get_cod_mgr(dev_obj, &cod_mgr);

	if (cod_mgr) {
		/* Look for SYS_PUTCBEG/SYS_PUTCEND */
		status =
		    cod_get_sym_value(cod_mgr, COD_TRACEBEG, &ul_trace_begin);
	} else {
		status = -EFAULT;
	}
	if (!status)
		status =
		    cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end);

	if (!status)
		/* trace_cur_pos will hold the address of a DSP pointer */
		status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS,
					   &trace_cur_pos);

	if (status)
		goto func_end;
1784
1785 ul_num_bytes = (ul_trace_end - ul_trace_begin);
1786
1787 ul_num_words = ul_num_bytes * ul_word_size;
1788 status = dev_get_intf_fxns(dev_obj, &intf_fxns);
1789
1790 if (status)
1791 goto func_end;
1792
1793 psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC);
1794 if (psz_buf != NULL) {
1795 /* Read trace buffer data */
1796 status = (*intf_fxns->brd_read)(pbridge_context,
1797 (u8 *)psz_buf, (u32)ul_trace_begin,
1798 ul_num_bytes, 0);
1799
1800 if (status)
1801 goto func_end;
1802
1803 /* Pack and do newline conversion */
1804 pr_debug("PrintDspTraceBuffer: "
1805 "before pack and unpack.\n");
1806 pr_debug("%s: DSP Trace Buffer Begin:\n"
1807 "=======================\n%s\n",
1808 __func__, psz_buf);
1809
1810 /* Read the value at the DSP address in trace_cur_pos. */
1811 status = (*intf_fxns->brd_read)(pbridge_context,
1812 (u8 *)&trace_cur_pos, (u32)trace_cur_pos,
1813 4, 0);
1814 if (status)
1815 goto func_end;
1816 /* Pack and do newline conversion */
1817 pr_info("DSP Trace Buffer Begin:\n"
1818 "=======================\n%s\n",
1819 psz_buf);
1820
1821
1822 /* convert to offset */
1823 trace_cur_pos = trace_cur_pos - ul_trace_begin;
1824
1825 if (ul_num_bytes) {
1826 /*
1827 * The buffer is not full, find the end of the
1828 * data -- buf_end will be >= pszBuf after
1829 * while.
1830 */
1831 buf_end = &psz_buf[ul_num_bytes+1];
1832 /* DSP print position */
1833 trace_end = &psz_buf[trace_cur_pos];
1834
1835 /*
1836 * Search buffer for a new_line and replace it
1837 * with '\0', then print as string.
1838 * Continue until end of buffer is reached.
1839 */
1840 str_beg = trace_end;
1841 ul_num_bytes = buf_end - str_beg;
1842
1843 while (str_beg < buf_end) {
1844 new_line = strnchr(str_beg, ul_num_bytes,
1845 '\n');
1846 if (new_line && new_line < buf_end) {
1847 *new_line = 0;
1848 pr_debug("%s\n", str_beg);
1849 str_beg = ++new_line;
1850 ul_num_bytes = buf_end - str_beg;
1851 } else {
1852 /*
1853 * Assume buffer empty if it contains
1854 * a zero
1855 */
1856 if (*str_beg != '\0') {
1857 str_beg[ul_num_bytes] = 0;
1858 pr_debug("%s\n", str_beg);
1859 }
1860 str_beg = buf_end;
1861 ul_num_bytes = 0;
1862 }
1863 }
			/*
			 * Search the buffer for a newline, replace it
			 * with '\0', then print as a string.
			 * Continue until the write cursor is reached.
			 */
			str_beg = psz_buf;
			ul_num_bytes = trace_end - str_beg;

			while (str_beg < trace_end) {
				new_line = strnchr(str_beg, ul_num_bytes, '\n');
				if (new_line != NULL && new_line < trace_end) {
					*new_line = 0;
					pr_debug("%s\n", str_beg);
					str_beg = ++new_line;
					ul_num_bytes = trace_end - str_beg;
				} else {
					/*
					 * Assume the buffer is empty if it
					 * contains a zero
					 */
					if (*str_beg != '\0') {
						str_beg[ul_num_bytes] = 0;
						pr_debug("%s\n", str_beg);
					}
					str_beg = trace_end;
					ul_num_bytes = 0;
				}
			}
		}
		pr_info("\n=======================\n"
			"DSP Trace Buffer End:\n");
		kfree(psz_buf);
	} else {
		status = -ENOMEM;
	}
func_end:
	if (status)
		dev_dbg(bridge, "%s Failed, status 0x%x\n", __func__, status);
	return status;
}

/**
 * dump_dsp_stack() - Dump the data on the DSP stack after an MMU fault.
 * @bridge_context:	Bridge driver's device context pointer.
 */
int dump_dsp_stack(struct bridge_dev_context *bridge_context)
{
	int status = 0;
	struct cod_manager *code_mgr;
	struct node_mgr *node_mgr;
	u32 trace_begin;
	char name[256];
	struct {
		u32 head[2];
		u32 size;
	} mmu_fault_dbg_info;
	u32 *buffer;
	u32 *buffer_beg;
	u32 *buffer_end;
	u32 exc_type;
	u32 dyn_ext_base;
	u32 i;
	u32 offset_output;
	u32 total_size;
	u32 poll_cnt;
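	/*
	 * Control registers of the C64x+ DSP, listed in the order the
	 * DSP-side fault handler is assumed to write them into the dump.
	 */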
	const char *dsp_regs[] = {"EFR", "IERR", "ITSR", "NTSR",
				  "IRP", "NRP", "AMR", "SSR",
				  "ILC", "RILC", "IER", "CSR"};
	const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"};
	struct bridge_drv_interface *intf_fxns;
	struct dev_object *dev_object = bridge_context->dev_obj;

	status = dev_get_cod_mgr(dev_object, &code_mgr);
	if (!code_mgr) {
		pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
		status = -EFAULT;
	}

	if (!status) {
		status = dev_get_node_manager(dev_object, &node_mgr);
		if (!node_mgr) {
			pr_debug("%s: Failed on dev_get_node_manager.\n",
				 __func__);
			status = -EFAULT;
		}
	}

	if (!status) {
		/* Look for SYS_PUTCBEG/SYS_PUTCEND: */
		status =
		    cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin);
		pr_debug("%s: trace_begin Value 0x%x\n",
			 __func__, trace_begin);
		if (status)
			pr_debug("%s: Failed on cod_get_sym_value.\n",
				 __func__);
	}
	if (!status)
		status = dev_get_intf_fxns(dev_object, &intf_fxns);
	/*
	 * Check for the "magic number" in the trace buffer. If it has
	 * yet to appear then poll the trace buffer to wait for it. Its
	 * appearance signals that the DSP has finished dumping its state.
	 */
	mmu_fault_dbg_info.head[0] = 0;
	mmu_fault_dbg_info.head[1] = 0;
	if (!status) {
		poll_cnt = 0;
		while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
			mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) &&
			poll_cnt < POLL_MAX) {

			/* Read DSP dump size from the DSP trace buffer... */
			status = (*intf_fxns->brd_read)(bridge_context,
				(u8 *)&mmu_fault_dbg_info, (u32)trace_begin,
				sizeof(mmu_fault_dbg_info), 0);

			if (status)
				break;

			poll_cnt++;
		}

		if (mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
		    mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) {
			status = -ETIME;
			pr_err("%s: No DSP MMU-Fault information available.\n",
			       __func__);
		}
	}

	if (!status) {
		total_size = mmu_fault_dbg_info.size;
		/* Limit the size in case the DSP went crazy */
		if (total_size > MAX_MMU_DBGBUFF)
			total_size = MAX_MMU_DBGBUFF;

		buffer = kzalloc(total_size, GFP_ATOMIC);
		if (!buffer) {
			status = -ENOMEM;
			pr_debug("%s: Failed to allocate stack dump buffer.\n",
				 __func__);
			goto func_end;
		}

		buffer_beg = buffer;
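		/*
		 * total_size is in bytes while the dump is parsed as 32-bit
		 * words, hence the division by 4 for the end pointer.
		 */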
		buffer_end = buffer + total_size / 4;

		/* Read bytes from the DSP trace buffer... */
		status = (*intf_fxns->brd_read)(bridge_context,
			(u8 *)buffer, (u32)trace_begin,
			total_size, 0);
		if (status) {
			pr_debug("%s: Failed to read trace buffer.\n",
				 __func__);
			kfree(buffer_beg);
			goto func_end;
		}

		pr_err("\nApproximate Crash Position:\n"
		       "--------------------------\n");

		exc_type = buffer[3];
		if (!exc_type)
			i = buffer[79];	/* IRP */
		else
			i = buffer[80];	/* NRP */
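		/*
		 * i now holds the return pointer captured by the fault
		 * handler (IRP or NRP), which approximates the DSP program
		 * address at the time of the crash.
		 */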

		status =
		    cod_get_sym_value(code_mgr, DYNEXTBASE, &dyn_ext_base);
		if (status) {
			status = -EFAULT;
			kfree(buffer_beg);
			goto func_end;
		}

		if ((i > dyn_ext_base) && (node_find_addr(node_mgr, i,
			0x1000, &offset_output, name) == 0))
			pr_err("0x%-8x [\"%s\" + 0x%x]\n", i, name,
			       i - offset_output);
		else
			pr_err("0x%-8x [Unable to match to a symbol.]\n", i);

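		/*
		 * Skip the four-word dump header (two magic words, the dump
		 * size and the exception type) to reach the execution
		 * context record.
		 */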
		buffer += 4;

		pr_err("\nExecution Info:\n"
		       "---------------\n");

		if (*buffer < ARRAY_SIZE(exec_ctxt)) {
			pr_err("Execution context \t%s\n",
			       exec_ctxt[*buffer++]);
		} else {
			pr_err("Execution context corrupt\n");
			kfree(buffer_beg);
			return -EFAULT;
		}
		pr_err("Task Handle\t\t0x%x\n", *buffer++);
		pr_err("Stack Pointer\t\t0x%x\n", *buffer++);
		pr_err("Stack Top\t\t0x%x\n", *buffer++);
		pr_err("Stack Bottom\t\t0x%x\n", *buffer++);
		pr_err("Stack Size\t\t0x%x\n", *buffer++);
		pr_err("Stack Size In Use\t0x%x\n", *buffer++);

		pr_err("\nCPU Registers\n"
		       "---------------\n");

		for (i = 0; i < 32; i++) {
			if (i == 4 || i == 6 || i == 8)
				pr_err("A%d 0x%-8x [Function Argument %d]\n",
				       i, *buffer++, i - 3);
			else if (i == 15)
				pr_err("A15 0x%-8x [Frame Pointer]\n",
				       *buffer++);
			else
				pr_err("A%d 0x%x\n", i, *buffer++);
		}

		pr_err("\nB0 0x%x\n", *buffer++);
		pr_err("B1 0x%x\n", *buffer++);
		pr_err("B2 0x%x\n", *buffer++);

		if ((*buffer > dyn_ext_base) && (node_find_addr(node_mgr,
			*buffer, 0x1000, &offset_output, name) == 0))
			pr_err("B3 0x%-8x [Function Return Pointer:"
			       " \"%s\" + 0x%x]\n", *buffer, name,
			       *buffer - offset_output);
		else
			pr_err("B3 0x%-8x [Function Return Pointer:"
			       " Unable to match to a symbol.]\n", *buffer);

		buffer++;

		for (i = 4; i < 32; i++) {
			if (i == 4 || i == 6 || i == 8)
				pr_err("B%d 0x%-8x [Function Argument %d]\n",
				       i, *buffer++, i - 2);
			else if (i == 14)
				pr_err("B14 0x%-8x [Data Page Pointer]\n",
				       *buffer++);
			else
				pr_err("B%d 0x%x\n", i, *buffer++);
		}

		pr_err("\n");

		for (i = 0; i < ARRAY_SIZE(dsp_regs); i++)
			pr_err("%s 0x%x\n", dsp_regs[i], *buffer++);

		pr_err("\nStack:\n"
		       "------\n");

		for (i = 0; buffer < buffer_end; i++, buffer++) {
			if ((*buffer > dyn_ext_base) &&
			    (node_find_addr(node_mgr, *buffer, 0x600,
					    &offset_output, name) == 0))
				pr_err("[%d] 0x%-8x [\"%s\" + 0x%x]\n",
				       i, *buffer, name,
				       *buffer - offset_output);
			else
				pr_err("[%d] 0x%x\n", i, *buffer);
		}
		kfree(buffer_beg);
	}
func_end:
	return status;
}

/**
 * dump_dl_modules() - Dump the _DLModules list of modules loaded on the DSP.
 * @bridge_context:	Bridge driver's device context pointer.
 */
void dump_dl_modules(struct bridge_dev_context *bridge_context)
{
	struct cod_manager *code_mgr;
	struct bridge_drv_interface *intf_fxns;
	struct bridge_dev_context *bridge_ctxt = bridge_context;
	struct dev_object *dev_object = bridge_ctxt->dev_obj;
	struct modules_header modules_hdr;
	struct dll_module *module_struct = NULL;
	u32 module_dsp_addr;
	u32 module_size;
	u32 module_struct_size = 0;
	u32 sect_ndx;
	char *sect_str;
	int status = 0;

	status = dev_get_intf_fxns(dev_object, &intf_fxns);
	if (status) {
		pr_debug("%s: Failed on dev_get_intf_fxns.\n", __func__);
		goto func_end;
	}

	status = dev_get_cod_mgr(dev_object, &code_mgr);
	if (!code_mgr) {
		pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
		status = -EFAULT;
		goto func_end;
	}

	/* Lookup the address of the modules_header structure */
	status = cod_get_sym_value(code_mgr, "_DLModules", &module_dsp_addr);
	if (status) {
		pr_debug("%s: Failed on cod_get_sym_value for _DLModules.\n",
			 __func__);
		goto func_end;
	}

	pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr);

	/* Copy the modules_header structure from DSP memory. */
	status = (*intf_fxns->brd_read)(bridge_context, (u8 *)&modules_hdr,
		(u32)module_dsp_addr, sizeof(modules_hdr), 0);

	if (status) {
		pr_debug("%s: Failed to read modules header.\n", __func__);
		goto func_end;
	}

	module_dsp_addr = modules_hdr.first_module;
	module_size = modules_hdr.first_module_size;
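
	/*
	 * The module list is walked as (address, size) pairs read from DSP
	 * memory; a reported size of zero terminates the list.
	 */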
	pr_debug("%s: dll_module_header 0x%x %d\n", __func__, module_dsp_addr,
		 module_size);

	pr_err("\nDynamically Loaded Modules:\n"
	       "---------------------------\n");

	/* For each dll_module structure in the list... */
	while (module_size) {
		/*
		 * Allocate/re-allocate memory to hold the dll_module
		 * structure. The memory is re-allocated only if the existing
		 * allocation is too small.
		 */
		if (module_size > module_struct_size) {
			kfree(module_struct);
			module_struct = kzalloc(module_size + 128, GFP_ATOMIC);
			if (!module_struct)
				goto func_end;
			module_struct_size = module_size + 128;
			pr_debug("%s: allocated module struct %p %d\n",
				 __func__, module_struct, module_struct_size);
		}
		/* Copy the dll_module structure from DSP memory */
		status = (*intf_fxns->brd_read)(bridge_context,
			(u8 *)module_struct, module_dsp_addr, module_size, 0);

		if (status) {
			pr_debug("%s: Failed to read dll_module struct for 0x%x.\n",
				 __func__, module_dsp_addr);
			break;
		}

		/* Update info regarding the _next_ module in the list. */
		module_dsp_addr = module_struct->next_module;
		module_size = module_struct->next_module_size;

		pr_debug("%s: next module 0x%x %d, this module num sects %d\n",
			 __func__, module_dsp_addr, module_size,
			 module_struct->num_sects);

		/*
		 * The section name strings start immediately following
		 * the array of dll_sect structures.
		 */
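		/*
		 * The first string is presumably the module name; it is
		 * printed as a header before the per-section names below.
		 */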
		sect_str = (char *)&module_struct->
			sects[module_struct->num_sects];
		pr_err("%s\n", sect_str);

		/*
		 * Advance to the first section name string.
		 * Each string follows the one before.
		 */
		sect_str += strlen(sect_str) + 1;

		/* Access each dll_sect structure and its name string. */
		for (sect_ndx = 0;
		     sect_ndx < module_struct->num_sects; sect_ndx++) {
			pr_err(" Section: 0x%x ",
			       module_struct->sects[sect_ndx].sect_load_adr);

			if (((u32)sect_str - (u32)module_struct) <
			    module_struct_size) {
				pr_err("%s\n", sect_str);
				/* Each string follows the one before. */
				sect_str += strlen(sect_str) + 1;
			} else {
				pr_err("<string error>\n");
				pr_debug("%s: section name string address "
					 "is invalid %p\n", __func__,
					 sect_str);
			}
		}
	}
func_end:
	kfree(module_struct);
}
#endif