1 /*
2 * nldr.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * DSP/BIOS Bridge dynamic + overlay Node loader.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19 #include <linux/types.h>
20
21 #include <dspbridge/host_os.h>
22
23 #include <dspbridge/dbdefs.h>
24
25 #include <dspbridge/dbc.h>
26
27 /* Platform manager */
28 #include <dspbridge/cod.h>
29 #include <dspbridge/dev.h>
30
31 /* Resource manager */
32 #include <dspbridge/dbll.h>
33 #include <dspbridge/dbdcd.h>
34 #include <dspbridge/rmm.h>
35 #include <dspbridge/uuidutil.h>
36
37 #include <dspbridge/nldr.h>
38 #include <linux/lcm.h>
39
40 /* Name of section containing dynamic load mem */
41 #define DYNMEMSECT ".dspbridge_mem"
42
43 /* Name of section containing dependent library information */
44 #define DEPLIBSECT ".dspbridge_deplibs"
45
46 /* Max depth of recursion for loading node's dependent libraries */
47 #define MAXDEPTH 5
48
49 /* Max number of persistent libraries kept by a node */
50 #define MAXLIBS 5
51
52 /*
53 * Defines for extracting packed dynamic load memory requirements from two
54 * masks.
55 * These defines must match node.cdb and dynm.cdb
56 * Format of data/code mask is:
57 * uuuuuuuu|fueeeeee|fudddddd|fucccccc|
58 * where
59 * u = unused
60 * cccccc = preferred/required dynamic mem segid for create phase data/code
61 * dddddd = preferred/required dynamic mem segid for delete phase data/code
62 * eeeeee = preferred/req. dynamic mem segid for execute phase data/code
63 * f = flag indicating if memory is preferred or required:
64 * f = 1 if required, f = 0 if preferred.
65 *
66 * The 6 bits of the segid are interpreted as follows:
67 *
68 * If the 6th bit (bit 5) is not set, then this specifies a memory segment
69 * between 0 and 31 (a maximum of 32 dynamic loading memory segments).
70 * If the 6th bit (bit 5) is set, segid has the following interpretation:
71 * segid = 32 - Any internal memory segment can be used.
72 * segid = 33 - Any external memory segment can be used.
73 * segid = 63 - Any memory segment can be used (in this case the
74 * required/preferred flag is irrelevant).
75 *
76 */
77 /* Maximum allowed dynamic loading memory segments */
78 #define MAXMEMSEGS 32
79
80 #define MAXSEGID 3 /* Largest possible (real) segid */
81 #define MEMINTERNALID 32 /* Segid meaning use internal mem */
82 #define MEMEXTERNALID 33 /* Segid meaning use external mem */
83 #define NULLID 63 /* Segid meaning no memory req/pref */
84 #define FLAGBIT 7 /* 7th bit is pref./req. flag */
85 #define SEGMASK 0x3f /* Bits 0 - 5 */
86
87 #define CREATEBIT 0 /* Create segid starts at bit 0 */
88 #define DELETEBIT 8 /* Delete segid starts at bit 8 */
89 #define EXECUTEBIT 16 /* Execute segid starts at bit 16 */
90
91 /*
92 * Masks that define memory type. Must match defines in dynm.cdb.
93 */
94 #define DYNM_CODE 0x2
95 #define DYNM_DATA 0x4
96 #define DYNM_CODEDATA (DYNM_CODE | DYNM_DATA)
97 #define DYNM_INTERNAL 0x8
98 #define DYNM_EXTERNAL 0x10
99
100 /*
101 * Defines for packing memory requirement/preference flags for code and
102 * data of each of the node's phases into one mask.
103 * The bit is set if the segid is required for loading code/data of the
104 * given phase. The bit is not set, if the segid is preferred only.
105 *
 * These defines are also used as indices into a segid array for the node.
107 * eg node's segid[CREATEDATAFLAGBIT] is the memory segment id that the
108 * create phase data is required or preferred to be loaded into.
109 */
110 #define CREATEDATAFLAGBIT 0
111 #define CREATECODEFLAGBIT 1
112 #define EXECUTEDATAFLAGBIT 2
113 #define EXECUTECODEFLAGBIT 3
114 #define DELETEDATAFLAGBIT 4
115 #define DELETECODEFLAGBIT 5
116 #define MAXFLAGS 6
117
118 /*
119 * These names may be embedded in overlay sections to identify which
120 * node phase the section should be overlayed.
121 */
122 #define PCREATE "create"
123 #define PDELETE "delete"
124 #define PEXECUTE "execute"
125
is_equal_uuid(struct dsp_uuid * uuid1,struct dsp_uuid * uuid2)126 static inline bool is_equal_uuid(struct dsp_uuid *uuid1,
127 struct dsp_uuid *uuid2)
128 {
129 return !memcmp(uuid1, uuid2, sizeof(struct dsp_uuid));
130 }
131
132 /*
133 * ======== mem_seg_info ========
134 * Format of dynamic loading memory segment info in coff file.
135 * Must match dynm.h55.
136 */
struct mem_seg_info {
 u32 segid; /* Dynamic loading memory segment number */
 u32 base; /* Segment base address */
 u32 len; /* Segment length */
 u32 type; /* Mask of DYNM_CODE, DYNM_INTERNAL, etc. */
};
143
144 /*
145 * ======== lib_node ========
146 * For maintaining a tree of library dependencies.
147 */
struct lib_node {
 struct dbll_library_obj *lib; /* The library */
 u16 dep_libs; /* Number of dependent libraries */
 /* Array of dep_libs entries, one subtree per dependent library */
 struct lib_node *dep_libs_tree; /* Dependent libraries of lib */
};
153
154 /*
155 * ======== ovly_sect ========
156 * Information needed to overlay a section.
157 */
struct ovly_sect {
 struct ovly_sect *next_sect; /* Next in singly-linked section list */
 u32 sect_load_addr; /* Load address of section */
 u32 sect_run_addr; /* Run address of section */
 u32 size; /* Size of section */
 u16 page; /* DBL_CODE, DBL_DATA */
};
165
166 /*
167 * ======== ovly_node ========
168 * For maintaining a list of overlay nodes, with sections that need to be
169 * overlayed for each of the nodes phases.
170 */
struct ovly_node {
 struct dsp_uuid uuid; /* Node's UUID */
 char *node_name; /* Allocated copy of the node's name */
 /* Per-phase lists of sections to overlay (see add_ovly_info()) */
 struct ovly_sect *create_sects_list;
 struct ovly_sect *delete_sects_list;
 struct ovly_sect *execute_sects_list;
 struct ovly_sect *other_sects_list;
 /* Number of sections in each of the lists above */
 u16 create_sects;
 u16 delete_sects;
 u16 execute_sects;
 u16 other_sects;
 /* Reference counts for each phase's overlay sections */
 u16 create_ref;
 u16 delete_ref;
 u16 execute_ref;
 u16 other_ref;
};
187
188 /*
189 * ======== nldr_object ========
190 * Overlay loader object.
191 */
struct nldr_object {
 struct dev_object *dev_obj; /* Device object */
 struct dcd_manager *dcd_mgr; /* Proc/Node data manager */
 struct dbll_tar_obj *dbll; /* The DBL loader */
 struct dbll_library_obj *base_lib; /* Base image library */
 struct rmm_target_obj *rmm; /* Remote memory manager for DSP */
 struct dbll_fxns ldr_fxns; /* Loader function table */
 struct dbll_attrs ldr_attrs; /* attrs to pass to loader functions */
 nldr_ovlyfxn ovly_fxn; /* "write" for overlay nodes */
 nldr_writefxn write_fxn; /* "write" for dynamic nodes */
 struct ovly_node *ovly_table; /* Table of overlay nodes */
 u16 ovly_nodes; /* Number of overlay nodes in base */
 u16 ovly_nid; /* Index for tracking overlay nodes */
 u16 dload_segs; /* Number of dynamic load mem segs */
 u32 *seg_table; /* memtypes of dynamic memory segs
 * indexed by segid
 */
 u16 dsp_mau_size; /* Size of DSP MAU */
 u16 dsp_word_size; /* Size of DSP word */
};
212
213 /*
214 * ======== nldr_nodeobject ========
215 * Dynamic node object. This object is created when a node is allocated.
216 */
struct nldr_nodeobject {
 struct nldr_object *nldr_obj; /* Dynamic loader handle */
 void *priv_ref; /* Handle to pass to dbl_write_fxn */
 struct dsp_uuid uuid; /* Node's UUID */
 bool dynamic; /* Dynamically loaded node? */
 bool overlay; /* Overlay node? */
 bool *phase_split; /* Multiple phase libraries? */
 struct lib_node root; /* Library containing node phase */
 struct lib_node create_lib; /* Library with create phase lib */
 struct lib_node execute_lib; /* Library with execute phase lib */
 struct lib_node delete_lib; /* Library with delete phase lib */
 /* libs remain loaded until Delete */
 struct lib_node pers_lib_table[MAXLIBS];
 s32 pers_libs; /* Number of persistent libraries */
 /* Path in lib dependency tree (one slot per recursion depth) */
 struct dbll_library_obj *lib_path[MAXDEPTH + 1];
 enum nldr_phase phase; /* Node phase currently being loaded */

 /*
 * Dynamic loading memory segments for data and code of each phase,
 * indexed by the *FLAGBIT defines (CREATEDATAFLAGBIT, etc.).
 */
 u16 seg_id[MAXFLAGS];

 /*
 * Mask indicating whether each mem segment specified in seg_id[]
 * is preferred or required.
 * For example
 * if (code_data_flag_mask & (1 << EXECUTEDATAFLAGBIT)) != 0,
 * then it is required to load execute phase data into the memory
 * specified by seg_id[EXECUTEDATAFLAGBIT].
 */
 u32 code_data_flag_mask;
};
250
251 /* Dynamic loader function table */
/* Dynamic loader function table. Slot order must match struct dbll_fxns;
 * the casts assume the dbll_* signatures are compatible with the slot
 * types. Copied into each nldr_object in nldr_create(). */
static struct dbll_fxns ldr_fxns = {
 (dbll_close_fxn) dbll_close,
 (dbll_create_fxn) dbll_create,
 (dbll_delete_fxn) dbll_delete,
 (dbll_exit_fxn) dbll_exit,
 (dbll_get_attrs_fxn) dbll_get_attrs,
 (dbll_get_addr_fxn) dbll_get_addr,
 (dbll_get_c_addr_fxn) dbll_get_c_addr,
 (dbll_get_sect_fxn) dbll_get_sect,
 (dbll_init_fxn) dbll_init,
 (dbll_load_fxn) dbll_load,
 (dbll_open_fxn) dbll_open,
 (dbll_read_sect_fxn) dbll_read_sect,
 (dbll_unload_fxn) dbll_unload,
};
267
/* Module reference count, maintained by nldr_init()/nldr_exit() */
static u32 refs; /* module reference count */

/* Forward declarations of file-local helpers */
static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
 u32 addr, u32 bytes);
static int add_ovly_node(struct dsp_uuid *uuid_obj,
 enum dsp_dcdobjtype obj_type, void *handle);
static int add_ovly_sect(struct nldr_object *nldr_obj,
 struct ovly_sect **lst,
 struct dbll_sect_info *sect_inf,
 bool *exists, u32 addr, u32 bytes);
static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
 s32 mtype);
static void free_sects(struct nldr_object *nldr_obj,
 struct ovly_sect *phase_sects, u16 alloc_num);
static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
 char *sym_name, struct dbll_sym_val **sym);
static int load_lib(struct nldr_nodeobject *nldr_node_obj,
 struct lib_node *root, struct dsp_uuid uuid,
 bool root_prstnt,
 struct dbll_library_obj **lib_path,
 enum nldr_phase phase, u16 depth);
static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
 enum nldr_phase phase);
static int remote_alloc(void **ref, u16 mem_sect, u32 size,
 u32 align, u32 *dsp_address,
 s32 segmnt_id,
 s32 req, bool reserve);
static int remote_free(void **ref, u16 space, u32 dsp_address, u32 size,
 bool reserve);

static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
 struct lib_node *root);
static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
 enum nldr_phase phase);
static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
 struct dbll_library_obj *lib);
304
305 /*
306 * ======== nldr_allocate ========
307 */
int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
		  const struct dcd_nodeprops *node_props,
		  struct nldr_nodeobject **nldr_nodeobj,
		  bool *pf_phase_split)
{
	struct nldr_nodeobject *nldr_node_obj = NULL;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(node_props != NULL);
	DBC_REQUIRE(nldr_nodeobj != NULL);
	DBC_REQUIRE(nldr_obj);

	/* Initialize handle in case of failure */
	*nldr_nodeobj = NULL;
	/* Allocate node object */
	nldr_node_obj = kzalloc(sizeof(struct nldr_nodeobject), GFP_KERNEL);

	if (nldr_node_obj == NULL) {
		status = -ENOMEM;
	} else {
		nldr_node_obj->phase_split = pf_phase_split;
		nldr_node_obj->pers_libs = 0;
		nldr_node_obj->nldr_obj = nldr_obj;
		nldr_node_obj->priv_ref = priv_ref;
		/* Save node's UUID. */
		nldr_node_obj->uuid = node_props->ndb_props.ui_node_id;
		/*
		 * Determine if node is a dynamically loaded node from
		 * ndb_props.
		 */
		if (node_props->load_type == NLDR_DYNAMICLOAD) {
			/* Dynamic node */
			nldr_node_obj->dynamic = true;
			/*
			 * Extract memory requirements from the packed
			 * ndb_props masks (see the format comment near the
			 * SEGMASK/FLAGBIT defines above).
			 */
			/* Create phase */
			nldr_node_obj->seg_id[CREATEDATAFLAGBIT] = (u16)
			    (node_props->data_mem_seg_mask >> CREATEBIT) &
			    SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->data_mem_seg_mask >>
			      (CREATEBIT + FLAGBIT)) & 1) << CREATEDATAFLAGBIT;
			nldr_node_obj->seg_id[CREATECODEFLAGBIT] = (u16)
			    (node_props->code_mem_seg_mask >>
			     CREATEBIT) & SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->code_mem_seg_mask >>
			      (CREATEBIT + FLAGBIT)) & 1) << CREATECODEFLAGBIT;
			/* Execute phase */
			nldr_node_obj->seg_id[EXECUTEDATAFLAGBIT] = (u16)
			    (node_props->data_mem_seg_mask >>
			     EXECUTEBIT) & SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->data_mem_seg_mask >>
			      (EXECUTEBIT + FLAGBIT)) & 1) <<
			    EXECUTEDATAFLAGBIT;
			nldr_node_obj->seg_id[EXECUTECODEFLAGBIT] = (u16)
			    (node_props->code_mem_seg_mask >>
			     EXECUTEBIT) & SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->code_mem_seg_mask >>
			      (EXECUTEBIT + FLAGBIT)) & 1) <<
			    EXECUTECODEFLAGBIT;
			/* Delete phase */
			nldr_node_obj->seg_id[DELETEDATAFLAGBIT] = (u16)
			    (node_props->data_mem_seg_mask >> DELETEBIT) &
			    SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->data_mem_seg_mask >>
			      (DELETEBIT + FLAGBIT)) & 1) << DELETEDATAFLAGBIT;
			nldr_node_obj->seg_id[DELETECODEFLAGBIT] = (u16)
			    (node_props->code_mem_seg_mask >>
			     DELETEBIT) & SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->code_mem_seg_mask >>
			      (DELETEBIT + FLAGBIT)) & 1) << DELETECODEFLAGBIT;
		} else {
			/* Non-dynamically loaded nodes are part of the
			 * base image */
			nldr_node_obj->root.lib = nldr_obj->base_lib;
			/* Check for overlay node */
			if (node_props->load_type == NLDR_OVLYLOAD)
				nldr_node_obj->overlay = true;

		}
		*nldr_nodeobj = (struct nldr_nodeobject *)nldr_node_obj;
	}
	/* Cleanup on failure. NOTE(review): status can currently only be
	 * non-zero when the allocation failed (nldr_node_obj == NULL), so
	 * this kfree() is defensive for future error paths. */
	if (status && nldr_node_obj)
		kfree(nldr_node_obj);

	DBC_ENSURE((!status && *nldr_nodeobj)
		   || (status && *nldr_nodeobj == NULL));
	return status;
}
405
406 /*
407 * ======== nldr_create ========
408 */
nldr_create(struct nldr_object ** nldr,struct dev_object * hdev_obj,const struct nldr_attrs * pattrs)409 int nldr_create(struct nldr_object **nldr,
410 struct dev_object *hdev_obj,
411 const struct nldr_attrs *pattrs)
412 {
413 struct cod_manager *cod_mgr; /* COD manager */
414 char *psz_coff_buf = NULL;
415 char sz_zl_file[COD_MAXPATHLENGTH];
416 struct nldr_object *nldr_obj = NULL;
417 struct dbll_attrs save_attrs;
418 struct dbll_attrs new_attrs;
419 dbll_flags flags;
420 u32 ul_entry;
421 u16 dload_segs = 0;
422 struct mem_seg_info *mem_info_obj;
423 u32 ul_len = 0;
424 u32 ul_addr;
425 struct rmm_segment *rmm_segs = NULL;
426 u16 i;
427 int status = 0;
428 DBC_REQUIRE(refs > 0);
429 DBC_REQUIRE(nldr != NULL);
430 DBC_REQUIRE(hdev_obj != NULL);
431 DBC_REQUIRE(pattrs != NULL);
432 DBC_REQUIRE(pattrs->ovly != NULL);
433 DBC_REQUIRE(pattrs->write != NULL);
434
435 /* Allocate dynamic loader object */
436 nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL);
437 if (nldr_obj) {
438 nldr_obj->dev_obj = hdev_obj;
439 /* warning, lazy status checking alert! */
440 dev_get_cod_mgr(hdev_obj, &cod_mgr);
441 if (cod_mgr) {
442 status = cod_get_loader(cod_mgr, &nldr_obj->dbll);
443 DBC_ASSERT(!status);
444 status = cod_get_base_lib(cod_mgr, &nldr_obj->base_lib);
445 DBC_ASSERT(!status);
446 status =
447 cod_get_base_name(cod_mgr, sz_zl_file,
448 COD_MAXPATHLENGTH);
449 DBC_ASSERT(!status);
450 }
451 status = 0;
452 /* end lazy status checking */
453 nldr_obj->dsp_mau_size = pattrs->dsp_mau_size;
454 nldr_obj->dsp_word_size = pattrs->dsp_word_size;
455 nldr_obj->ldr_fxns = ldr_fxns;
456 if (!(nldr_obj->ldr_fxns.init_fxn()))
457 status = -ENOMEM;
458
459 } else {
460 status = -ENOMEM;
461 }
462 /* Create the DCD Manager */
463 if (!status)
464 status = dcd_create_manager(NULL, &nldr_obj->dcd_mgr);
465
466 /* Get dynamic loading memory sections from base lib */
467 if (!status) {
468 status =
469 nldr_obj->ldr_fxns.get_sect_fxn(nldr_obj->base_lib,
470 DYNMEMSECT, &ul_addr,
471 &ul_len);
472 if (!status) {
473 psz_coff_buf =
474 kzalloc(ul_len * nldr_obj->dsp_mau_size,
475 GFP_KERNEL);
476 if (!psz_coff_buf)
477 status = -ENOMEM;
478 } else {
479 /* Ok to not have dynamic loading memory */
480 status = 0;
481 ul_len = 0;
482 dev_dbg(bridge, "%s: failed - no dynamic loading mem "
483 "segments: 0x%x\n", __func__, status);
484 }
485 }
486 if (!status && ul_len > 0) {
487 /* Read section containing dynamic load mem segments */
488 status =
489 nldr_obj->ldr_fxns.read_sect_fxn(nldr_obj->base_lib,
490 DYNMEMSECT, psz_coff_buf,
491 ul_len);
492 }
493 if (!status && ul_len > 0) {
494 /* Parse memory segment data */
495 dload_segs = (u16) (*((u32 *) psz_coff_buf));
496 if (dload_segs > MAXMEMSEGS)
497 status = -EBADF;
498 }
499 /* Parse dynamic load memory segments */
500 if (!status && dload_segs > 0) {
501 rmm_segs = kzalloc(sizeof(struct rmm_segment) * dload_segs,
502 GFP_KERNEL);
503 nldr_obj->seg_table =
504 kzalloc(sizeof(u32) * dload_segs, GFP_KERNEL);
505 if (rmm_segs == NULL || nldr_obj->seg_table == NULL) {
506 status = -ENOMEM;
507 } else {
508 nldr_obj->dload_segs = dload_segs;
509 mem_info_obj = (struct mem_seg_info *)(psz_coff_buf +
510 sizeof(u32));
511 for (i = 0; i < dload_segs; i++) {
512 rmm_segs[i].base = (mem_info_obj + i)->base;
513 rmm_segs[i].length = (mem_info_obj + i)->len;
514 rmm_segs[i].space = 0;
515 nldr_obj->seg_table[i] =
516 (mem_info_obj + i)->type;
517 dev_dbg(bridge,
518 "(proc) DLL MEMSEGMENT: %d, "
519 "Base: 0x%x, Length: 0x%x\n", i,
520 rmm_segs[i].base, rmm_segs[i].length);
521 }
522 }
523 }
524 /* Create Remote memory manager */
525 if (!status)
526 status = rmm_create(&nldr_obj->rmm, rmm_segs, dload_segs);
527
528 if (!status) {
529 /* set the alloc, free, write functions for loader */
530 nldr_obj->ldr_fxns.get_attrs_fxn(nldr_obj->dbll, &save_attrs);
531 new_attrs = save_attrs;
532 new_attrs.alloc = (dbll_alloc_fxn) remote_alloc;
533 new_attrs.free = (dbll_free_fxn) remote_free;
534 new_attrs.sym_lookup = (dbll_sym_lookup) get_symbol_value;
535 new_attrs.sym_handle = nldr_obj;
536 new_attrs.write = (dbll_write_fxn) pattrs->write;
537 nldr_obj->ovly_fxn = pattrs->ovly;
538 nldr_obj->write_fxn = pattrs->write;
539 nldr_obj->ldr_attrs = new_attrs;
540 }
541 kfree(rmm_segs);
542
543 kfree(psz_coff_buf);
544
545 /* Get overlay nodes */
546 if (!status) {
547 status =
548 cod_get_base_name(cod_mgr, sz_zl_file, COD_MAXPATHLENGTH);
549 /* lazy check */
550 DBC_ASSERT(!status);
551 /* First count number of overlay nodes */
552 status =
553 dcd_get_objects(nldr_obj->dcd_mgr, sz_zl_file,
554 add_ovly_node, (void *)nldr_obj);
555 /* Now build table of overlay nodes */
556 if (!status && nldr_obj->ovly_nodes > 0) {
557 /* Allocate table for overlay nodes */
558 nldr_obj->ovly_table =
559 kzalloc(sizeof(struct ovly_node) *
560 nldr_obj->ovly_nodes, GFP_KERNEL);
561 /* Put overlay nodes in the table */
562 nldr_obj->ovly_nid = 0;
563 status = dcd_get_objects(nldr_obj->dcd_mgr, sz_zl_file,
564 add_ovly_node,
565 (void *)nldr_obj);
566 }
567 }
568 /* Do a fake reload of the base image to get overlay section info */
569 if (!status && nldr_obj->ovly_nodes > 0) {
570 save_attrs.write = fake_ovly_write;
571 save_attrs.log_write = add_ovly_info;
572 save_attrs.log_write_handle = nldr_obj;
573 flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB;
574 status = nldr_obj->ldr_fxns.load_fxn(nldr_obj->base_lib, flags,
575 &save_attrs, &ul_entry);
576 }
577 if (!status) {
578 *nldr = (struct nldr_object *)nldr_obj;
579 } else {
580 if (nldr_obj)
581 nldr_delete((struct nldr_object *)nldr_obj);
582
583 *nldr = NULL;
584 }
585 /* FIXME:Temp. Fix. Must be removed */
586 DBC_ENSURE((!status && *nldr) || (status && *nldr == NULL));
587 return status;
588 }
589
590 /*
591 * ======== nldr_delete ========
592 */
nldr_delete(struct nldr_object * nldr_obj)593 void nldr_delete(struct nldr_object *nldr_obj)
594 {
595 struct ovly_sect *ovly_section;
596 struct ovly_sect *next;
597 u16 i;
598 DBC_REQUIRE(refs > 0);
599 DBC_REQUIRE(nldr_obj);
600
601 nldr_obj->ldr_fxns.exit_fxn();
602 if (nldr_obj->rmm)
603 rmm_delete(nldr_obj->rmm);
604
605 kfree(nldr_obj->seg_table);
606
607 if (nldr_obj->dcd_mgr)
608 dcd_destroy_manager(nldr_obj->dcd_mgr);
609
610 /* Free overlay node information */
611 if (nldr_obj->ovly_table) {
612 for (i = 0; i < nldr_obj->ovly_nodes; i++) {
613 ovly_section =
614 nldr_obj->ovly_table[i].create_sects_list;
615 while (ovly_section) {
616 next = ovly_section->next_sect;
617 kfree(ovly_section);
618 ovly_section = next;
619 }
620 ovly_section =
621 nldr_obj->ovly_table[i].delete_sects_list;
622 while (ovly_section) {
623 next = ovly_section->next_sect;
624 kfree(ovly_section);
625 ovly_section = next;
626 }
627 ovly_section =
628 nldr_obj->ovly_table[i].execute_sects_list;
629 while (ovly_section) {
630 next = ovly_section->next_sect;
631 kfree(ovly_section);
632 ovly_section = next;
633 }
634 ovly_section = nldr_obj->ovly_table[i].other_sects_list;
635 while (ovly_section) {
636 next = ovly_section->next_sect;
637 kfree(ovly_section);
638 ovly_section = next;
639 }
640 }
641 kfree(nldr_obj->ovly_table);
642 }
643 kfree(nldr_obj);
644 }
645
646 /*
647 * ======== nldr_exit ========
648 * Discontinue usage of NLDR module.
649 */
nldr_exit(void)650 void nldr_exit(void)
651 {
652 DBC_REQUIRE(refs > 0);
653
654 refs--;
655
656 if (refs == 0)
657 rmm_exit();
658
659 DBC_ENSURE(refs >= 0);
660 }
661
662 /*
663 * ======== nldr_get_fxn_addr ========
664 */
int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
		      char *str_fxn, u32 *addr)
{
	struct dbll_sym_val *dbll_sym;
	struct nldr_object *nldr_obj;
	int status = 0;
	/* true once the symbol has been resolved by any lookup below */
	bool status1 = false;
	s32 i = 0;
	struct lib_node root = { NULL, 0, NULL };
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(nldr_node_obj);
	DBC_REQUIRE(addr != NULL);
	DBC_REQUIRE(str_fxn != NULL);

	nldr_obj = nldr_node_obj->nldr_obj;
	/* Called from node_create(), node_delete(), or node_run(). */
	if (nldr_node_obj->dynamic && *nldr_node_obj->phase_split) {
		/* Pick the phase-specific library for split dynamic nodes */
		switch (nldr_node_obj->phase) {
		case NLDR_CREATE:
			root = nldr_node_obj->create_lib;
			break;
		case NLDR_EXECUTE:
			root = nldr_node_obj->execute_lib;
			break;
		case NLDR_DELETE:
			root = nldr_node_obj->delete_lib;
			break;
		default:
			DBC_ASSERT(false);
			break;
		}
	} else {
		/* for Overlay nodes or non-split Dynamic nodes */
		root = nldr_node_obj->root;
	}
	/* Try the C symbol first, then the raw symbol, in the root lib */
	status1 =
	    nldr_obj->ldr_fxns.get_c_addr_fxn(root.lib, str_fxn, &dbll_sym);
	if (!status1)
		status1 =
		    nldr_obj->ldr_fxns.get_addr_fxn(root.lib, str_fxn,
						    &dbll_sym);

	/* If symbol not found, check dependent libraries */
	if (!status1) {
		for (i = 0; i < root.dep_libs; i++) {
			status1 =
			    nldr_obj->ldr_fxns.get_addr_fxn(root.dep_libs_tree
							    [i].lib, str_fxn,
							    &dbll_sym);
			if (!status1) {
				status1 =
				    nldr_obj->ldr_fxns.
				    get_c_addr_fxn(root.dep_libs_tree[i].lib,
						   str_fxn, &dbll_sym);
			}
			if (status1) {
				/* Symbol found */
				break;
			}
		}
	}
	/* Check persistent libraries */
	if (!status1) {
		for (i = 0; i < nldr_node_obj->pers_libs; i++) {
			status1 =
			    nldr_obj->ldr_fxns.
			    get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
					 str_fxn, &dbll_sym);
			if (!status1) {
				status1 =
				    nldr_obj->ldr_fxns.
				    get_c_addr_fxn(nldr_node_obj->pers_lib_table
						   [i].lib, str_fxn, &dbll_sym);
			}
			if (status1) {
				/* Symbol found */
				break;
			}
		}
	}

	if (status1)
		*addr = dbll_sym->value;
	else
		status = -ESPIPE;

	return status;
}
753
754 /*
755 * ======== nldr_get_rmm_manager ========
756 * Given a NLDR object, retrieve RMM Manager Handle
757 */
nldr_get_rmm_manager(struct nldr_object * nldr,struct rmm_target_obj ** rmm_mgr)758 int nldr_get_rmm_manager(struct nldr_object *nldr,
759 struct rmm_target_obj **rmm_mgr)
760 {
761 int status = 0;
762 struct nldr_object *nldr_obj = nldr;
763 DBC_REQUIRE(rmm_mgr != NULL);
764
765 if (nldr) {
766 *rmm_mgr = nldr_obj->rmm;
767 } else {
768 *rmm_mgr = NULL;
769 status = -EFAULT;
770 }
771
772 DBC_ENSURE(!status || (rmm_mgr != NULL && *rmm_mgr == NULL));
773
774 return status;
775 }
776
777 /*
778 * ======== nldr_init ========
779 * Initialize the NLDR module.
780 */
nldr_init(void)781 bool nldr_init(void)
782 {
783 DBC_REQUIRE(refs >= 0);
784
785 if (refs == 0)
786 rmm_init();
787
788 refs++;
789
790 DBC_ENSURE(refs > 0);
791 return true;
792 }
793
794 /*
795 * ======== nldr_load ========
796 */
nldr_load(struct nldr_nodeobject * nldr_node_obj,enum nldr_phase phase)797 int nldr_load(struct nldr_nodeobject *nldr_node_obj,
798 enum nldr_phase phase)
799 {
800 struct nldr_object *nldr_obj;
801 struct dsp_uuid lib_uuid;
802 int status = 0;
803
804 DBC_REQUIRE(refs > 0);
805 DBC_REQUIRE(nldr_node_obj);
806
807 nldr_obj = nldr_node_obj->nldr_obj;
808
809 if (nldr_node_obj->dynamic) {
810 nldr_node_obj->phase = phase;
811
812 lib_uuid = nldr_node_obj->uuid;
813
814 /* At this point, we may not know if node is split into
815 * different libraries. So we'll go ahead and load the
816 * library, and then save the pointer to the appropriate
817 * location after we know. */
818
819 status =
820 load_lib(nldr_node_obj, &nldr_node_obj->root, lib_uuid,
821 false, nldr_node_obj->lib_path, phase, 0);
822
823 if (!status) {
824 if (*nldr_node_obj->phase_split) {
825 switch (phase) {
826 case NLDR_CREATE:
827 nldr_node_obj->create_lib =
828 nldr_node_obj->root;
829 break;
830
831 case NLDR_EXECUTE:
832 nldr_node_obj->execute_lib =
833 nldr_node_obj->root;
834 break;
835
836 case NLDR_DELETE:
837 nldr_node_obj->delete_lib =
838 nldr_node_obj->root;
839 break;
840
841 default:
842 DBC_ASSERT(false);
843 break;
844 }
845 }
846 }
847 } else {
848 if (nldr_node_obj->overlay)
849 status = load_ovly(nldr_node_obj, phase);
850
851 }
852
853 return status;
854 }
855
856 /*
857 * ======== nldr_unload ========
858 */
nldr_unload(struct nldr_nodeobject * nldr_node_obj,enum nldr_phase phase)859 int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
860 enum nldr_phase phase)
861 {
862 int status = 0;
863 struct lib_node *root_lib = NULL;
864 s32 i = 0;
865
866 DBC_REQUIRE(refs > 0);
867 DBC_REQUIRE(nldr_node_obj);
868
869 if (nldr_node_obj != NULL) {
870 if (nldr_node_obj->dynamic) {
871 if (*nldr_node_obj->phase_split) {
872 switch (phase) {
873 case NLDR_CREATE:
874 root_lib = &nldr_node_obj->create_lib;
875 break;
876 case NLDR_EXECUTE:
877 root_lib = &nldr_node_obj->execute_lib;
878 break;
879 case NLDR_DELETE:
880 root_lib = &nldr_node_obj->delete_lib;
881 /* Unload persistent libraries */
882 for (i = 0;
883 i < nldr_node_obj->pers_libs;
884 i++) {
885 unload_lib(nldr_node_obj,
886 &nldr_node_obj->
887 pers_lib_table[i]);
888 }
889 nldr_node_obj->pers_libs = 0;
890 break;
891 default:
892 DBC_ASSERT(false);
893 break;
894 }
895 } else {
896 /* Unload main library */
897 root_lib = &nldr_node_obj->root;
898 }
899 if (root_lib)
900 unload_lib(nldr_node_obj, root_lib);
901 } else {
902 if (nldr_node_obj->overlay)
903 unload_ovly(nldr_node_obj, phase);
904
905 }
906 }
907 return status;
908 }
909
910 /*
911 * ======== add_ovly_info ========
912 */
/*
 * ======== add_ovly_info ========
 * dbll log_write callback: record an overlay section (load addr != run
 * addr) against the owning node's per-phase section list.
 * Section names are expected as ".<node_name>:<phase>..." — the text after
 * the ':' selects create/delete/execute; anything else goes to "other".
 */
static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
			 u32 addr, u32 bytes)
{
	char *node_name;
	char *sect_name = (char *)sect_info->name;
	bool sect_exists = false;
	char seps = ':';
	char *pch;
	u16 i;
	struct nldr_object *nldr_obj = (struct nldr_object *)handle;
	struct ovly_sect **sect_list;
	u16 *sect_count;
	int status = 0;

	/* Is this an overlay section (load address != run address)? */
	if (sect_info->sect_load_addr == sect_info->sect_run_addr)
		goto func_end;

	/* Find the node it belongs to (skip the leading '.' of the name) */
	for (i = 0; i < nldr_obj->ovly_nodes; i++) {
		node_name = nldr_obj->ovly_table[i].node_name;
		DBC_REQUIRE(node_name);
		if (strncmp(node_name, sect_name + 1, strlen(node_name)) == 0) {
			/* Found the node */
			break;
		}
	}
	if (!(i < nldr_obj->ovly_nodes))
		goto func_end;

	/* Determine which phase this section belongs to */
	for (pch = sect_name + 1; *pch && *pch != seps; pch++)
		;

	if (*pch) {
		pch++;		/* Skip over the ':' */
		if (strncmp(pch, PCREATE, strlen(PCREATE)) == 0) {
			sect_list = &nldr_obj->ovly_table[i].create_sects_list;
			sect_count = &nldr_obj->ovly_table[i].create_sects;
		} else if (strncmp(pch, PDELETE, strlen(PDELETE)) == 0) {
			sect_list = &nldr_obj->ovly_table[i].delete_sects_list;
			sect_count = &nldr_obj->ovly_table[i].delete_sects;
		} else if (strncmp(pch, PEXECUTE, strlen(PEXECUTE)) == 0) {
			sect_list = &nldr_obj->ovly_table[i].execute_sects_list;
			sect_count = &nldr_obj->ovly_table[i].execute_sects;
		} else {
			/* Put in "other" sections */
			sect_list = &nldr_obj->ovly_table[i].other_sects_list;
			sect_count = &nldr_obj->ovly_table[i].other_sects;
		}
		status = add_ovly_sect(nldr_obj, sect_list, sect_info,
				       &sect_exists, addr, bytes);
		/* Only count each section once, no matter how many 'write'
		 * calls are made for it */
		if (!status && !sect_exists)
			(*sect_count)++;
	}
func_end:
	return status;
}
989
990 /*
991 * ======== add_ovly_node =========
992 * Callback function passed to dcd_get_objects.
993 */
add_ovly_node(struct dsp_uuid * uuid_obj,enum dsp_dcdobjtype obj_type,void * handle)994 static int add_ovly_node(struct dsp_uuid *uuid_obj,
995 enum dsp_dcdobjtype obj_type, void *handle)
996 {
997 struct nldr_object *nldr_obj = (struct nldr_object *)handle;
998 char *node_name = NULL;
999 char *pbuf = NULL;
1000 u32 len;
1001 struct dcd_genericobj obj_def;
1002 int status = 0;
1003
1004 if (obj_type != DSP_DCDNODETYPE)
1005 goto func_end;
1006
1007 status =
1008 dcd_get_object_def(nldr_obj->dcd_mgr, uuid_obj, obj_type,
1009 &obj_def);
1010 if (status)
1011 goto func_end;
1012
1013 /* If overlay node, add to the list */
1014 if (obj_def.obj_data.node_obj.load_type == NLDR_OVLYLOAD) {
1015 if (nldr_obj->ovly_table == NULL) {
1016 nldr_obj->ovly_nodes++;
1017 } else {
1018 /* Add node to table */
1019 nldr_obj->ovly_table[nldr_obj->ovly_nid].uuid =
1020 *uuid_obj;
1021 DBC_REQUIRE(obj_def.obj_data.node_obj.ndb_props.
1022 ac_name);
1023 len =
1024 strlen(obj_def.obj_data.node_obj.ndb_props.ac_name);
1025 node_name = obj_def.obj_data.node_obj.ndb_props.ac_name;
1026 pbuf = kzalloc(len + 1, GFP_KERNEL);
1027 if (pbuf == NULL) {
1028 status = -ENOMEM;
1029 } else {
1030 strncpy(pbuf, node_name, len);
1031 nldr_obj->ovly_table[nldr_obj->ovly_nid].
1032 node_name = pbuf;
1033 nldr_obj->ovly_nid++;
1034 }
1035 }
1036 }
1037 /* These were allocated in dcd_get_object_def */
1038 kfree(obj_def.obj_data.node_obj.str_create_phase_fxn);
1039
1040 kfree(obj_def.obj_data.node_obj.str_execute_phase_fxn);
1041
1042 kfree(obj_def.obj_data.node_obj.str_delete_phase_fxn);
1043
1044 kfree(obj_def.obj_data.node_obj.str_i_alg_name);
1045
1046 func_end:
1047 return status;
1048 }
1049
1050 /*
1051 * ======== add_ovly_sect ========
1052 */
/*
 * ======== add_ovly_sect ========
 * Append a new overlay section record to *lst unless a record with the
 * same load address already exists (sets *exists in that case). The new
 * record's run address is offset to match the caller's sub-range of the
 * section. Returns 0 or -ENOMEM.
 */
static int add_ovly_sect(struct nldr_object *nldr_obj,
			 struct ovly_sect **lst,
			 struct dbll_sect_info *sect_inf,
			 bool *exists, u32 addr, u32 bytes)
{
	struct ovly_sect *new_sect = NULL;
	struct ovly_sect *last_sect;
	struct ovly_sect *ovly_section;
	int status = 0;

	ovly_section = last_sect = *lst;
	*exists = false;
	while (ovly_section) {
		/*
		 * Make sure section has not already been added. Multiple
		 * 'write' calls may be made to load the section.
		 */
		if (ovly_section->sect_load_addr == addr) {
			/* Already added */
			*exists = true;
			break;
		}
		last_sect = ovly_section;
		ovly_section = ovly_section->next_sect;
	}

	if (!ovly_section) {
		/* New section */
		new_sect = kzalloc(sizeof(struct ovly_sect), GFP_KERNEL);
		if (new_sect == NULL) {
			status = -ENOMEM;
		} else {
			new_sect->sect_load_addr = addr;
			/* Keep run address in step with the offset into
			 * the section being written */
			new_sect->sect_run_addr = sect_inf->sect_run_addr +
			    (addr - sect_inf->sect_load_addr);
			new_sect->size = bytes;
			new_sect->page = sect_inf->type;
		}

		/* Add to the list */
		if (!status) {
			if (*lst == NULL) {
				/* First in the list */
				*lst = new_sect;
			} else {
				last_sect->next_sect = new_sect;
			}
		}
	}

	return status;
}
1105
1106 /*
1107 * ======== fake_ovly_write ========
1108 */
static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
			   s32 mtype)
{
	/*
	 * No-op write callback: performs no I/O and reports all 'bytes'
	 * as written.  NOTE(review): presumably installed so the loader's
	 * write path can run for bookkeeping without touching DSP memory
	 * — confirm against the caller that installs it.
	 */
	return (s32) bytes;
}
1114
1115 /*
1116 * ======== free_sects ========
1117 */
static void free_sects(struct nldr_object *nldr_obj,
		       struct ovly_sect *phase_sects, u16 alloc_num)
{
	struct ovly_sect *sect;
	u16 freed;
	bool ret;

	/*
	 * 'Deallocate' (rmm_free of reserved memory) the first alloc_num
	 * sections of the list — only that many were actually allocated
	 * by the caller.  segid 0: page not supported yet.
	 */
	for (sect = phase_sects, freed = 0; sect && freed < alloc_num;
	     sect = sect->next_sect, freed++) {
		ret = rmm_free(nldr_obj->rmm, 0, sect->sect_run_addr,
			       sect->size, true);
		DBC_ASSERT(ret);
	}
}
1137
1138 /*
1139 * ======== get_symbol_value ========
1140 * Find symbol in library's base image. If not there, check dependent
1141 * libraries.
1142 */
static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
			     char *sym_name, struct dbll_sym_val **sym)
{
	struct nldr_object *nldr_obj = (struct nldr_object *)handle;
	struct nldr_nodeobject *nldr_node_obj =
	    (struct nldr_nodeobject *)rmm_handle;
	struct lib_node *root = (struct lib_node *)parg;
	u16 i;
	bool status = false;

	/* check the base image (regular symbols, then C symbols) */
	status = nldr_obj->ldr_fxns.get_addr_fxn(nldr_obj->base_lib,
						 sym_name, sym);
	if (!status)
		status =
		    nldr_obj->ldr_fxns.get_c_addr_fxn(nldr_obj->base_lib,
						      sym_name, sym);

	/*
	 * Check in root lib itself. If the library consists of
	 * multiple object files linked together, some symbols in the
	 * library may need to be resolved.
	 */
	if (!status) {
		status = nldr_obj->ldr_fxns.get_addr_fxn(root->lib, sym_name,
							 sym);
		if (!status) {
			status =
			    nldr_obj->ldr_fxns.get_c_addr_fxn(root->lib,
							      sym_name, sym);
		}
	}

	/*
	 * Check in root lib's dependent libraries, but not dependent
	 * libraries' dependents.  (One level deep only.)
	 */
	if (!status) {
		for (i = 0; i < root->dep_libs; i++) {
			status =
			    nldr_obj->ldr_fxns.get_addr_fxn(root->
							    dep_libs_tree
							    [i].lib,
							    sym_name, sym);
			if (!status) {
				status =
				    nldr_obj->ldr_fxns.
				    get_c_addr_fxn(root->dep_libs_tree[i].lib,
						   sym_name, sym);
			}
			if (status) {
				/* Symbol found */
				break;
			}
		}
	}
	/*
	 * Check in persistent libraries (libraries kept loaded across
	 * phases for this node).
	 */
	if (!status) {
		for (i = 0; i < nldr_node_obj->pers_libs; i++) {
			status =
			    nldr_obj->ldr_fxns.
			    get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
					 sym_name, sym);
			if (!status) {
				status = nldr_obj->ldr_fxns.get_c_addr_fxn
				    (nldr_node_obj->pers_lib_table[i].lib,
				     sym_name, sym);
			}
			if (status) {
				/* Symbol found */
				break;
			}
		}
	}

	/* true if the symbol was resolved anywhere in the search order */
	return status;
}
1222
1223 /*
1224 * ======== load_lib ========
1225 * Recursively load library and all its dependent libraries. The library
1226 * we're loading is specified by a uuid.
1227 */
static int load_lib(struct nldr_nodeobject *nldr_node_obj,
		    struct lib_node *root, struct dsp_uuid uuid,
		    bool root_prstnt,
		    struct dbll_library_obj **lib_path,
		    enum nldr_phase phase, u16 depth)
{
	struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
	u16 nd_libs = 0;	/* Number of dependent libraries */
	u16 np_libs = 0;	/* Number of persistent libraries */
	u16 nd_libs_loaded = 0;	/* Number of dep. libraries loaded */
	u16 i;
	u32 entry;
	u32 dw_buf_size = NLDR_MAXPATHLENGTH;
	dbll_flags flags = DBLL_SYMB | DBLL_CODE | DBLL_DATA | DBLL_DYNAMIC;
	struct dbll_attrs new_attrs;
	char *psz_file_name = NULL;
	struct dsp_uuid *dep_lib_uui_ds = NULL;
	bool *persistent_dep_libs = NULL;
	int status = 0;
	bool lib_status = false;
	struct lib_node *dep_lib;

	if (depth > MAXDEPTH) {
		/* Error: dependency chain deeper than the design allows */
		DBC_ASSERT(false);
	}
	root->lib = NULL;
	/* Allocate a buffer for library file name of size DBL_MAXPATHLENGTH */
	psz_file_name = kzalloc(DBLL_MAXPATHLENGTH, GFP_KERNEL);
	if (psz_file_name == NULL)
		status = -ENOMEM;

	if (!status) {
		/* Get the name of the library */
		if (depth == 0) {
			status =
			    dcd_get_library_name(nldr_node_obj->nldr_obj->
						 dcd_mgr, &uuid, psz_file_name,
						 &dw_buf_size, phase,
						 nldr_node_obj->phase_split);
		} else {
			/* Dependent libraries are registered with a phase */
			status =
			    dcd_get_library_name(nldr_node_obj->nldr_obj->
						 dcd_mgr, &uuid, psz_file_name,
						 &dw_buf_size, NLDR_NOPHASE,
						 NULL);
		}
	}
	if (!status) {
		/* Open the library, don't load symbols */
		status =
		    nldr_obj->ldr_fxns.open_fxn(nldr_obj->dbll, psz_file_name,
						DBLL_NOLOAD, &root->lib);
	}
	/* Done with file name */
	kfree(psz_file_name);

	/* Check to see if library not already loaded */
	if (!status && root_prstnt) {
		lib_status =
		    find_in_persistent_lib_array(nldr_node_obj, root->lib);
		/* Close library */
		if (lib_status) {
			nldr_obj->ldr_fxns.close_fxn(root->lib);
			return 0;
		}
	}
	if (!status) {
		/* Check for circular dependencies. */
		for (i = 0; i < depth; i++) {
			if (root->lib == lib_path[i]) {
				/* This condition could be checked by a
				 * tool at build time. */
				status = -EILSEQ;
			}
		}
	}
	if (!status) {
		/* Add library to current path in dependency tree */
		lib_path[depth] = root->lib;
		depth++;
		/* Get number of dependent libraries */
		status =
		    dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->dcd_mgr,
					 &uuid, &nd_libs, &np_libs, phase);
	}
	DBC_ASSERT(nd_libs >= np_libs);
	if (!status) {
		if (!(*nldr_node_obj->phase_split))
			np_libs = 0;

		/* nd_libs = #of dependent libraries */
		root->dep_libs = nd_libs - np_libs;
		if (nd_libs > 0) {
			dep_lib_uui_ds = kzalloc(sizeof(struct dsp_uuid) *
						 nd_libs, GFP_KERNEL);
			persistent_dep_libs =
			    kzalloc(sizeof(bool) * nd_libs, GFP_KERNEL);
			if (!dep_lib_uui_ds || !persistent_dep_libs)
				status = -ENOMEM;

			if (root->dep_libs > 0) {
				/* Allocate arrays for dependent lib UUIDs,
				 * lib nodes */
				root->dep_libs_tree = kzalloc
				    (sizeof(struct lib_node) *
				     (root->dep_libs), GFP_KERNEL);
				if (!(root->dep_libs_tree))
					status = -ENOMEM;

			}

			if (!status) {
				/* Get the dependent library UUIDs */
				status =
				    dcd_get_dep_libs(nldr_node_obj->
						     nldr_obj->dcd_mgr, &uuid,
						     nd_libs, dep_lib_uui_ds,
						     persistent_dep_libs,
						     phase);
			}
		}
	}

	/*
	 * Recursively load dependent libraries.
	 */
	if (!status) {
		for (i = 0; i < nd_libs; i++) {
			/* If root library is NOT persistent, and dep library
			 * is, then record it. If root library IS persistent,
			 * the deplib is already included */
			if (!root_prstnt && persistent_dep_libs[i] &&
			    *nldr_node_obj->phase_split) {
				if ((nldr_node_obj->pers_libs) >= MAXLIBS) {
					status = -EILSEQ;
					break;
				}

				/* Allocate library outside of phase */
				dep_lib =
				    &nldr_node_obj->pers_lib_table
				    [nldr_node_obj->pers_libs];
			} else {
				if (root_prstnt)
					persistent_dep_libs[i] = true;

				/* Allocate library within phase */
				dep_lib = &root->dep_libs_tree[nd_libs_loaded];
			}

			status = load_lib(nldr_node_obj, dep_lib,
					  dep_lib_uui_ds[i],
					  persistent_dep_libs[i], lib_path,
					  phase, depth);

			if (!status) {
				/*
				 * Bookkeeping for the library just loaded.
				 * A persistent dependent lib was placed in
				 * pers_lib_table above, so bump pers_libs;
				 * otherwise it lives in this phase's
				 * dep_libs_tree.  (The previous code tested
				 * 'status != 0' inside '!status', which was
				 * always false: the persistent count was
				 * never incremented, so the next persistent
				 * lib overwrote the slot and the earlier one
				 * was never unloaded.)
				 */
				if (!root_prstnt && persistent_dep_libs[i] &&
				    *nldr_node_obj->phase_split) {
					(nldr_node_obj->pers_libs)++;
				} else {
					if (!persistent_dep_libs[i] ||
					    !(*nldr_node_obj->phase_split)) {
						nd_libs_loaded++;
					}
				}
			} else {
				break;
			}
		}
	}

	/* Now we can load the root library */
	if (!status) {
		new_attrs = nldr_obj->ldr_attrs;
		new_attrs.sym_arg = root;
		new_attrs.rmm_handle = nldr_node_obj;
		new_attrs.input_params = nldr_node_obj->priv_ref;
		new_attrs.base_image = false;

		status =
		    nldr_obj->ldr_fxns.load_fxn(root->lib, flags, &new_attrs,
						&entry);
	}

	/*
	 * In case of failure, unload any dependent libraries that
	 * were loaded, and close the root library.
	 * (Persistent libraries are unloaded from the very top)
	 */
	if (status) {
		if (phase != NLDR_EXECUTE) {
			for (i = 0; i < nldr_node_obj->pers_libs; i++)
				unload_lib(nldr_node_obj,
					   &nldr_node_obj->pers_lib_table[i]);

			nldr_node_obj->pers_libs = 0;
		}
		for (i = 0; i < nd_libs_loaded; i++)
			unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);

		if (root->lib)
			nldr_obj->ldr_fxns.close_fxn(root->lib);

	}

	/* Going up one node in the dependency tree */
	depth--;

	kfree(dep_lib_uui_ds);
	dep_lib_uui_ds = NULL;

	kfree(persistent_dep_libs);
	persistent_dep_libs = NULL;

	return status;
}
1447
1448 /*
1449 * ======== load_ovly ========
1450 */
static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
		     enum nldr_phase phase)
{
	struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
	struct ovly_node *po_node = NULL;
	struct ovly_sect *phase_sects = NULL;
	struct ovly_sect *other_sects_list = NULL;
	u16 i;
	u16 alloc_num = 0;	/* # of phase sections 'allocated' so far */
	u16 other_alloc = 0;	/* # of 'other' sections 'allocated' so far */
	u16 *ref_count = NULL;
	u16 *other_ref = NULL;
	u32 bytes;
	struct ovly_sect *ovly_section;
	int status = 0;

	/* Find the node in the table (matched by uuid) */
	for (i = 0; i < nldr_obj->ovly_nodes; i++) {
		if (is_equal_uuid
		    (&nldr_node_obj->uuid, &nldr_obj->ovly_table[i].uuid)) {
			/* Found it */
			po_node = &(nldr_obj->ovly_table[i]);
			break;
		}
	}

	DBC_ASSERT(i < nldr_obj->ovly_nodes);

	if (!po_node) {
		status = -ENOENT;
		goto func_end;
	}

	/*
	 * Select the ref count and section list for this phase.  'Other'
	 * sections are tied to the create phase only (unloaded at delete).
	 */
	switch (phase) {
	case NLDR_CREATE:
		ref_count = &(po_node->create_ref);
		other_ref = &(po_node->other_ref);
		phase_sects = po_node->create_sects_list;
		other_sects_list = po_node->other_sects_list;
		break;

	case NLDR_EXECUTE:
		ref_count = &(po_node->execute_ref);
		phase_sects = po_node->execute_sects_list;
		break;

	case NLDR_DELETE:
		ref_count = &(po_node->delete_ref);
		phase_sects = po_node->delete_sects_list;
		break;

	default:
		DBC_ASSERT(false);
		break;
	}

	if (ref_count == NULL)
		goto func_end;

	/* Already loaded for this phase: just bump the count at func_end */
	if (*ref_count != 0)
		goto func_end;

	/* 'Allocate' memory for overlay sections of this phase */
	ovly_section = phase_sects;
	while (ovly_section) {
		/* allocate *//* page not supported yet */
		/* reserve *//* align */
		status = rmm_alloc(nldr_obj->rmm, 0, ovly_section->size, 0,
				   &(ovly_section->sect_run_addr), true);
		if (!status) {
			ovly_section = ovly_section->next_sect;
			alloc_num++;
		} else {
			break;
		}
	}
	if (other_ref && *other_ref == 0) {
		/* 'Allocate' memory for other overlay sections
		 * (create phase) */
		if (!status) {
			ovly_section = other_sects_list;
			while (ovly_section) {
				/* page not supported *//* align */
				/* reserve */
				status =
				    rmm_alloc(nldr_obj->rmm, 0,
					      ovly_section->size, 0,
					      &(ovly_section->sect_run_addr),
					      true);
				if (!status) {
					ovly_section = ovly_section->next_sect;
					other_alloc++;
				} else {
					break;
				}
			}
		}
	}
	if (*ref_count == 0) {
		if (!status) {
			/* Load sections for this phase via the overlay
			 * callback; a short write is treated as failure */
			ovly_section = phase_sects;
			while (ovly_section && !status) {
				bytes =
				    (*nldr_obj->ovly_fxn) (nldr_node_obj->
							   priv_ref,
							   ovly_section->
							   sect_run_addr,
							   ovly_section->
							   sect_load_addr,
							   ovly_section->size,
							   ovly_section->page);
				if (bytes != ovly_section->size)
					status = -EPERM;

				ovly_section = ovly_section->next_sect;
			}
		}
	}
	if (other_ref && *other_ref == 0) {
		if (!status) {
			/* Load other sections (create phase) */
			ovly_section = other_sects_list;
			while (ovly_section && !status) {
				bytes =
				    (*nldr_obj->ovly_fxn) (nldr_node_obj->
							   priv_ref,
							   ovly_section->
							   sect_run_addr,
							   ovly_section->
							   sect_load_addr,
							   ovly_section->size,
							   ovly_section->page);
				if (bytes != ovly_section->size)
					status = -EPERM;

				ovly_section = ovly_section->next_sect;
			}
		}
	}
	if (status) {
		/* 'Deallocate' memory: roll back only what was allocated */
		free_sects(nldr_obj, phase_sects, alloc_num);
		free_sects(nldr_obj, other_sects_list, other_alloc);
	}
func_end:
	/* On success, take a reference for this phase (and 'other') */
	if (!status && (ref_count != NULL)) {
		*ref_count += 1;
		if (other_ref)
			*other_ref += 1;

	}

	return status;
}
1606
1607 /*
1608 * ======== remote_alloc ========
1609 */
static int remote_alloc(void **ref, u16 mem_sect, u32 size,
			u32 align, u32 *dsp_address,
			s32 segmnt_id, s32 req,
			bool reserve)
{
	struct nldr_nodeobject *hnode = (struct nldr_nodeobject *)ref;
	struct nldr_object *nldr_obj;
	struct rmm_target_obj *rmm;
	u16 mem_phase_bit = MAXFLAGS;
	u16 segid = 0;
	u16 i;
	u16 mem_sect_type;
	u32 word_size;
	struct rmm_addr *rmm_addr_obj = (struct rmm_addr *)dsp_address;
	bool mem_load_req = false;
	int status = -ENOMEM;	/* Set to fail */
	DBC_REQUIRE(hnode);
	DBC_REQUIRE(mem_sect == DBLL_CODE || mem_sect == DBLL_DATA ||
		    mem_sect == DBLL_BSS);
	nldr_obj = hnode->nldr_obj;
	rmm = nldr_obj->rmm;
	/* Convert size to DSP words (round up) */
	word_size =
	    (size + nldr_obj->dsp_word_size -
	     1) / nldr_obj->dsp_word_size;
	/* Modify memory 'align' to account for DSP cache line size */
	align = lcm(GEM_CACHE_LINE_SIZE, align);
	dev_dbg(bridge, "%s: memory align to 0x%x\n", __func__, align);
	if (segmnt_id != -1) {
		/* Caller supplied an explicit segment */
		rmm_addr_obj->segid = segmnt_id;
		segid = segmnt_id;
		mem_load_req = req;
	} else {
		/*
		 * Derive the segment from the node's packed per-phase
		 * requirements (see the mask format described at the top
		 * of this file): pick the data-flag bit for the current
		 * phase, +1 for code sections.
		 */
		switch (hnode->phase) {
		case NLDR_CREATE:
			mem_phase_bit = CREATEDATAFLAGBIT;
			break;
		case NLDR_DELETE:
			mem_phase_bit = DELETEDATAFLAGBIT;
			break;
		case NLDR_EXECUTE:
			mem_phase_bit = EXECUTEDATAFLAGBIT;
			break;
		default:
			DBC_ASSERT(false);
			break;
		}
		if (mem_sect == DBLL_CODE)
			mem_phase_bit++;

		if (mem_phase_bit < MAXFLAGS)
			segid = hnode->seg_id[mem_phase_bit];

		/* Determine if there is a memory loading requirement
		 * (f-bit set => segment is required, not just preferred) */
		if ((hnode->code_data_flag_mask >> mem_phase_bit) & 0x1)
			mem_load_req = true;

	}
	mem_sect_type = (mem_sect == DBLL_CODE) ? DYNM_CODE : DYNM_DATA;

	/* Find an appropriate segment based on mem_sect */
	if (segid == NULLID) {
		/* No memory requirements of preferences */
		DBC_ASSERT(!mem_load_req);
		goto func_cont;
	}
	if (segid <= MAXSEGID) {
		DBC_ASSERT(segid < nldr_obj->dload_segs);
		/* Attempt to allocate from segid first. */
		rmm_addr_obj->segid = segid;
		status =
		    rmm_alloc(rmm, segid, word_size, align, dsp_address, false);
		if (status) {
			dev_dbg(bridge, "%s: Unable allocate from segment %d\n",
				__func__, segid);
		}
	} else {
		/* segid > MAXSEGID ==> Internal or external memory */
		DBC_ASSERT(segid == MEMINTERNALID || segid == MEMEXTERNALID);
		/* Check for any internal or external memory segment,
		 * depending on segid. */
		mem_sect_type |= segid == MEMINTERNALID ?
		    DYNM_INTERNAL : DYNM_EXTERNAL;
		for (i = 0; i < nldr_obj->dload_segs; i++) {
			if ((nldr_obj->seg_table[i] & mem_sect_type) !=
			    mem_sect_type)
				continue;

			status = rmm_alloc(rmm, i, word_size, align,
					   dsp_address, false);
			if (!status) {
				/* Save segid for freeing later */
				rmm_addr_obj->segid = i;
				break;
			}
		}
	}
func_cont:
	/* Haven't found memory yet, attempt to find any segment that works,
	 * but only if the preference was not a hard requirement */
	if (status == -ENOMEM && !mem_load_req) {
		dev_dbg(bridge, "%s: Preferred segment unavailable, trying "
			"another\n", __func__);
		for (i = 0; i < nldr_obj->dload_segs; i++) {
			/* All bits of mem_sect_type must be set */
			if ((nldr_obj->seg_table[i] & mem_sect_type) !=
			    mem_sect_type)
				continue;

			status = rmm_alloc(rmm, i, word_size, align,
					   dsp_address, false);
			if (!status) {
				/* Save segid */
				rmm_addr_obj->segid = i;
				break;
			}
		}
	}

	return status;
}
1730
static int remote_free(void **ref, u16 space, u32 dsp_address,
		       u32 size, bool reserve)
{
	struct nldr_object *nldr_obj = (struct nldr_object *)ref;
	u32 dsp_words;

	DBC_REQUIRE(nldr_obj);

	/* Round the byte size up to whole DSP words */
	dsp_words = (size + nldr_obj->dsp_word_size - 1) /
	    nldr_obj->dsp_word_size;

	/* rmm_free() returns true on success; map to 0 / -ENOMEM */
	return rmm_free(nldr_obj->rmm, space, dsp_address, dsp_words,
			reserve) ? 0 : -ENOMEM;
}
1753
1754 /*
1755 * ======== unload_lib ========
1756 */
unload_lib(struct nldr_nodeobject * nldr_node_obj,struct lib_node * root)1757 static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
1758 struct lib_node *root)
1759 {
1760 struct dbll_attrs new_attrs;
1761 struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1762 u16 i;
1763
1764 DBC_ASSERT(root != NULL);
1765
1766 /* Unload dependent libraries */
1767 for (i = 0; i < root->dep_libs; i++)
1768 unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);
1769
1770 root->dep_libs = 0;
1771
1772 new_attrs = nldr_obj->ldr_attrs;
1773 new_attrs.rmm_handle = nldr_obj->rmm;
1774 new_attrs.input_params = nldr_node_obj->priv_ref;
1775 new_attrs.base_image = false;
1776 new_attrs.sym_arg = root;
1777
1778 if (root->lib) {
1779 /* Unload the root library */
1780 nldr_obj->ldr_fxns.unload_fxn(root->lib, &new_attrs);
1781 nldr_obj->ldr_fxns.close_fxn(root->lib);
1782 }
1783
1784 /* Free dependent library list */
1785 kfree(root->dep_libs_tree);
1786 root->dep_libs_tree = NULL;
1787 }
1788
1789 /*
1790 * ======== unload_ovly ========
1791 */
static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
			enum nldr_phase phase)
{
	struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
	struct ovly_node *po_node = NULL;
	struct ovly_sect *phase_sects = NULL;
	struct ovly_sect *other_sects_list = NULL;
	u16 i;
	u16 alloc_num = 0;
	u16 other_alloc = 0;
	u16 *ref_count = NULL;
	u16 *other_ref = NULL;

	/* Find the node in the table (matched by uuid) */
	for (i = 0; i < nldr_obj->ovly_nodes; i++) {
		if (is_equal_uuid
		    (&nldr_node_obj->uuid, &nldr_obj->ovly_table[i].uuid)) {
			/* Found it */
			po_node = &(nldr_obj->ovly_table[i]);
			break;
		}
	}

	DBC_ASSERT(i < nldr_obj->ovly_nodes);

	if (!po_node)
		/* TODO: Should we print warning here? */
		return;

	/* Select the ref count and section list for this phase; counts
	 * mirror the allocations performed in load_ovly() */
	switch (phase) {
	case NLDR_CREATE:
		ref_count = &(po_node->create_ref);
		phase_sects = po_node->create_sects_list;
		alloc_num = po_node->create_sects;
		break;
	case NLDR_EXECUTE:
		ref_count = &(po_node->execute_ref);
		phase_sects = po_node->execute_sects_list;
		alloc_num = po_node->execute_sects;
		break;
	case NLDR_DELETE:
		ref_count = &(po_node->delete_ref);
		other_ref = &(po_node->other_ref);
		phase_sects = po_node->delete_sects_list;
		/* 'Other' overlay sections are unloaded in the delete phase */
		other_sects_list = po_node->other_sects_list;
		alloc_num = po_node->delete_sects;
		other_alloc = po_node->other_sects;
		break;
	default:
		DBC_ASSERT(false);
		break;
	}
	/* Drop one reference for this phase (and for 'other' at delete) */
	DBC_ASSERT(ref_count && (*ref_count > 0));
	if (ref_count && (*ref_count > 0)) {
		*ref_count -= 1;
		if (other_ref) {
			DBC_ASSERT(*other_ref > 0);
			*other_ref -= 1;
		}
	}

	/* Last reference gone: release the sections' DSP memory */
	if (ref_count && *ref_count == 0) {
		/* 'Deallocate' memory */
		free_sects(nldr_obj, phase_sects, alloc_num);
	}
	if (other_ref && *other_ref == 0)
		free_sects(nldr_obj, other_sects_list, other_alloc);
}
1861
1862 /*
1863 * ======== find_in_persistent_lib_array ========
1864 */
find_in_persistent_lib_array(struct nldr_nodeobject * nldr_node_obj,struct dbll_library_obj * lib)1865 static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
1866 struct dbll_library_obj *lib)
1867 {
1868 s32 i = 0;
1869
1870 for (i = 0; i < nldr_node_obj->pers_libs; i++) {
1871 if (lib == nldr_node_obj->pers_lib_table[i].lib)
1872 return true;
1873
1874 }
1875
1876 return false;
1877 }
1878
1879 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
1880 /**
1881 * nldr_find_addr() - Find the closest symbol to the given address based on
1882 * dynamic node object.
1883 *
1884 * @nldr_node: Dynamic node object
1885 * @sym_addr: Given address to find the dsp symbol
1886 * @offset_range: offset range to look for dsp symbol
1887 * @offset_output: Symbol Output address
1888 * @sym_name: String with the dsp symbol
1889 *
1890 * This function finds the node library for a given address and
1891 * retrieves the dsp symbol by calling dbll_find_dsp_symbol.
1892 */
int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
		   u32 offset_range, void *offset_output, char *sym_name)
{
	int status = 0;
	bool status1 = false;	/* true once a symbol has been found */
	s32 i = 0;
	struct lib_node root = { NULL, 0, NULL };
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(offset_output != NULL);
	DBC_REQUIRE(sym_name != NULL);
	pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__, (u32) nldr_node,
		 sym_addr, offset_range, (u32) offset_output, sym_name);

	/* Pick the library that corresponds to the node's current phase */
	if (nldr_node->dynamic && *nldr_node->phase_split) {
		switch (nldr_node->phase) {
		case NLDR_CREATE:
			root = nldr_node->create_lib;
			break;
		case NLDR_EXECUTE:
			root = nldr_node->execute_lib;
			break;
		case NLDR_DELETE:
			root = nldr_node->delete_lib;
			break;
		default:
			DBC_ASSERT(false);
			break;
		}
	} else {
		/* for Overlay nodes or non-split Dynamic nodes */
		root = nldr_node->root;
	}

	/* Search the root library first */
	status1 = dbll_find_dsp_symbol(root.lib, sym_addr,
				       offset_range, offset_output, sym_name);

	/* If symbol not found, check dependent libraries */
	if (!status1)
		for (i = 0; i < root.dep_libs; i++) {
			status1 = dbll_find_dsp_symbol(
				root.dep_libs_tree[i].lib, sym_addr,
				offset_range, offset_output, sym_name);
			if (status1)
				/* Symbol found */
				break;
		}
	/* Check persistent libraries */
	if (!status1)
		for (i = 0; i < nldr_node->pers_libs; i++) {
			status1 = dbll_find_dsp_symbol(
				nldr_node->pers_lib_table[i].lib, sym_addr,
				offset_range, offset_output, sym_name);
			if (status1)
				/* Symbol found */
				break;
		}

	if (!status1) {
		pr_debug("%s: Address 0x%x not found in range %d.\n",
			 __func__, sym_addr, offset_range);
		status = -ESPIPE;
	}

	return status;
}
1958 #endif
1959