1 /*
2 * nldr.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * DSP/BIOS Bridge dynamic + overlay Node loader.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19 #include <linux/types.h>
20
21 #include <dspbridge/host_os.h>
22
23 #include <dspbridge/dbdefs.h>
24
25 /* Platform manager */
26 #include <dspbridge/cod.h>
27 #include <dspbridge/dev.h>
28
29 /* Resource manager */
30 #include <dspbridge/dbll.h>
31 #include <dspbridge/dbdcd.h>
32 #include <dspbridge/rmm.h>
33 #include <dspbridge/uuidutil.h>
34
35 #include <dspbridge/nldr.h>
36 #include <linux/lcm.h>
37
38 /* Name of section containing dynamic load mem */
39 #define DYNMEMSECT ".dspbridge_mem"
40
41 /* Name of section containing dependent library information */
42 #define DEPLIBSECT ".dspbridge_deplibs"
43
44 /* Max depth of recursion for loading node's dependent libraries */
45 #define MAXDEPTH 5
46
47 /* Max number of persistent libraries kept by a node */
48 #define MAXLIBS 5
49
50 /*
51 * Defines for extracting packed dynamic load memory requirements from two
52 * masks.
53 * These defines must match node.cdb and dynm.cdb
54 * Format of data/code mask is:
55 * uuuuuuuu|fueeeeee|fudddddd|fucccccc|
56 * where
57 * u = unused
58 * cccccc = preferred/required dynamic mem segid for create phase data/code
59 * dddddd = preferred/required dynamic mem segid for delete phase data/code
60 * eeeeee = preferred/req. dynamic mem segid for execute phase data/code
61 * f = flag indicating if memory is preferred or required:
62 * f = 1 if required, f = 0 if preferred.
63 *
64 * The 6 bits of the segid are interpreted as follows:
65 *
66 * If the 6th bit (bit 5) is not set, then this specifies a memory segment
67 * between 0 and 31 (a maximum of 32 dynamic loading memory segments).
68 * If the 6th bit (bit 5) is set, segid has the following interpretation:
69 * segid = 32 - Any internal memory segment can be used.
70 * segid = 33 - Any external memory segment can be used.
71 * segid = 63 - Any memory segment can be used (in this case the
72 * required/preferred flag is irrelevant).
73 *
74 */
75 /* Maximum allowed dynamic loading memory segments */
76 #define MAXMEMSEGS 32
77
78 #define MAXSEGID 3 /* Largest possible (real) segid */
79 #define MEMINTERNALID 32 /* Segid meaning use internal mem */
80 #define MEMEXTERNALID 33 /* Segid meaning use external mem */
81 #define NULLID 63 /* Segid meaning no memory req/pref */
82 #define FLAGBIT 7 /* 7th bit is pref./req. flag */
83 #define SEGMASK 0x3f /* Bits 0 - 5 */
84
85 #define CREATEBIT 0 /* Create segid starts at bit 0 */
86 #define DELETEBIT 8 /* Delete segid starts at bit 8 */
87 #define EXECUTEBIT 16 /* Execute segid starts at bit 16 */
88
89 /*
90 * Masks that define memory type. Must match defines in dynm.cdb.
91 */
92 #define DYNM_CODE 0x2
93 #define DYNM_DATA 0x4
94 #define DYNM_CODEDATA (DYNM_CODE | DYNM_DATA)
95 #define DYNM_INTERNAL 0x8
96 #define DYNM_EXTERNAL 0x10
97
98 /*
99 * Defines for packing memory requirement/preference flags for code and
100 * data of each of the node's phases into one mask.
101 * The bit is set if the segid is required for loading code/data of the
102 * given phase. The bit is not set, if the segid is preferred only.
103 *
104 * These defines are also used as indeces into a segid array for the node.
105 * eg node's segid[CREATEDATAFLAGBIT] is the memory segment id that the
106 * create phase data is required or preferred to be loaded into.
107 */
108 #define CREATEDATAFLAGBIT 0
109 #define CREATECODEFLAGBIT 1
110 #define EXECUTEDATAFLAGBIT 2
111 #define EXECUTECODEFLAGBIT 3
112 #define DELETEDATAFLAGBIT 4
113 #define DELETECODEFLAGBIT 5
114 #define MAXFLAGS 6
115
116 /*
117 * These names may be embedded in overlay sections to identify which
118 * node phase the section should be overlayed.
119 */
120 #define PCREATE "create"
121 #define PDELETE "delete"
122 #define PEXECUTE "execute"
123
is_equal_uuid(struct dsp_uuid * uuid1,struct dsp_uuid * uuid2)124 static inline bool is_equal_uuid(struct dsp_uuid *uuid1,
125 struct dsp_uuid *uuid2)
126 {
127 return !memcmp(uuid1, uuid2, sizeof(struct dsp_uuid));
128 }
129
/*
 *  ======== mem_seg_info ========
 *  Format of dynamic loading memory segment info in coff file.
 *  Must match dynm.h55.
 *  nldr_create() reads an array of these from the DYNMEMSECT section of
 *  the base image (preceded by a u32 segment count).
 */
struct mem_seg_info {
	u32 segid;		/* Dynamic loading memory segment number */
	u32 base;		/* Segment base address -- units assumed DSP-side; TODO confirm */
	u32 len;		/* Segment length, same units as base */
	u32 type;		/* Mask of DYNM_CODE, DYNM_INTERNAL, etc. */
};
141
/*
 *  ======== lib_node ========
 *  For maintaining a tree of library dependencies.
 */
struct lib_node {
	struct dbll_library_obj *lib;	/* The library */
	u16 dep_libs;		/* Number of dependent libraries */
	/* Array of dep_libs entries, each the root of its own subtree */
	struct lib_node *dep_libs_tree;	/* Dependent libraries of lib */
};
151
/*
 *  ======== ovly_sect ========
 *  Information needed to overlay a section.  Kept on singly linked
 *  lists headed in struct ovly_node, one list per node phase.
 */
struct ovly_sect {
	struct ovly_sect *next_sect;	/* Next section on the phase list */
	u32 sect_load_addr;	/* Load address of section */
	u32 sect_run_addr;	/* Run address of section */
	u32 size;		/* Size of section */
	u16 page;		/* DBL_CODE, DBL_DATA */
};
163
/*
 *  ======== ovly_node ========
 *  For maintaining a list of overlay nodes, with sections that need to be
 *  overlayed for each of the nodes phases.
 */
struct ovly_node {
	struct dsp_uuid uuid;	/* Node UUID from the DCD */
	char *node_name;	/* kzalloc'd copy of the node's name */
	/* Per-phase lists of sections to overlay (see struct ovly_sect) */
	struct ovly_sect *create_sects_list;
	struct ovly_sect *delete_sects_list;
	struct ovly_sect *execute_sects_list;
	struct ovly_sect *other_sects_list;
	/* Count of sections on each of the lists above */
	u16 create_sects;
	u16 delete_sects;
	u16 execute_sects;
	u16 other_sects;
	/* NOTE(review): ref fields presumably count phase loads; their
	 * users are outside this chunk -- confirm before relying on it. */
	u16 create_ref;
	u16 delete_ref;
	u16 execute_ref;
	u16 other_ref;
};
185
/*
 *  ======== nldr_object ========
 *  Overlay loader object.  One per device; created by nldr_create() and
 *  destroyed by nldr_delete().
 */
struct nldr_object {
	struct dev_object *dev_obj;	/* Device object */
	struct dcd_manager *dcd_mgr;	/* Proc/Node data manager */
	struct dbll_tar_obj *dbll;	/* The DBL loader */
	struct dbll_library_obj *base_lib;	/* Base image library */
	struct rmm_target_obj *rmm;	/* Remote memory manager for DSP */
	struct dbll_fxns ldr_fxns;	/* Loader function table */
	struct dbll_attrs ldr_attrs;	/* attrs to pass to loader functions */
	nldr_ovlyfxn ovly_fxn;	/* "write" for overlay nodes */
	nldr_writefxn write_fxn;	/* "write" for dynamic nodes */
	struct ovly_node *ovly_table;	/* Table of overlay nodes */
	u16 ovly_nodes;		/* Number of overlay nodes in base */
	u16 ovly_nid;		/* Index for tracking overlay nodes */
	u16 dload_segs;		/* Number of dynamic load mem segs */
	u32 *seg_table;		/* memtypes of dynamic memory segs
				 * indexed by segid
				 */
	u16 dsp_mau_size;	/* Size of DSP MAU */
	u16 dsp_word_size;	/* Size of DSP word */
};
210
/*
 *  ======== nldr_nodeobject ========
 *  Dynamic node object. This object is created when a node is allocated.
 */
struct nldr_nodeobject {
	struct nldr_object *nldr_obj;	/* Dynamic loader handle */
	void *priv_ref;		/* Handle to pass to dbl_write_fxn */
	struct dsp_uuid uuid;	/* Node's UUID */
	bool dynamic;		/* Dynamically loaded node? */
	bool overlay;		/* Overlay node? */
	/* Caller-owned flag; true when the node's phases live in
	 * separate libraries (set during load). */
	bool *phase_split;	/* Multiple phase libraries? */
	struct lib_node root;	/* Library containing node phase */
	struct lib_node create_lib;	/* Library with create phase lib */
	struct lib_node execute_lib;	/* Library with execute phase lib */
	struct lib_node delete_lib;	/* Library with delete phase lib */
	/* libs remain loaded until Delete */
	struct lib_node pers_lib_table[MAXLIBS];
	s32 pers_libs;		/* Number of persistent libraries */
	/* Path in lib dependency tree */
	struct dbll_library_obj *lib_path[MAXDEPTH + 1];
	enum nldr_phase phase;	/* Node phase currently being loaded */

	/*
	 *  Dynamic loading memory segments for data and code of each phase.
	 *  Indexed by the *FLAGBIT defines above.
	 */
	u16 seg_id[MAXFLAGS];

	/*
	 *  Mask indicating whether each mem segment specified in seg_id[]
	 *  is preferred or required.
	 *  For example
	 *  	if (code_data_flag_mask & (1 << EXECUTEDATAFLAGBIT)) != 0,
	 *  then it is required to load execute phase data into the memory
	 *  specified by seg_id[EXECUTEDATAFLAGBIT].
	 */
	u32 code_data_flag_mask;
};
248
/* Dynamic loader function table.  Initializer order must match the
 * field order of struct dbll_fxns; each entry is cast to the generic
 * function-pointer type declared there. */
static struct dbll_fxns ldr_fxns = {
	(dbll_close_fxn) dbll_close,
	(dbll_create_fxn) dbll_create,
	(dbll_delete_fxn) dbll_delete,
	(dbll_exit_fxn) dbll_exit,
	(dbll_get_attrs_fxn) dbll_get_attrs,
	(dbll_get_addr_fxn) dbll_get_addr,
	(dbll_get_c_addr_fxn) dbll_get_c_addr,
	(dbll_get_sect_fxn) dbll_get_sect,
	(dbll_init_fxn) dbll_init,
	(dbll_load_fxn) dbll_load,
	(dbll_open_fxn) dbll_open,
	(dbll_read_sect_fxn) dbll_read_sect,
	(dbll_unload_fxn) dbll_unload,
};
265
/* ---- Forward declarations of module-private helpers (defined below) ---- */
static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
			 u32 addr, u32 bytes);
static int add_ovly_node(struct dsp_uuid *uuid_obj,
			 enum dsp_dcdobjtype obj_type, void *handle);
static int add_ovly_sect(struct nldr_object *nldr_obj,
			 struct ovly_sect **lst,
			 struct dbll_sect_info *sect_inf,
			 bool *exists, u32 addr, u32 bytes);
static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
			   s32 mtype);
static void free_sects(struct nldr_object *nldr_obj,
		       struct ovly_sect *phase_sects, u16 alloc_num);
static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
			     char *sym_name, struct dbll_sym_val **sym);
static int load_lib(struct nldr_nodeobject *nldr_node_obj,
		    struct lib_node *root, struct dsp_uuid uuid,
		    bool root_prstnt,
		    struct dbll_library_obj **lib_path,
		    enum nldr_phase phase, u16 depth);
static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
		     enum nldr_phase phase);
static int remote_alloc(void **ref, u16 mem_sect, u32 size,
			u32 align, u32 *dsp_address,
			s32 segmnt_id,
			s32 req, bool reserve);
static int remote_free(void **ref, u16 space, u32 dsp_address, u32 size,
		       bool reserve);

static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
		       struct lib_node *root);
static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
			enum nldr_phase phase);
static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
					 struct dbll_library_obj *lib);
300
301 /*
302 * ======== nldr_allocate ========
303 */
nldr_allocate(struct nldr_object * nldr_obj,void * priv_ref,const struct dcd_nodeprops * node_props,struct nldr_nodeobject ** nldr_nodeobj,bool * pf_phase_split)304 int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
305 const struct dcd_nodeprops *node_props,
306 struct nldr_nodeobject **nldr_nodeobj,
307 bool *pf_phase_split)
308 {
309 struct nldr_nodeobject *nldr_node_obj = NULL;
310 int status = 0;
311
312 /* Initialize handle in case of failure */
313 *nldr_nodeobj = NULL;
314 /* Allocate node object */
315 nldr_node_obj = kzalloc(sizeof(struct nldr_nodeobject), GFP_KERNEL);
316
317 if (nldr_node_obj == NULL) {
318 status = -ENOMEM;
319 } else {
320 nldr_node_obj->phase_split = pf_phase_split;
321 nldr_node_obj->pers_libs = 0;
322 nldr_node_obj->nldr_obj = nldr_obj;
323 nldr_node_obj->priv_ref = priv_ref;
324 /* Save node's UUID. */
325 nldr_node_obj->uuid = node_props->ndb_props.ui_node_id;
326 /*
327 * Determine if node is a dynamically loaded node from
328 * ndb_props.
329 */
330 if (node_props->load_type == NLDR_DYNAMICLOAD) {
331 /* Dynamic node */
332 nldr_node_obj->dynamic = true;
333 /*
334 * Extract memory requirements from ndb_props masks
335 */
336 /* Create phase */
337 nldr_node_obj->seg_id[CREATEDATAFLAGBIT] = (u16)
338 (node_props->data_mem_seg_mask >> CREATEBIT) &
339 SEGMASK;
340 nldr_node_obj->code_data_flag_mask |=
341 ((node_props->data_mem_seg_mask >>
342 (CREATEBIT + FLAGBIT)) & 1) << CREATEDATAFLAGBIT;
343 nldr_node_obj->seg_id[CREATECODEFLAGBIT] = (u16)
344 (node_props->code_mem_seg_mask >>
345 CREATEBIT) & SEGMASK;
346 nldr_node_obj->code_data_flag_mask |=
347 ((node_props->code_mem_seg_mask >>
348 (CREATEBIT + FLAGBIT)) & 1) << CREATECODEFLAGBIT;
349 /* Execute phase */
350 nldr_node_obj->seg_id[EXECUTEDATAFLAGBIT] = (u16)
351 (node_props->data_mem_seg_mask >>
352 EXECUTEBIT) & SEGMASK;
353 nldr_node_obj->code_data_flag_mask |=
354 ((node_props->data_mem_seg_mask >>
355 (EXECUTEBIT + FLAGBIT)) & 1) <<
356 EXECUTEDATAFLAGBIT;
357 nldr_node_obj->seg_id[EXECUTECODEFLAGBIT] = (u16)
358 (node_props->code_mem_seg_mask >>
359 EXECUTEBIT) & SEGMASK;
360 nldr_node_obj->code_data_flag_mask |=
361 ((node_props->code_mem_seg_mask >>
362 (EXECUTEBIT + FLAGBIT)) & 1) <<
363 EXECUTECODEFLAGBIT;
364 /* Delete phase */
365 nldr_node_obj->seg_id[DELETEDATAFLAGBIT] = (u16)
366 (node_props->data_mem_seg_mask >> DELETEBIT) &
367 SEGMASK;
368 nldr_node_obj->code_data_flag_mask |=
369 ((node_props->data_mem_seg_mask >>
370 (DELETEBIT + FLAGBIT)) & 1) << DELETEDATAFLAGBIT;
371 nldr_node_obj->seg_id[DELETECODEFLAGBIT] = (u16)
372 (node_props->code_mem_seg_mask >>
373 DELETEBIT) & SEGMASK;
374 nldr_node_obj->code_data_flag_mask |=
375 ((node_props->code_mem_seg_mask >>
376 (DELETEBIT + FLAGBIT)) & 1) << DELETECODEFLAGBIT;
377 } else {
378 /* Non-dynamically loaded nodes are part of the
379 * base image */
380 nldr_node_obj->root.lib = nldr_obj->base_lib;
381 /* Check for overlay node */
382 if (node_props->load_type == NLDR_OVLYLOAD)
383 nldr_node_obj->overlay = true;
384
385 }
386 *nldr_nodeobj = (struct nldr_nodeobject *)nldr_node_obj;
387 }
388 /* Cleanup on failure */
389 if (status && nldr_node_obj)
390 kfree(nldr_node_obj);
391
392 return status;
393 }
394
395 /*
396 * ======== nldr_create ========
397 */
nldr_create(struct nldr_object ** nldr,struct dev_object * hdev_obj,const struct nldr_attrs * pattrs)398 int nldr_create(struct nldr_object **nldr,
399 struct dev_object *hdev_obj,
400 const struct nldr_attrs *pattrs)
401 {
402 struct cod_manager *cod_mgr; /* COD manager */
403 char *psz_coff_buf = NULL;
404 char sz_zl_file[COD_MAXPATHLENGTH];
405 struct nldr_object *nldr_obj = NULL;
406 struct dbll_attrs save_attrs;
407 struct dbll_attrs new_attrs;
408 dbll_flags flags;
409 u32 ul_entry;
410 u16 dload_segs = 0;
411 struct mem_seg_info *mem_info_obj;
412 u32 ul_len = 0;
413 u32 ul_addr;
414 struct rmm_segment *rmm_segs = NULL;
415 u16 i;
416 int status = 0;
417
418 /* Allocate dynamic loader object */
419 nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL);
420 if (nldr_obj) {
421 nldr_obj->dev_obj = hdev_obj;
422 /* warning, lazy status checking alert! */
423 dev_get_cod_mgr(hdev_obj, &cod_mgr);
424 if (cod_mgr) {
425 status = cod_get_loader(cod_mgr, &nldr_obj->dbll);
426 status = cod_get_base_lib(cod_mgr, &nldr_obj->base_lib);
427 status =
428 cod_get_base_name(cod_mgr, sz_zl_file,
429 COD_MAXPATHLENGTH);
430 }
431 status = 0;
432 /* end lazy status checking */
433 nldr_obj->dsp_mau_size = pattrs->dsp_mau_size;
434 nldr_obj->dsp_word_size = pattrs->dsp_word_size;
435 nldr_obj->ldr_fxns = ldr_fxns;
436 if (!(nldr_obj->ldr_fxns.init_fxn()))
437 status = -ENOMEM;
438
439 } else {
440 status = -ENOMEM;
441 }
442 /* Create the DCD Manager */
443 if (!status)
444 status = dcd_create_manager(NULL, &nldr_obj->dcd_mgr);
445
446 /* Get dynamic loading memory sections from base lib */
447 if (!status) {
448 status =
449 nldr_obj->ldr_fxns.get_sect_fxn(nldr_obj->base_lib,
450 DYNMEMSECT, &ul_addr,
451 &ul_len);
452 if (!status) {
453 psz_coff_buf =
454 kzalloc(ul_len * nldr_obj->dsp_mau_size,
455 GFP_KERNEL);
456 if (!psz_coff_buf)
457 status = -ENOMEM;
458 } else {
459 /* Ok to not have dynamic loading memory */
460 status = 0;
461 ul_len = 0;
462 dev_dbg(bridge, "%s: failed - no dynamic loading mem "
463 "segments: 0x%x\n", __func__, status);
464 }
465 }
466 if (!status && ul_len > 0) {
467 /* Read section containing dynamic load mem segments */
468 status =
469 nldr_obj->ldr_fxns.read_sect_fxn(nldr_obj->base_lib,
470 DYNMEMSECT, psz_coff_buf,
471 ul_len);
472 }
473 if (!status && ul_len > 0) {
474 /* Parse memory segment data */
475 dload_segs = (u16) (*((u32 *) psz_coff_buf));
476 if (dload_segs > MAXMEMSEGS)
477 status = -EBADF;
478 }
479 /* Parse dynamic load memory segments */
480 if (!status && dload_segs > 0) {
481 rmm_segs = kzalloc(sizeof(struct rmm_segment) * dload_segs,
482 GFP_KERNEL);
483 nldr_obj->seg_table =
484 kzalloc(sizeof(u32) * dload_segs, GFP_KERNEL);
485 if (rmm_segs == NULL || nldr_obj->seg_table == NULL) {
486 status = -ENOMEM;
487 } else {
488 nldr_obj->dload_segs = dload_segs;
489 mem_info_obj = (struct mem_seg_info *)(psz_coff_buf +
490 sizeof(u32));
491 for (i = 0; i < dload_segs; i++) {
492 rmm_segs[i].base = (mem_info_obj + i)->base;
493 rmm_segs[i].length = (mem_info_obj + i)->len;
494 rmm_segs[i].space = 0;
495 nldr_obj->seg_table[i] =
496 (mem_info_obj + i)->type;
497 dev_dbg(bridge,
498 "(proc) DLL MEMSEGMENT: %d, "
499 "Base: 0x%x, Length: 0x%x\n", i,
500 rmm_segs[i].base, rmm_segs[i].length);
501 }
502 }
503 }
504 /* Create Remote memory manager */
505 if (!status)
506 status = rmm_create(&nldr_obj->rmm, rmm_segs, dload_segs);
507
508 if (!status) {
509 /* set the alloc, free, write functions for loader */
510 nldr_obj->ldr_fxns.get_attrs_fxn(nldr_obj->dbll, &save_attrs);
511 new_attrs = save_attrs;
512 new_attrs.alloc = (dbll_alloc_fxn) remote_alloc;
513 new_attrs.free = (dbll_free_fxn) remote_free;
514 new_attrs.sym_lookup = (dbll_sym_lookup) get_symbol_value;
515 new_attrs.sym_handle = nldr_obj;
516 new_attrs.write = (dbll_write_fxn) pattrs->write;
517 nldr_obj->ovly_fxn = pattrs->ovly;
518 nldr_obj->write_fxn = pattrs->write;
519 nldr_obj->ldr_attrs = new_attrs;
520 }
521 kfree(rmm_segs);
522
523 kfree(psz_coff_buf);
524
525 /* Get overlay nodes */
526 if (!status) {
527 status =
528 cod_get_base_name(cod_mgr, sz_zl_file, COD_MAXPATHLENGTH);
529 /* lazy check */
530 /* First count number of overlay nodes */
531 status =
532 dcd_get_objects(nldr_obj->dcd_mgr, sz_zl_file,
533 add_ovly_node, (void *)nldr_obj);
534 /* Now build table of overlay nodes */
535 if (!status && nldr_obj->ovly_nodes > 0) {
536 /* Allocate table for overlay nodes */
537 nldr_obj->ovly_table =
538 kzalloc(sizeof(struct ovly_node) *
539 nldr_obj->ovly_nodes, GFP_KERNEL);
540 /* Put overlay nodes in the table */
541 nldr_obj->ovly_nid = 0;
542 status = dcd_get_objects(nldr_obj->dcd_mgr, sz_zl_file,
543 add_ovly_node,
544 (void *)nldr_obj);
545 }
546 }
547 /* Do a fake reload of the base image to get overlay section info */
548 if (!status && nldr_obj->ovly_nodes > 0) {
549 save_attrs.write = fake_ovly_write;
550 save_attrs.log_write = add_ovly_info;
551 save_attrs.log_write_handle = nldr_obj;
552 flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB;
553 status = nldr_obj->ldr_fxns.load_fxn(nldr_obj->base_lib, flags,
554 &save_attrs, &ul_entry);
555 }
556 if (!status) {
557 *nldr = (struct nldr_object *)nldr_obj;
558 } else {
559 if (nldr_obj)
560 nldr_delete((struct nldr_object *)nldr_obj);
561
562 *nldr = NULL;
563 }
564 /* FIXME:Temp. Fix. Must be removed */
565 return status;
566 }
567
568 /*
569 * ======== nldr_delete ========
570 */
nldr_delete(struct nldr_object * nldr_obj)571 void nldr_delete(struct nldr_object *nldr_obj)
572 {
573 struct ovly_sect *ovly_section;
574 struct ovly_sect *next;
575 u16 i;
576
577 nldr_obj->ldr_fxns.exit_fxn();
578 if (nldr_obj->rmm)
579 rmm_delete(nldr_obj->rmm);
580
581 kfree(nldr_obj->seg_table);
582
583 if (nldr_obj->dcd_mgr)
584 dcd_destroy_manager(nldr_obj->dcd_mgr);
585
586 /* Free overlay node information */
587 if (nldr_obj->ovly_table) {
588 for (i = 0; i < nldr_obj->ovly_nodes; i++) {
589 ovly_section =
590 nldr_obj->ovly_table[i].create_sects_list;
591 while (ovly_section) {
592 next = ovly_section->next_sect;
593 kfree(ovly_section);
594 ovly_section = next;
595 }
596 ovly_section =
597 nldr_obj->ovly_table[i].delete_sects_list;
598 while (ovly_section) {
599 next = ovly_section->next_sect;
600 kfree(ovly_section);
601 ovly_section = next;
602 }
603 ovly_section =
604 nldr_obj->ovly_table[i].execute_sects_list;
605 while (ovly_section) {
606 next = ovly_section->next_sect;
607 kfree(ovly_section);
608 ovly_section = next;
609 }
610 ovly_section = nldr_obj->ovly_table[i].other_sects_list;
611 while (ovly_section) {
612 next = ovly_section->next_sect;
613 kfree(ovly_section);
614 ovly_section = next;
615 }
616 }
617 kfree(nldr_obj->ovly_table);
618 }
619 kfree(nldr_obj);
620 }
621
/*
 *  ======== nldr_get_fxn_addr ========
 *  Resolve the DSP address of symbol str_fxn for a node.  Search order:
 *  the node's phase library (or root library for overlay/non-split
 *  nodes), then that library's immediate dependent libraries, then the
 *  node's persistent libraries.  On success returns 0 and stores the
 *  symbol value in *addr; returns -ESPIPE if the symbol is not found.
 */
int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
		      char *str_fxn, u32 * addr)
{
	struct dbll_sym_val *dbll_sym;
	struct nldr_object *nldr_obj;
	int status = 0;
	/* status1 is the loader's "symbol found" flag: true == found */
	bool status1 = false;
	s32 i = 0;
	struct lib_node root = { NULL, 0, NULL };

	nldr_obj = nldr_node_obj->nldr_obj;
	/* Called from node_create(), node_delete(), or node_run(). */
	if (nldr_node_obj->dynamic && *nldr_node_obj->phase_split) {
		/* Phases live in separate libraries: pick the one for the
		 * phase currently loaded. */
		switch (nldr_node_obj->phase) {
		case NLDR_CREATE:
			root = nldr_node_obj->create_lib;
			break;
		case NLDR_EXECUTE:
			root = nldr_node_obj->execute_lib;
			break;
		case NLDR_DELETE:
			root = nldr_node_obj->delete_lib;
			break;
		default:
			break;
		}
	} else {
		/* for Overlay nodes or non-split Dynamic nodes */
		root = nldr_node_obj->root;
	}
	/* Try the C symbol first, then the raw symbol, in the root lib */
	status1 =
	    nldr_obj->ldr_fxns.get_c_addr_fxn(root.lib, str_fxn, &dbll_sym);
	if (!status1)
		status1 =
		    nldr_obj->ldr_fxns.get_addr_fxn(root.lib, str_fxn,
						    &dbll_sym);

	/* If symbol not found, check dependent libraries */
	if (!status1) {
		for (i = 0; i < root.dep_libs; i++) {
			/* NOTE(review): lookup order here (raw symbol
			 * before C symbol) is the reverse of the
			 * root-library search above -- presumed
			 * intentional, but worth confirming. */
			status1 =
			    nldr_obj->ldr_fxns.get_addr_fxn(root.dep_libs_tree
							    [i].lib, str_fxn,
							    &dbll_sym);
			if (!status1) {
				status1 =
				    nldr_obj->ldr_fxns.
				    get_c_addr_fxn(root.dep_libs_tree[i].lib,
						   str_fxn, &dbll_sym);
			}
			if (status1) {
				/* Symbol found */
				break;
			}
		}
	}
	/* Check persistent libraries */
	if (!status1) {
		for (i = 0; i < nldr_node_obj->pers_libs; i++) {
			status1 =
			    nldr_obj->ldr_fxns.
			    get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
					 str_fxn, &dbll_sym);
			if (!status1) {
				status1 =
				    nldr_obj->ldr_fxns.
				    get_c_addr_fxn(nldr_node_obj->pers_lib_table
						   [i].lib, str_fxn, &dbll_sym);
			}
			if (status1) {
				/* Symbol found */
				break;
			}
		}
	}

	if (status1)
		*addr = dbll_sym->value;
	else
		status = -ESPIPE;

	return status;
}
708
709 /*
710 * ======== nldr_get_rmm_manager ========
711 * Given a NLDR object, retrieve RMM Manager Handle
712 */
nldr_get_rmm_manager(struct nldr_object * nldr,struct rmm_target_obj ** rmm_mgr)713 int nldr_get_rmm_manager(struct nldr_object *nldr,
714 struct rmm_target_obj **rmm_mgr)
715 {
716 int status = 0;
717 struct nldr_object *nldr_obj = nldr;
718
719 if (nldr) {
720 *rmm_mgr = nldr_obj->rmm;
721 } else {
722 *rmm_mgr = NULL;
723 status = -EFAULT;
724 }
725
726 return status;
727 }
728
729 /*
730 * ======== nldr_load ========
731 */
nldr_load(struct nldr_nodeobject * nldr_node_obj,enum nldr_phase phase)732 int nldr_load(struct nldr_nodeobject *nldr_node_obj,
733 enum nldr_phase phase)
734 {
735 struct nldr_object *nldr_obj;
736 struct dsp_uuid lib_uuid;
737 int status = 0;
738
739 nldr_obj = nldr_node_obj->nldr_obj;
740
741 if (nldr_node_obj->dynamic) {
742 nldr_node_obj->phase = phase;
743
744 lib_uuid = nldr_node_obj->uuid;
745
746 /* At this point, we may not know if node is split into
747 * different libraries. So we'll go ahead and load the
748 * library, and then save the pointer to the appropriate
749 * location after we know. */
750
751 status =
752 load_lib(nldr_node_obj, &nldr_node_obj->root, lib_uuid,
753 false, nldr_node_obj->lib_path, phase, 0);
754
755 if (!status) {
756 if (*nldr_node_obj->phase_split) {
757 switch (phase) {
758 case NLDR_CREATE:
759 nldr_node_obj->create_lib =
760 nldr_node_obj->root;
761 break;
762
763 case NLDR_EXECUTE:
764 nldr_node_obj->execute_lib =
765 nldr_node_obj->root;
766 break;
767
768 case NLDR_DELETE:
769 nldr_node_obj->delete_lib =
770 nldr_node_obj->root;
771 break;
772
773 default:
774 break;
775 }
776 }
777 }
778 } else {
779 if (nldr_node_obj->overlay)
780 status = load_ovly(nldr_node_obj, phase);
781
782 }
783
784 return status;
785 }
786
787 /*
788 * ======== nldr_unload ========
789 */
nldr_unload(struct nldr_nodeobject * nldr_node_obj,enum nldr_phase phase)790 int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
791 enum nldr_phase phase)
792 {
793 int status = 0;
794 struct lib_node *root_lib = NULL;
795 s32 i = 0;
796
797 if (nldr_node_obj != NULL) {
798 if (nldr_node_obj->dynamic) {
799 if (*nldr_node_obj->phase_split) {
800 switch (phase) {
801 case NLDR_CREATE:
802 root_lib = &nldr_node_obj->create_lib;
803 break;
804 case NLDR_EXECUTE:
805 root_lib = &nldr_node_obj->execute_lib;
806 break;
807 case NLDR_DELETE:
808 root_lib = &nldr_node_obj->delete_lib;
809 /* Unload persistent libraries */
810 for (i = 0;
811 i < nldr_node_obj->pers_libs;
812 i++) {
813 unload_lib(nldr_node_obj,
814 &nldr_node_obj->
815 pers_lib_table[i]);
816 }
817 nldr_node_obj->pers_libs = 0;
818 break;
819 default:
820 break;
821 }
822 } else {
823 /* Unload main library */
824 root_lib = &nldr_node_obj->root;
825 }
826 if (root_lib)
827 unload_lib(nldr_node_obj, root_lib);
828 } else {
829 if (nldr_node_obj->overlay)
830 unload_ovly(nldr_node_obj, phase);
831
832 }
833 }
834 return status;
835 }
836
837 /*
838 * ======== add_ovly_info ========
839 */
add_ovly_info(void * handle,struct dbll_sect_info * sect_info,u32 addr,u32 bytes)840 static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
841 u32 addr, u32 bytes)
842 {
843 char *node_name;
844 char *sect_name = (char *)sect_info->name;
845 bool sect_exists = false;
846 char seps = ':';
847 char *pch;
848 u16 i;
849 struct nldr_object *nldr_obj = (struct nldr_object *)handle;
850 int status = 0;
851
852 /* Is this an overlay section (load address != run address)? */
853 if (sect_info->sect_load_addr == sect_info->sect_run_addr)
854 goto func_end;
855
856 /* Find the node it belongs to */
857 for (i = 0; i < nldr_obj->ovly_nodes; i++) {
858 node_name = nldr_obj->ovly_table[i].node_name;
859 if (strncmp(node_name, sect_name + 1, strlen(node_name)) == 0) {
860 /* Found the node */
861 break;
862 }
863 }
864 if (!(i < nldr_obj->ovly_nodes))
865 goto func_end;
866
867 /* Determine which phase this section belongs to */
868 for (pch = sect_name + 1; *pch && *pch != seps; pch++)
869 ;
870
871 if (*pch) {
872 pch++; /* Skip over the ':' */
873 if (strncmp(pch, PCREATE, strlen(PCREATE)) == 0) {
874 status =
875 add_ovly_sect(nldr_obj,
876 &nldr_obj->
877 ovly_table[i].create_sects_list,
878 sect_info, §_exists, addr, bytes);
879 if (!status && !sect_exists)
880 nldr_obj->ovly_table[i].create_sects++;
881
882 } else if (strncmp(pch, PDELETE, strlen(PDELETE)) == 0) {
883 status =
884 add_ovly_sect(nldr_obj,
885 &nldr_obj->
886 ovly_table[i].delete_sects_list,
887 sect_info, §_exists, addr, bytes);
888 if (!status && !sect_exists)
889 nldr_obj->ovly_table[i].delete_sects++;
890
891 } else if (strncmp(pch, PEXECUTE, strlen(PEXECUTE)) == 0) {
892 status =
893 add_ovly_sect(nldr_obj,
894 &nldr_obj->
895 ovly_table[i].execute_sects_list,
896 sect_info, §_exists, addr, bytes);
897 if (!status && !sect_exists)
898 nldr_obj->ovly_table[i].execute_sects++;
899
900 } else {
901 /* Put in "other" sectins */
902 status =
903 add_ovly_sect(nldr_obj,
904 &nldr_obj->
905 ovly_table[i].other_sects_list,
906 sect_info, §_exists, addr, bytes);
907 if (!status && !sect_exists)
908 nldr_obj->ovly_table[i].other_sects++;
909
910 }
911 }
912 func_end:
913 return status;
914 }
915
916 /*
917 * ======== add_ovly_node =========
918 * Callback function passed to dcd_get_objects.
919 */
add_ovly_node(struct dsp_uuid * uuid_obj,enum dsp_dcdobjtype obj_type,void * handle)920 static int add_ovly_node(struct dsp_uuid *uuid_obj,
921 enum dsp_dcdobjtype obj_type, void *handle)
922 {
923 struct nldr_object *nldr_obj = (struct nldr_object *)handle;
924 char *node_name = NULL;
925 char *pbuf = NULL;
926 u32 len;
927 struct dcd_genericobj obj_def;
928 int status = 0;
929
930 if (obj_type != DSP_DCDNODETYPE)
931 goto func_end;
932
933 status =
934 dcd_get_object_def(nldr_obj->dcd_mgr, uuid_obj, obj_type,
935 &obj_def);
936 if (status)
937 goto func_end;
938
939 /* If overlay node, add to the list */
940 if (obj_def.obj_data.node_obj.load_type == NLDR_OVLYLOAD) {
941 if (nldr_obj->ovly_table == NULL) {
942 nldr_obj->ovly_nodes++;
943 } else {
944 /* Add node to table */
945 nldr_obj->ovly_table[nldr_obj->ovly_nid].uuid =
946 *uuid_obj;
947 len =
948 strlen(obj_def.obj_data.node_obj.ndb_props.ac_name);
949 node_name = obj_def.obj_data.node_obj.ndb_props.ac_name;
950 pbuf = kzalloc(len + 1, GFP_KERNEL);
951 if (pbuf == NULL) {
952 status = -ENOMEM;
953 } else {
954 strncpy(pbuf, node_name, len);
955 nldr_obj->ovly_table[nldr_obj->ovly_nid].
956 node_name = pbuf;
957 nldr_obj->ovly_nid++;
958 }
959 }
960 }
961 /* These were allocated in dcd_get_object_def */
962 kfree(obj_def.obj_data.node_obj.str_create_phase_fxn);
963
964 kfree(obj_def.obj_data.node_obj.str_execute_phase_fxn);
965
966 kfree(obj_def.obj_data.node_obj.str_delete_phase_fxn);
967
968 kfree(obj_def.obj_data.node_obj.str_i_alg_name);
969
970 func_end:
971 return status;
972 }
973
974 /*
975 * ======== add_ovly_sect ========
976 */
add_ovly_sect(struct nldr_object * nldr_obj,struct ovly_sect ** lst,struct dbll_sect_info * sect_inf,bool * exists,u32 addr,u32 bytes)977 static int add_ovly_sect(struct nldr_object *nldr_obj,
978 struct ovly_sect **lst,
979 struct dbll_sect_info *sect_inf,
980 bool *exists, u32 addr, u32 bytes)
981 {
982 struct ovly_sect *new_sect = NULL;
983 struct ovly_sect *last_sect;
984 struct ovly_sect *ovly_section;
985 int status = 0;
986
987 ovly_section = last_sect = *lst;
988 *exists = false;
989 while (ovly_section) {
990 /*
991 * Make sure section has not already been added. Multiple
992 * 'write' calls may be made to load the section.
993 */
994 if (ovly_section->sect_load_addr == addr) {
995 /* Already added */
996 *exists = true;
997 break;
998 }
999 last_sect = ovly_section;
1000 ovly_section = ovly_section->next_sect;
1001 }
1002
1003 if (!ovly_section) {
1004 /* New section */
1005 new_sect = kzalloc(sizeof(struct ovly_sect), GFP_KERNEL);
1006 if (new_sect == NULL) {
1007 status = -ENOMEM;
1008 } else {
1009 new_sect->sect_load_addr = addr;
1010 new_sect->sect_run_addr = sect_inf->sect_run_addr +
1011 (addr - sect_inf->sect_load_addr);
1012 new_sect->size = bytes;
1013 new_sect->page = sect_inf->type;
1014 }
1015
1016 /* Add to the list */
1017 if (!status) {
1018 if (*lst == NULL) {
1019 /* First in the list */
1020 *lst = new_sect;
1021 } else {
1022 last_sect->next_sect = new_sect;
1023 }
1024 }
1025 }
1026
1027 return status;
1028 }
1029
/*
 *  ======== fake_ovly_write ========
 *  Stub write function installed during the fake reload of the base
 *  image in nldr_create():  claims the whole buffer was written so the
 *  load succeeds while the log_write callback (add_ovly_info) records
 *  overlay section info.  Nothing is actually written to the DSP.
 */
static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
			   s32 mtype)
{
	return (s32) bytes;
}
1038
1039 /*
1040 * ======== free_sects ========
1041 */
/*
 * Release the RMM reservations of the first alloc_num sections on the
 * phase_sects list.  The rmm_free result is deliberately ignored: this
 * runs on cleanup paths where nothing further can be done on failure.
 * (Fix: dropped the local 'ret' that was assigned but never read.)
 */
static void free_sects(struct nldr_object *nldr_obj,
		       struct ovly_sect *phase_sects, u16 alloc_num)
{
	struct ovly_sect *ovly_section = phase_sects;
	u16 i = 0;

	while (ovly_section && i < alloc_num) {
		/* 'Deallocate' reserved memory; segid/page not supported yet */
		rmm_free(nldr_obj->rmm, 0, ovly_section->sect_run_addr,
			 ovly_section->size, true);
		ovly_section = ovly_section->next_sect;
		i++;
	}
}
1060
1061 /*
1062 * ======== get_symbol_value ========
1063 * Find symbol in library's base image. If not there, check dependent
1064 * libraries.
1065 */
get_symbol_value(void * handle,void * parg,void * rmm_handle,char * sym_name,struct dbll_sym_val ** sym)1066 static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
1067 char *sym_name, struct dbll_sym_val **sym)
1068 {
1069 struct nldr_object *nldr_obj = (struct nldr_object *)handle;
1070 struct nldr_nodeobject *nldr_node_obj =
1071 (struct nldr_nodeobject *)rmm_handle;
1072 struct lib_node *root = (struct lib_node *)parg;
1073 u16 i;
1074 bool status = false;
1075
1076 /* check the base image */
1077 status = nldr_obj->ldr_fxns.get_addr_fxn(nldr_obj->base_lib,
1078 sym_name, sym);
1079 if (!status)
1080 status =
1081 nldr_obj->ldr_fxns.get_c_addr_fxn(nldr_obj->base_lib,
1082 sym_name, sym);
1083
1084 /*
1085 * Check in root lib itself. If the library consists of
1086 * multiple object files linked together, some symbols in the
1087 * library may need to be resolved.
1088 */
1089 if (!status) {
1090 status = nldr_obj->ldr_fxns.get_addr_fxn(root->lib, sym_name,
1091 sym);
1092 if (!status) {
1093 status =
1094 nldr_obj->ldr_fxns.get_c_addr_fxn(root->lib,
1095 sym_name, sym);
1096 }
1097 }
1098
1099 /*
1100 * Check in root lib's dependent libraries, but not dependent
1101 * libraries' dependents.
1102 */
1103 if (!status) {
1104 for (i = 0; i < root->dep_libs; i++) {
1105 status =
1106 nldr_obj->ldr_fxns.get_addr_fxn(root->
1107 dep_libs_tree
1108 [i].lib,
1109 sym_name, sym);
1110 if (!status) {
1111 status =
1112 nldr_obj->ldr_fxns.
1113 get_c_addr_fxn(root->dep_libs_tree[i].lib,
1114 sym_name, sym);
1115 }
1116 if (status) {
1117 /* Symbol found */
1118 break;
1119 }
1120 }
1121 }
1122 /*
1123 * Check in persistent libraries
1124 */
1125 if (!status) {
1126 for (i = 0; i < nldr_node_obj->pers_libs; i++) {
1127 status =
1128 nldr_obj->ldr_fxns.
1129 get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
1130 sym_name, sym);
1131 if (!status) {
1132 status = nldr_obj->ldr_fxns.get_c_addr_fxn
1133 (nldr_node_obj->pers_lib_table[i].lib,
1134 sym_name, sym);
1135 }
1136 if (status) {
1137 /* Symbol found */
1138 break;
1139 }
1140 }
1141 }
1142
1143 return status;
1144 }
1145
1146 /*
1147 * ======== load_lib ========
1148 * Recursively load library and all its dependent libraries. The library
1149 * we're loading is specified by a uuid.
1150 */
/*
 * Recursively load the library identified by uuid plus all of its
 * dependent libraries.
 *
 * root:        receives the opened library and its dependency tree.
 * root_prstnt: true when the library being loaded is persistent.
 * lib_path:    libraries on the current branch of the dependency tree,
 *              indexed by depth (so it must hold MAXDEPTH + 1 slots);
 *              used to detect circular dependencies.
 * depth:       current recursion level, 0 for the top-level call.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EILSEQ for a
 * circular dependency / excessive nesting / too many persistent libs,
 * or an error code from the DCD/DBLL layers.
 */
static int load_lib(struct nldr_nodeobject *nldr_node_obj,
		    struct lib_node *root, struct dsp_uuid uuid,
		    bool root_prstnt,
		    struct dbll_library_obj **lib_path,
		    enum nldr_phase phase, u16 depth)
{
	struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
	u16 nd_libs = 0;	/* Number of dependent libraries */
	u16 np_libs = 0;	/* Number of persistent libraries */
	u16 nd_libs_loaded = 0;	/* Number of dep. libraries loaded */
	u16 i;
	u32 entry;
	u32 dw_buf_size = NLDR_MAXPATHLENGTH;
	dbll_flags flags = DBLL_SYMB | DBLL_CODE | DBLL_DATA | DBLL_DYNAMIC;
	struct dbll_attrs new_attrs;
	char *psz_file_name = NULL;
	struct dsp_uuid *dep_lib_uui_ds = NULL;
	bool *persistent_dep_libs = NULL;
	int status = 0;
	bool lib_status = false;
	struct lib_node *dep_lib;

	root->lib = NULL;
	if (depth > MAXDEPTH) {
		/*
		 * Fix: the old code only had an empty "Error" branch here
		 * and fell through, so lib_path[depth] further down wrote
		 * past the end of the caller's lib_path array.  Fail fast.
		 */
		return -EILSEQ;
	}

	/* Allocate a buffer for library file name of size DBL_MAXPATHLENGTH */
	psz_file_name = kzalloc(DBLL_MAXPATHLENGTH, GFP_KERNEL);
	if (psz_file_name == NULL)
		status = -ENOMEM;

	if (!status) {
		/* Get the name of the library */
		if (depth == 0) {
			status =
			    dcd_get_library_name(nldr_node_obj->nldr_obj->
						 dcd_mgr, &uuid, psz_file_name,
						 &dw_buf_size, phase,
						 nldr_node_obj->phase_split);
		} else {
			/* Dependent libraries are registered with a phase */
			status =
			    dcd_get_library_name(nldr_node_obj->nldr_obj->
						 dcd_mgr, &uuid, psz_file_name,
						 &dw_buf_size, NLDR_NOPHASE,
						 NULL);
		}
	}
	if (!status) {
		/* Open the library, don't load symbols */
		status =
		    nldr_obj->ldr_fxns.open_fxn(nldr_obj->dbll, psz_file_name,
						DBLL_NOLOAD, &root->lib);
	}
	/* Done with file name */
	kfree(psz_file_name);

	/* A persistent library may already be loaded; if so, reuse it */
	if (!status && root_prstnt) {
		lib_status =
		    find_in_persistent_lib_array(nldr_node_obj, root->lib);
		/* Close library */
		if (lib_status) {
			nldr_obj->ldr_fxns.close_fxn(root->lib);
			return 0;
		}
	}
	if (!status) {
		/* Check for circular dependencies. */
		for (i = 0; i < depth; i++) {
			if (root->lib == lib_path[i]) {
				/* This condition could be checked by a
				 * tool at build time. */
				status = -EILSEQ;
			}
		}
	}
	if (!status) {
		/* Add library to current path in dependency tree */
		lib_path[depth] = root->lib;
		depth++;
		/* Get number of dependent libraries */
		status =
		    dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->dcd_mgr,
					 &uuid, &nd_libs, &np_libs, phase);
	}
	if (!status) {
		if (!(*nldr_node_obj->phase_split))
			np_libs = 0;

		/* nd_libs = #of dependent libraries */
		root->dep_libs = nd_libs - np_libs;
		if (nd_libs > 0) {
			dep_lib_uui_ds = kzalloc(sizeof(struct dsp_uuid) *
						 nd_libs, GFP_KERNEL);
			persistent_dep_libs =
			    kzalloc(sizeof(bool) * nd_libs, GFP_KERNEL);
			if (!dep_lib_uui_ds || !persistent_dep_libs)
				status = -ENOMEM;

			if (root->dep_libs > 0) {
				/* Allocate arrays for dependent lib UUIDs,
				 * lib nodes */
				root->dep_libs_tree = kzalloc
				    (sizeof(struct lib_node) *
				     (root->dep_libs), GFP_KERNEL);
				if (!(root->dep_libs_tree))
					status = -ENOMEM;

			}

			if (!status) {
				/* Get the dependent library UUIDs */
				status =
				    dcd_get_dep_libs(nldr_node_obj->
						     nldr_obj->dcd_mgr, &uuid,
						     nd_libs, dep_lib_uui_ds,
						     persistent_dep_libs,
						     phase);
			}
		}
	}

	/*
	 * Recursively load dependent libraries.
	 */
	if (!status) {
		for (i = 0; i < nd_libs; i++) {
			/* If root library is NOT persistent, and dep library
			 * is, then record it. If root library IS persistent,
			 * the deplib is already included */
			if (!root_prstnt && persistent_dep_libs[i] &&
			    *nldr_node_obj->phase_split) {
				if ((nldr_node_obj->pers_libs) >= MAXLIBS) {
					status = -EILSEQ;
					break;
				}

				/* Allocate library outside of phase */
				dep_lib =
				    &nldr_node_obj->pers_lib_table
				    [nldr_node_obj->pers_libs];
			} else {
				if (root_prstnt)
					persistent_dep_libs[i] = true;

				/* Allocate library within phase */
				dep_lib = &root->dep_libs_tree[nd_libs_loaded];
			}

			status = load_lib(nldr_node_obj, dep_lib,
					  dep_lib_uui_ds[i],
					  persistent_dep_libs[i], lib_path,
					  phase, depth);
			if (status)
				break;

			/*
			 * NOTE(review): the previous revision guarded the
			 * pers_libs increment with "(status != 0)" inside
			 * an "if (!status)", which could never hold, so
			 * nldr_node_obj->pers_libs was never bumped on this
			 * path.  That behavior is preserved here (dead
			 * branch removed); confirm whether successfully
			 * loaded persistent deps should increment pers_libs.
			 */
			if (!persistent_dep_libs[i] ||
			    !(*nldr_node_obj->phase_split))
				nd_libs_loaded++;
		}
	}

	/* Now we can load the root library */
	if (!status) {
		new_attrs = nldr_obj->ldr_attrs;
		new_attrs.sym_arg = root;
		new_attrs.rmm_handle = nldr_node_obj;
		new_attrs.input_params = nldr_node_obj->priv_ref;
		new_attrs.base_image = false;

		status =
		    nldr_obj->ldr_fxns.load_fxn(root->lib, flags, &new_attrs,
						&entry);
	}

	/*
	 * In case of failure, unload any dependent libraries that
	 * were loaded, and close the root library.
	 * (Persistent libraries are unloaded from the very top)
	 */
	if (status) {
		if (phase != NLDR_EXECUTE) {
			for (i = 0; i < nldr_node_obj->pers_libs; i++)
				unload_lib(nldr_node_obj,
					   &nldr_node_obj->pers_lib_table[i]);

			nldr_node_obj->pers_libs = 0;
		}
		for (i = 0; i < nd_libs_loaded; i++)
			unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);

		if (root->lib)
			nldr_obj->ldr_fxns.close_fxn(root->lib);

	}

	/* Going up one node in the dependency tree */
	depth--;

	kfree(dep_lib_uui_ds);
	dep_lib_uui_ds = NULL;

	kfree(persistent_dep_libs);
	persistent_dep_libs = NULL;

	return status;
}
1368
1369 /*
1370 * ======== load_ovly ========
1371 */
load_ovly(struct nldr_nodeobject * nldr_node_obj,enum nldr_phase phase)1372 static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
1373 enum nldr_phase phase)
1374 {
1375 struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1376 struct ovly_node *po_node = NULL;
1377 struct ovly_sect *phase_sects = NULL;
1378 struct ovly_sect *other_sects_list = NULL;
1379 u16 i;
1380 u16 alloc_num = 0;
1381 u16 other_alloc = 0;
1382 u16 *ref_count = NULL;
1383 u16 *other_ref = NULL;
1384 u32 bytes;
1385 struct ovly_sect *ovly_section;
1386 int status = 0;
1387
1388 /* Find the node in the table */
1389 for (i = 0; i < nldr_obj->ovly_nodes; i++) {
1390 if (is_equal_uuid
1391 (&nldr_node_obj->uuid, &nldr_obj->ovly_table[i].uuid)) {
1392 /* Found it */
1393 po_node = &(nldr_obj->ovly_table[i]);
1394 break;
1395 }
1396 }
1397
1398
1399 if (!po_node) {
1400 status = -ENOENT;
1401 goto func_end;
1402 }
1403
1404 switch (phase) {
1405 case NLDR_CREATE:
1406 ref_count = &(po_node->create_ref);
1407 other_ref = &(po_node->other_ref);
1408 phase_sects = po_node->create_sects_list;
1409 other_sects_list = po_node->other_sects_list;
1410 break;
1411
1412 case NLDR_EXECUTE:
1413 ref_count = &(po_node->execute_ref);
1414 phase_sects = po_node->execute_sects_list;
1415 break;
1416
1417 case NLDR_DELETE:
1418 ref_count = &(po_node->delete_ref);
1419 phase_sects = po_node->delete_sects_list;
1420 break;
1421
1422 default:
1423 break;
1424 }
1425
1426 if (ref_count == NULL)
1427 goto func_end;
1428
1429 if (*ref_count != 0)
1430 goto func_end;
1431
1432 /* 'Allocate' memory for overlay sections of this phase */
1433 ovly_section = phase_sects;
1434 while (ovly_section) {
1435 /* allocate *//* page not supported yet */
1436 /* reserve *//* align */
1437 status = rmm_alloc(nldr_obj->rmm, 0, ovly_section->size, 0,
1438 &(ovly_section->sect_run_addr), true);
1439 if (!status) {
1440 ovly_section = ovly_section->next_sect;
1441 alloc_num++;
1442 } else {
1443 break;
1444 }
1445 }
1446 if (other_ref && *other_ref == 0) {
1447 /* 'Allocate' memory for other overlay sections
1448 * (create phase) */
1449 if (!status) {
1450 ovly_section = other_sects_list;
1451 while (ovly_section) {
1452 /* page not supported *//* align */
1453 /* reserve */
1454 status =
1455 rmm_alloc(nldr_obj->rmm, 0,
1456 ovly_section->size, 0,
1457 &(ovly_section->sect_run_addr),
1458 true);
1459 if (!status) {
1460 ovly_section = ovly_section->next_sect;
1461 other_alloc++;
1462 } else {
1463 break;
1464 }
1465 }
1466 }
1467 }
1468 if (*ref_count == 0) {
1469 if (!status) {
1470 /* Load sections for this phase */
1471 ovly_section = phase_sects;
1472 while (ovly_section && !status) {
1473 bytes =
1474 (*nldr_obj->ovly_fxn) (nldr_node_obj->
1475 priv_ref,
1476 ovly_section->
1477 sect_run_addr,
1478 ovly_section->
1479 sect_load_addr,
1480 ovly_section->size,
1481 ovly_section->page);
1482 if (bytes != ovly_section->size)
1483 status = -EPERM;
1484
1485 ovly_section = ovly_section->next_sect;
1486 }
1487 }
1488 }
1489 if (other_ref && *other_ref == 0) {
1490 if (!status) {
1491 /* Load other sections (create phase) */
1492 ovly_section = other_sects_list;
1493 while (ovly_section && !status) {
1494 bytes =
1495 (*nldr_obj->ovly_fxn) (nldr_node_obj->
1496 priv_ref,
1497 ovly_section->
1498 sect_run_addr,
1499 ovly_section->
1500 sect_load_addr,
1501 ovly_section->size,
1502 ovly_section->page);
1503 if (bytes != ovly_section->size)
1504 status = -EPERM;
1505
1506 ovly_section = ovly_section->next_sect;
1507 }
1508 }
1509 }
1510 if (status) {
1511 /* 'Deallocate' memory */
1512 free_sects(nldr_obj, phase_sects, alloc_num);
1513 free_sects(nldr_obj, other_sects_list, other_alloc);
1514 }
1515 func_end:
1516 if (!status && (ref_count != NULL)) {
1517 *ref_count += 1;
1518 if (other_ref)
1519 *other_ref += 1;
1520
1521 }
1522
1523 return status;
1524 }
1525
1526 /*
1527 * ======== remote_alloc ========
1528 */
/*
 * DBLL allocation callback: carve out DSP memory for a dynamically
 * loaded code or data section.
 *
 * ref:         actually a struct nldr_nodeobject * for the loading node
 *              (see the cast below).
 * mem_sect:    section type; compared against DBLL_CODE below.
 * size:        section size in bytes; converted to DSP words here.
 * align:       requested alignment; widened to the DSP cache line.
 * dsp_address: out parameter, actually a struct rmm_addr *; receives
 *              the allocated address plus the segment it came from.
 * segmnt_id:   explicit target segment, or -1 to derive one from the
 *              node's packed per-phase masks.
 * req:         when segmnt_id is given, nonzero marks the segment as
 *              required (suppresses the any-segment fallback).
 * reserve:     unused here; rmm_alloc is always called with
 *              reserve = false on this path.
 *
 * Returns 0 on success, -ENOMEM if no acceptable segment had room.
 */
static int remote_alloc(void **ref, u16 mem_sect, u32 size,
			u32 align, u32 *dsp_address,
			s32 segmnt_id, s32 req,
			bool reserve)
{
	struct nldr_nodeobject *hnode = (struct nldr_nodeobject *)ref;
	struct nldr_object *nldr_obj;
	struct rmm_target_obj *rmm;
	u16 mem_phase_bit = MAXFLAGS;
	u16 segid = 0;
	u16 i;
	u16 mem_sect_type;
	u32 word_size;
	struct rmm_addr *rmm_addr_obj = (struct rmm_addr *)dsp_address;
	bool mem_load_req = false;
	int status = -ENOMEM;	/* Set to fail */
	nldr_obj = hnode->nldr_obj;
	rmm = nldr_obj->rmm;
	/* Convert size to DSP words */
	word_size =
	    (size + nldr_obj->dsp_word_size -
	     1) / nldr_obj->dsp_word_size;
	/* Modify memory 'align' to account for DSP cache line size */
	align = lcm(GEM_CACHE_LINE_SIZE, align);
	dev_dbg(bridge, "%s: memory align to 0x%x\n", __func__, align);
	if (segmnt_id != -1) {
		/* Caller named an explicit target segment */
		rmm_addr_obj->segid = segmnt_id;
		segid = segmnt_id;
		mem_load_req = req;
	} else {
		/*
		 * Derive the segment from the node's per-phase masks: the
		 * data flag bit is selected first, and the neighbouring
		 * bit is used for code sections (see the mask layout
		 * documented at the top of this file).
		 */
		switch (hnode->phase) {
		case NLDR_CREATE:
			mem_phase_bit = CREATEDATAFLAGBIT;
			break;
		case NLDR_DELETE:
			mem_phase_bit = DELETEDATAFLAGBIT;
			break;
		case NLDR_EXECUTE:
			mem_phase_bit = EXECUTEDATAFLAGBIT;
			break;
		default:
			break;
		}
		if (mem_sect == DBLL_CODE)
			mem_phase_bit++;

		if (mem_phase_bit < MAXFLAGS)
			segid = hnode->seg_id[mem_phase_bit];

		/* Determine if there is a memory loading requirement */
		if ((hnode->code_data_flag_mask >> mem_phase_bit) & 0x1)
			mem_load_req = true;

	}
	mem_sect_type = (mem_sect == DBLL_CODE) ? DYNM_CODE : DYNM_DATA;

	/* Find an appropriate segment based on mem_sect */
	if (segid == NULLID) {
		/* No memory requirements of preferences */
		goto func_cont;
	}
	if (segid <= MAXSEGID) {
		/* Attempt to allocate from segid first. */
		rmm_addr_obj->segid = segid;
		status =
		    rmm_alloc(rmm, segid, word_size, align, dsp_address, false);
		if (status) {
			dev_dbg(bridge, "%s: Unable allocate from segment %d\n",
				__func__, segid);
		}
	} else {
		/* segid > MAXSEGID ==> Internal or external memory */
		/* Check for any internal or external memory segment,
		 * depending on segid. */
		mem_sect_type |= segid == MEMINTERNALID ?
		    DYNM_INTERNAL : DYNM_EXTERNAL;
		for (i = 0; i < nldr_obj->dload_segs; i++) {
			if ((nldr_obj->seg_table[i] & mem_sect_type) !=
			    mem_sect_type)
				continue;

			status = rmm_alloc(rmm, i, word_size, align,
					   dsp_address, false);
			if (!status) {
				/* Save segid for freeing later */
				rmm_addr_obj->segid = i;
				break;
			}
		}
	}
func_cont:
	/* Haven't found memory yet, attempt to find any segment that works */
	if (status == -ENOMEM && !mem_load_req) {
		dev_dbg(bridge, "%s: Preferred segment unavailable, trying "
			"another\n", __func__);
		for (i = 0; i < nldr_obj->dload_segs; i++) {
			/* All bits of mem_sect_type must be set */
			if ((nldr_obj->seg_table[i] & mem_sect_type) !=
			    mem_sect_type)
				continue;

			status = rmm_alloc(rmm, i, word_size, align,
					   dsp_address, false);
			if (!status) {
				/* Save segid */
				rmm_addr_obj->segid = i;
				break;
			}
		}
	}

	return status;
}
1642
/*
 * DBLL free callback: hand a dynamically loaded section's DSP memory
 * back to the remote memory manager.  Returns 0 on success, -ENOMEM
 * when rmm_free rejects the request.
 */
static int remote_free(void **ref, u16 space, u32 dsp_address,
		       u32 size, bool reserve)
{
	struct nldr_object *nldr_obj = (struct nldr_object *)ref;
	u32 word_size;

	/* Round the byte count up to whole DSP words */
	word_size = (size + nldr_obj->dsp_word_size - 1) /
	    nldr_obj->dsp_word_size;

	if (!rmm_free(nldr_obj->rmm, space, dsp_address, word_size, reserve))
		return -ENOMEM;

	return 0;
}
1663
1664 /*
1665 * ======== unload_lib ========
1666 */
unload_lib(struct nldr_nodeobject * nldr_node_obj,struct lib_node * root)1667 static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
1668 struct lib_node *root)
1669 {
1670 struct dbll_attrs new_attrs;
1671 struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1672 u16 i;
1673
1674
1675 /* Unload dependent libraries */
1676 for (i = 0; i < root->dep_libs; i++)
1677 unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);
1678
1679 root->dep_libs = 0;
1680
1681 new_attrs = nldr_obj->ldr_attrs;
1682 new_attrs.rmm_handle = nldr_obj->rmm;
1683 new_attrs.input_params = nldr_node_obj->priv_ref;
1684 new_attrs.base_image = false;
1685 new_attrs.sym_arg = root;
1686
1687 if (root->lib) {
1688 /* Unload the root library */
1689 nldr_obj->ldr_fxns.unload_fxn(root->lib, &new_attrs);
1690 nldr_obj->ldr_fxns.close_fxn(root->lib);
1691 }
1692
1693 /* Free dependent library list */
1694 kfree(root->dep_libs_tree);
1695 root->dep_libs_tree = NULL;
1696 }
1697
1698 /*
1699 * ======== unload_ovly ========
1700 */
/*
 * Drop one reference on the overlay sections of the given phase and,
 * when the last reference goes away, release their DSP memory.  The
 * shared 'other' sections are released from the delete phase.
 */
static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
			enum nldr_phase phase)
{
	struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
	struct ovly_node *po_node = NULL;
	struct ovly_sect *phase_sects = NULL;
	struct ovly_sect *other_sects_list = NULL;
	u16 i;
	u16 alloc_num = 0;	/* number of phase sections to free */
	u16 other_alloc = 0;	/* number of 'other' sections to free */
	u16 *ref_count = NULL;	/* this phase's reference count */
	u16 *other_ref = NULL;	/* 'other' sections reference count */

	/* Find the node in the overlay table by UUID */
	for (i = 0; i < nldr_obj->ovly_nodes; i++) {
		if (is_equal_uuid
		    (&nldr_node_obj->uuid, &nldr_obj->ovly_table[i].uuid)) {
			/* Found it */
			po_node = &(nldr_obj->ovly_table[i]);
			break;
		}
	}


	if (!po_node)
		/* TODO: Should we print warning here? */
		return;

	switch (phase) {
	case NLDR_CREATE:
		ref_count = &(po_node->create_ref);
		phase_sects = po_node->create_sects_list;
		alloc_num = po_node->create_sects;
		break;
	case NLDR_EXECUTE:
		ref_count = &(po_node->execute_ref);
		phase_sects = po_node->execute_sects_list;
		alloc_num = po_node->execute_sects;
		break;
	case NLDR_DELETE:
		ref_count = &(po_node->delete_ref);
		other_ref = &(po_node->other_ref);
		phase_sects = po_node->delete_sects_list;
		/* 'Other' overlay sections are unloaded in the delete phase */
		other_sects_list = po_node->other_sects_list;
		alloc_num = po_node->delete_sects;
		other_alloc = po_node->other_sects;
		break;
	default:
		break;
	}
	/*
	 * Drop a reference.  NOTE(review): *other_ref (delete phase only)
	 * is decremented without its own zero check - this presumably
	 * relies on create/delete references staying balanced; confirm.
	 */
	if (ref_count && (*ref_count > 0)) {
		*ref_count -= 1;
		if (other_ref) {
			*other_ref -= 1;
		}
	}

	if (ref_count && *ref_count == 0) {
		/* Last reference gone - 'deallocate' this phase's memory */
		free_sects(nldr_obj, phase_sects, alloc_num);
	}
	if (other_ref && *other_ref == 0)
		free_sects(nldr_obj, other_sects_list, other_alloc);
}
1766
1767 /*
1768 * ======== find_in_persistent_lib_array ========
1769 */
find_in_persistent_lib_array(struct nldr_nodeobject * nldr_node_obj,struct dbll_library_obj * lib)1770 static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
1771 struct dbll_library_obj *lib)
1772 {
1773 s32 i = 0;
1774
1775 for (i = 0; i < nldr_node_obj->pers_libs; i++) {
1776 if (lib == nldr_node_obj->pers_lib_table[i].lib)
1777 return true;
1778
1779 }
1780
1781 return false;
1782 }
1783
1784 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
1785 /**
1786 * nldr_find_addr() - Find the closest symbol to the given address based on
1787 * dynamic node object.
1788 *
1789 * @nldr_node: Dynamic node object
1790 * @sym_addr: Given address to find the dsp symbol
1791 * @offset_range: offset range to look for dsp symbol
1792 * @offset_output: Symbol Output address
1793 * @sym_name: String with the dsp symbol
1794 *
1795 * This function finds the node library for a given address and
1796 * retrieves the dsp symbol by calling dbll_find_dsp_symbol.
1797 */
int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
		   u32 offset_range, void *offset_output, char *sym_name)
{
	int status = 0;
	bool status1 = false;
	s32 i = 0;
	struct lib_node root = { NULL, 0, NULL };

	/*
	 * Fix: print pointers with %p - the previous "(u32) pointer"
	 * casts truncated pointer values on 64-bit builds.
	 */
	pr_debug("%s(%p, 0x%x, 0x%x, %p, %s)\n", __func__, nldr_node,
		 sym_addr, offset_range, offset_output, sym_name);

	/* Pick the library covering the node's current phase */
	if (nldr_node->dynamic && *nldr_node->phase_split) {
		switch (nldr_node->phase) {
		case NLDR_CREATE:
			root = nldr_node->create_lib;
			break;
		case NLDR_EXECUTE:
			root = nldr_node->execute_lib;
			break;
		case NLDR_DELETE:
			root = nldr_node->delete_lib;
			break;
		default:
			break;
		}
	} else {
		/* for Overlay nodes or non-split Dynamic nodes */
		root = nldr_node->root;
	}

	status1 = dbll_find_dsp_symbol(root.lib, sym_addr,
				       offset_range, offset_output, sym_name);

	/* If symbol not found, check dependent libraries */
	if (!status1)
		for (i = 0; i < root.dep_libs; i++) {
			status1 = dbll_find_dsp_symbol(
				root.dep_libs_tree[i].lib, sym_addr,
				offset_range, offset_output, sym_name);
			if (status1)
				/* Symbol found */
				break;
		}
	/* Check persistent libraries */
	if (!status1)
		for (i = 0; i < nldr_node->pers_libs; i++) {
			status1 = dbll_find_dsp_symbol(
				nldr_node->pers_lib_table[i].lib, sym_addr,
				offset_range, offset_output, sym_name);
			if (status1)
				/* Symbol found */
				break;
		}

	if (!status1) {
		pr_debug("%s: Address 0x%x not found in range %d.\n",
			 __func__, sym_addr, offset_range);
		status = -ESPIPE;
	}

	return status;
}
1859 #endif
1860