1 /*
2  * node.c
3  *
4  * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5  *
6  * DSP/BIOS Bridge Node Manager.
7  *
8  * Copyright (C) 2005-2006 Texas Instruments, Inc.
9  *
10  * This package is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  *
14  * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17  */
18 
19 #include <linux/types.h>
20 #include <linux/bitmap.h>
21 #include <linux/list.h>
22 
23 /*  ----------------------------------- Host OS */
24 #include <dspbridge/host_os.h>
25 
26 /*  ----------------------------------- DSP/BIOS Bridge */
27 #include <dspbridge/dbdefs.h>
28 
29 /*  ----------------------------------- OS Adaptation Layer */
30 #include <dspbridge/memdefs.h>
31 #include <dspbridge/proc.h>
32 #include <dspbridge/strm.h>
33 #include <dspbridge/sync.h>
34 #include <dspbridge/ntfy.h>
35 
36 /*  ----------------------------------- Platform Manager */
37 #include <dspbridge/cmm.h>
38 #include <dspbridge/cod.h>
39 #include <dspbridge/dev.h>
40 #include <dspbridge/msg.h>
41 
42 /*  ----------------------------------- Resource Manager */
43 #include <dspbridge/dbdcd.h>
44 #include <dspbridge/disp.h>
45 #include <dspbridge/rms_sh.h>
46 
47 /*  ----------------------------------- Link Driver */
48 #include <dspbridge/dspdefs.h>
49 #include <dspbridge/dspioctl.h>
50 
51 /*  ----------------------------------- Others */
52 #include <dspbridge/uuidutil.h>
53 
54 /*  ----------------------------------- This */
55 #include <dspbridge/nodepriv.h>
56 #include <dspbridge/node.h>
57 #include <dspbridge/dmm.h>
58 
59 /* Static/Dynamic Loader includes */
60 #include <dspbridge/dbll.h>
61 #include <dspbridge/nldr.h>
62 
63 #include <dspbridge/drv.h>
64 #include <dspbridge/resourcecleanup.h>
65 #include <_tiomap.h>
66 
67 #include <dspbridge/dspdeh.h>
68 
69 #define HOSTPREFIX	  "/host"
70 #define PIPEPREFIX	  "/dbpipe"
71 
72 #define MAX_INPUTS(h)  \
73 		((h)->dcd_props.obj_data.node_obj.ndb_props.num_input_streams)
74 #define MAX_OUTPUTS(h) \
75 		((h)->dcd_props.obj_data.node_obj.ndb_props.num_output_streams)
76 
77 #define NODE_GET_PRIORITY(h) ((h)->prio)
78 #define NODE_SET_PRIORITY(hnode, prio) ((hnode)->prio = prio)
79 #define NODE_SET_STATE(hnode, state) ((hnode)->node_state = state)
80 
81 #define MAXPIPES	100	/* Max # of /pipe connections (CSL limit) */
82 #define MAXDEVSUFFIXLEN 2	/* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */
83 
84 #define PIPENAMELEN     (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN)
85 #define HOSTNAMELEN     (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN)
86 
87 #define MAXDEVNAMELEN	32	/* dsp_ndbprops.ac_name size */
88 #define CREATEPHASE	1
89 #define EXECUTEPHASE	2
90 #define DELETEPHASE	3
91 
92 /* Define default STRM parameters */
93 /*
94  *  TBD: Put in header file, make global DSP_STRMATTRS with defaults,
95  *  or make defaults configurable.
96  */
97 #define DEFAULTBUFSIZE		32
98 #define DEFAULTNBUFS		2
99 #define DEFAULTSEGID		0
100 #define DEFAULTALIGNMENT	0
101 #define DEFAULTTIMEOUT		10000
102 
103 #define RMSQUERYSERVER		0
104 #define RMSCONFIGURESERVER	1
105 #define RMSCREATENODE		2
106 #define RMSEXECUTENODE		3
107 #define RMSDELETENODE		4
108 #define RMSCHANGENODEPRIORITY	5
109 #define RMSREADMEMORY		6
110 #define RMSWRITEMEMORY		7
111 #define RMSCOPY			8
112 #define MAXTIMEOUT		2000
113 
114 #define NUMRMSFXNS		9
115 
116 #define PWR_TIMEOUT		500	/* default PWR timeout in msec */
117 
118 #define STACKSEGLABEL "L1DSRAM_HEAP"	/* Label for DSP Stack Segment Addr */
119 
120 /*
121  *  ======== node_mgr ========
122  */
struct node_mgr {
	struct dev_object *dev_obj;	/* Device object */
	/* Function interface to Bridge driver */
	struct bridge_drv_interface *intf_fxns;
	struct dcd_manager *dcd_mgr;	/* Proc/Node data manager */
	struct disp_object *disp_obj;	/* Node dispatcher */
	struct list_head node_list;	/* List of all allocated nodes */
	u32 num_nodes;		/* Number of nodes in node_list */
	u32 num_created;	/* Number of nodes *created* on DSP */
	DECLARE_BITMAP(pipe_map, MAXPIPES); /* Pipe connection bitmap */
	DECLARE_BITMAP(pipe_done_map, MAXPIPES); /* Pipes that are half free */
	/* Channel allocation bitmap */
	DECLARE_BITMAP(chnl_map, CHNL_MAXCHANNELS);
	/* DMA Channel allocation bitmap */
	DECLARE_BITMAP(dma_chnl_map, CHNL_MAXCHANNELS);
	/* Zero-Copy Channel alloc bitmap */
	DECLARE_BITMAP(zc_chnl_map, CHNL_MAXCHANNELS);
	struct ntfy_object *ntfy_obj;	/* Manages registered notifications */
	/* Serializes node_list updates, pipe/channel bitmap allocation and
	 * DCD property lookups (see node_allocate/node_connect) */
	struct mutex node_mgr_lock;	/* For critical sections */
	u32 fxn_addrs[NUMRMSFXNS];	/* RMS function addresses */
	struct msg_mgr *msg_mgr_obj;	/* Per-device message manager */

	/* Processor properties needed by Node Dispatcher */
	u32 num_chnls;		/* Total number of channels */
	u32 chnl_offset;	/* Offset of chnl ids rsvd for RMS */
	u32 chnl_buf_size;	/* Buffer size for data to RMS */
	int proc_family;	/* eg, 5000 */
	int proc_type;		/* eg, 5510 */
	u32 dsp_word_size;	/* Size of DSP word in host bytes */
	u32 dsp_data_mau_size;	/* Size of DSP data MAU */
	u32 dsp_mau_size;	/* Size of MAU */
	s32 min_pri;		/* Minimum runtime priority for node */
	s32 max_pri;		/* Maximum runtime priority for node */

	struct strm_mgr *strm_mgr_obj;	/* STRM manager */

	/* Loader properties */
	struct nldr_object *nldr_obj;	/* Handle to loader */
	struct node_ldr_fxns nldr_fxns;	/* Handle to loader functions */
};
163 
164 /*
165  *  ======== connecttype ========
166  */
enum connecttype {
	NOTCONNECTED = 0,	/* Stream endpoint not yet connected */
	NODECONNECT,		/* Connected to another node via a pipe */
	HOSTCONNECT,		/* Connected to the GPP (host) side */
	DEVICECONNECT,		/* Connected to a device node */
};
173 
174 /*
175  *  ======== stream_chnl ========
176  */
struct stream_chnl {
	enum connecttype type;	/* Type of stream connection */
	/* pipe id for NODECONNECT (see node_connect); channel id otherwise */
	u32 dev_id;		/* pipe or channel id */
};
181 
182 /*
183  *  ======== node_object ========
184  */
struct node_object {
	struct list_head list_elem;	/* Entry in node_mgr's node_list */
	struct node_mgr *node_mgr;	/* The manager of this node */
	struct proc_object *processor;	/* Back pointer to processor */
	struct dsp_uuid node_uuid;	/* Node's ID */
	s32 prio;		/* Node's current priority */
	u32 timeout;		/* Timeout for blocking NODE calls */
	u32 heap_size;		/* Heap Size */
	u32 dsp_heap_virt_addr;	/* Heap's DSP-side virtual address */
	u32 gpp_heap_virt_addr;	/* Heap's GPP-side virtual address */
	enum node_type ntype;	/* Type of node: message, task, etc */
	enum node_state node_state;	/* NODE_ALLOCATED, NODE_CREATED, ... */
	u32 num_inputs;		/* Current number of inputs */
	u32 num_outputs;	/* Current number of outputs */
	u32 max_input_index;	/* Current max input stream index */
	u32 max_output_index;	/* Current max output stream index */
	struct stream_chnl *inputs;	/* Node's input streams */
	struct stream_chnl *outputs;	/* Node's output streams */
	struct node_createargs create_args;	/* Args for node create func */
	nodeenv node_env;	/* Environment returned by RMS */
	struct dcd_genericobj dcd_props;	/* Node properties from DCD */
	struct dsp_cbdata *args;	/* Optional args to pass to node */
	struct ntfy_object *ntfy_obj;	/* Manages registered notifications */
	char *str_dev_name;	/* device name, if device node */
	struct sync_object *sync_done;	/* Synchronize node_terminate */
	s32 exit_status;	/* execute function return status */

	/* Information needed for node_get_attr() */
	void *device_owner;	/* If dev node, task that owns it */
	u32 num_gpp_inputs;	/* Current # of from GPP streams */
	u32 num_gpp_outputs;	/* Current # of to GPP streams */
	/* Current stream connections */
	struct dsp_streamconnect *stream_connect;

	/* Message queue */
	struct msg_queue *msg_queue_obj;

	/* These fields used for SM messaging */
	struct cmm_xlatorobject *xlator;	/* Node's SM addr translator */

	/* Handle to pass to dynamic loader */
	struct nldr_nodeobject *nldr_node_obj;
	bool loaded;		/* Code is (dynamically) loaded */
	bool phase_split;	/* Phases split in many libs or ovly */

};
231 
232 /* Default buffer attributes */
static struct dsp_bufferattr node_dfltbufattrs = {
	.cb_struct = 0,
	.segment_id = 1,	/* the only SM segment node_alloc_msg_buf accepts */
	.buf_alignment = 0,	/* no extra alignment constraint */
};
238 
/* Forward declarations of helpers private to this file (defined below) */
static void delete_node(struct node_object *hnode,
			struct process_context *pr_ctxt);
static void delete_node_mgr(struct node_mgr *hnode_mgr);
static void fill_stream_connect(struct node_object *node1,
				struct node_object *node2, u32 stream1,
				u32 stream2);
static void fill_stream_def(struct node_object *hnode,
			    struct node_strmdef *pstrm_def,
			    struct dsp_strmattr *pattrs);
static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream);
static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
				  u32 phase);
static int get_node_props(struct dcd_manager *hdcd_mgr,
				 struct node_object *hnode,
				 const struct dsp_uuid *node_uuid,
				 struct dcd_genericobj *dcd_prop);
static int get_proc_props(struct node_mgr *hnode_mgr,
				 struct dev_object *hdev_obj);
static int get_rms_fxns(struct node_mgr *hnode_mgr);
static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
		u32 ul_num_bytes, u32 mem_space);
static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
		     u32 ul_num_bytes, u32 mem_space);
262 
263 /* Dynamic loader functions. */
/* Positional initializer: entries must remain in the member order of
 * struct node_ldr_fxns (allocate and get_fxn_addr are used below). */
static struct node_ldr_fxns nldr_fxns = {
	nldr_allocate,
	nldr_create,
	nldr_delete,
	nldr_get_fxn_addr,
	nldr_load,
	nldr_unload,
};
272 
node_get_state(void * hnode)273 enum node_state node_get_state(void *hnode)
274 {
275 	struct node_object *pnode = (struct node_object *)hnode;
276 	if (!pnode)
277 		return -1;
278 	return pnode->node_state;
279 }
280 
281 /*
282  *  ======== node_allocate ========
283  *  Purpose:
284  *      Allocate GPP resources to manage a node on the DSP.
285  */
int node_allocate(struct proc_object *hprocessor,
			const struct dsp_uuid *node_uuid,
			const struct dsp_cbdata *pargs,
			const struct dsp_nodeattrin *attr_in,
			struct node_res_object **noderes,
			struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct dev_object *hdev_obj;
	struct node_object *pnode = NULL;
	enum node_type node_type = NODE_TASK;
	struct node_msgargs *pmsg_args;
	struct node_taskargs *ptask_args;
	u32 num_streams;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	struct cmm_object *hcmm_mgr = NULL;	/* Shared memory manager hndl */
	u32 proc_id;
	u32 pul_value;
	u32 dynext_base;
	u32 off_set = 0;
	u32 ul_stack_seg_addr, ul_stack_seg_val;
	u32 ul_gpp_mem_base;
	struct cfg_hostres *host_res;
	struct bridge_dev_context *pbridge_context;
	u32 mapped_addr = 0;
	u32 map_attrs = 0x0;
	struct dsp_processorstate proc_state;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
#endif

	void *node_res;

	*noderes = NULL;

	status = proc_get_processor_id(hprocessor, &proc_id);

	/* NOTE(review): if proc_get_processor_id() failed, proc_id may be
	 * uninitialized here and the early return below may also return 0
	 * with *noderes left NULL — confirm callers expect that. */
	if (proc_id != DSP_UNIT)
		goto func_end;

	status = proc_get_dev_object(hprocessor, &hdev_obj);
	if (!status) {
		status = dev_get_node_manager(hdev_obj, &hnode_mgr);
		if (hnode_mgr == NULL)
			status = -EPERM;

	}

	if (status)
		goto func_end;

	status = dev_get_bridge_context(hdev_obj, &pbridge_context);
	if (!pbridge_context) {
		status = -EFAULT;
		goto func_end;
	}

	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt
	   to send the message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}

	/* Assuming that 0 is not a valid function address */
	if (hnode_mgr->fxn_addrs[0] == 0) {
		/* No RMS on target - we currently can't handle this */
		pr_err("%s: Failed, no RMS in base image\n", __func__);
		status = -EPERM;
	} else {
		/* Validate attr_in fields, if non-NULL */
		if (attr_in) {
			/* Check if attr_in->prio is within range */
			if (attr_in->prio < hnode_mgr->min_pri ||
			    attr_in->prio > hnode_mgr->max_pri)
				status = -EDOM;
		}
	}
	/* Allocate node object and fill in */
	if (status)
		goto func_end;

	pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
	if (pnode == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	pnode->node_mgr = hnode_mgr;
	/* This critical section protects get_node_props */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Get dsp_ndbprops from node database */
	status = get_node_props(hnode_mgr->dcd_mgr, pnode, node_uuid,
				&(pnode->dcd_props));
	if (status)
		goto func_cont;

	pnode->node_uuid = *node_uuid;
	pnode->processor = hprocessor;
	pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
	pnode->timeout = pnode->dcd_props.obj_data.node_obj.ndb_props.timeout;
	pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;

	/* Currently only C64 DSP builds support Node Dynamic heaps */
	/* Allocate memory for node heap */
	pnode->create_args.asa.task_arg_obj.heap_size = 0;
	pnode->create_args.asa.task_arg_obj.dsp_heap_addr = 0;
	pnode->create_args.asa.task_arg_obj.dsp_heap_res_addr = 0;
	pnode->create_args.asa.task_arg_obj.gpp_heap_addr = 0;
	if (!attr_in)
		goto func_cont;

	/* Check if we have a user allocated node heap */
	if (!(attr_in->pgpp_virt_addr))
		goto func_cont;

	/* check for page aligned Heap size */
	if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
		pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
		       __func__, attr_in->heap_size);
		status = -EINVAL;
	} else {
		pnode->create_args.asa.task_arg_obj.heap_size =
		    attr_in->heap_size;
		pnode->create_args.asa.task_arg_obj.gpp_heap_addr =
		    (u32) attr_in->pgpp_virt_addr;
	}
	if (status)
		goto func_cont;

	/* Reserve DSP virtual space for the heap plus one guard page */
	status = proc_reserve_memory(hprocessor,
				     pnode->create_args.asa.task_arg_obj.
				     heap_size + PAGE_SIZE,
				     (void **)&(pnode->create_args.asa.
					task_arg_obj.dsp_heap_res_addr),
				     pr_ctxt);
	if (status) {
		pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
		       __func__, status);
		goto func_cont;
	}
#ifdef DSP_DMM_DEBUG
	status = dmm_get_handle(p_proc_object, &dmm_mgr);
	if (!dmm_mgr) {
		status = DSP_EHANDLE;
		goto func_cont;
	}

	dmm_mem_map_dump(dmm_mgr);
#endif

	/* Map the user-supplied GPP heap into the reserved DSP space */
	map_attrs |= DSP_MAPLITTLEENDIAN;
	map_attrs |= DSP_MAPELEMSIZE32;
	map_attrs |= DSP_MAPVIRTUALADDR;
	status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
			  pnode->create_args.asa.task_arg_obj.heap_size,
			  (void *)pnode->create_args.asa.task_arg_obj.
			  dsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
			  pr_ctxt);
	if (status)
		pr_err("%s: Failed to map memory for Heap: 0x%x\n",
		       __func__, status);
	else
		pnode->create_args.asa.task_arg_obj.dsp_heap_addr =
		    (u32) mapped_addr;

func_cont:
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (attr_in != NULL) {
		/* Overrides of NBD properties */
		pnode->timeout = attr_in->timeout;
		pnode->prio = attr_in->prio;
	}
	/* Create object to manage notifications */
	if (!status) {
		pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
		if (pnode->ntfy_obj)
			ntfy_init(pnode->ntfy_obj);
		else
			status = -ENOMEM;
	}

	if (!status) {
		node_type = node_get_type(pnode);
		/*  Allocate dsp_streamconnect array for device, task, and
		 *  dais socket nodes. */
		if (node_type != NODE_MESSAGE) {
			num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
			pnode->stream_connect = kzalloc(num_streams *
					sizeof(struct dsp_streamconnect),
					GFP_KERNEL);
			if (num_streams > 0 && pnode->stream_connect == NULL)
				status = -ENOMEM;

		}
		if (!status && (node_type == NODE_TASK ||
					      node_type == NODE_DAISSOCKET)) {
			/* Allocate arrays for maintainig stream connections */
			pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			ptask_args = &(pnode->create_args.asa.task_arg_obj);
			ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			/* kzalloc(0) does not return NULL, so only treat a
			 * NULL result as failure when streams are expected */
			if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
						       ptask_args->strm_in_def
						       == NULL))
			    || (MAX_OUTPUTS(pnode) > 0
				&& (pnode->outputs == NULL
				    || ptask_args->strm_out_def == NULL)))
				status = -ENOMEM;
		}
	}
	if (!status && (node_type != NODE_DEVICE)) {
		/* Create an event that will be posted when RMS_EXIT is
		 * received. */
		pnode->sync_done = kzalloc(sizeof(struct sync_object),
								GFP_KERNEL);
		if (pnode->sync_done)
			sync_init_event(pnode->sync_done);
		else
			status = -ENOMEM;

		if (!status) {
			/*Get the shared mem mgr for this nodes dev object */
			status = cmm_get_handle(hprocessor, &hcmm_mgr);
			if (!status) {
				/* Allocate a SM addr translator for this node
				 * w/ deflt attr */
				status = cmm_xlator_create(&pnode->xlator,
							   hcmm_mgr, NULL);
			}
		}
		if (!status) {
			/* Fill in message args */
			if ((pargs != NULL) && (pargs->cb_data > 0)) {
				pmsg_args =
				    &(pnode->create_args.asa.node_msg_args);
				pmsg_args->pdata = kzalloc(pargs->cb_data,
								GFP_KERNEL);
				if (pmsg_args->pdata == NULL) {
					status = -ENOMEM;
				} else {
					pmsg_args->arg_length = pargs->cb_data;
					memcpy(pmsg_args->pdata,
					       pargs->node_data,
					       pargs->cb_data);
				}
			}
		}
	}

	if (!status && node_type != NODE_DEVICE) {
		/* Create a message queue for this node */
		intf_fxns = hnode_mgr->intf_fxns;
		status =
		    (*intf_fxns->msg_create_queue) (hnode_mgr->msg_mgr_obj,
							&pnode->msg_queue_obj,
							0,
							pnode->create_args.asa.
							node_msg_args.max_msgs,
							pnode);
	}

	if (!status) {
		/* Create object for dynamic loading */

		status = hnode_mgr->nldr_fxns.allocate(hnode_mgr->nldr_obj,
							   (void *)pnode,
							   &pnode->dcd_props.
							   obj_data.node_obj,
							   &pnode->
							   nldr_node_obj,
							   &pnode->phase_split);
	}

	/* Compare value read from Node Properties and check if it is same as
	 * STACKSEGLABEL, if yes read the Address of STACKSEGLABEL, calculate
	 * GPP Address, Read the value in that address and override the
	 * stack_seg value in task args */
	if (!status &&
	    (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
	    stack_seg_name != NULL) {
		if (strcmp((char *)
			   pnode->dcd_props.obj_data.node_obj.ndb_props.
			   stack_seg_name, STACKSEGLABEL) == 0) {
			/* NOTE(review): a failure here is logged but not
			 * returned; the second lookup proceeds regardless */
			status =
			    hnode_mgr->nldr_fxns.
			    get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
					     &dynext_base);
			if (status)
				pr_err("%s: Failed to get addr for DYNEXT_BEG"
				       " status = 0x%x\n", __func__, status);

			status =
			    hnode_mgr->nldr_fxns.
			    get_fxn_addr(pnode->nldr_node_obj,
					     "L1DSRAM_HEAP", &pul_value);

			if (status)
				pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
				       " status = 0x%x\n", __func__, status);

			host_res = pbridge_context->resources;
			if (!host_res)
				status = -EPERM;

			if (status) {
				pr_err("%s: Failed to get host resource, status"
				       " = 0x%x\n", __func__, status);
				goto func_end;
			}

			/* Translate the DSP symbol offset into a GPP address
			 * and read the stack segment value stored there */
			ul_gpp_mem_base = (u32) host_res->mem_base[1];
			off_set = pul_value - dynext_base;
			ul_stack_seg_addr = ul_gpp_mem_base + off_set;
			ul_stack_seg_val = readl(ul_stack_seg_addr);

			dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
				" 0x%x\n", __func__, ul_stack_seg_val,
				ul_stack_seg_addr);

			pnode->create_args.asa.task_arg_obj.stack_seg =
			    ul_stack_seg_val;

		}
	}

	if (!status) {
		/* Add the node to the node manager's list of allocated
		 * nodes. */
		NODE_SET_STATE(pnode, NODE_ALLOCATED);

		mutex_lock(&hnode_mgr->node_mgr_lock);

		list_add_tail(&pnode->list_elem, &hnode_mgr->node_list);
		++(hnode_mgr->num_nodes);

		/* Exit critical section */
		mutex_unlock(&hnode_mgr->node_mgr_lock);

		/* Preset this to assume phases are split
		 * (for overlay and dll) */
		pnode->phase_split = true;

		/* Notify all clients registered for DSP_NODESTATECHANGE. */
		proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
	} else {
		/* Cleanup */
		if (pnode)
			delete_node(pnode, pr_ctxt);

	}

	if (!status) {
		/* Track the node in the process context so it can be
		 * cleaned up if the owning process dies */
		status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
		if (status) {
			delete_node(pnode, pr_ctxt);
			goto func_end;
		}

		*noderes = (struct node_res_object *)node_res;
		drv_proc_node_update_heap_status(node_res, true);
		drv_proc_node_update_status(node_res, true);
	}
func_end:
	dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
		"node_res: %p status: 0x%x\n", __func__, hprocessor,
		node_uuid, pargs, attr_in, noderes, status);
	return status;
}
669 
670 /*
671  *  ======== node_alloc_msg_buf ========
672  *  Purpose:
673  *      Allocates buffer for zero copy messaging.
674  */
node_alloc_msg_buf(struct node_object * hnode,u32 usize,struct dsp_bufferattr * pattr,u8 ** pbuffer)675 DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
676 			 struct dsp_bufferattr *pattr,
677 			 u8 **pbuffer)
678 {
679 	struct node_object *pnode = (struct node_object *)hnode;
680 	int status = 0;
681 	bool va_flag = false;
682 	bool set_info;
683 	u32 proc_id;
684 
685 	if (!pnode)
686 		status = -EFAULT;
687 	else if (node_get_type(pnode) == NODE_DEVICE)
688 		status = -EPERM;
689 
690 	if (status)
691 		goto func_end;
692 
693 	if (pattr == NULL)
694 		pattr = &node_dfltbufattrs;	/* set defaults */
695 
696 	status = proc_get_processor_id(pnode->processor, &proc_id);
697 	if (proc_id != DSP_UNIT) {
698 		goto func_end;
699 	}
700 	/*  If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
701 	 *  virt  address, so set this info in this node's translator
702 	 *  object for  future ref. If MEM_GETVIRTUALSEGID then retrieve
703 	 *  virtual address  from node's translator. */
704 	if ((pattr->segment_id & MEM_SETVIRTUALSEGID) ||
705 	    (pattr->segment_id & MEM_GETVIRTUALSEGID)) {
706 		va_flag = true;
707 		set_info = (pattr->segment_id & MEM_SETVIRTUALSEGID) ?
708 		    true : false;
709 		/* Clear mask bits */
710 		pattr->segment_id &= ~MEM_MASKVIRTUALSEGID;
711 		/* Set/get this node's translators virtual address base/size */
712 		status = cmm_xlator_info(pnode->xlator, pbuffer, usize,
713 					 pattr->segment_id, set_info);
714 	}
715 	if (!status && (!va_flag)) {
716 		if (pattr->segment_id != 1) {
717 			/* Node supports single SM segment only. */
718 			status = -EBADR;
719 		}
720 		/*  Arbitrary SM buffer alignment not supported for host side
721 		 *  allocs, but guaranteed for the following alignment
722 		 *  values. */
723 		switch (pattr->buf_alignment) {
724 		case 0:
725 		case 1:
726 		case 2:
727 		case 4:
728 			break;
729 		default:
730 			/* alignment value not suportted */
731 			status = -EPERM;
732 			break;
733 		}
734 		if (!status) {
735 			/* allocate physical buffer from seg_id in node's
736 			 * translator */
737 			(void)cmm_xlator_alloc_buf(pnode->xlator, pbuffer,
738 						   usize);
739 			if (*pbuffer == NULL) {
740 				pr_err("%s: error - Out of shared memory\n",
741 				       __func__);
742 				status = -ENOMEM;
743 			}
744 		}
745 	}
746 func_end:
747 	return status;
748 }
749 
750 /*
751  *  ======== node_change_priority ========
752  *  Purpose:
753  *      Change the priority of a node in the allocated state, or that is
754  *      currently running or paused on the target.
755  */
int node_change_priority(struct node_object *hnode, s32 prio)
{
	struct node_mgr *nmgr;
	enum node_type ntype;
	enum node_state cur_state;
	u32 proc_id;
	int status = 0;

	/* Validate handle and requested priority before taking the lock */
	if (!hnode || !hnode->node_mgr)
		return -EFAULT;

	nmgr = hnode->node_mgr;
	ntype = node_get_type(hnode);
	if (ntype != NODE_TASK && ntype != NODE_DAISSOCKET)
		return -EPERM;
	if (prio < nmgr->min_pri || prio > nmgr->max_pri)
		return -EDOM;

	/* Enter critical section */
	mutex_lock(&nmgr->node_mgr_lock);

	cur_state = node_get_state(hnode);
	switch (cur_state) {
	case NODE_ALLOCATED:
	case NODE_PAUSED:
		/* Node not running on the DSP: just record the priority */
		NODE_SET_PRIORITY(hnode, prio);
		break;
	case NODE_RUNNING:
		/* Ask the dispatcher to change the priority on the target */
		status = proc_get_processor_id(hnode->processor, &proc_id);
		if (proc_id == DSP_UNIT)
			status = disp_node_change_priority(nmgr->disp_obj,
					hnode,
					nmgr->fxn_addrs[RMSCHANGENODEPRIORITY],
					hnode->node_env, prio);
		if (status >= 0)
			NODE_SET_PRIORITY(hnode, prio);
		break;
	default:
		/* Priority can only change in allocated/paused/running */
		status = -EBADR;
		break;
	}

	/* Leave critical section */
	mutex_unlock(&nmgr->node_mgr_lock);
	return status;
}
808 
809 /*
810  *  ======== node_connect ========
811  *  Purpose:
812  *      Connect two nodes on the DSP, or a node on the DSP to the GPP.
813  */
node_connect(struct node_object * node1,u32 stream1,struct node_object * node2,u32 stream2,struct dsp_strmattr * pattrs,struct dsp_cbdata * conn_param)814 int node_connect(struct node_object *node1, u32 stream1,
815 			struct node_object *node2,
816 			u32 stream2, struct dsp_strmattr *pattrs,
817 			struct dsp_cbdata *conn_param)
818 {
819 	struct node_mgr *hnode_mgr;
820 	char *pstr_dev_name = NULL;
821 	enum node_type node1_type = NODE_TASK;
822 	enum node_type node2_type = NODE_TASK;
823 	enum dsp_strmmode strm_mode;
824 	struct node_strmdef *pstrm_def;
825 	struct node_strmdef *input = NULL;
826 	struct node_strmdef *output = NULL;
827 	struct node_object *dev_node_obj;
828 	struct node_object *hnode;
829 	struct stream_chnl *pstream;
830 	u32 pipe_id;
831 	u32 chnl_id;
832 	s8 chnl_mode;
833 	u32 dw_length;
834 	int status = 0;
835 
836 	if (!node1 || !node2)
837 		return -EFAULT;
838 
839 	/* The two nodes must be on the same processor */
840 	if (node1 != (struct node_object *)DSP_HGPPNODE &&
841 			node2 != (struct node_object *)DSP_HGPPNODE &&
842 			node1->node_mgr != node2->node_mgr)
843 		return -EPERM;
844 
845 	/* Cannot connect a node to itself */
846 	if (node1 == node2)
847 		return -EPERM;
848 
849 	/* node_get_type() will return NODE_GPP if hnode =  DSP_HGPPNODE. */
850 	node1_type = node_get_type(node1);
851 	node2_type = node_get_type(node2);
852 	/* Check stream indices ranges */
853 	if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE &&
854 				stream1 >= MAX_OUTPUTS(node1)) ||
855 			(node2_type != NODE_GPP && node2_type != NODE_DEVICE &&
856 			 stream2 >= MAX_INPUTS(node2)))
857 		return -EINVAL;
858 
859 	/*
860 	 *  Only the following types of connections are allowed:
861 	 *      task/dais socket < == > task/dais socket
862 	 *      task/dais socket < == > device
863 	 *      task/dais socket < == > GPP
864 	 *
865 	 *  ie, no message nodes, and at least one task or dais
866 	 *  socket node.
867 	 */
868 	if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE ||
869 			(node1_type != NODE_TASK &&
870 			 node1_type != NODE_DAISSOCKET &&
871 			 node2_type != NODE_TASK &&
872 			 node2_type != NODE_DAISSOCKET))
873 		return -EPERM;
874 	/*
875 	 * Check stream mode. Default is STRMMODE_PROCCOPY.
876 	 */
877 	if (pattrs && pattrs->strm_mode != STRMMODE_PROCCOPY)
878 		return -EPERM;	/* illegal stream mode */
879 
880 	if (node1_type != NODE_GPP) {
881 		hnode_mgr = node1->node_mgr;
882 	} else {
883 		hnode_mgr = node2->node_mgr;
884 	}
885 
886 	/* Enter critical section */
887 	mutex_lock(&hnode_mgr->node_mgr_lock);
888 
889 	/* Nodes must be in the allocated state */
890 	if (node1_type != NODE_GPP &&
891 			node_get_state(node1) != NODE_ALLOCATED) {
892 		status = -EBADR;
893 		goto out_unlock;
894 	}
895 
896 	if (node2_type != NODE_GPP &&
897 			node_get_state(node2) != NODE_ALLOCATED) {
898 		status = -EBADR;
899 		goto out_unlock;
900 	}
901 
902 	/*
903 	 *  Check that stream indices for task and dais socket nodes
904 	 *  are not already be used. (Device nodes checked later)
905 	 */
906 	if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
907 		output = &(node1->create_args.asa.
908 				task_arg_obj.strm_out_def[stream1]);
909 		if (output->sz_device) {
910 			status = -EISCONN;
911 			goto out_unlock;
912 		}
913 
914 	}
915 	if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
916 		input = &(node2->create_args.asa.
917 				task_arg_obj.strm_in_def[stream2]);
918 		if (input->sz_device) {
919 			status = -EISCONN;
920 			goto out_unlock;
921 		}
922 
923 	}
924 	/* Connecting two task nodes? */
925 	if ((node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) &&
926 				(node2_type == NODE_TASK ||
927 				 node2_type == NODE_DAISSOCKET)) {
928 		/* Find available pipe */
929 		pipe_id = find_first_zero_bit(hnode_mgr->pipe_map, MAXPIPES);
930 		if (pipe_id == MAXPIPES) {
931 			status = -ECONNREFUSED;
932 			goto out_unlock;
933 		}
934 		set_bit(pipe_id, hnode_mgr->pipe_map);
935 		node1->outputs[stream1].type = NODECONNECT;
936 		node2->inputs[stream2].type = NODECONNECT;
937 		node1->outputs[stream1].dev_id = pipe_id;
938 		node2->inputs[stream2].dev_id = pipe_id;
939 		output->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
940 		input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
941 		if (!output->sz_device || !input->sz_device) {
942 			/* Undo the connection */
943 			kfree(output->sz_device);
944 			kfree(input->sz_device);
945 			clear_bit(pipe_id, hnode_mgr->pipe_map);
946 			status = -ENOMEM;
947 			goto out_unlock;
948 		}
949 		/* Copy "/dbpipe<pipId>" name to device names */
950 		sprintf(output->sz_device, "%s%d", PIPEPREFIX, pipe_id);
951 		strcpy(input->sz_device, output->sz_device);
952 	}
953 	/* Connecting task node to host? */
954 	if (node1_type == NODE_GPP || node2_type == NODE_GPP) {
955 		pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL);
956 		if (!pstr_dev_name) {
957 			status = -ENOMEM;
958 			goto out_unlock;
959 		}
960 
961 		chnl_mode = (node1_type == NODE_GPP) ?
962 			CHNL_MODETODSP : CHNL_MODEFROMDSP;
963 
964 		/*
965 		 *  Reserve a channel id. We need to put the name "/host<id>"
966 		 *  in the node's create_args, but the host
967 		 *  side channel will not be opened until DSPStream_Open is
968 		 *  called for this node.
969 		 */
970 		strm_mode = pattrs ? pattrs->strm_mode : STRMMODE_PROCCOPY;
971 		switch (strm_mode) {
972 		case STRMMODE_RDMA:
973 			chnl_id = find_first_zero_bit(hnode_mgr->dma_chnl_map,
974 					CHNL_MAXCHANNELS);
975 			if (chnl_id < CHNL_MAXCHANNELS) {
976 				set_bit(chnl_id, hnode_mgr->dma_chnl_map);
977 				/* dma chans are 2nd transport chnl set
978 				 * ids(e.g. 16-31) */
979 				chnl_id = chnl_id + hnode_mgr->num_chnls;
980 			}
981 			break;
982 		case STRMMODE_ZEROCOPY:
983 			chnl_id = find_first_zero_bit(hnode_mgr->zc_chnl_map,
984 					CHNL_MAXCHANNELS);
985 			if (chnl_id < CHNL_MAXCHANNELS) {
986 				set_bit(chnl_id, hnode_mgr->zc_chnl_map);
987 				/* zero-copy chans are 3nd transport set
988 				 * (e.g. 32-47) */
989 				chnl_id = chnl_id +
990 					(2 * hnode_mgr->num_chnls);
991 			}
992 			break;
993 		case STRMMODE_PROCCOPY:
994 			chnl_id = find_first_zero_bit(hnode_mgr->chnl_map,
995 					CHNL_MAXCHANNELS);
996 			if (chnl_id < CHNL_MAXCHANNELS)
997 				set_bit(chnl_id, hnode_mgr->chnl_map);
998 			break;
999 		default:
1000 			status = -EINVAL;
1001 			goto out_unlock;
1002 		}
1003 		if (chnl_id == CHNL_MAXCHANNELS) {
1004 			status = -ECONNREFUSED;
1005 			goto out_unlock;
1006 		}
1007 
1008 		if (node1 == (struct node_object *)DSP_HGPPNODE) {
1009 			node2->inputs[stream2].type = HOSTCONNECT;
1010 			node2->inputs[stream2].dev_id = chnl_id;
1011 			input->sz_device = pstr_dev_name;
1012 		} else {
1013 			node1->outputs[stream1].type = HOSTCONNECT;
1014 			node1->outputs[stream1].dev_id = chnl_id;
1015 			output->sz_device = pstr_dev_name;
1016 		}
1017 		sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id);
1018 	}
1019 	/* Connecting task node to device node? */
1020 	if ((node1_type == NODE_DEVICE) || (node2_type == NODE_DEVICE)) {
1021 		if (node2_type == NODE_DEVICE) {
1022 			/* node1 == > device */
1023 			dev_node_obj = node2;
1024 			hnode = node1;
1025 			pstream = &(node1->outputs[stream1]);
1026 			pstrm_def = output;
1027 		} else {
1028 			/* device == > node2 */
1029 			dev_node_obj = node1;
1030 			hnode = node2;
1031 			pstream = &(node2->inputs[stream2]);
1032 			pstrm_def = input;
1033 		}
1034 		/* Set up create args */
1035 		pstream->type = DEVICECONNECT;
1036 		dw_length = strlen(dev_node_obj->str_dev_name);
1037 		if (conn_param)
1038 			pstrm_def->sz_device = kzalloc(dw_length + 1 +
1039 					conn_param->cb_data,
1040 					GFP_KERNEL);
1041 		else
1042 			pstrm_def->sz_device = kzalloc(dw_length + 1,
1043 					GFP_KERNEL);
1044 		if (!pstrm_def->sz_device) {
1045 			status = -ENOMEM;
1046 			goto out_unlock;
1047 		}
1048 		/* Copy device name */
1049 		strncpy(pstrm_def->sz_device,
1050 				dev_node_obj->str_dev_name, dw_length);
1051 		if (conn_param)
1052 			strncat(pstrm_def->sz_device,
1053 					(char *)conn_param->node_data,
1054 					(u32) conn_param->cb_data);
1055 		dev_node_obj->device_owner = hnode;
1056 	}
1057 	/* Fill in create args */
1058 	if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
1059 		node1->create_args.asa.task_arg_obj.num_outputs++;
1060 		fill_stream_def(node1, output, pattrs);
1061 	}
1062 	if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
1063 		node2->create_args.asa.task_arg_obj.num_inputs++;
1064 		fill_stream_def(node2, input, pattrs);
1065 	}
1066 	/* Update node1 and node2 stream_connect */
1067 	if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) {
1068 		node1->num_outputs++;
1069 		if (stream1 > node1->max_output_index)
1070 			node1->max_output_index = stream1;
1071 
1072 	}
1073 	if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) {
1074 		node2->num_inputs++;
1075 		if (stream2 > node2->max_input_index)
1076 			node2->max_input_index = stream2;
1077 
1078 	}
1079 	fill_stream_connect(node1, node2, stream1, stream2);
1080 	/* end of sync_enter_cs */
1081 	/* Exit critical section */
1082 out_unlock:
1083 	if (status && pstr_dev_name)
1084 		kfree(pstr_dev_name);
1085 	mutex_unlock(&hnode_mgr->node_mgr_lock);
1086 	dev_dbg(bridge, "%s: node1: %p stream1: %d node2: %p stream2: %d"
1087 			"pattrs: %p status: 0x%x\n", __func__, node1,
1088 			stream1, node2, stream2, pattrs, status);
1089 	return status;
1090 }
1091 
1092 /*
1093  *  ======== node_create ========
1094  *  Purpose:
1095  *      Create a node on the DSP by remotely calling the node's create function.
1096  */
int node_create(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr;
	struct bridge_drv_interface *intf_fxns;
	u32 ul_create_fxn;	/* DSP address of the node's create-phase fxn */
	enum node_type node_type;
	int status = 0;
	int status1 = 0;	/* secondary status for create-code unload */
	struct dsp_cbdata cb_data;
	u32 proc_id = 255;	/* invalid until proc_get_processor_id() */
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;
#endif

	if (!pnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to create
	   new node */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	/* create struct dsp_cbdata struct for PWR calls */
	cb_data.cb_data = PWR_TIMEOUT;
	node_type = node_get_type(hnode);
	hnode_mgr = hnode->node_mgr;
	intf_fxns = hnode_mgr->intf_fxns;
	/* Get access to node dispatcher */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Check node state: only an allocated node may be created */
	if (node_get_state(hnode) != NODE_ALLOCATED)
		status = -EBADR;

	if (!status)
		status = proc_get_processor_id(pnode->processor, &proc_id);

	if (status)
		goto func_cont2;

	if (proc_id != DSP_UNIT)
		goto func_cont2;

	/* Make sure streams are properly connected: the highest stream
	 * index in use must fit within the declared stream count */
	if ((hnode->num_inputs && hnode->max_input_index >
	     hnode->num_inputs - 1) ||
	    (hnode->num_outputs && hnode->max_output_index >
	     hnode->num_outputs - 1))
		status = -ENOTCONN;

	if (!status) {
		/* If node's create function is not loaded, load it */
		/* Boost the OPP level to max level that DSP can be requested */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]);
#endif
		status = hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
						       NLDR_CREATE);
		/* Get address of node's create function */
		if (!status) {
			hnode->loaded = true;
			if (node_type != NODE_DEVICE) {
				status = get_fxn_address(hnode, &ul_create_fxn,
							 CREATEPHASE);
			}
		} else {
			pr_err("%s: failed to load create code: 0x%x\n",
			       __func__, status);
		}
		/* Request the lowest OPP level */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
#endif
		/* Get address of iAlg functions, if socket node */
		if (!status) {
			if (node_type == NODE_DAISSOCKET) {
				status = hnode_mgr->nldr_fxns.get_fxn_addr
				    (hnode->nldr_node_obj,
				     hnode->dcd_props.obj_data.node_obj.
				     str_i_alg_name,
				     &hnode->create_args.asa.
				     task_arg_obj.dais_arg);
			}
		}
	}
	if (!status) {
		if (node_type != NODE_DEVICE) {
			/* Remotely invoke the create phase on the DSP */
			status = disp_node_create(hnode_mgr->disp_obj, hnode,
						  hnode_mgr->fxn_addrs
						  [RMSCREATENODE],
						  ul_create_fxn,
						  &(hnode->create_args),
						  &(hnode->node_env));
			if (status >= 0) {
				/* Set the message queue id to the node env
				 * pointer */
				intf_fxns = hnode_mgr->intf_fxns;
				(*intf_fxns->msg_set_queue_id) (hnode->
							msg_queue_obj,
							hnode->node_env);
			}
		}
	}
	/*  Phase II/Overlays: Create, execute, delete phases  possibly in
	 *  different files/sections. */
	if (hnode->loaded && hnode->phase_split) {
		/* If create code was dynamically loaded, we can now unload
		 * it. */
		status1 = hnode_mgr->nldr_fxns.unload(hnode->nldr_node_obj,
							  NLDR_CREATE);
		hnode->loaded = false;
	}
	if (status1)
		pr_err("%s: Failed to unload create code: 0x%x\n",
		       __func__, status1);
func_cont2:
	/* Update node state and node manager state */
	if (status >= 0) {
		NODE_SET_STATE(hnode, NODE_CREATED);
		hnode_mgr->num_created++;
		goto func_cont;
	}
	if (status != -EBADR) {
		/* Put back in NODE_ALLOCATED state if error occurred */
		NODE_SET_STATE(hnode, NODE_ALLOCATED);
	}
func_cont:
	/* Free access to node dispatcher */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
	/* Notify clients of the state change on success (or on the early
	 * -EFAULT path status is negative, so this is skipped) */
	if (status >= 0) {
		proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
		ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
	}

	dev_dbg(bridge, "%s: hnode: %p status: 0x%x\n", __func__,
		hnode, status);
	return status;
}
1249 
1250 /*
1251  *  ======== node_create_mgr ========
1252  *  Purpose:
1253  *      Create a NODE Manager object.
1254  */
int node_create_mgr(struct node_mgr **node_man,
			   struct dev_object *hdev_obj)
{
	u32 i;
	struct node_mgr *node_mgr_obj = NULL;
	struct disp_attr disp_attr_obj;
	char *sz_zl_file = "";	/* empty base-image name for the DCD mgr */
	struct nldr_attrs nldr_attrs_obj;
	int status = 0;
	u8 dev_type;

	*node_man = NULL;
	/* Allocate Node manager object */
	node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
	if (!node_mgr_obj)
		return -ENOMEM;

	node_mgr_obj->dev_obj = hdev_obj;

	/* Notification object for node-manager level events */
	node_mgr_obj->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
			GFP_KERNEL);
	if (!node_mgr_obj->ntfy_obj) {
		status = -ENOMEM;
		goto out_err;
	}
	ntfy_init(node_mgr_obj->ntfy_obj);

	INIT_LIST_HEAD(&node_mgr_obj->node_list);

	dev_get_dev_type(hdev_obj, &dev_type);

	status = dcd_create_manager(sz_zl_file, &node_mgr_obj->dcd_mgr);
	if (status)
		goto out_err;

	status = get_proc_props(node_mgr_obj, hdev_obj);
	if (status)
		goto out_err;

	/* Create NODE Dispatcher */
	disp_attr_obj.chnl_offset = node_mgr_obj->chnl_offset;
	disp_attr_obj.chnl_buf_size = node_mgr_obj->chnl_buf_size;
	disp_attr_obj.proc_family = node_mgr_obj->proc_family;
	disp_attr_obj.proc_type = node_mgr_obj->proc_type;

	status = disp_create(&node_mgr_obj->disp_obj, hdev_obj, &disp_attr_obj);
	if (status)
		goto out_err;

	/* Create a STRM Manager */
	status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj);
	if (status)
		goto out_err;

	dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns);
	/* Get msg_ctrl queue manager */
	dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj);
	mutex_init(&node_mgr_obj->node_mgr_lock);

	/* Block out reserved channels */
	for (i = 0; i < node_mgr_obj->chnl_offset; i++)
		set_bit(i, node_mgr_obj->chnl_map);

	/* Block out channels reserved for RMS */
	set_bit(node_mgr_obj->chnl_offset, node_mgr_obj->chnl_map);
	set_bit(node_mgr_obj->chnl_offset + 1, node_mgr_obj->chnl_map);

	/* NO RM Server on the IVA */
	if (dev_type != IVA_UNIT) {
		/* Get addresses of any RMS functions loaded */
		status = get_rms_fxns(node_mgr_obj);
		if (status)
			goto out_err;
	}

	/* Get loader functions and create loader */
	node_mgr_obj->nldr_fxns = nldr_fxns;	/* Dyn loader funcs */

	nldr_attrs_obj.ovly = ovly;
	nldr_attrs_obj.write = mem_write;
	nldr_attrs_obj.dsp_word_size = node_mgr_obj->dsp_word_size;
	nldr_attrs_obj.dsp_mau_size = node_mgr_obj->dsp_mau_size;
	status = node_mgr_obj->nldr_fxns.create(&node_mgr_obj->nldr_obj,
			hdev_obj,
			&nldr_attrs_obj);
	if (status)
		goto out_err;

	*node_man = node_mgr_obj;

	return status;
out_err:
	/* delete_node_mgr() tears down whatever was created so far;
	 * it tolerates partially-initialized managers */
	delete_node_mgr(node_mgr_obj);
	return status;
}
1350 
1351 /*
1352  *  ======== node_delete ========
1353  *  Purpose:
1354  *      Delete a node on the DSP by remotely calling the node's delete function.
1355  *      Loads the node's delete function if necessary. Free GPP side resources
1356  *      after node's delete function returns.
1357  */
int node_delete(struct node_res_object *noderes,
		       struct process_context *pr_ctxt)
{
	struct node_object *pnode = noderes->node;
	struct node_mgr *hnode_mgr;
	struct proc_object *hprocessor;
	struct disp_object *disp_obj;
	u32 ul_delete_fxn;	/* DSP address of the node's delete function */
	enum node_type node_type;
	enum node_state state;
	int status = 0;
	int status1 = 0;	/* secondary status for phase unload failures */
	struct dsp_cbdata cb_data;
	u32 proc_id;
	struct bridge_drv_interface *intf_fxns;

	void *node_res = noderes;

	struct dsp_processorstate proc_state;

	if (!pnode) {
		status = -EFAULT;
		goto func_end;
	}
	/* create struct dsp_cbdata struct for PWR call */
	cb_data.cb_data = PWR_TIMEOUT;
	hnode_mgr = pnode->node_mgr;
	hprocessor = pnode->processor;
	disp_obj = hnode_mgr->disp_obj;
	node_type = node_get_type(pnode);
	intf_fxns = hnode_mgr->intf_fxns;
	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	state = node_get_state(pnode);
	/*  Execute delete phase code for non-device node in all cases
	 *  except when the node was only allocated. Delete phase must be
	 *  executed even if create phase was executed, but failed.
	 *  If the node environment pointer is non-NULL, the delete phase
	 *  code must be  executed. */
	if (!(state == NODE_ALLOCATED && pnode->node_env == (u32) NULL) &&
	    node_type != NODE_DEVICE) {
		status = proc_get_processor_id(pnode->processor, &proc_id);
		if (status)
			goto func_cont1;

		if (proc_id == DSP_UNIT || proc_id == IVA_UNIT) {
			/*  If node has terminated, execute phase code will
			 *  have already been unloaded in node_on_exit(). If the
			 *  node is PAUSED, the execute phase is loaded, and it
			 *  is now ok to unload it. If the node is running, we
			 *  will unload the execute phase only after deleting
			 *  the node. */
			if (state == NODE_PAUSED && pnode->loaded &&
			    pnode->phase_split) {
				/* Ok to unload execute code as long as node
				 * is not * running */
				status1 =
				    hnode_mgr->nldr_fxns.
				    unload(pnode->nldr_node_obj,
					       NLDR_EXECUTE);
				pnode->loaded = false;
				NODE_SET_STATE(pnode, NODE_DONE);
			}
			/* Load delete phase code if not loaded or if haven't
			 * * unloaded EXECUTE phase */
			if ((!(pnode->loaded) || (state == NODE_RUNNING)) &&
			    pnode->phase_split) {
				status =
				    hnode_mgr->nldr_fxns.
				    load(pnode->nldr_node_obj, NLDR_DELETE);
				if (!status)
					pnode->loaded = true;
				else
					pr_err("%s: fail - load delete code:"
					       " 0x%x\n", __func__, status);
			}
		}
func_cont1:
		if (!status) {
			/* Unblock a thread trying to terminate the node */
			(void)sync_set_event(pnode->sync_done);
			if (proc_id == DSP_UNIT) {
				/* ul_delete_fxn = address of node's delete
				 * function */
				status = get_fxn_address(pnode, &ul_delete_fxn,
							 DELETEPHASE);
			} else if (proc_id == IVA_UNIT)
				ul_delete_fxn = (u32) pnode->node_env;
			if (!status) {
				/* Skip the remote delete call if the
				 * processor has gone into the error state */
				status = proc_get_state(hprocessor,
						&proc_state,
						sizeof(struct
						       dsp_processorstate));
				if (proc_state.proc_state != PROC_ERROR) {
					status =
					    disp_node_delete(disp_obj, pnode,
							     hnode_mgr->
							     fxn_addrs
							     [RMSDELETENODE],
							     ul_delete_fxn,
							     pnode->node_env);
				} else
					NODE_SET_STATE(pnode, NODE_DONE);

				/* Unload execute, if not unloaded, and delete
				 * function */
				if (state == NODE_RUNNING &&
				    pnode->phase_split) {
					status1 =
					    hnode_mgr->nldr_fxns.
					    unload(pnode->nldr_node_obj,
						       NLDR_EXECUTE);
				}
				if (status1)
					pr_err("%s: fail - unload execute code:"
					       " 0x%x\n", __func__, status1);

				status1 =
				    hnode_mgr->nldr_fxns.unload(pnode->
							    nldr_node_obj,
							    NLDR_DELETE);
				pnode->loaded = false;
				if (status1)
					pr_err("%s: fail - unload delete code: "
					       "0x%x\n", __func__, status1);
			}
		}
	}
	/* Free host side resources even if a failure occurred */
	/* Remove node from hnode_mgr->node_list */
	list_del(&pnode->list_elem);
	hnode_mgr->num_nodes--;
	/* Decrement count of nodes created on DSP */
	if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) &&
					  (pnode->node_env != (u32) NULL)))
		hnode_mgr->num_created--;
	/*  Free host-side resources allocated by node_create()
	 *  delete_node() fails if SM buffers not freed by client! */
	drv_proc_node_update_status(node_res, false);
	delete_node(pnode, pr_ctxt);

	/*
	 * Release all Node resources and its context
	 */
	idr_remove(pr_ctxt->node_id, ((struct node_res_object *)node_res)->id);
	kfree(node_res);

	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	proc_notify_clients(hprocessor, DSP_NODESTATECHANGE);
func_end:
	dev_dbg(bridge, "%s: pnode: %p status 0x%x\n", __func__, pnode, status);
	return status;
}
1513 
1514 /*
1515  *  ======== node_delete_mgr ========
1516  *  Purpose:
1517  *      Delete the NODE Manager.
1518  */
node_delete_mgr(struct node_mgr * hnode_mgr)1519 int node_delete_mgr(struct node_mgr *hnode_mgr)
1520 {
1521 	if (!hnode_mgr)
1522 		return -EFAULT;
1523 
1524 	delete_node_mgr(hnode_mgr);
1525 
1526 	return 0;
1527 }
1528 
1529 /*
1530  *  ======== node_enum_nodes ========
1531  *  Purpose:
1532  *      Enumerate currently allocated nodes.
1533  */
int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
			   u32 node_tab_size, u32 *pu_num_nodes,
			   u32 *pu_allocated)
{
	struct node_object *node_obj;
	u32 idx = 0;
	int status = 0;

	if (!hnode_mgr)
		return -EFAULT;

	/* Serialize against concurrent node allocation/deletion */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Always report how many nodes currently exist */
	*pu_allocated = hnode_mgr->num_nodes;
	if (hnode_mgr->num_nodes > node_tab_size) {
		/* Caller's table cannot hold every node */
		*pu_num_nodes = 0;
		status = -EINVAL;
	} else {
		list_for_each_entry(node_obj, &hnode_mgr->node_list,
				list_elem)
			node_tab[idx++] = node_obj;
		*pu_num_nodes = hnode_mgr->num_nodes;
	}

	mutex_unlock(&hnode_mgr->node_mgr_lock);

	return status;
}
1564 
1565 /*
1566  *  ======== node_free_msg_buf ========
1567  *  Purpose:
1568  *      Frees the message buffer.
1569  */
int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
			     struct dsp_bufferattr *pattr)
{
	struct node_object *pnode = (struct node_object *)hnode;
	int status = 0;
	u32 proc_id;

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	/* Check the lookup result before trusting proc_id: the original
	 * code compared proc_id (possibly uninitialized) before looking
	 * at the returned status */
	status = proc_get_processor_id(pnode->processor, &proc_id);
	if (status)
		goto func_end;

	/* Only DSP-side buffers are backed by the SM translator; for any
	 * other processor there is nothing to free */
	if (proc_id == DSP_UNIT) {
		if (pattr == NULL) {
			/* set defaults */
			pattr = &node_dfltbufattrs;
		}
		/* Node supports single SM segment only; reject the request
		 * instead of freeing anyway (the original clobbered this
		 * -EBADR with the cmm_xlator_free_buf() result) */
		if (pattr->segment_id != 1) {
			status = -EBADR;
			goto func_end;
		}
		/* pbuffer is clients Va. */
		status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
	}
func_end:
	return status;
}
1600 
1601 /*
1602  *  ======== node_get_attr ========
1603  *  Purpose:
1604  *      Copy the current attributes of the specified node into a dsp_nodeattr
1605  *      structure.
1606  */
int node_get_attr(struct node_object *hnode,
			 struct dsp_nodeattr *pattr, u32 attr_size)
{
	struct node_mgr *hnode_mgr;
	struct dsp_nodeattrin *attr_in;

	if (!hnode)
		return -EFAULT;

	hnode_mgr = hnode->node_mgr;
	/*
	 * Hold the manager lock while copying: the fields read below can
	 * be changed concurrently by node_change_priority() and
	 * node_connect().
	 */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	pattr->cb_struct = sizeof(struct dsp_nodeattr);
	/* Populate the embedded dsp_nodeattrin */
	attr_in = &pattr->in_node_attr_in;
	attr_in->cb_struct = sizeof(struct dsp_nodeattrin);
	attr_in->prio = hnode->prio;
	attr_in->timeout = hnode->timeout;
	attr_in->heap_size =
		hnode->create_args.asa.task_arg_obj.heap_size;
	attr_in->pgpp_virt_addr = (void *)
		hnode->create_args.asa.task_arg_obj.gpp_heap_addr;
	pattr->node_attr_inputs = hnode->num_gpp_inputs;
	pattr->node_attr_outputs = hnode->num_gpp_outputs;
	/* Populate the embedded dsp_nodeinfo */
	get_node_info(hnode, &pattr->node_info);

	mutex_unlock(&hnode_mgr->node_mgr_lock);

	return 0;
}
1640 
1641 /*
1642  *  ======== node_get_channel_id ========
1643  *  Purpose:
1644  *      Get the channel index reserved for a stream connection between the
1645  *      host and a node.
1646  */
int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
			       u32 *chan_id)
{
	enum node_type node_type;

	if (!hnode)
		return -EFAULT;

	/* Only task and XDAIS socket nodes own stream channels */
	node_type = node_get_type(hnode);
	if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
		return -EPERM;

	if (dir == DSP_TONODE) {
		/* GPP -> node: look at the input streams */
		if (index >= MAX_INPUTS(hnode))
			return -EINVAL;
		if (hnode->inputs[index].type != HOSTCONNECT)
			return -EINVAL;
		*chan_id = hnode->inputs[index].dev_id;
		return 0;
	}

	/* node -> GPP: look at the output streams */
	if (index >= MAX_OUTPUTS(hnode))
		return -EINVAL;
	if (hnode->outputs[index].type != HOSTCONNECT)
		return -EINVAL;
	*chan_id = hnode->outputs[index].dev_id;
	return 0;
}
1679 
1680 /*
1681  *  ======== node_get_message ========
1682  *  Purpose:
1683  *      Retrieve a message from a node on the DSP.
1684  */
int node_get_message(struct node_object *hnode,
			    struct dsp_msg *message, u32 utimeout)
{
	struct node_mgr *hnode_mgr;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	void *tmp_buf;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to get the
	   message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	hnode_mgr = hnode->node_mgr;
	node_type = node_get_type(hnode);
	/* Only message, task, and XDAIS socket nodes can receive messages */
	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
	    node_type != NODE_DAISSOCKET) {
		status = -EPERM;
		goto func_end;
	}
	/*  This function will block unless a message is available. Since
	 *  DSPNode_RegisterNotify() allows notification when a message
	 *  is available, the system can be designed so that
	 *  DSPNode_GetMessage() is only called when a message is
	 *  available. */
	intf_fxns = hnode_mgr->intf_fxns;
	status =
	    (*intf_fxns->msg_get) (hnode->msg_queue_obj, message, utimeout);
	/* Check if message contains SM descriptor */
	if (status || !(message->cmd & DSP_RMSBUFDESC))
		goto func_end;

	/* Translate DSP byte addr to GPP Va.
	 * Two-step translation: DSP address -> GPP physical -> GPP virtual */
	tmp_buf = cmm_xlator_translate(hnode->xlator,
				       (void *)(message->arg1 *
						hnode->node_mgr->
						dsp_word_size), CMM_DSPPA2PA);
	if (tmp_buf != NULL) {
		/* now convert this GPP Pa to Va */
		tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf,
					       CMM_PA2VA);
		if (tmp_buf != NULL) {
			/* Adjust SM size in msg: arg1 becomes the GPP virtual
			 * address, arg2 is scaled from DSP words to bytes */
			message->arg1 = (u32) tmp_buf;
			message->arg2 *= hnode->node_mgr->dsp_word_size;
		} else {
			status = -ESRCH;
		}
	} else {
		status = -ESRCH;
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p message: %p utimeout: 0x%x\n", __func__,
		hnode, message, utimeout);
	return status;
}
1754 
1755 /*
1756  *   ======== node_get_nldr_obj ========
1757  */
node_get_nldr_obj(struct node_mgr * hnode_mgr,struct nldr_object ** nldr_ovlyobj)1758 int node_get_nldr_obj(struct node_mgr *hnode_mgr,
1759 			     struct nldr_object **nldr_ovlyobj)
1760 {
1761 	int status = 0;
1762 	struct node_mgr *node_mgr_obj = hnode_mgr;
1763 
1764 	if (!hnode_mgr)
1765 		status = -EFAULT;
1766 	else
1767 		*nldr_ovlyobj = node_mgr_obj->nldr_obj;
1768 
1769 	return status;
1770 }
1771 
1772 /*
1773  *  ======== node_get_strm_mgr ========
1774  *  Purpose:
1775  *      Returns the Stream manager.
1776  */
node_get_strm_mgr(struct node_object * hnode,struct strm_mgr ** strm_man)1777 int node_get_strm_mgr(struct node_object *hnode,
1778 			     struct strm_mgr **strm_man)
1779 {
1780 	int status = 0;
1781 
1782 	if (!hnode)
1783 		status = -EFAULT;
1784 	else
1785 		*strm_man = hnode->node_mgr->strm_mgr_obj;
1786 
1787 	return status;
1788 }
1789 
1790 /*
1791  *  ======== node_get_load_type ========
1792  */
node_get_load_type(struct node_object * hnode)1793 enum nldr_loadtype node_get_load_type(struct node_object *hnode)
1794 {
1795 	if (!hnode) {
1796 		dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
1797 		return -1;
1798 	} else {
1799 		return hnode->dcd_props.obj_data.node_obj.load_type;
1800 	}
1801 }
1802 
1803 /*
1804  *  ======== node_get_timeout ========
1805  *  Purpose:
1806  *      Returns the timeout value for this node.
1807  */
node_get_timeout(struct node_object * hnode)1808 u32 node_get_timeout(struct node_object *hnode)
1809 {
1810 	if (!hnode) {
1811 		dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
1812 		return 0;
1813 	} else {
1814 		return hnode->timeout;
1815 	}
1816 }
1817 
1818 /*
1819  *  ======== node_get_type ========
1820  *  Purpose:
1821  *      Returns the node type.
1822  */
node_get_type(struct node_object * hnode)1823 enum node_type node_get_type(struct node_object *hnode)
1824 {
1825 	enum node_type node_type;
1826 
1827 	if (hnode == (struct node_object *)DSP_HGPPNODE)
1828 		node_type = NODE_GPP;
1829 	else {
1830 		if (!hnode)
1831 			node_type = -1;
1832 		else
1833 			node_type = hnode->ntype;
1834 	}
1835 	return node_type;
1836 }
1837 
1838 /*
1839  *  ======== node_on_exit ========
1840  *  Purpose:
1841  *      Gets called when RMS_EXIT is received for a node.
1842  */
void node_on_exit(struct node_object *hnode, s32 node_status)
{
	if (!hnode)
		return;

	/* Mark the node finished and record its exit code */
	NODE_SET_STATE(hnode, NODE_DONE);
	hnode->exit_status = node_status;

	/* Drop the execute-phase code if it is still resident */
	if (hnode->loaded && hnode->phase_split) {
		(void)hnode->node_mgr->nldr_fxns.unload(hnode->nldr_node_obj,
							NLDR_EXECUTE);
		hnode->loaded = false;
	}

	/* Wake any thread blocked in node_terminate() */
	(void)sync_set_event(hnode->sync_done);

	/* Broadcast the state change to interested clients */
	proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
	ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
}
1863 
1864 /*
1865  *  ======== node_pause ========
1866  *  Purpose:
1867  *      Suspend execution of a node currently running on the DSP.
1868  */
int node_pause(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	enum node_type node_type;
	enum node_state state;
	struct node_mgr *hnode_mgr;
	int status = 0;
	u32 proc_id;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	if (!hnode) {
		status = -EFAULT;
	} else {
		/* Only task and XDAIS socket nodes can be paused */
		node_type = node_get_type(hnode);
		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
			status = -EPERM;
	}
	if (status)
		goto func_end;

	status = proc_get_processor_id(pnode->processor, &proc_id);

	/* Pause is not implemented for the IVA */
	if (proc_id == IVA_UNIT)
		status = -ENOSYS;

	if (!status) {
		hnode_mgr = hnode->node_mgr;

		/* Enter critical section */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		/* Check node state: only a running node can be paused */
		if (state != NODE_RUNNING)
			status = -EBADR;

		if (status)
			goto func_cont;
		hprocessor = hnode->processor;
		status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
		if (status)
			goto func_cont;
		/* If processor is in error state then don't attempt
		   to send the message */
		if (proc_state.proc_state == PROC_ERROR) {
			status = -EPERM;
			goto func_cont;
		}

		/* Pausing is implemented as dropping the node's priority
		 * to the special suspended level via the RMS */
		status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
			hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY],
			hnode->node_env, NODE_SUSPENDEDPRI);

		/* Update state */
		if (status >= 0)
			NODE_SET_STATE(hnode, NODE_PAUSED);

func_cont:
		/* End of sync_enter_cs */
		/* Leave critical section */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
		if (status >= 0) {
			proc_notify_clients(hnode->processor,
					    DSP_NODESTATECHANGE);
			ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
		}
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
	return status;
}
1941 
1942 /*
1943  *  ======== node_put_message ========
1944  *  Purpose:
1945  *      Send a message to a message node, task node, or XDAIS socket node. This
1946  *      function will block until the message stream can accommodate the
1947  *      message, or a timeout occurs.
1948  */
int node_put_message(struct node_object *hnode,
			    const struct dsp_msg *pmsg, u32 utimeout)
{
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	enum node_state state;
	int status = 0;
	void *tmp_buf;
	/* Local copy of *pmsg: arg1/arg2 may be rewritten for SM buffers */
	struct dsp_msg new_msg;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in bad state then don't attempt sending the
	   message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	hnode_mgr = hnode->node_mgr;
	node_type = node_get_type(hnode);
	/* Only message, task and xDAIS socket nodes accept messages */
	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
	    node_type != NODE_DAISSOCKET)
		status = -EPERM;

	if (!status) {
		/*  Check node state. Can't send messages to a node after
		 *  we've sent the RMS_EXIT command. There is still the
		 *  possibility that node_terminate can be called after we've
		 *  checked the state. Could add another SYNC object to
		 *  prevent this (can't use node_mgr_lock, since we don't
		 *  want to block other NODE functions). However, the node may
		 *  still exit on its own, before this message is sent. */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		if (state == NODE_TERMINATING || state == NODE_DONE)
			status = -EBADR;

		/* end of sync_enter_cs */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}
	if (status)
		goto func_end;

	/* assign pmsg values to new msg */
	new_msg = *pmsg;
	/* Now, check if message contains a SM buffer descriptor */
	if (pmsg->cmd & DSP_RMSBUFDESC) {
		/* Translate GPP Va to DSP physical buf Ptr. */
		tmp_buf = cmm_xlator_translate(hnode->xlator,
					       (void *)new_msg.arg1,
					       CMM_VA2DSPPA);
		if (tmp_buf != NULL) {
			/* got translation, convert to MAUs in msg */
			if (hnode->node_mgr->dsp_word_size != 0) {
				new_msg.arg1 =
				    (u32) tmp_buf /
				    hnode->node_mgr->dsp_word_size;
				/* MAUs */
				new_msg.arg2 /= hnode->node_mgr->
				    dsp_word_size;
			} else {
				/* Division by zero guard; word size should
				 * have been set by get_proc_props */
				pr_err("%s: dsp_word_size is zero!\n",
				       __func__);
				status = -EPERM;	/* bad DSPWordSize */
			}
		} else {	/* failed to translate buffer address */
			status = -ESRCH;
		}
	}
	if (!status) {
		/* Hand the (possibly rewritten) message to the bridge
		 * driver's message queue; may block until the queue can
		 * accept it or utimeout expires */
		intf_fxns = hnode_mgr->intf_fxns;
		status = (*intf_fxns->msg_put) (hnode->msg_queue_obj,
						    &new_msg, utimeout);
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p pmsg: %p utimeout: 0x%x, "
		"status 0x%x\n", __func__, hnode, pmsg, utimeout, status);
	return status;
}
2038 
2039 /*
2040  *  ======== node_register_notify ========
2041  *  Purpose:
2042  *      Register to be notified on specific events for this node.
2043  */
int node_register_notify(struct node_object *hnode, u32 event_mask,
				u32 notify_type,
				struct dsp_notification *hnotification)
{
	struct bridge_drv_interface *intf_fxns;
	int status = 0;

	/* Validate the handle and the requested notification first. */
	if (!hnode) {
		status = -EFAULT;
	} else if (event_mask & ~(DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY)) {
		/* Mask contains a bit that is not a node-related event */
		status = -EINVAL;
	} else if (notify_type != DSP_SIGNALEVENT) {
		status = -EINVAL;
	} else if (event_mask ==
		   (DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY)) {
		/* Only one notification can be registered at a
		 * time - Limitation */
		status = -EINVAL;
	}

	if (!status) {
		if (event_mask == DSP_NODESTATECHANGE) {
			/* State-change events are handled by the node's own
			 * notification object. */
			status = ntfy_register(hnode->ntfy_obj, hnotification,
					       event_mask & DSP_NODESTATECHANGE,
					       notify_type);
		} else {
			/* Message-ready events are routed to msg_ctrl */
			intf_fxns = hnode->node_mgr->intf_fxns;
			status = (*intf_fxns->msg_register_notify)
			    (hnode->msg_queue_obj,
			     event_mask & DSP_NODEMESSAGEREADY, notify_type,
			     hnotification);
		}

	}
	dev_dbg(bridge, "%s: hnode: %p event_mask: 0x%x notify_type: 0x%x "
		"hnotification: %p status 0x%x\n", __func__, hnode,
		event_mask, notify_type, hnotification, status);
	return status;
}
2087 
2088 /*
2089  *  ======== node_run ========
2090  *  Purpose:
2091  *      Start execution of a node's execute phase, or resume execution of a node
2092  *      that has been suspended (via NODE_NodePause()) on the DSP. Load the
2093  *      node's execute function if necessary.
2094  */
int node_run(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr;
	enum node_type node_type;
	enum node_state state;
	u32 ul_execute_fxn;
	u32 ul_fxn_addr;
	int status = 0;
	u32 proc_id;
	struct bridge_drv_interface *intf_fxns;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to run the node */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	/* Device nodes have no execute phase and cannot be run */
	node_type = node_get_type(hnode);
	if (node_type == NODE_DEVICE)
		status = -EPERM;
	if (status)
		goto func_end;

	hnode_mgr = hnode->node_mgr;
	if (!hnode_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	intf_fxns = hnode_mgr->intf_fxns;
	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Only a freshly created or a paused node may be (re)started */
	state = node_get_state(hnode);
	if (state != NODE_CREATED && state != NODE_PAUSED)
		status = -EBADR;

	if (!status)
		status = proc_get_processor_id(pnode->processor, &proc_id);

	if (status)
		goto func_cont1;

	if ((proc_id != DSP_UNIT) && (proc_id != IVA_UNIT))
		goto func_cont1;

	if (state == NODE_CREATED) {
		/* If node's execute function is not loaded, load it */
		if (!(hnode->loaded) && hnode->phase_split) {
			status =
			    hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
							  NLDR_EXECUTE);
			if (!status) {
				hnode->loaded = true;
			} else {
				pr_err("%s: fail - load execute code: 0x%x\n",
				       __func__, status);
			}
		}
		if (!status) {
			/* Get address of node's execute function */
			if (proc_id == IVA_UNIT)
				ul_execute_fxn = (u32) hnode->node_env;
			else {
				status = get_fxn_address(hnode, &ul_execute_fxn,
							 EXECUTEPHASE);
			}
		}
		if (!status) {
			/* Ask the DSP-side RMS server to start the node's
			 * execute phase */
			ul_fxn_addr = hnode_mgr->fxn_addrs[RMSEXECUTENODE];
			status =
			    disp_node_run(hnode_mgr->disp_obj, hnode,
					  ul_fxn_addr, ul_execute_fxn,
					  hnode->node_env);
		}
	} else if (state == NODE_PAUSED) {
		/* Resume: restore the node's normal priority on the DSP */
		ul_fxn_addr = hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY];
		status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
						   ul_fxn_addr, hnode->node_env,
						   NODE_GET_PRIORITY(hnode));
	} else {
		/* We should never get here */
	}
func_cont1:
	/* Update node state. */
	if (status >= 0)
		NODE_SET_STATE(hnode, NODE_RUNNING);
	else			/* Set state back to previous value */
		NODE_SET_STATE(hnode, state);
	/*End of sync_enter_cs */
	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (status >= 0) {
		proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
		ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
	return status;
}
2205 
2206 /*
2207  *  ======== node_terminate ========
2208  *  Purpose:
2209  *      Signal a node running on the DSP that it should exit its execute phase
2210  *      function.
2211  */
int node_terminate(struct node_object *hnode, int *pstatus)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	enum node_state state;
	/* msg: polite RMS_EXIT request; killmsg: forced RMS_KILLTASK */
	struct dsp_msg msg, killmsg;
	int status = 0;
	u32 proc_id, kill_time_out;
	struct deh_mgr *hdeh_mgr;
	struct dsp_processorstate proc_state;

	if (!hnode || !hnode->node_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	if (pnode->processor == NULL) {
		status = -EFAULT;
		goto func_end;
	}
	status = proc_get_processor_id(pnode->processor, &proc_id);

	if (!status) {
		/* Only task and xDAIS socket nodes can be terminated */
		hnode_mgr = hnode->node_mgr;
		node_type = node_get_type(hnode);
		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
			status = -EPERM;
	}
	if (!status) {
		/* Check node state */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		if (state != NODE_RUNNING) {
			status = -EBADR;
			/* Set the exit status if node terminated on
			 * its own. */
			if (state == NODE_DONE)
				*pstatus = hnode->exit_status;

		} else {
			NODE_SET_STATE(hnode, NODE_TERMINATING);
		}
		/* end of sync_enter_cs */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}
	if (!status) {
		/*
		 *  Send exit message. Do not change state to NODE_DONE
		 *  here. That will be done in callback.
		 */
		status = proc_get_state(pnode->processor, &proc_state,
					sizeof(struct dsp_processorstate));
		if (status)
			goto func_cont;
		/* If processor is in error state then don't attempt to send
		 * A kill task command */
		if (proc_state.proc_state == PROC_ERROR) {
			status = -EPERM;
			goto func_cont;
		}

		msg.cmd = RMS_EXIT;
		msg.arg1 = hnode->node_env;
		killmsg.cmd = RMS_KILLTASK;
		killmsg.arg1 = hnode->node_env;
		intf_fxns = hnode_mgr->intf_fxns;

		/* Allow twice the node's own timeout for the kill
		 * handshake, capped at MAXTIMEOUT; each of the two waits
		 * below uses half of this budget */
		if (hnode->timeout > MAXTIMEOUT)
			kill_time_out = MAXTIMEOUT;
		else
			kill_time_out = (hnode->timeout) * 2;

		status = (*intf_fxns->msg_put) (hnode->msg_queue_obj, &msg,
						    hnode->timeout);
		if (status)
			goto func_cont;

		/*
		 * Wait on synchronization object that will be
		 * posted in the callback on receiving RMS_EXIT
		 * message, or by node_delete. Check for valid hnode,
		 * in case posted by node_delete().
		 */
		status = sync_wait_on_event(hnode->sync_done,
					    kill_time_out / 2);
		/* NOTE(review): compares against positive ETIME; assumes
		 * sync_wait_on_event returns positive ETIME on timeout —
		 * confirm against its implementation */
		if (status != ETIME)
			goto func_cont;

		/* Polite exit timed out: escalate to a forced kill */
		status = (*intf_fxns->msg_put)(hnode->msg_queue_obj,
						&killmsg, hnode->timeout);
		if (status)
			goto func_cont;
		status = sync_wait_on_event(hnode->sync_done,
					     kill_time_out / 2);
		if (status) {
			/*
			 * Here it goes the part of the simulation of
			 * the DSP exception.
			 */
			dev_get_deh_mgr(hnode_mgr->dev_obj, &hdeh_mgr);
			if (!hdeh_mgr)
				goto func_cont;

			bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, DSP_EXCEPTIONABORT);
		}
	}
func_cont:
	if (!status) {
		/* Enter CS before getting exit status, in case node was
		 * deleted. */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		/* Make sure node wasn't deleted while we blocked */
		/* NOTE(review): hnode is a local pointer and cannot become
		 * NULL here; this check looks vestigial — verify intent */
		if (!hnode) {
			status = -EPERM;
		} else {
			*pstatus = hnode->exit_status;
			dev_dbg(bridge, "%s: hnode: %p env 0x%x status 0x%x\n",
				__func__, hnode, hnode->node_env, status);
		}
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}			/*End of sync_enter_cs */
func_end:
	return status;
}
2337 
2338 /*
2339  *  ======== delete_node ========
2340  *  Purpose:
2341  *      Free GPP resources allocated in node_allocate() or node_connect().
2342  */
static void delete_node(struct node_object *hnode,
			struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct bridge_drv_interface *intf_fxns;
	u32 i;
	enum node_type node_type;
	struct stream_chnl stream;
	struct node_msgargs node_msg_args;
	struct node_taskargs task_arg_obj;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object =
	    (struct proc_object *)hnode->processor;
#endif
	int status;
	if (!hnode)
		goto func_end;
	hnode_mgr = hnode->node_mgr;
	if (!hnode_mgr)
		goto func_end;

	node_type = node_get_type(hnode);
	/* Device nodes have no message queue or stream resources */
	if (node_type != NODE_DEVICE) {
		node_msg_args = hnode->create_args.asa.node_msg_args;
		kfree(node_msg_args.pdata);

		/* Free msg_ctrl queue */
		if (hnode->msg_queue_obj) {
			intf_fxns = hnode_mgr->intf_fxns;
			(*intf_fxns->msg_delete_queue) (hnode->
							    msg_queue_obj);
			hnode->msg_queue_obj = NULL;
		}

		kfree(hnode->sync_done);

		/* Free all stream info; free_stream releases the channel /
		 * pipe ids back to the manager's bitmaps */
		if (hnode->inputs) {
			for (i = 0; i < MAX_INPUTS(hnode); i++) {
				stream = hnode->inputs[i];
				free_stream(hnode_mgr, stream);
			}
			kfree(hnode->inputs);
			hnode->inputs = NULL;
		}
		if (hnode->outputs) {
			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
				stream = hnode->outputs[i];
				free_stream(hnode_mgr, stream);
			}
			kfree(hnode->outputs);
			hnode->outputs = NULL;
		}
		/* Free per-stream device name strings and the stream
		 * definition arrays themselves */
		task_arg_obj = hnode->create_args.asa.task_arg_obj;
		if (task_arg_obj.strm_in_def) {
			for (i = 0; i < MAX_INPUTS(hnode); i++) {
				kfree(task_arg_obj.strm_in_def[i].sz_device);
				task_arg_obj.strm_in_def[i].sz_device = NULL;
			}
			kfree(task_arg_obj.strm_in_def);
			task_arg_obj.strm_in_def = NULL;
		}
		if (task_arg_obj.strm_out_def) {
			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
				kfree(task_arg_obj.strm_out_def[i].sz_device);
				task_arg_obj.strm_out_def[i].sz_device = NULL;
			}
			kfree(task_arg_obj.strm_out_def);
			task_arg_obj.strm_out_def = NULL;
		}
		/* Unmap and unreserve the node's DSP heap, if one was set up */
		if (task_arg_obj.dsp_heap_res_addr) {
			status = proc_un_map(hnode->processor, (void *)
					     task_arg_obj.dsp_heap_addr,
					     pr_ctxt);

			status = proc_un_reserve_memory(hnode->processor,
							(void *)
							task_arg_obj.
							dsp_heap_res_addr,
							pr_ctxt);
#ifdef DSP_DMM_DEBUG
			status = dmm_get_handle(p_proc_object, &dmm_mgr);
			if (dmm_mgr)
				dmm_mem_map_dump(dmm_mgr);
			else
				status = DSP_EHANDLE;
#endif
		}
	}
	if (node_type != NODE_MESSAGE) {
		kfree(hnode->stream_connect);
		hnode->stream_connect = NULL;
	}
	kfree(hnode->str_dev_name);
	hnode->str_dev_name = NULL;

	if (hnode->ntfy_obj) {
		ntfy_delete(hnode->ntfy_obj);
		kfree(hnode->ntfy_obj);
		hnode->ntfy_obj = NULL;
	}

	/* These were allocated in dcd_get_object_def (via node_allocate) */
	kfree(hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.str_i_alg_name);
	hnode->dcd_props.obj_data.node_obj.str_i_alg_name = NULL;

	/* Free all SM address translator resources */
	kfree(hnode->xlator);
	kfree(hnode->nldr_node_obj);
	hnode->nldr_node_obj = NULL;
	hnode->node_mgr = NULL;
	/* Finally release the node object itself */
	kfree(hnode);
	hnode = NULL;
func_end:
	return;
}
2469 
2470 /*
2471  *  ======== delete_node_mgr ========
2472  *  Purpose:
2473  *      Frees the node manager.
2474  */
delete_node_mgr(struct node_mgr * hnode_mgr)2475 static void delete_node_mgr(struct node_mgr *hnode_mgr)
2476 {
2477 	struct node_object *hnode, *tmp;
2478 
2479 	if (hnode_mgr) {
2480 		/* Free resources */
2481 		if (hnode_mgr->dcd_mgr)
2482 			dcd_destroy_manager(hnode_mgr->dcd_mgr);
2483 
2484 		/* Remove any elements remaining in lists */
2485 		list_for_each_entry_safe(hnode, tmp, &hnode_mgr->node_list,
2486 				list_elem) {
2487 			list_del(&hnode->list_elem);
2488 			delete_node(hnode, NULL);
2489 		}
2490 		mutex_destroy(&hnode_mgr->node_mgr_lock);
2491 		if (hnode_mgr->ntfy_obj) {
2492 			ntfy_delete(hnode_mgr->ntfy_obj);
2493 			kfree(hnode_mgr->ntfy_obj);
2494 		}
2495 
2496 		if (hnode_mgr->disp_obj)
2497 			disp_delete(hnode_mgr->disp_obj);
2498 
2499 		if (hnode_mgr->strm_mgr_obj)
2500 			strm_delete(hnode_mgr->strm_mgr_obj);
2501 
2502 		/* Delete the loader */
2503 		if (hnode_mgr->nldr_obj)
2504 			hnode_mgr->nldr_fxns.delete(hnode_mgr->nldr_obj);
2505 
2506 		kfree(hnode_mgr);
2507 	}
2508 }
2509 
2510 /*
2511  *  ======== fill_stream_connect ========
2512  *  Purpose:
2513  *      Fills stream information.
2514  */
static void fill_stream_connect(struct node_object *node1,
				struct node_object *node2,
				u32 stream1, u32 stream2)
{
	u32 strm_index;
	struct dsp_streamconnect *strm1 = NULL;
	struct dsp_streamconnect *strm2 = NULL;
	enum node_type node1_type = NODE_TASK;
	enum node_type node2_type = NODE_TASK;

	node1_type = node_get_type(node1);
	node2_type = node_get_type(node2);
	/* DSP_HGPPNODE stands in for the GPP side of a connection */
	if (node1 != (struct node_object *)DSP_HGPPNODE) {

		if (node1_type != NODE_DEVICE) {
			/* The new connection occupies the last used slot
			 * in node1's stream_connect array */
			strm_index = node1->num_inputs +
			    node1->num_outputs - 1;
			strm1 = &(node1->stream_connect[strm_index]);
			strm1->cb_struct = sizeof(struct dsp_streamconnect);
			strm1->this_node_stream_index = stream1;
		}

		if (node2 != (struct node_object *)DSP_HGPPNODE) {
			/* NODE == > NODE */
			if (node1_type != NODE_DEVICE) {
				/* strm1 was initialized above under the
				 * same node1_type != NODE_DEVICE guard */
				strm1->connected_node = node2;
				strm1->ui_connected_node_id = node2->node_uuid;
				strm1->connected_node_stream_index = stream2;
				strm1->connect_type = CONNECTTYPE_NODEOUTPUT;
			}
			if (node2_type != NODE_DEVICE) {
				/* Mirror record on the receiving node */
				strm_index = node2->num_inputs +
				    node2->num_outputs - 1;
				strm2 = &(node2->stream_connect[strm_index]);
				strm2->cb_struct =
				    sizeof(struct dsp_streamconnect);
				strm2->this_node_stream_index = stream2;
				strm2->connected_node = node1;
				strm2->ui_connected_node_id = node1->node_uuid;
				strm2->connected_node_stream_index = stream1;
				strm2->connect_type = CONNECTTYPE_NODEINPUT;
			}
		} else if (node1_type != NODE_DEVICE)
			/* NODE == > GPP */
			strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
	} else {
		/* GPP == > NODE */
		strm_index = node2->num_inputs + node2->num_outputs - 1;
		strm2 = &(node2->stream_connect[strm_index]);
		strm2->cb_struct = sizeof(struct dsp_streamconnect);
		strm2->this_node_stream_index = stream2;
		strm2->connect_type = CONNECTTYPE_GPPINPUT;
	}
}
2568 
2569 /*
2570  *  ======== fill_stream_def ========
2571  *  Purpose:
2572  *      Fills Stream attributes.
2573  */
fill_stream_def(struct node_object * hnode,struct node_strmdef * pstrm_def,struct dsp_strmattr * pattrs)2574 static void fill_stream_def(struct node_object *hnode,
2575 			    struct node_strmdef *pstrm_def,
2576 			    struct dsp_strmattr *pattrs)
2577 {
2578 	struct node_mgr *hnode_mgr = hnode->node_mgr;
2579 
2580 	if (pattrs != NULL) {
2581 		pstrm_def->num_bufs = pattrs->num_bufs;
2582 		pstrm_def->buf_size =
2583 		    pattrs->buf_size / hnode_mgr->dsp_data_mau_size;
2584 		pstrm_def->seg_id = pattrs->seg_id;
2585 		pstrm_def->buf_alignment = pattrs->buf_alignment;
2586 		pstrm_def->timeout = pattrs->timeout;
2587 	} else {
2588 		pstrm_def->num_bufs = DEFAULTNBUFS;
2589 		pstrm_def->buf_size =
2590 		    DEFAULTBUFSIZE / hnode_mgr->dsp_data_mau_size;
2591 		pstrm_def->seg_id = DEFAULTSEGID;
2592 		pstrm_def->buf_alignment = DEFAULTALIGNMENT;
2593 		pstrm_def->timeout = DEFAULTTIMEOUT;
2594 	}
2595 }
2596 
2597 /*
2598  *  ======== free_stream ========
2599  *  Purpose:
2600  *      Updates the channel mask and frees the pipe id.
2601  */
free_stream(struct node_mgr * hnode_mgr,struct stream_chnl stream)2602 static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
2603 {
2604 	/* Free up the pipe id unless other node has not yet been deleted. */
2605 	if (stream.type == NODECONNECT) {
2606 		if (test_bit(stream.dev_id, hnode_mgr->pipe_done_map)) {
2607 			/* The other node has already been deleted */
2608 			clear_bit(stream.dev_id, hnode_mgr->pipe_done_map);
2609 			clear_bit(stream.dev_id, hnode_mgr->pipe_map);
2610 		} else {
2611 			/* The other node has not been deleted yet */
2612 			set_bit(stream.dev_id, hnode_mgr->pipe_done_map);
2613 		}
2614 	} else if (stream.type == HOSTCONNECT) {
2615 		if (stream.dev_id < hnode_mgr->num_chnls) {
2616 			clear_bit(stream.dev_id, hnode_mgr->chnl_map);
2617 		} else if (stream.dev_id < (2 * hnode_mgr->num_chnls)) {
2618 			/* dsp-dma */
2619 			clear_bit(stream.dev_id - (1 * hnode_mgr->num_chnls),
2620 					hnode_mgr->dma_chnl_map);
2621 		} else if (stream.dev_id < (3 * hnode_mgr->num_chnls)) {
2622 			/* zero-copy */
2623 			clear_bit(stream.dev_id - (2 * hnode_mgr->num_chnls),
2624 					hnode_mgr->zc_chnl_map);
2625 		}
2626 	}
2627 }
2628 
2629 /*
2630  *  ======== get_fxn_address ========
2631  *  Purpose:
2632  *      Retrieves the address for create, execute or delete phase for a node.
2633  */
static int get_fxn_address(struct node_object *hnode, u32 *fxn_addr,
				  u32 phase)
{
	char *pstr_fxn_name = NULL;
	struct node_mgr *hnode_mgr = hnode->node_mgr;

	/* Pick the symbol name recorded in the DCD properties for the
	 * requested phase. */
	switch (phase) {
	case CREATEPHASE:
		pstr_fxn_name =
		    hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn;
		break;
	case EXECUTEPHASE:
		pstr_fxn_name =
		    hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn;
		break;
	case DELETEPHASE:
		pstr_fxn_name =
		    hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn;
		break;
	default:
		/* Previously fell through with a NULL name and handed it
		 * to the loader; fail explicitly instead. */
		return -EINVAL;
	}

	/* Resolve the symbol through the node loader */
	return hnode_mgr->nldr_fxns.get_fxn_addr(hnode->nldr_node_obj,
						     pstr_fxn_name, fxn_addr);
}
2665 
2666 /*
2667  *  ======== get_node_info ========
2668  *  Purpose:
2669  *      Retrieves the node information.
2670  */
get_node_info(struct node_object * hnode,struct dsp_nodeinfo * node_info)2671 void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
2672 {
2673 	u32 i;
2674 
2675 	node_info->cb_struct = sizeof(struct dsp_nodeinfo);
2676 	node_info->nb_node_database_props =
2677 	    hnode->dcd_props.obj_data.node_obj.ndb_props;
2678 	node_info->execution_priority = hnode->prio;
2679 	node_info->device_owner = hnode->device_owner;
2680 	node_info->number_streams = hnode->num_inputs + hnode->num_outputs;
2681 	node_info->node_env = hnode->node_env;
2682 
2683 	node_info->ns_execution_state = node_get_state(hnode);
2684 
2685 	/* Copy stream connect data */
2686 	for (i = 0; i < hnode->num_inputs + hnode->num_outputs; i++)
2687 		node_info->sc_stream_connection[i] = hnode->stream_connect[i];
2688 
2689 }
2690 
2691 /*
2692  *  ======== get_node_props ========
2693  *  Purpose:
2694  *      Retrieve node properties.
2695  */
static int get_node_props(struct dcd_manager *hdcd_mgr,
				 struct node_object *hnode,
				 const struct dsp_uuid *node_uuid,
				 struct dcd_genericobj *dcd_prop)
{
	u32 len;
	struct node_msgargs *pmsg_args;
	struct node_taskargs *task_arg_obj;
	enum node_type node_type = NODE_TASK;
	struct dsp_ndbprops *pndb_props =
	    &(dcd_prop->obj_data.node_obj.ndb_props);
	int status = 0;
	char sz_uuid[MAXUUIDLEN];

	/* Look the node up in the DCD database by UUID */
	status = dcd_get_object_def(hdcd_mgr, (struct dsp_uuid *)node_uuid,
				    DSP_DCDNODETYPE, dcd_prop);

	if (!status) {
		hnode->ntype = node_type = pndb_props->ntype;

		/* Create UUID value to set in registry. */
		uuid_uuid_to_string((struct dsp_uuid *)node_uuid, sz_uuid,
				    MAXUUIDLEN);
		dev_dbg(bridge, "(node) UUID: %s\n", sz_uuid);

		/* Fill in message args that come from NDB */
		if (node_type != NODE_DEVICE) {
			pmsg_args = &(hnode->create_args.asa.node_msg_args);
			pmsg_args->seg_id =
			    dcd_prop->obj_data.node_obj.msg_segid;
			pmsg_args->notify_type =
			    dcd_prop->obj_data.node_obj.msg_notify_type;
			pmsg_args->max_msgs = pndb_props->message_depth;
			dev_dbg(bridge, "(node) Max Number of Messages: 0x%x\n",
				pmsg_args->max_msgs);
		} else {
			/* Copy device name; kzalloc of len + 1 pre-zeroes
			 * the buffer, so the strncpy of exactly len bytes
			 * leaves the string NUL-terminated */
			len = strlen(pndb_props->ac_name);
			hnode->str_dev_name = kzalloc(len + 1, GFP_KERNEL);
			if (hnode->str_dev_name == NULL) {
				status = -ENOMEM;
			} else {
				strncpy(hnode->str_dev_name,
					pndb_props->ac_name, len);
			}
		}
	}
	if (!status) {
		/* Fill in create args that come from NDB */
		if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
			task_arg_obj = &(hnode->create_args.asa.task_arg_obj);
			task_arg_obj->prio = pndb_props->prio;
			task_arg_obj->stack_size = pndb_props->stack_size;
			task_arg_obj->sys_stack_size =
			    pndb_props->sys_stack_size;
			task_arg_obj->stack_seg = pndb_props->stack_seg;
			dev_dbg(bridge, "(node) Priority: 0x%x Stack Size: "
				"0x%x words System Stack Size: 0x%x words "
				"Stack Segment: 0x%x profile count : 0x%x\n",
				task_arg_obj->prio, task_arg_obj->stack_size,
				task_arg_obj->sys_stack_size,
				task_arg_obj->stack_seg,
				pndb_props->count_profiles);
		}
	}

	return status;
}
2764 
2765 /*
2766  *  ======== get_proc_props ========
2767  *  Purpose:
2768  *      Retrieve the processor properties.
2769  */
get_proc_props(struct node_mgr * hnode_mgr,struct dev_object * hdev_obj)2770 static int get_proc_props(struct node_mgr *hnode_mgr,
2771 				 struct dev_object *hdev_obj)
2772 {
2773 	struct cfg_hostres *host_res;
2774 	struct bridge_dev_context *pbridge_context;
2775 	int status = 0;
2776 
2777 	status = dev_get_bridge_context(hdev_obj, &pbridge_context);
2778 	if (!pbridge_context)
2779 		status = -EFAULT;
2780 
2781 	if (!status) {
2782 		host_res = pbridge_context->resources;
2783 		if (!host_res)
2784 			return -EPERM;
2785 		hnode_mgr->chnl_offset = host_res->chnl_offset;
2786 		hnode_mgr->chnl_buf_size = host_res->chnl_buf_size;
2787 		hnode_mgr->num_chnls = host_res->num_chnls;
2788 
2789 		/*
2790 		 *  PROC will add an API to get dsp_processorinfo.
2791 		 *  Fill in default values for now.
2792 		 */
2793 		/* TODO -- Instead of hard coding, take from registry */
2794 		hnode_mgr->proc_family = 6000;
2795 		hnode_mgr->proc_type = 6410;
2796 		hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY;
2797 		hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY;
2798 		hnode_mgr->dsp_word_size = DSPWORDSIZE;
2799 		hnode_mgr->dsp_data_mau_size = DSPWORDSIZE;
2800 		hnode_mgr->dsp_mau_size = 1;
2801 
2802 	}
2803 	return status;
2804 }
2805 
2806 /*
2807  *  ======== node_get_uuid_props ========
2808  *  Purpose:
2809  *      Fetch Node UUID properties from DCD/DOF file.
2810  */
node_get_uuid_props(void * hprocessor,const struct dsp_uuid * node_uuid,struct dsp_ndbprops * node_props)2811 int node_get_uuid_props(void *hprocessor,
2812 			       const struct dsp_uuid *node_uuid,
2813 			       struct dsp_ndbprops *node_props)
2814 {
2815 	struct node_mgr *hnode_mgr = NULL;
2816 	struct dev_object *hdev_obj;
2817 	int status = 0;
2818 	struct dcd_nodeprops dcd_node_props;
2819 	struct dsp_processorstate proc_state;
2820 
2821 	if (hprocessor == NULL || node_uuid == NULL) {
2822 		status = -EFAULT;
2823 		goto func_end;
2824 	}
2825 	status = proc_get_state(hprocessor, &proc_state,
2826 				sizeof(struct dsp_processorstate));
2827 	if (status)
2828 		goto func_end;
2829 	/* If processor is in error state then don't attempt
2830 	   to send the message */
2831 	if (proc_state.proc_state == PROC_ERROR) {
2832 		status = -EPERM;
2833 		goto func_end;
2834 	}
2835 
2836 	status = proc_get_dev_object(hprocessor, &hdev_obj);
2837 	if (hdev_obj) {
2838 		status = dev_get_node_manager(hdev_obj, &hnode_mgr);
2839 		if (hnode_mgr == NULL) {
2840 			status = -EFAULT;
2841 			goto func_end;
2842 		}
2843 	}
2844 
2845 	/*
2846 	 * Enter the critical section. This is needed because
2847 	 * dcd_get_object_def will ultimately end up calling dbll_open/close,
2848 	 * which needs to be protected in order to not corrupt the zlib manager
2849 	 * (COD).
2850 	 */
2851 	mutex_lock(&hnode_mgr->node_mgr_lock);
2852 
2853 	dcd_node_props.str_create_phase_fxn = NULL;
2854 	dcd_node_props.str_execute_phase_fxn = NULL;
2855 	dcd_node_props.str_delete_phase_fxn = NULL;
2856 	dcd_node_props.str_i_alg_name = NULL;
2857 
2858 	status = dcd_get_object_def(hnode_mgr->dcd_mgr,
2859 		(struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE,
2860 		(struct dcd_genericobj *)&dcd_node_props);
2861 
2862 	if (!status) {
2863 		*node_props = dcd_node_props.ndb_props;
2864 		kfree(dcd_node_props.str_create_phase_fxn);
2865 
2866 		kfree(dcd_node_props.str_execute_phase_fxn);
2867 
2868 		kfree(dcd_node_props.str_delete_phase_fxn);
2869 
2870 		kfree(dcd_node_props.str_i_alg_name);
2871 	}
2872 	/*  Leave the critical section, we're done. */
2873 	mutex_unlock(&hnode_mgr->node_mgr_lock);
2874 func_end:
2875 	return status;
2876 }
2877 
2878 /*
2879  *  ======== get_rms_fxns ========
2880  *  Purpose:
2881  *      Retrieve the RMS functions.
2882  */
get_rms_fxns(struct node_mgr * hnode_mgr)2883 static int get_rms_fxns(struct node_mgr *hnode_mgr)
2884 {
2885 	s32 i;
2886 	struct dev_object *dev_obj = hnode_mgr->dev_obj;
2887 	int status = 0;
2888 
2889 	static char *psz_fxns[NUMRMSFXNS] = {
2890 		"RMS_queryServer",	/* RMSQUERYSERVER */
2891 		"RMS_configureServer",	/* RMSCONFIGURESERVER */
2892 		"RMS_createNode",	/* RMSCREATENODE */
2893 		"RMS_executeNode",	/* RMSEXECUTENODE */
2894 		"RMS_deleteNode",	/* RMSDELETENODE */
2895 		"RMS_changeNodePriority",	/* RMSCHANGENODEPRIORITY */
2896 		"RMS_readMemory",	/* RMSREADMEMORY */
2897 		"RMS_writeMemory",	/* RMSWRITEMEMORY */
2898 		"RMS_copy",	/* RMSCOPY */
2899 	};
2900 
2901 	for (i = 0; i < NUMRMSFXNS; i++) {
2902 		status = dev_get_symbol(dev_obj, psz_fxns[i],
2903 					&(hnode_mgr->fxn_addrs[i]));
2904 		if (status) {
2905 			if (status == -ESPIPE) {
2906 				/*
2907 				 *  May be loaded dynamically (in the future),
2908 				 *  but return an error for now.
2909 				 */
2910 				dev_dbg(bridge, "%s: RMS function: %s currently"
2911 					" not loaded\n", __func__, psz_fxns[i]);
2912 			} else {
2913 				dev_dbg(bridge, "%s: Symbol not found: %s "
2914 					"status = 0x%x\n", __func__,
2915 					psz_fxns[i], status);
2916 				break;
2917 			}
2918 		}
2919 	}
2920 
2921 	return status;
2922 }
2923 
2924 /*
2925  *  ======== ovly ========
2926  *  Purpose:
 *      Called during overlay. Sends a command to RMS to copy a block of data.
2928  */
static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
		u32 ul_num_bytes, u32 mem_space)
{
	struct node_object *hnode = (struct node_object *)priv_ref;
	struct node_mgr *hnode_mgr;
	u32 ul_bytes = 0;
	int status = 0;
	struct bridge_dev_context *hbridge_context;
	/* Function interface to Bridge driver*/
	struct bridge_drv_interface *intf_fxns;

	hnode_mgr = hnode->node_mgr;

	/* Call new MemCopy function.
	 * Returns the number of bytes copied (0 on failure). */
	intf_fxns = hnode_mgr->intf_fxns;
	status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
	if (!status) {
		status =
		    (*intf_fxns->brd_mem_copy) (hbridge_context,
						dsp_run_addr, dsp_load_addr,
						ul_num_bytes, (u32) mem_space);
		if (!status)
			ul_bytes = ul_num_bytes;
		else
			pr_debug("%s: failed to copy brd memory, status 0x%x\n",
				 __func__, status);
	} else {
		pr_debug("%s: failed to get Bridge context, status 0x%x\n",
			 __func__, status);
	}

	return ul_bytes;
}
2967 
2968 /*
2969  *  ======== mem_write ========
2970  */
static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
		     u32 ul_num_bytes, u32 mem_space)
{
	struct node_object *hnode = (struct node_object *)priv_ref;
	struct node_mgr *hnode_mgr;
	u16 mem_sect_type;
	int status = 0;
	struct bridge_dev_context *hbridge_context;
	/* Function interface to Bridge driver */
	struct bridge_drv_interface *intf_fxns;

	hnode_mgr = hnode->node_mgr;

	mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA;

	/* Call new MemWrite function.
	 * Return the number of bytes written, 0 on failure (same convention
	 * as ovly() above). */
	intf_fxns = hnode_mgr->intf_fxns;
	status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
	if (status) {
		/* Without a valid bridge context hbridge_context is
		 * uninitialized; do not pass it on. */
		pr_debug("%s: failed to get Bridge context, status 0x%x\n",
			 __func__, status);
		return 0;
	}
	status = (*intf_fxns->brd_mem_write) (hbridge_context, pbuf,
					dsp_add, ul_num_bytes, mem_sect_type);
	if (status) {
		pr_debug("%s: failed to write brd memory, status 0x%x\n",
			 __func__, status);
		return 0;
	}

	return ul_num_bytes;
}
2996 
2997 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
2998 /*
2999  *  ======== node_find_addr ========
3000  */
int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
		u32 offset_range, void *sym_addr_output, char *sym_name)
{
	struct node_object *obj;
	int ret = -ENOENT;

	pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x,  %s)\n", __func__,
			(unsigned int) node_mgr,
			sym_addr, offset_range,
			(unsigned int) sym_addr_output, sym_name);

	/* Walk every node on this manager and return as soon as one of
	 * their dynamic loaders resolves the address. */
	list_for_each_entry(obj, &node_mgr->node_list, list_elem) {
		ret = nldr_find_addr(obj->nldr_node_obj, sym_addr,
				     offset_range, sym_addr_output, sym_name);
		if (ret == 0)
			return ret;
	}

	/* -ENOENT if the list was empty, else the last lookup's status. */
	return ret;
}
3021 #endif
3022