drivers/staging/tidspbridge/rmgr/proc.c
1/*
2 * proc.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Processor interface at the driver level.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#include <linux/types.h>
20/* ------------------------------------ Host OS */
21#include <linux/dma-mapping.h>
22#include <linux/scatterlist.h>
23#include <dspbridge/host_os.h>
24
25/* ----------------------------------- DSP/BIOS Bridge */
26#include <dspbridge/dbdefs.h>
27
28/* ----------------------------------- Trace & Debug */
29#include <dspbridge/dbc.h>
30
31/* ----------------------------------- OS Adaptation Layer */
32#include <dspbridge/ntfy.h>
33#include <dspbridge/sync.h>
34/* ----------------------------------- Bridge Driver */
35#include <dspbridge/dspdefs.h>
36#include <dspbridge/dspdeh.h>
37/* ----------------------------------- Platform Manager */
38#include <dspbridge/cod.h>
39#include <dspbridge/dev.h>
40#include <dspbridge/procpriv.h>
41#include <dspbridge/dmm.h>
42
43/* ----------------------------------- Resource Manager */
44#include <dspbridge/mgr.h>
45#include <dspbridge/node.h>
46#include <dspbridge/nldr.h>
47#include <dspbridge/rmm.h>
48
49/* ----------------------------------- Others */
50#include <dspbridge/dbdcd.h>
51#include <dspbridge/msg.h>
52#include <dspbridge/dspioctl.h>
53#include <dspbridge/drv.h>
54
55/* ----------------------------------- This */
56#include <dspbridge/proc.h>
57#include <dspbridge/pwr.h>
58
59#include <dspbridge/resourcecleanup.h>
60/* ----------------------------------- Defines, Data Structures, Typedefs */
61#define MAXCMDLINELEN 255
62#define PROC_ENVPROCID "PROC_ID=%d"
63#define MAXPROCIDLEN (8 + 5)
64#define PROC_DFLT_TIMEOUT 10000 /* Time out in milliseconds */
 65#define PWR_TIMEOUT 500 /* Sleep/wake timeout in msec */
66#define EXTEND "_EXT_END" /* Extmem end addr in DSP binary */
67
68#define DSP_CACHE_LINE 128
69
70#define BUFMODE_MASK (3 << 14)
71
72/* Buffer modes from DSP perspective */
73#define RBUF 0x4000 /* Input buffer */
74#define WBUF 0x8000 /* Output Buffer */
75
76extern struct device *bridge;
77
78/* ----------------------------------- Globals */
79
80/* The proc_object structure. */
81struct proc_object {
82 struct list_head link; /* Link to next proc_object */
 83 struct dev_object *dev_obj; /* Device this PROC represents */
 84 u32 process; /* Process owning this Processor */
 85 struct mgr_object *mgr_obj; /* Manager Object Handle */
86 u32 attach_count; /* Processor attach count */
87 u32 processor_id; /* Processor number */
 88 u32 timeout; /* Time out count */
 89 enum dsp_procstate proc_state; /* Processor state */
 90 u32 unit; /* DDSP unit number */
91 bool is_already_attached; /*
92 * True if the Device below has
93 * GPP Client attached
94 */
95 struct ntfy_object *ntfy_obj; /* Manages notifications */
96 /* Bridge Context Handle */
 97 struct bridge_dev_context *bridge_context;
98 /* Function interface to Bridge driver */
99 struct bridge_drv_interface *intf_fxns;
 100 char *last_coff;
101 struct list_head proc_list;
102};
103
104static u32 refs;
105
106DEFINE_MUTEX(proc_lock); /* For critical sections */
107
108/* ----------------------------------- Function Prototypes */
 109static int proc_monitor(struct proc_object *proc_obj);
110static s32 get_envp_count(char **envp);
111static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
 112 s32 cnew_envp, char *sz_var);
113
114/* remember mapping information */
115static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
116 u32 mpu_addr, u32 dsp_addr, u32 size)
117{
118 struct dmm_map_object *map_obj;
119
120 u32 num_usr_pgs = size / PG_SIZE4K;
121
122 pr_debug("%s: adding map info: mpu_addr 0x%x virt 0x%x size 0x%x\n",
123 __func__, mpu_addr,
124 dsp_addr, size);
125
126 map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL);
127 if (!map_obj) {
128 pr_err("%s: kzalloc failed\n", __func__);
129 return NULL;
130 }
131 INIT_LIST_HEAD(&map_obj->link);
132
133 map_obj->pages = kcalloc(num_usr_pgs, sizeof(struct page *),
134 GFP_KERNEL);
135 if (!map_obj->pages) {
136 pr_err("%s: kzalloc failed\n", __func__);
137 kfree(map_obj);
138 return NULL;
139 }
140
141 map_obj->mpu_addr = mpu_addr;
142 map_obj->dsp_addr = dsp_addr;
143 map_obj->size = size;
144 map_obj->num_usr_pgs = num_usr_pgs;
145
146 spin_lock(&pr_ctxt->dmm_map_lock);
147 list_add(&map_obj->link, &pr_ctxt->dmm_map_list);
148 spin_unlock(&pr_ctxt->dmm_map_lock);
149
150 return map_obj;
151}
152
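/* Return true only if the cached mapping matches the given DSP address and size exactly. */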
153static int match_exact_map_obj(struct dmm_map_object *map_obj,
154 u32 dsp_addr, u32 size)
155{
156 if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
157 pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n",
158 __func__, dsp_addr, map_obj->size, size);
159
160 return map_obj->dsp_addr == dsp_addr &&
161 map_obj->size == size;
162}
163
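/* Drop the cached mapping entry that exactly matches dsp_addr/size and free its page array and scatterlist. */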
 164static void remove_mapping_information(struct process_context *pr_ctxt,
 165 u32 dsp_addr, u32 size)
166{
167 struct dmm_map_object *map_obj;
168
169 pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
170 dsp_addr, size);
171
172 spin_lock(&pr_ctxt->dmm_map_lock);
173 list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
 174 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
175 __func__,
176 map_obj->mpu_addr,
177 map_obj->dsp_addr,
178 map_obj->size);
 179
 180 if (match_exact_map_obj(map_obj, dsp_addr, size)) {
181 pr_debug("%s: match, deleting map info\n", __func__);
182 list_del(&map_obj->link);
183 kfree(map_obj->dma_info.sg);
184 kfree(map_obj->pages);
185 kfree(map_obj);
186 goto out;
187 }
188 pr_debug("%s: candidate didn't match\n", __func__);
189 }
190
191 pr_err("%s: failed to find given map info\n", __func__);
192out:
193 spin_unlock(&pr_ctxt->dmm_map_lock);
194}
195
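/* Return true if the [mpu_addr, mpu_addr + size) range lies entirely inside the cached mapping. */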
196static int match_containing_map_obj(struct dmm_map_object *map_obj,
197 u32 mpu_addr, u32 size)
198{
199 u32 map_obj_end = map_obj->mpu_addr + map_obj->size;
200
201 return mpu_addr >= map_obj->mpu_addr &&
202 mpu_addr + size <= map_obj_end;
203}
204
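/* Walk the per-process dmm_map_list and return the mapping that contains the given MPU range, or NULL if none does. */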
205static struct dmm_map_object *find_containing_mapping(
206 struct process_context *pr_ctxt,
207 u32 mpu_addr, u32 size)
208{
209 struct dmm_map_object *map_obj;
210 pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__,
211 mpu_addr, size);
212
213 spin_lock(&pr_ctxt->dmm_map_lock);
214 list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
215 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
216 __func__,
217 map_obj->mpu_addr,
218 map_obj->dsp_addr,
219 map_obj->size);
220 if (match_containing_map_obj(map_obj, mpu_addr, size)) {
221 pr_debug("%s: match!\n", __func__);
222 goto out;
223 }
224
225 pr_debug("%s: no match!\n", __func__);
226 }
227
228 map_obj = NULL;
229out:
230 spin_unlock(&pr_ctxt->dmm_map_lock);
231 return map_obj;
232}
233
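/* Translate an MPU address into an index into the mapping's cached page array; returns -1 if it falls outside the mapping. */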
234static int find_first_page_in_cache(struct dmm_map_object *map_obj,
235 unsigned long mpu_addr)
236{
237 u32 mapped_base_page = map_obj->mpu_addr >> PAGE_SHIFT;
238 u32 requested_base_page = mpu_addr >> PAGE_SHIFT;
239 int pg_index = requested_base_page - mapped_base_page;
240
241 if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) {
242 pr_err("%s: failed (got %d)\n", __func__, pg_index);
243 return -1;
244 }
245
246 pr_debug("%s: first page is %d\n", __func__, pg_index);
247 return pg_index;
248}
249
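/* Return the cached struct page at index pg_i, or NULL when the index is outside the mapped range. */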
250static inline struct page *get_mapping_page(struct dmm_map_object *map_obj,
251 int pg_i)
252{
253 pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__,
254 pg_i, map_obj->num_usr_pgs);
255
256 if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) {
257 pr_err("%s: requested pg_i %d is out of mapped range\n",
258 __func__, pg_i);
259 return NULL;
260 }
261
262 return map_obj->pages[pg_i];
263}
264
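/*
 * Illustrative call sequence for this interface (a sketch, not taken from
 * this file's callers):
 * proc_attach() -> proc_load() -> proc_start() -> ... -> proc_stop() -> proc_detach()
 */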
265/*
266 * ======== proc_attach ========
267 * Purpose:
268 * Prepare for communication with a particular DSP processor, and return
269 * a handle to the processor object.
270 */
271int
272proc_attach(u32 processor_id,
 273 const struct dsp_processorattrin *attr_in,
274 void **ph_processor, struct process_context *pr_ctxt)
275{
276 int status = 0;
277 struct dev_object *hdev_obj;
278 struct proc_object *p_proc_object = NULL;
279 struct mgr_object *hmgr_obj = NULL;
280 struct drv_object *hdrv_obj = NULL;
 281 struct drv_data *drv_datap = dev_get_drvdata(bridge);
282 u8 dev_type;
283
284 DBC_REQUIRE(refs > 0);
285 DBC_REQUIRE(ph_processor != NULL);
286
287 if (pr_ctxt->processor) {
288 *ph_processor = pr_ctxt->processor;
289 return status;
290 }
291
292 /* Get the Driver and Manager Object Handles */
293 if (!drv_datap || !drv_datap->drv_object || !drv_datap->mgr_object) {
294 status = -ENODATA;
295 pr_err("%s: Failed to get object handles\n", __func__);
296 } else {
297 hdrv_obj = drv_datap->drv_object;
298 hmgr_obj = drv_datap->mgr_object;
299 }
 300
 301 if (!status) {
302 /* Get the Device Object */
303 status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj);
304 }
 305 if (!status)
306 status = dev_get_dev_type(hdev_obj, &dev_type);
307
 308 if (status)
309 goto func_end;
310
 311 /* If we made it this far, create the Processor object: */
312 p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
313 /* Fill out the Processor Object: */
314 if (p_proc_object == NULL) {
315 status = -ENOMEM;
316 goto func_end;
317 }
318 p_proc_object->dev_obj = hdev_obj;
319 p_proc_object->mgr_obj = hmgr_obj;
320 p_proc_object->processor_id = dev_type;
321 /* Store TGID instead of process handle */
322 p_proc_object->process = current->tgid;
323
324 INIT_LIST_HEAD(&p_proc_object->proc_list);
325
326 if (attr_in)
 327 p_proc_object->timeout = attr_in->timeout;
 328 else
 329 p_proc_object->timeout = PROC_DFLT_TIMEOUT;
330
331 status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
 332 if (!status) {
 333 status = dev_get_bridge_context(hdev_obj,
 334 &p_proc_object->bridge_context);
 335 if (status)
336 kfree(p_proc_object);
337 } else
338 kfree(p_proc_object);
339
 340 if (status)
341 goto func_end;
342
343 /* Create the Notification Object */
344 /* This is created with no event mask, no notify mask
345 * and no valid handle to the notification. They all get
346 * filled up when proc_register_notify is called */
347 p_proc_object->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
348 GFP_KERNEL);
349 if (p_proc_object->ntfy_obj)
350 ntfy_init(p_proc_object->ntfy_obj);
351 else
352 status = -ENOMEM;
353
 354 if (!status) {
355 /* Insert the Processor Object into the DEV List.
356 * Return handle to this Processor Object:
357 * Find out if the Device is already attached to a
358 * Processor. If so, return AlreadyAttached status */
 359 status = dev_insert_proc_object(p_proc_object->dev_obj,
360 (u32) p_proc_object,
361 &p_proc_object->
362 is_already_attached);
 363 if (!status) {
364 if (p_proc_object->is_already_attached)
365 status = 0;
366 } else {
367 if (p_proc_object->ntfy_obj) {
368 ntfy_delete(p_proc_object->ntfy_obj);
369 kfree(p_proc_object->ntfy_obj);
370 }
371
372 kfree(p_proc_object);
373 }
 374 if (!status) {
 375 *ph_processor = (void *)p_proc_object;
 376 pr_ctxt->processor = *ph_processor;
377 (void)proc_notify_clients(p_proc_object,
378 DSP_PROCESSORATTACH);
379 }
380 } else {
 381 /* Don't leak memory if the attach failed */
382 kfree(p_proc_object);
383 }
384func_end:
385 DBC_ENSURE((status == -EPERM && *ph_processor == NULL) ||
 386 (!status && p_proc_object) ||
387 (status == 0 && p_proc_object));
388
389 return status;
390}
391
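/* Copy the name of the default executable for this device type into exec_file (the stored base image for a DSP unit). */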
392static int get_exec_file(struct cfg_devnode *dev_node_obj,
393 struct dev_object *hdev_obj,
 394 u32 size, char *exec_file)
395{
396 u8 dev_type;
397 s32 len;
 398 struct drv_data *drv_datap = dev_get_drvdata(bridge);
399
400 dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
401
402 if (!exec_file)
403 return -EFAULT;
404
 405 if (dev_type == DSP_UNIT) {
406 if (!drv_datap || !drv_datap->base_img)
407 return -EFAULT;
408
409 if (strlen(drv_datap->base_img) > size)
410 return -EINVAL;
411
412 strcpy(exec_file, drv_datap->base_img);
413 } else if (dev_type == IVA_UNIT && iva_img) {
414 len = strlen(iva_img);
415 strncpy(exec_file, iva_img, len + 1);
416 } else {
417 return -ENOENT;
 418 }
419
420 return 0;
421}
422
423/*
 424 * ======== proc_auto_start ========
425 * Purpose:
426 * A Particular device gets loaded with the default image
427 * if the AutoStart flag is set.
428 * Parameters:
429 * hdev_obj: Handle to the Device
430 * Returns:
431 * 0: On Successful Loading
432 * -EPERM General Failure
433 * Requires:
434 * hdev_obj != NULL
435 * Ensures:
436 */
437int proc_auto_start(struct cfg_devnode *dev_node_obj,
438 struct dev_object *hdev_obj)
439{
440 int status = -EPERM;
441 struct proc_object *p_proc_object;
442 char sz_exec_file[MAXCMDLINELEN];
443 char *argv[2];
444 struct mgr_object *hmgr_obj = NULL;
 445 struct drv_data *drv_datap = dev_get_drvdata(bridge);
446 u8 dev_type;
447
448 DBC_REQUIRE(refs > 0);
449 DBC_REQUIRE(dev_node_obj != NULL);
450 DBC_REQUIRE(hdev_obj != NULL);
451
452 /* Create a Dummy PROC Object */
453 if (!drv_datap || !drv_datap->mgr_object) {
454 status = -ENODATA;
455 pr_err("%s: Failed to retrieve the object handle\n", __func__);
7d55524d 456 goto func_end;
457 } else {
458 hmgr_obj = drv_datap->mgr_object;
459 }
460
461 p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
462 if (p_proc_object == NULL) {
463 status = -ENOMEM;
464 goto func_end;
465 }
466 p_proc_object->dev_obj = hdev_obj;
467 p_proc_object->mgr_obj = hmgr_obj;
 468 status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
 469 if (!status)
 470 status = dev_get_bridge_context(hdev_obj,
 471 &p_proc_object->bridge_context);
 472 if (status)
473 goto func_cont;
474
475 /* Stop the Device, put it into standby mode */
476 status = proc_stop(p_proc_object);
477
 478 if (status)
479 goto func_cont;
480
481 /* Get the default executable for this board... */
482 dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
483 p_proc_object->processor_id = dev_type;
484 status = get_exec_file(dev_node_obj, hdev_obj, sizeof(sz_exec_file),
485 sz_exec_file);
 486 if (!status) {
487 argv[0] = sz_exec_file;
488 argv[1] = NULL;
489 /* ...and try to load it: */
 490 status = proc_load(p_proc_object, 1, (const char **)argv, NULL);
 491 if (!status)
492 status = proc_start(p_proc_object);
493 }
494 kfree(p_proc_object->last_coff);
495 p_proc_object->last_coff = NULL;
496func_cont:
497 kfree(p_proc_object);
498func_end:
499 return status;
500}
501
502/*
503 * ======== proc_ctrl ========
504 * Purpose:
505 * Pass control information to the GPP device driver managing the
506 * DSP processor.
507 *
508 * This will be an OEM-only function, and not part of the DSP/BIOS Bridge
509 * application developer's API.
510 * Call the bridge_dev_ctrl fxn with the Argument. This is a Synchronous
511 * Operation. arg can be null.
512 */
 513int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata * arg)
514{
515 int status = 0;
516 struct proc_object *p_proc_object = hprocessor;
517 u32 timeout = 0;
518
519 DBC_REQUIRE(refs > 0);
520
521 if (p_proc_object) {
522 /* intercept PWR deep sleep command */
523 if (dw_cmd == BRDIOCTL_DEEPSLEEP) {
524 timeout = arg->cb_data;
525 status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
526 }
527 /* intercept PWR emergency sleep command */
528 else if (dw_cmd == BRDIOCTL_EMERGENCYSLEEP) {
529 timeout = arg->cb_data;
530 status = pwr_sleep_dsp(PWR_EMERGENCYDEEPSLEEP, timeout);
531 } else if (dw_cmd == PWR_DEEPSLEEP) {
532 /* timeout = arg->cb_data; */
533 status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
534 }
535 /* intercept PWR wake commands */
536 else if (dw_cmd == BRDIOCTL_WAKEUP) {
537 timeout = arg->cb_data;
538 status = pwr_wake_dsp(timeout);
539 } else if (dw_cmd == PWR_WAKEUP) {
540 /* timeout = arg->cb_data; */
541 status = pwr_wake_dsp(timeout);
542 } else
 543 if (!((*p_proc_object->intf_fxns->dev_cntrl)
 544 (p_proc_object->bridge_context, dw_cmd,
545 arg))) {
546 status = 0;
547 } else {
548 status = -EPERM;
549 }
550 } else {
551 status = -EFAULT;
552 }
553
554 return status;
555}
556
557/*
558 * ======== proc_detach ========
559 * Purpose:
560 * Destroys the Processor Object. Removes the notification from the Dev
561 * List.
562 */
563int proc_detach(struct process_context *pr_ctxt)
564{
565 int status = 0;
566 struct proc_object *p_proc_object = NULL;
567
568 DBC_REQUIRE(refs > 0);
569
 570 p_proc_object = (struct proc_object *)pr_ctxt->processor;
571
572 if (p_proc_object) {
573 /* Notify the Client */
574 ntfy_notify(p_proc_object->ntfy_obj, DSP_PROCESSORDETACH);
575 /* Remove the notification memory */
576 if (p_proc_object->ntfy_obj) {
577 ntfy_delete(p_proc_object->ntfy_obj);
578 kfree(p_proc_object->ntfy_obj);
579 }
580
581 kfree(p_proc_object->last_coff);
582 p_proc_object->last_coff = NULL;
 583 /* Remove the Proc from the DEV List */
 584 (void)dev_remove_proc_object(p_proc_object->dev_obj,
585 (u32) p_proc_object);
586 /* Free the Processor Object */
587 kfree(p_proc_object);
 588 pr_ctxt->processor = NULL;
589 } else {
590 status = -EFAULT;
591 }
592
593 return status;
594}
595
596/*
597 * ======== proc_enum_nodes ========
598 * Purpose:
599 * Enumerate and get configuration information about nodes allocated
600 * on a DSP processor.
601 */
602int proc_enum_nodes(void *hprocessor, void **node_tab,
603 u32 node_tab_size, u32 *pu_num_nodes,
604 u32 *pu_allocated)
605{
606 int status = -EPERM;
607 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
608 struct node_mgr *hnode_mgr = NULL;
609
610 DBC_REQUIRE(refs > 0);
611 DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
612 DBC_REQUIRE(pu_num_nodes != NULL);
613 DBC_REQUIRE(pu_allocated != NULL);
614
615 if (p_proc_object) {
 616 if (!(dev_get_node_manager(p_proc_object->dev_obj,
617 &hnode_mgr))) {
618 if (hnode_mgr) {
619 status = node_enum_nodes(hnode_mgr, node_tab,
620 node_tab_size,
621 pu_num_nodes,
622 pu_allocated);
623 }
624 }
625 } else {
626 status = -EFAULT;
627 }
628
629 return status;
630}
631
 632/* Cache operations are done on kernel addresses rather than user addresses: build a scatterlist over the buffer's cached pages */
633static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start,
634 ssize_t len, int pg_i)
635{
636 struct page *page;
637 unsigned long offset;
638 ssize_t rest;
639 int ret = 0, i = 0;
640 struct scatterlist *sg = map_obj->dma_info.sg;
641
642 while (len) {
643 page = get_mapping_page(map_obj, pg_i);
644 if (!page) {
645 pr_err("%s: no page for %08lx\n", __func__, start);
646 ret = -EINVAL;
647 goto out;
648 } else if (IS_ERR(page)) {
649 pr_err("%s: err page for %08lx(%lu)\n", __func__, start,
650 PTR_ERR(page));
651 ret = PTR_ERR(page);
652 goto out;
653 }
654
655 offset = start & ~PAGE_MASK;
656 rest = min_t(ssize_t, PAGE_SIZE - offset, len);
657
658 sg_set_page(&sg[i], page, rest, offset);
659
660 len -= rest;
661 start += rest;
662 pg_i++, i++;
663 }
664
665 if (i != map_obj->dma_info.num_pages) {
666 pr_err("%s: bad number of sg iterations\n", __func__);
667 ret = -EFAULT;
668 goto out;
669 }
670
671out:
672 return ret;
673}
674
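/* Return buffer ownership to the MPU: unmap and free the scatterlist built by memory_give_ownership(). */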
675static int memory_regain_ownership(struct dmm_map_object *map_obj,
676 unsigned long start, ssize_t len, enum dma_data_direction dir)
677{
678 int ret = 0;
679 unsigned long first_data_page = start >> PAGE_SHIFT;
680 unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
681 /* calculating the number of pages this area spans */
682 unsigned long num_pages = last_data_page - first_data_page + 1;
683 struct bridge_dma_map_info *dma_info = &map_obj->dma_info;
684
685 if (!dma_info->sg)
686 goto out;
687
688 if (dma_info->dir != dir || dma_info->num_pages != num_pages) {
689 pr_err("%s: dma info doesn't match given params\n", __func__);
690 return -EINVAL;
691 }
692
693 dma_unmap_sg(bridge, dma_info->sg, num_pages, dma_info->dir);
694
695 pr_debug("%s: dma_map_sg unmapped\n", __func__);
696
697 kfree(dma_info->sg);
698
699 map_obj->dma_info.sg = NULL;
700
701out:
702 return ret;
703}
704
 705/* Cache operations are done on kernel addresses rather than user addresses: DMA-map the buffer's cached pages for the given direction */
706static int memory_give_ownership(struct dmm_map_object *map_obj,
707 unsigned long start, ssize_t len, enum dma_data_direction dir)
708{
709 int pg_i, ret, sg_num;
710 struct scatterlist *sg;
711 unsigned long first_data_page = start >> PAGE_SHIFT;
712 unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
713 /* calculating the number of pages this area spans */
714 unsigned long num_pages = last_data_page - first_data_page + 1;
715
716 pg_i = find_first_page_in_cache(map_obj, start);
717 if (pg_i < 0) {
718 pr_err("%s: failed to find first page in cache\n", __func__);
719 ret = -EINVAL;
720 goto out;
721 }
722
723 sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
724 if (!sg) {
725 pr_err("%s: kcalloc failed\n", __func__);
726 ret = -ENOMEM;
727 goto out;
728 }
729
730 sg_init_table(sg, num_pages);
731
732 /* cleanup a previous sg allocation */
 733 /* this may happen if the application doesn't signal end of DMA */
734 kfree(map_obj->dma_info.sg);
735
736 map_obj->dma_info.sg = sg;
737 map_obj->dma_info.dir = dir;
738 map_obj->dma_info.num_pages = num_pages;
739
740 ret = build_dma_sg(map_obj, start, len, pg_i);
741 if (ret)
742 goto kfree_sg;
743
744 sg_num = dma_map_sg(bridge, sg, num_pages, dir);
745 if (sg_num < 1) {
746 pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num);
747 ret = -EFAULT;
748 goto kfree_sg;
749 }
750
751 pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num);
752 map_obj->dma_info.sg_num = sg_num;
753
754 return 0;
755
756kfree_sg:
757 kfree(sg);
758 map_obj->dma_info.sg = NULL;
759out:
760 return ret;
761}
762
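/* Hand the user buffer over to the DSP for DMA: look up its cached mapping and DMA-map the pages for the given direction. */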
763int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
764 enum dma_data_direction dir)
765{
766 /* Keep STATUS here for future additions to this function */
767 int status = 0;
768 struct process_context *pr_ctxt = (struct process_context *) hprocessor;
769 struct dmm_map_object *map_obj;
770
771 DBC_REQUIRE(refs > 0);
772
773 if (!pr_ctxt) {
774 status = -EFAULT;
775 goto err_out;
776 }
777
778 pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
779 (u32)pmpu_addr,
780 ul_size, dir);
781
782 mutex_lock(&proc_lock);
783
 784 /* find requested memory area in cached mapping information */
785 map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
786 if (!map_obj) {
787 pr_err("%s: find_containing_mapping failed\n", __func__);
788 status = -EFAULT;
 789 goto no_map;
790 }
791
792 if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
 793 pr_err("%s: Invalid address parameters %p %x\n",
794 __func__, pmpu_addr, ul_size);
795 status = -EFAULT;
796 }
797
798no_map:
799 mutex_unlock(&proc_lock);
800err_out:
801
802 return status;
803}
804
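/* Complete a DMA transfer: find the cached mapping for the user buffer and unmap its scatterlist, returning ownership to the MPU. */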
805int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
806 enum dma_data_direction dir)
807{
808 /* Keep STATUS here for future additions to this function */
809 int status = 0;
810 struct process_context *pr_ctxt = (struct process_context *) hprocessor;
811 struct dmm_map_object *map_obj;
812
813 DBC_REQUIRE(refs > 0);
814
815 if (!pr_ctxt) {
816 status = -EFAULT;
817 goto err_out;
818 }
819
820 pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
821 (u32)pmpu_addr,
822 ul_size, dir);
823
824 mutex_lock(&proc_lock);
825
 826 /* find requested memory area in cached mapping information */
827 map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
828 if (!map_obj) {
829 pr_err("%s: find_containing_mapping failed\n", __func__);
830 status = -EFAULT;
 831 goto no_map;
832 }
833
834 if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
 835 pr_err("%s: Invalid address parameters %p %x\n",
836 __func__, pmpu_addr, ul_size);
837 status = -EFAULT;
838 }
839
840no_map:
841 mutex_unlock(&proc_lock);
842err_out:
843 return status;
844}
845
846/*
847 * ======== proc_flush_memory ========
848 * Purpose:
849 * Flush cache
850 */
851int proc_flush_memory(void *hprocessor, void *pmpu_addr,
852 u32 ul_size, u32 ul_flags)
853{
854 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
855
856 return proc_begin_dma(hprocessor, pmpu_addr, ul_size, dir);
857}
858
859/*
860 * ======== proc_invalidate_memory ========
861 * Purpose:
862 * Invalidates the memory specified
863 */
864int proc_invalidate_memory(void *hprocessor, void *pmpu_addr, u32 size)
865{
866 enum dma_data_direction dir = DMA_FROM_DEVICE;
867
868 return proc_begin_dma(hprocessor, pmpu_addr, size, dir);
869}
870
871/*
872 * ======== proc_get_resource_info ========
873 * Purpose:
874 * Enumerate the resources currently available on a processor.
875 */
876int proc_get_resource_info(void *hprocessor, u32 resource_type,
 877 struct dsp_resourceinfo *resource_info,
878 u32 resource_info_size)
879{
880 int status = -EPERM;
881 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
882 struct node_mgr *hnode_mgr = NULL;
883 struct nldr_object *nldr_obj = NULL;
884 struct rmm_target_obj *rmm = NULL;
885 struct io_mgr *hio_mgr = NULL; /* IO manager handle */
886
887 DBC_REQUIRE(refs > 0);
888 DBC_REQUIRE(resource_info != NULL);
889 DBC_REQUIRE(resource_info_size >= sizeof(struct dsp_resourceinfo));
890
891 if (!p_proc_object) {
892 status = -EFAULT;
893 goto func_end;
894 }
895 switch (resource_type) {
896 case DSP_RESOURCE_DYNDARAM:
897 case DSP_RESOURCE_DYNSARAM:
898 case DSP_RESOURCE_DYNEXTERNAL:
899 case DSP_RESOURCE_DYNSRAM:
 900 status = dev_get_node_manager(p_proc_object->dev_obj,
901 &hnode_mgr);
902 if (!hnode_mgr) {
903 status = -EFAULT;
904 goto func_end;
905 }
906
907 status = node_get_nldr_obj(hnode_mgr, &nldr_obj);
 908 if (!status) {
909 status = nldr_get_rmm_manager(nldr_obj, &rmm);
910 if (rmm) {
911 if (!rmm_stat(rmm,
912 (enum dsp_memtype)resource_type,
913 (struct dsp_memstat *)
914 &(resource_info->result.
915 mem_stat)))
916 status = -EINVAL;
917 } else {
918 status = -EFAULT;
919 }
920 }
921 break;
922 case DSP_RESOURCE_PROCLOAD:
 923 status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr);
924 if (hio_mgr)
925 status =
926 p_proc_object->intf_fxns->
 927 io_get_proc_load(hio_mgr,
928 (struct dsp_procloadstat *)
929 &(resource_info->result.
930 proc_load_stat));
931 else
932 status = -EFAULT;
933 break;
934 default:
935 status = -EPERM;
936 break;
937 }
938func_end:
939 return status;
940}
941
942/*
943 * ======== proc_exit ========
944 * Purpose:
945 * Decrement reference count, and free resources when reference count is
946 * 0.
947 */
948void proc_exit(void)
949{
950 DBC_REQUIRE(refs > 0);
951
952 refs--;
953
954 DBC_ENSURE(refs >= 0);
955}
956
957/*
958 * ======== proc_get_dev_object ========
959 * Purpose:
960 * Return the Dev Object handle for a given Processor.
961 *
962 */
963int proc_get_dev_object(void *hprocessor,
 964 struct dev_object **device_obj)
965{
966 int status = -EPERM;
967 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
968
969 DBC_REQUIRE(refs > 0);
 970 DBC_REQUIRE(device_obj != NULL);
971
972 if (p_proc_object) {
 973 *device_obj = p_proc_object->dev_obj;
974 status = 0;
975 } else {
 976 *device_obj = NULL;
977 status = -EFAULT;
978 }
979
 980 DBC_ENSURE((!status && *device_obj != NULL) ||
 981 (status && *device_obj == NULL));
982
983 return status;
984}
985
986/*
987 * ======== proc_get_state ========
988 * Purpose:
989 * Report the state of the specified DSP processor.
990 */
991int proc_get_state(void *hprocessor,
 992 struct dsp_processorstate *proc_state_obj,
993 u32 state_info_size)
994{
995 int status = 0;
996 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
997 int brd_status;
998
999 DBC_REQUIRE(refs > 0);
1000 DBC_REQUIRE(proc_state_obj != NULL);
1001 DBC_REQUIRE(state_info_size >= sizeof(struct dsp_processorstate));
1002
1003 if (p_proc_object) {
1004 /* First, retrieve BRD state information */
 1005 status = (*p_proc_object->intf_fxns->brd_status)
 1006 (p_proc_object->bridge_context, &brd_status);
 1007 if (!status) {
1008 switch (brd_status) {
1009 case BRD_STOPPED:
1010 proc_state_obj->proc_state = PROC_STOPPED;
1011 break;
1012 case BRD_SLEEP_TRANSITION:
1013 case BRD_DSP_HIBERNATION:
1014 /* Fall through */
1015 case BRD_RUNNING:
1016 proc_state_obj->proc_state = PROC_RUNNING;
1017 break;
1018 case BRD_LOADED:
1019 proc_state_obj->proc_state = PROC_LOADED;
1020 break;
1021 case BRD_ERROR:
1022 proc_state_obj->proc_state = PROC_ERROR;
1023 break;
1024 default:
1025 proc_state_obj->proc_state = 0xFF;
1026 status = -EPERM;
1027 break;
1028 }
1029 }
1030 } else {
1031 status = -EFAULT;
1032 }
1033 dev_dbg(bridge, "%s, results: status: 0x%x proc_state_obj: 0x%x\n",
1034 __func__, status, proc_state_obj->proc_state);
1035 return status;
1036}
1037
1038/*
1039 * ======== proc_get_trace ========
1040 * Purpose:
1041 * Retrieve the current contents of the trace buffer, located on the
1042 * Processor. Predefined symbols for the trace buffer must have been
1043 * configured into the DSP executable.
1044 * Details:
1045 * We support using the symbols SYS_PUTCBEG and SYS_PUTCEND to define a
1046 * trace buffer, only. Treat it as an undocumented feature.
1047 * This call is destructive, meaning the processor is placed in the monitor
1048 * state as a result of this function.
1049 */
1050int proc_get_trace(void *hprocessor, u8 * pbuf, u32 max_size)
1051{
1052 int status;
1053 status = -ENOSYS;
1054 return status;
1055}
1056
1057/*
1058 * ======== proc_init ========
1059 * Purpose:
1060 * Initialize PROC's private state, keeping a reference count on each call
1061 */
1062bool proc_init(void)
1063{
1064 bool ret = true;
1065
1066 DBC_REQUIRE(refs >= 0);
1067
1068 if (ret)
1069 refs++;
1070
1071 DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
1072
1073 return ret;
1074}
1075
1076/*
1077 * ======== proc_load ========
1078 * Purpose:
1079 * Reset a processor and load a new base program image.
1080 * This will be an OEM-only function, and not part of the DSP/BIOS Bridge
1081 * application developer's API.
1082 */
1083int proc_load(void *hprocessor, const s32 argc_index,
1084 const char **user_args, const char **user_envp)
1085{
1086 int status = 0;
1087 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1088 struct io_mgr *hio_mgr; /* IO manager handle */
1089 struct msg_mgr *hmsg_mgr;
1090 struct cod_manager *cod_mgr; /* Code manager handle */
1091 char *pargv0; /* temp argv[0] ptr */
1092 char **new_envp; /* Updated envp[] array. */
1093 char sz_proc_id[MAXPROCIDLEN]; /* Size of "PROC_ID=<n>" */
1094 s32 envp_elems; /* Num elements in envp[]. */
1095 s32 cnew_envp; /* " " in new_envp[] */
1096 s32 nproc_id = 0; /* Anticipate MP version. */
1097 struct dcd_manager *hdcd_handle;
677f2ded 1098 struct dmm_object *dmm_mgr;
7d55524d
ORL
1099 u32 dw_ext_end;
1100 u32 proc_id;
1101 int brd_state;
1102 struct drv_data *drv_datap = dev_get_drvdata(bridge);
1103
1104#ifdef OPT_LOAD_TIME_INSTRUMENTATION
1105 struct timeval tv1;
1106 struct timeval tv2;
1107#endif
1108
 1109#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1110 struct dspbridge_platform_data *pdata =
1111 omap_dspbridge_dev->dev.platform_data;
1112#endif
1113
1114 DBC_REQUIRE(refs > 0);
1115 DBC_REQUIRE(argc_index > 0);
1116 DBC_REQUIRE(user_args != NULL);
1117
1118#ifdef OPT_LOAD_TIME_INSTRUMENTATION
1119 do_gettimeofday(&tv1);
1120#endif
1121 if (!p_proc_object) {
1122 status = -EFAULT;
1123 goto func_end;
1124 }
 1125 dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr);
1126 if (!cod_mgr) {
1127 status = -EPERM;
1128 goto func_end;
1129 }
1130 status = proc_stop(hprocessor);
 1131 if (status)
1132 goto func_end;
1133
1134 /* Place the board in the monitor state. */
1135 status = proc_monitor(hprocessor);
 1136 if (status)
1137 goto func_end;
1138
1139 /* Save ptr to original argv[0]. */
1140 pargv0 = (char *)user_args[0];
1141 /*Prepend "PROC_ID=<nproc_id>"to envp array for target. */
1142 envp_elems = get_envp_count((char **)user_envp);
1143 cnew_envp = (envp_elems ? (envp_elems + 1) : (envp_elems + 2));
1144 new_envp = kzalloc(cnew_envp * sizeof(char **), GFP_KERNEL);
1145 if (new_envp) {
1146 status = snprintf(sz_proc_id, MAXPROCIDLEN, PROC_ENVPROCID,
1147 nproc_id);
1148 if (status == -1) {
1149 dev_dbg(bridge, "%s: Proc ID string overflow\n",
1150 __func__);
1151 status = -EPERM;
1152 } else {
1153 new_envp =
1154 prepend_envp(new_envp, (char **)user_envp,
1155 envp_elems, cnew_envp, sz_proc_id);
1156 /* Get the DCD Handle */
 1157 status = mgr_get_dcd_handle(p_proc_object->mgr_obj,
 1158 (u32 *) &hdcd_handle);
 1159 if (!status) {
1160 /* Before proceeding with new load,
1161 * check if a previously registered COFF
1162 * exists.
1163 * If yes, unregister nodes in previously
1164 * registered COFF. If any error occurred,
1165 * set previously registered COFF to NULL. */
 1166 if (p_proc_object->last_coff != NULL) {
1167 status =
1168 dcd_auto_unregister(hdcd_handle,
1169 p_proc_object->
 1170 last_coff);
1171 /* Regardless of auto unregister status,
1172 * free previously allocated
1173 * memory. */
1174 kfree(p_proc_object->last_coff);
1175 p_proc_object->last_coff = NULL;
1176 }
1177 }
1178 /* On success, do cod_open_base() */
1179 status = cod_open_base(cod_mgr, (char *)user_args[0],
1180 COD_SYMB);
1181 }
1182 } else {
1183 status = -ENOMEM;
1184 }
 1185 if (!status) {
1186 /* Auto-register data base */
1187 /* Get the DCD Handle */
 1188 status = mgr_get_dcd_handle(p_proc_object->mgr_obj,
 1189 (u32 *) &hdcd_handle);
 1190 if (!status) {
1191 /* Auto register nodes in specified COFF
1192 * file. If registration did not fail,
1193 * (status = 0 or -EACCES)
1194 * save the name of the COFF file for
1195 * de-registration in the future. */
1196 status =
1197 dcd_auto_register(hdcd_handle,
1198 (char *)user_args[0]);
1199 if (status == -EACCES)
1200 status = 0;
1201
 1202 if (status) {
1203 status = -EPERM;
1204 } else {
 1205 DBC_ASSERT(p_proc_object->last_coff ==
1206 NULL);
1207 /* Allocate memory for pszLastCoff */
 1208 p_proc_object->last_coff =
1209 kzalloc((strlen(user_args[0]) +
1210 1), GFP_KERNEL);
1211 /* If memory allocated, save COFF file name */
1212 if (p_proc_object->last_coff) {
1213 strncpy(p_proc_object->last_coff,
1214 (char *)user_args[0],
1215 (strlen((char *)user_args[0]) +
1216 1));
1217 }
1218 }
1219 }
1220 }
1221 /* Update shared memory address and size */
 1222 if (!status) {
1223 /* Create the message manager. This must be done
1224 * before calling the IOOnLoaded function. */
 1225 dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr);
 1226 if (!hmsg_mgr) {
 1227 status = msg_create(&hmsg_mgr, p_proc_object->dev_obj,
 1228 (msg_onexit) node_on_exit);
 1229 DBC_ASSERT(!status);
 1230 dev_set_msg_mgr(p_proc_object->dev_obj, hmsg_mgr);
1231 }
1232 }
 1233 if (!status) {
 1234 /* Set the Device object's message manager */
 1235 status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr);
 1236 if (hio_mgr)
 1237 status = (*p_proc_object->intf_fxns->io_on_loaded)
1238 (hio_mgr);
1239 else
1240 status = -EFAULT;
1241 }
 1242 if (!status) {
1243 /* Now, attempt to load an exec: */
1244
1245 /* Boost the OPP level to Maximum level supported by baseport */
 1246#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1247 if (pdata->cpu_set_freq)
1248 (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP5]);
1249#endif
1250 status = cod_load_base(cod_mgr, argc_index, (char **)user_args,
1251 dev_brd_write_fxn,
 1252 p_proc_object->dev_obj, NULL);
 1253 if (status) {
1254 if (status == -EBADF) {
1255 dev_dbg(bridge, "%s: Failure to Load the EXE\n",
1256 __func__);
1257 }
1258 if (status == -ESPIPE) {
1259 pr_err("%s: Couldn't parse the file\n",
1260 __func__);
1261 }
1262 }
1263 /* Requesting the lowest opp supported */
 1264#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1265 if (pdata->cpu_set_freq)
1266 (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
1267#endif
1268
1269 }
 1270 if (!status) {
 1271 /* Update the Processor status to loaded */
 1272 status = (*p_proc_object->intf_fxns->brd_set_state)
 1273 (p_proc_object->bridge_context, BRD_LOADED);
 1274 if (!status) {
1275 p_proc_object->proc_state = PROC_LOADED;
1276 if (p_proc_object->ntfy_obj)
1277 proc_notify_clients(p_proc_object,
1278 DSP_PROCESSORSTATECHANGE);
1279 }
1280 }
 1281 if (!status) {
1282 status = proc_get_processor_id(hprocessor, &proc_id);
1283 if (proc_id == DSP_UNIT) {
1284 /* Use all available DSP address space after EXTMEM
1285 * for DMM */
 1286 if (!status)
1287 status = cod_get_sym_value(cod_mgr, EXTEND,
1288 &dw_ext_end);
1289
1290 /* Reset DMM structs and add an initial free chunk */
1291 if (!status) {
1292 status =
 1293 dev_get_dmm_mgr(p_proc_object->dev_obj,
1294 &dmm_mgr);
1295 if (dmm_mgr) {
1296 /* Set dw_ext_end to DMM START u8
1297 * address */
1298 dw_ext_end =
1299 (dw_ext_end + 1) * DSPWORDSIZE;
1300 /* DMM memory is from EXT_END */
1301 status = dmm_create_tables(dmm_mgr,
1302 dw_ext_end,
1303 DMMPOOLSIZE);
1304 } else {
1305 status = -EFAULT;
1306 }
1307 }
1308 }
1309 }
1310 /* Restore the original argv[0] */
1311 kfree(new_envp);
1312 user_args[0] = pargv0;
 1313 if (!status) {
 1314 if (!((*p_proc_object->intf_fxns->brd_status)
 1315 (p_proc_object->bridge_context, &brd_state))) {
1316 pr_info("%s: Processor Loaded %s\n", __func__, pargv0);
1317 kfree(drv_datap->base_img);
1318 drv_datap->base_img = kmalloc(strlen(pargv0) + 1,
1319 GFP_KERNEL);
1320 if (drv_datap->base_img)
1321 strncpy(drv_datap->base_img, pargv0,
1322 strlen(pargv0) + 1);
1323 else
1324 status = -ENOMEM;
1325 DBC_ASSERT(brd_state == BRD_LOADED);
1326 }
1327 }
1328
1329func_end:
 1330 if (status) {
 1331 pr_err("%s: Processor failed to load\n", __func__);
1332 proc_stop(p_proc_object);
1333 }
 1334 DBC_ENSURE((!status
 1335 && p_proc_object->proc_state == PROC_LOADED)
 1336 || status);
1337#ifdef OPT_LOAD_TIME_INSTRUMENTATION
1338 do_gettimeofday(&tv2);
1339 if (tv2.tv_usec < tv1.tv_usec) {
1340 tv2.tv_usec += 1000000;
1341 tv2.tv_sec--;
1342 }
1343 dev_dbg(bridge, "%s: time to load %d sec and %d usec\n", __func__,
1344 tv2.tv_sec - tv1.tv_sec, tv2.tv_usec - tv1.tv_usec);
1345#endif
1346 return status;
1347}
1348
1349/*
1350 * ======== proc_map ========
1351 * Purpose:
1352 * Maps a MPU buffer to DSP address space.
1353 */
1354int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
1355 void *req_addr, void **pp_map_addr, u32 ul_map_attr,
1356 struct process_context *pr_ctxt)
1357{
1358 u32 va_align;
1359 u32 pa_align;
 1360 struct dmm_object *dmm_mgr;
1361 u32 size_align;
1362 int status = 0;
1363 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1364 struct dmm_map_object *map_obj;
 1365 u32 tmp_addr = 0;
 1366
 1367#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
1368 if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
1369 if (!IS_ALIGNED((u32)pmpu_addr, DSP_CACHE_LINE) ||
1370 !IS_ALIGNED(ul_size, DSP_CACHE_LINE)) {
1371 pr_err("%s: not aligned: 0x%x (%d)\n", __func__,
1372 (u32)pmpu_addr, ul_size);
1373 return -EFAULT;
1374 }
1375 }
1376#endif
1377
1378 /* Calculate the page-aligned PA, VA and size */
1379 va_align = PG_ALIGN_LOW((u32) req_addr, PG_SIZE4K);
1380 pa_align = PG_ALIGN_LOW((u32) pmpu_addr, PG_SIZE4K);
1381 size_align = PG_ALIGN_HIGH(ul_size + (u32) pmpu_addr - pa_align,
1382 PG_SIZE4K);
1383
1384 if (!p_proc_object) {
1385 status = -EFAULT;
1386 goto func_end;
1387 }
1388 /* Critical section */
1389 mutex_lock(&proc_lock);
1390 dmm_get_handle(p_proc_object, &dmm_mgr);
1391 if (dmm_mgr)
1392 status = dmm_map_memory(dmm_mgr, va_align, size_align);
1393 else
1394 status = -EFAULT;
1395
1396 /* Add mapping to the page tables. */
 1397 if (!status) {
1398
1399 /* Mapped address = MSB of VA | LSB of PA */
1400 tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
 1401 /* mapped memory resource tracking */
 1402 map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr,
 1403 size_align);
 1404 if (!map_obj)
 1405 status = -ENOMEM;
 1406 else
 1407 status = (*p_proc_object->intf_fxns->brd_mem_map)
 1408 (p_proc_object->bridge_context, pa_align, va_align,
 1409 size_align, ul_map_attr, map_obj->pages);
 1410 }
 1411 if (!status) {
 1412 /* Mapped address = MSB of VA | LSB of PA */
 1413 *pp_map_addr = (void *) tmp_addr;
 1414 } else {
 1415 remove_mapping_information(pr_ctxt, tmp_addr, size_align);
 1416 dmm_un_map_memory(dmm_mgr, va_align, &size_align);
1417 }
1418 mutex_unlock(&proc_lock);
1419
 1420 if (status)
1421 goto func_end;
1422
1423func_end:
1424 dev_dbg(bridge, "%s: hprocessor %p, pmpu_addr %p, ul_size %x, "
1425 "req_addr %p, ul_map_attr %x, pp_map_addr %p, va_align %x, "
1426 "pa_align %x, size_align %x status 0x%x\n", __func__,
1427 hprocessor, pmpu_addr, ul_size, req_addr, ul_map_attr,
1428 pp_map_addr, va_align, pa_align, size_align, status);
1429
1430 return status;
1431}
1432
1433/*
1434 * ======== proc_register_notify ========
1435 * Purpose:
1436 * Register to be notified of specific processor events.
1437 */
1438int proc_register_notify(void *hprocessor, u32 event_mask,
1439 u32 notify_type, struct dsp_notification
1440 * hnotification)
1441{
1442 int status = 0;
1443 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1444 struct deh_mgr *hdeh_mgr;
1445
1446 DBC_REQUIRE(hnotification != NULL);
1447 DBC_REQUIRE(refs > 0);
1448
1449 /* Check processor handle */
1450 if (!p_proc_object) {
1451 status = -EFAULT;
1452 goto func_end;
1453 }
1454 /* Check if event mask is a valid processor related event */
1455 if (event_mask & ~(DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH |
1456 DSP_PROCESSORDETACH | DSP_PROCESSORRESTART |
1457 DSP_MMUFAULT | DSP_SYSERROR | DSP_PWRERROR |
1458 DSP_WDTOVERFLOW))
1459 status = -EINVAL;
1460
1461 /* Check if notify type is valid */
1462 if (notify_type != DSP_SIGNALEVENT)
1463 status = -EINVAL;
1464
 1465 if (!status) {
1466 /* If event mask is not DSP_SYSERROR, DSP_MMUFAULT,
1467 * or DSP_PWRERROR then register event immediately. */
1468 if (event_mask &
1469 ~(DSP_SYSERROR | DSP_MMUFAULT | DSP_PWRERROR |
1470 DSP_WDTOVERFLOW)) {
1471 status = ntfy_register(p_proc_object->ntfy_obj,
1472 hnotification, event_mask,
1473 notify_type);
1474 /* Special case alert, special case alert!
1475 * If we're trying to *deregister* (i.e. event_mask
1476 * is 0), a DSP_SYSERROR or DSP_MMUFAULT notification,
1477 * we have to deregister with the DEH manager.
1478 * There's no way to know, based on event_mask which
1479 * manager the notification event was registered with,
1480 * so if we're trying to deregister and ntfy_register
1481 * failed, we'll give the deh manager a shot.
1482 */
 1483 if ((event_mask == 0) && status) {
 1484 status =
 1485 dev_get_deh_mgr(p_proc_object->dev_obj,
 1486 &hdeh_mgr);
 1487 status =
1488 bridge_deh_register_notify(hdeh_mgr,
1489 event_mask,
1490 notify_type,
1491 hnotification);
1492 }
1493 } else {
 1494 status = dev_get_deh_mgr(p_proc_object->dev_obj,
 1495 &hdeh_mgr);
 1496 status =
1497 bridge_deh_register_notify(hdeh_mgr,
1498 event_mask,
1499 notify_type,
1500 hnotification);
1501
1502 }
1503 }
1504func_end:
1505 return status;
1506}
1507
1508/*
1509 * ======== proc_reserve_memory ========
1510 * Purpose:
1511 * Reserve a virtually contiguous region of DSP address space.
1512 */
1513int proc_reserve_memory(void *hprocessor, u32 ul_size,
1514 void **pp_rsv_addr,
1515 struct process_context *pr_ctxt)
1516{
1517 struct dmm_object *dmm_mgr;
1518 int status = 0;
1519 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
 1520 struct dmm_rsv_object *rsv_obj;
1521
1522 if (!p_proc_object) {
1523 status = -EFAULT;
1524 goto func_end;
1525 }
1526
1527 status = dmm_get_handle(p_proc_object, &dmm_mgr);
1528 if (!dmm_mgr) {
1529 status = -EFAULT;
1530 goto func_end;
1531 }
1532
1533 status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
1534 if (status != 0)
1535 goto func_end;
1536
1537 /*
1538 * A successful reserve should be followed by insertion of rsv_obj
1539 * into dmm_rsv_list, so that reserved memory resource tracking
 1540 * remains up to date
1541 */
1542 rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL);
1543 if (rsv_obj) {
1544 rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
1545 spin_lock(&pr_ctxt->dmm_rsv_lock);
1546 list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
1547 spin_unlock(&pr_ctxt->dmm_rsv_lock);
1548 }
1549
1550func_end:
1551 dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
1552 "status 0x%x\n", __func__, hprocessor,
1553 ul_size, pp_rsv_addr, status);
1554 return status;
1555}
1556
1557/*
1558 * ======== proc_start ========
1559 * Purpose:
1560 * Start a processor running.
1561 */
1562int proc_start(void *hprocessor)
1563{
1564 int status = 0;
1565 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1566 struct cod_manager *cod_mgr; /* Code manager handle */
1567 u32 dw_dsp_addr; /* Loaded code's entry point. */
1568 int brd_state;
1569
1570 DBC_REQUIRE(refs > 0);
1571 if (!p_proc_object) {
1572 status = -EFAULT;
1573 goto func_end;
1574 }
1575 /* Call the bridge_brd_start */
1576 if (p_proc_object->proc_state != PROC_LOADED) {
1577 status = -EBADR;
1578 goto func_end;
1579 }
 1580 status = dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr);
1581 if (!cod_mgr) {
1582 status = -EFAULT;
1583 goto func_cont;
1584 }
1585
1586 status = cod_get_entry(cod_mgr, &dw_dsp_addr);
 1587 if (status)
1588 goto func_cont;
1589
 1590 status = (*p_proc_object->intf_fxns->brd_start)
 1591 (p_proc_object->bridge_context, dw_dsp_addr);
 1592 if (status)
1593 goto func_cont;
1594
1595 /* Call dev_create2 */
 1596 status = dev_create2(p_proc_object->dev_obj);
 1597 if (!status) {
1598 p_proc_object->proc_state = PROC_RUNNING;
 1599 /* Deep sleep switches off the peripheral clocks.
1600 * we just put the DSP CPU in idle in the idle loop.
1601 * so there is no need to send a command to DSP */
1602
1603 if (p_proc_object->ntfy_obj) {
1604 proc_notify_clients(p_proc_object,
1605 DSP_PROCESSORSTATECHANGE);
1606 }
1607 } else {
1608 /* Failed to Create Node Manager and DISP Object
1609 * Stop the Processor from running. Put it in STOPPED State */
1610 (void)(*p_proc_object->intf_fxns->
 1611 brd_stop) (p_proc_object->bridge_context);
1612 p_proc_object->proc_state = PROC_STOPPED;
1613 }
1614func_cont:
 1615 if (!status) {
 1616 if (!((*p_proc_object->intf_fxns->brd_status)
 1617 (p_proc_object->bridge_context, &brd_state))) {
1618 pr_info("%s: dsp in running state\n", __func__);
1619 DBC_ASSERT(brd_state != BRD_HIBERNATION);
1620 }
1621 } else {
1622 pr_err("%s: Failed to start the dsp\n", __func__);
 1623 proc_stop(p_proc_object);
1624 }
1625
1626func_end:
 1627 DBC_ENSURE((!status && p_proc_object->proc_state ==
 1628 PROC_RUNNING) || status);
1629 return status;
1630}
1631
1632/*
1633 * ======== proc_stop ========
1634 * Purpose:
1635 * Stop a processor running.
1636 */
1637int proc_stop(void *hprocessor)
1638{
1639 int status = 0;
1640 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1641 struct msg_mgr *hmsg_mgr;
1642 struct node_mgr *hnode_mgr;
1643 void *hnode;
1644 u32 node_tab_size = 1;
1645 u32 num_nodes = 0;
1646 u32 nodes_allocated = 0;
1647 int brd_state;
1648
1649 DBC_REQUIRE(refs > 0);
1650 if (!p_proc_object) {
1651 status = -EFAULT;
1652 goto func_end;
1653 }
 1654 /* check if there are any running nodes */
 1655 status = dev_get_node_manager(p_proc_object->dev_obj, &hnode_mgr);
 1656 if (!status && hnode_mgr) {
1657 status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size,
1658 &num_nodes, &nodes_allocated);
1659 if ((status == -EINVAL) || (nodes_allocated > 0)) {
1660 pr_err("%s: Can't stop device, active nodes = %d \n",
1661 __func__, nodes_allocated);
1662 return -EBADR;
1663 }
1664 }
1665 /* Call the bridge_brd_stop */
 1666 /* It is OK to stop a device that doesn't have nodes or hasn't been started */
1667 status =
1668 (*p_proc_object->intf_fxns->
 1669 brd_stop) (p_proc_object->bridge_context);
 1670 if (!status) {
1671 dev_dbg(bridge, "%s: processor in standby mode\n", __func__);
1672 p_proc_object->proc_state = PROC_STOPPED;
 1673 /* Destroy the Node Manager, msg_ctrl Manager */
 1674 if (!(dev_destroy2(p_proc_object->dev_obj))) {
 1675 /* Destroy the msg_ctrl by calling msg_delete */
 1676 dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr);
1677 if (hmsg_mgr) {
1678 msg_delete(hmsg_mgr);
 1679 dev_set_msg_mgr(p_proc_object->dev_obj, NULL);
 1680 }
 1681 if (!((*p_proc_object->
 1682 intf_fxns->brd_status) (p_proc_object->
 1683 bridge_context,
1684 &brd_state)))
1685 DBC_ASSERT(brd_state == BRD_STOPPED);
1686 }
1687 } else {
1688 pr_err("%s: Failed to stop the processor\n", __func__);
1689 }
1690func_end:
1691
1692 return status;
1693}
1694
1695/*
1696 * ======== proc_un_map ========
1697 * Purpose:
1698 * Removes a MPU buffer mapping from the DSP address space.
1699 */
1700int proc_un_map(void *hprocessor, void *map_addr,
1701 struct process_context *pr_ctxt)
1702{
1703 int status = 0;
1704 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
 1705 struct dmm_object *dmm_mgr;
 1706 u32 va_align;
 1707 u32 size_align;
1708
1709 va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
1710 if (!p_proc_object) {
1711 status = -EFAULT;
1712 goto func_end;
1713 }
1714
1715 status = dmm_get_handle(hprocessor, &dmm_mgr);
1716 if (!dmm_mgr) {
1717 status = -EFAULT;
1718 goto func_end;
1719 }
1720
1721 /* Critical section */
1722 mutex_lock(&proc_lock);
1723 /*
1724 * Update DMM structures. Get the size to unmap.
1725 * This function returns error if the VA is not mapped
1726 */
1727 status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
 1728 /* Remove mapping from the page tables. */
 1729 if (!status) {
 1730 status = (*p_proc_object->intf_fxns->brd_mem_un_map)
 1731 (p_proc_object->bridge_context, va_align, size_align);
 1732 }
 1733
 1734 if (status)
 1735 goto unmap_failed;
1736
1737 /*
1738 * A successful unmap should be followed by removal of map_obj
1739 * from dmm_map_list, so that mapped memory resource tracking
 1740 * remains up to date
1741 */
 1742 remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
 1743
1744unmap_failed:
1745 mutex_unlock(&proc_lock);
1746
1747func_end:
1748 dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
1749 __func__, hprocessor, map_addr, status);
1750 return status;
1751}
1752
1753/*
1754 * ======== proc_un_reserve_memory ========
1755 * Purpose:
1756 * Frees a previously reserved region of DSP address space.
1757 */
1758int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
1759 struct process_context *pr_ctxt)
1760{
1761 struct dmm_object *dmm_mgr;
1762 int status = 0;
1763 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
 1764 struct dmm_rsv_object *rsv_obj;
1765
1766 if (!p_proc_object) {
1767 status = -EFAULT;
1768 goto func_end;
1769 }
1770
1771 status = dmm_get_handle(p_proc_object, &dmm_mgr);
1772 if (!dmm_mgr) {
1773 status = -EFAULT;
1774 goto func_end;
1775 }
1776
1777 status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
1778 if (status != 0)
1779 goto func_end;
1780
1781 /*
1782 * A successful unreserve should be followed by removal of rsv_obj
1783 * from dmm_rsv_list, so that reserved memory resource tracking
 1784 * remains up to date
1785 */
1786 spin_lock(&pr_ctxt->dmm_rsv_lock);
1787 list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
1788 if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
1789 list_del(&rsv_obj->link);
1790 kfree(rsv_obj);
1791 break;
1792 }
1793 }
1794 spin_unlock(&pr_ctxt->dmm_rsv_lock);
1795
1796func_end:
1797 dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
1798 __func__, hprocessor, prsv_addr, status);
1799 return status;
1800}
1801
1802/*
 1803 * ======== proc_monitor ========
1804 * Purpose:
1805 * Place the Processor in Monitor State. This is an internal
1806 * function and a requirement before Processor is loaded.
1807 * This does a bridge_brd_stop, dev_destroy2 and bridge_brd_monitor.
1808 * In dev_destroy2 we delete the node manager.
1809 * Parameters:
1810 * p_proc_object: Pointer to Processor Object
1811 * Returns:
1812 * 0: Processor placed in monitor mode.
1813 * !0: Failed to place processor in monitor mode.
1814 * Requires:
1815 * Valid Processor Handle
1816 * Ensures:
1817 * Success: ProcObject state is PROC_IDLE
1818 */
 1819static int proc_monitor(struct proc_object *proc_obj)
1820{
1821 int status = -EPERM;
1822 struct msg_mgr *hmsg_mgr;
1823 int brd_state;
1824
1825 DBC_REQUIRE(refs > 0);
 1826 DBC_REQUIRE(proc_obj);
1827
1828 /* This is needed only when Device is loaded when it is
1829 * already 'ACTIVE' */
 1830 /* Destroy the Node Manager, msg_ctrl Manager */
 1831 if (!dev_destroy2(proc_obj->dev_obj)) {
 1832 /* Destroy the msg_ctrl by calling msg_delete */
 1833 dev_get_msg_mgr(proc_obj->dev_obj, &hmsg_mgr);
1834 if (hmsg_mgr) {
1835 msg_delete(hmsg_mgr);
 1836 dev_set_msg_mgr(proc_obj->dev_obj, NULL);
1837 }
1838 }
1839 /* Place the Board in the Monitor State */
 1840 if (!((*proc_obj->intf_fxns->brd_monitor)
 1841 (proc_obj->bridge_context))) {
 1842 status = 0;
 1843 if (!((*proc_obj->intf_fxns->brd_status)
 1844 (proc_obj->bridge_context, &brd_state)))
1845 DBC_ASSERT(brd_state == BRD_IDLE);
1846 }
1847
 1848 DBC_ENSURE((!status && brd_state == BRD_IDLE) ||
 1849 status);
1850 return status;
1851}
1852
1853/*
1854 * ======== get_envp_count ========
1855 * Purpose:
1856 * Return the number of elements in the envp array, including the
1857 * terminating NULL element.
1858 */
1859static s32 get_envp_count(char **envp)
1860{
1861 s32 ret = 0;
1862 if (envp) {
1863 while (*envp++)
1864 ret++;
1865
1866 ret += 1; /* Include the terminating NULL in the count. */
1867 }
1868
1869 return ret;
1870}
1871
1872/*
1873 * ======== prepend_envp ========
1874 * Purpose:
1875 * Prepend an environment variable=value pair to the new envp array, and
1876 * copy in the existing var=value pairs in the old envp array.
1877 */
1878static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
 1879 s32 cnew_envp, char *sz_var)
1880{
1881 char **pp_envp = new_envp;
1882
1883 DBC_REQUIRE(new_envp);
1884
1885 /* Prepend new environ var=value string */
 1886 *new_envp++ = sz_var;
1887
1888 /* Copy user's environment into our own. */
1889 while (envp_elems--)
1890 *new_envp++ = *envp++;
1891
1892 /* Ensure NULL terminates the new environment strings array. */
1893 if (envp_elems == 0)
1894 *new_envp = NULL;
1895
1896 return pp_envp;
1897}
1898
1899/*
1900 * ======== proc_notify_clients ========
1901 * Purpose:
 1902 * Notify registered clients of processor events.
1903 */
 1904int proc_notify_clients(void *proc, u32 events)
1905{
1906 int status = 0;
 1907 struct proc_object *p_proc_object = (struct proc_object *)proc;
1908
1909 DBC_REQUIRE(p_proc_object);
 1910 DBC_REQUIRE(is_valid_proc_event(events));
1911 DBC_REQUIRE(refs > 0);
1912 if (!p_proc_object) {
1913 status = -EFAULT;
1914 goto func_end;
1915 }
1916
 1917 ntfy_notify(p_proc_object->ntfy_obj, events);
1918func_end:
1919 return status;
1920}
1921
1922/*
1923 * ======== proc_notify_all_clients ========
1924 * Purpose:
 1925 * Notify clients of processor events. This includes notifying all clients
 1926 * attached to a particular DSP.
1927 */
 1928int proc_notify_all_clients(void *proc, u32 events)
1929{
1930 int status = 0;
e6890692 1931 struct proc_object *p_proc_object = (struct proc_object *)proc;
 1932
 1933 DBC_REQUIRE(is_valid_proc_event(events));
1934 DBC_REQUIRE(refs > 0);
1935
1936 if (!p_proc_object) {
1937 status = -EFAULT;
1938 goto func_end;
1939 }
1940
 1941 dev_notify_clients(p_proc_object->dev_obj, events);
1942
1943func_end:
1944 return status;
1945}
1946
1947/*
1948 * ======== proc_get_processor_id ========
1949 * Purpose:
1950 * Retrieves the processor ID.
1951 */
 1952int proc_get_processor_id(void *proc, u32 * proc_id)
1953{
1954 int status = 0;
e6890692 1955 struct proc_object *p_proc_object = (struct proc_object *)proc;
1956
1957 if (p_proc_object)
 1958 *proc_id = p_proc_object->processor_id;
1959 else
1960 status = -EFAULT;
1961
1962 return status;
1963}