Commit | Line | Data |
---|---|---|
999e07d6 ORL |
1 | /* |
2 | * tiomap.c | |
3 | * | |
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | |
5 | * | |
6 | * Processor Manager Driver for TI OMAP3430 EVM. | |
7 | * | |
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | |
9 | * | |
10 | * This package is free software; you can redistribute it and/or modify | |
11 | * it under the terms of the GNU General Public License version 2 as | |
12 | * published by the Free Software Foundation. | |
13 | * | |
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | |
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | |
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | |
17 | */ | |
18 | ||
82d4b477 FC |
19 | #include <plat/dsp.h> |
20 | ||
2094f12d | 21 | #include <linux/types.h> |
999e07d6 ORL |
22 | /* ----------------------------------- Host OS */ |
23 | #include <dspbridge/host_os.h> | |
24 | #include <linux/mm.h> | |
25 | #include <linux/mmzone.h> | |
999e07d6 ORL |
26 | |
27 | /* ----------------------------------- DSP/BIOS Bridge */ | |
999e07d6 ORL |
28 | #include <dspbridge/dbdefs.h> |
29 | ||
30 | /* ----------------------------------- Trace & Debug */ | |
31 | #include <dspbridge/dbc.h> | |
32 | ||
33 | /* ----------------------------------- OS Adaptation Layer */ | |
999e07d6 ORL |
34 | #include <dspbridge/drv.h> |
35 | #include <dspbridge/sync.h> | |
36 | ||
58c1ceb1 FC |
37 | /* ------------------------------------ Hardware Abstraction Layer */ |
38 | #include <hw_defs.h> | |
39 | #include <hw_mmu.h> | |
40 | ||
999e07d6 ORL |
41 | /* ----------------------------------- Link Driver */ |
42 | #include <dspbridge/dspdefs.h> | |
43 | #include <dspbridge/dspchnl.h> | |
44 | #include <dspbridge/dspdeh.h> | |
45 | #include <dspbridge/dspio.h> | |
46 | #include <dspbridge/dspmsg.h> | |
47 | #include <dspbridge/pwr.h> | |
48 | #include <dspbridge/io_sm.h> | |
49 | ||
50 | /* ----------------------------------- Platform Manager */ | |
51 | #include <dspbridge/dev.h> | |
52 | #include <dspbridge/dspapi.h> | |
677f2ded | 53 | #include <dspbridge/dmm.h> |
999e07d6 ORL |
54 | #include <dspbridge/wdt.h> |
55 | ||
56 | /* ----------------------------------- Local */ | |
57 | #include "_tiomap.h" | |
58 | #include "_tiomap_pwr.h" | |
59 | #include "tiomap_io.h" | |
60 | ||
61 | /* Offset in shared mem to write to in order to synchronize start with DSP */ | |
62 | #define SHMSYNCOFFSET 4 /* GPP byte offset */ | |
63 | ||
64 | #define BUFFERSIZE 1024 | |
65 | ||
66 | #define TIHELEN_ACKTIMEOUT 10000 | |
67 | ||
68 | #define MMU_SECTION_ADDR_MASK 0xFFF00000 | |
69 | #define MMU_SSECTION_ADDR_MASK 0xFF000000 | |
70 | #define MMU_LARGE_PAGE_MASK 0xFFFF0000 | |
71 | #define MMU_SMALL_PAGE_MASK 0xFFFFF000 | |
72 | #define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00 | |
73 | #define PAGES_II_LVL_TABLE 512 | |
f5bd96bb | 74 | #define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT) |
999e07d6 | 75 | |
a9db2036 FC |
76 | /* |
77 | * This is a totally ugly layer violation, but needed until | |
78 | * omap_ctrl_set_dsp_boot*() are provided. | |
79 | */ | |
80 | #define OMAP3_IVA2_BOOTMOD_IDLE 1 | |
81 | #define OMAP2_CONTROL_GENERAL 0x270 | |
82 | #define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190) | |
83 | #define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194) | |
84 | ||
85 | #define OMAP343X_CTRL_REGADDR(reg) \ | |
86 | OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg)) | |
87 | ||
88 | ||
999e07d6 | 89 | /* Forward Declarations: */ |
c8c1ad8c RS |
90 | static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt); |
91 | static int bridge_brd_read(struct bridge_dev_context *dev_ctxt, | |
e6bf74f0 | 92 | u8 *host_buff, |
b301c858 | 93 | u32 dsp_addr, u32 ul_num_bytes, |
5e2eae57 | 94 | u32 mem_type); |
c8c1ad8c | 95 | static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, |
b301c858 | 96 | u32 dsp_addr); |
c8c1ad8c | 97 | static int bridge_brd_status(struct bridge_dev_context *dev_ctxt, |
a5120278 | 98 | int *board_state); |
c8c1ad8c RS |
99 | static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt); |
100 | static int bridge_brd_write(struct bridge_dev_context *dev_ctxt, | |
9d7d0a52 | 101 | u8 *host_buff, |
b301c858 | 102 | u32 dsp_addr, u32 ul_num_bytes, |
5e2eae57 | 103 | u32 mem_type); |
e6890692 | 104 | static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt, |
5e2eae57 | 105 | u32 brd_state); |
e6890692 | 106 | static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt, |
5e2eae57 RS |
107 | u32 dsp_dest_addr, u32 dsp_src_addr, |
108 | u32 ul_num_bytes, u32 mem_type); | |
c8c1ad8c | 109 | static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, |
9d7d0a52 | 110 | u8 *host_buff, u32 dsp_addr, |
5e2eae57 | 111 | u32 ul_num_bytes, u32 mem_type); |
d0b345f3 FC |
112 | static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt, |
113 | u32 ul_mpu_addr, u32 virt_addr, | |
114 | u32 ul_num_bytes, u32 ul_map_attr, | |
115 | struct page **mapped_pages); | |
116 | static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt, | |
50ad26f4 | 117 | u32 virt_addr, u32 ul_num_bytes); |
e6bf74f0 | 118 | static int bridge_dev_create(struct bridge_dev_context |
fb6aabb7 | 119 | **dev_cntxt, |
999e07d6 | 120 | struct dev_object *hdev_obj, |
9d7d0a52 | 121 | struct cfg_hostres *config_param); |
999e07d6 | 122 | static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, |
e6bf74f0 | 123 | u32 dw_cmd, void *pargs); |
c8c1ad8c | 124 | static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt); |
d0b345f3 | 125 | static u32 user_va2_pa(struct mm_struct *mm, u32 address); |
ac8a139a FC |
126 | static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa, |
127 | u32 va, u32 size, | |
128 | struct hw_mmu_map_attrs_t *map_attrs); | |
129 | static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va, | |
130 | u32 size, struct hw_mmu_map_attrs_t *attrs); | |
131 | static int mem_map_vmalloc(struct bridge_dev_context *dev_context, | |
132 | u32 ul_mpu_addr, u32 virt_addr, | |
133 | u32 ul_num_bytes, | |
134 | struct hw_mmu_map_attrs_t *hw_attrs); | |
135 | ||
999e07d6 ORL |
136 | bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr); |
137 | ||
ac8a139a FC |
138 | /* ----------------------------------- Globals */ |
139 | ||
140 | /* Attributes of L2 page tables for DSP MMU */ | |
/* Per-L2-table bookkeeping: how many of its PTEs are currently in use. */
struct page_info {
	u32 num_entries;	/* Number of valid PTEs in the L2 PT */
};
144 | ||
/* Attributes used to manage the DSP MMU page tables.
 *
 * For both the L1 and L2 tables the driver keeps two (pa, va) pairs:
 * the *_base_* fields point at the aligned table actually programmed
 * into the MMU, while the *_tbl_alloc_* fields record the raw
 * allocation (which may be larger and unaligned) so it can be freed.
 */
struct pg_table_attrs {
	spinlock_t pg_lock;	/* Critical section object handle */

	u32 l1_base_pa;		/* Physical address of the L1 PT */
	u32 l1_base_va;		/* Virtual address of the L1 PT */
	u32 l1_size;		/* Size of the L1 PT */
	u32 l1_tbl_alloc_pa;
	/* Physical address of Allocated mem for L1 table. May not be aligned */
	u32 l1_tbl_alloc_va;
	/* Virtual address of Allocated mem for L1 table. May not be aligned */
	u32 l1_tbl_alloc_sz;
	/* Size of consistent memory allocated for L1 table.
	 * May not be aligned */

	u32 l2_base_pa;		/* Physical address of the L2 PT */
	u32 l2_base_va;		/* Virtual address of the L2 PT */
	u32 l2_size;		/* Size of the L2 PT */
	u32 l2_tbl_alloc_pa;
	/* Physical address of Allocated mem for L2 table. May not be aligned */
	u32 l2_tbl_alloc_va;
	/* Virtual address of Allocated mem for L2 table. May not be aligned */
	u32 l2_tbl_alloc_sz;
	/* Size of consistent memory allocated for L2 table.
	 * May not be aligned */

	u32 l2_num_pages;	/* Number of allocated L2 PT */
	/* Array [l2_num_pages] of L2 PT info structs */
	struct page_info *pg_info;
};
175 | ||
999e07d6 ORL |
/*
 *  This Bridge driver's function interface table.
 *
 *  NOTE(review): the initializer is positional, so the entry order must
 *  match the field order of struct bridge_drv_interface exactly — do not
 *  reorder entries without checking that header.
 */
static struct bridge_drv_interface drv_interface_fxns = {
	/* Bridge API ver. for which this bridge driver is built. */
	BRD_API_MAJOR_VERSION,
	BRD_API_MINOR_VERSION,
	/* Device create/destroy/ioctl entry points (this file). */
	bridge_dev_create,
	bridge_dev_destroy,
	bridge_dev_ctrl,
	/* Board control entry points (this file). */
	bridge_brd_monitor,
	bridge_brd_start,
	bridge_brd_stop,
	bridge_brd_status,
	bridge_brd_read,
	bridge_brd_write,
	bridge_brd_set_state,
	bridge_brd_mem_copy,
	bridge_brd_mem_write,
	bridge_brd_mem_map,
	bridge_brd_mem_un_map,
	/* The following CHNL functions are provided by chnl_io.lib: */
	bridge_chnl_create,
	bridge_chnl_destroy,
	bridge_chnl_open,
	bridge_chnl_close,
	bridge_chnl_add_io_req,
	bridge_chnl_get_ioc,
	bridge_chnl_cancel_io,
	bridge_chnl_flush_io,
	bridge_chnl_get_info,
	bridge_chnl_get_mgr_info,
	bridge_chnl_idle,
	bridge_chnl_register_notify,
	/* The following IO functions are provided by chnl_io.lib: */
	bridge_io_create,
	bridge_io_destroy,
	bridge_io_on_loaded,
	bridge_io_get_proc_load,
	/* The following msg_ctrl functions are provided by chnl_io.lib: */
	bridge_msg_create,
	bridge_msg_create_queue,
	bridge_msg_delete,
	bridge_msg_delete_queue,
	bridge_msg_get,
	bridge_msg_put,
	bridge_msg_register_notify,
	bridge_msg_set_queue_id,
};
225 | ||
85d139c9 ORL |
/* Notifier used to route incoming DSP mailbox messages to io_mbox_msg(). */
static struct notifier_block dsp_mbox_notifier = {
	.notifier_call = io_mbox_msg,
};
229 | ||
ac8a139a FC |
230 | static inline void flush_all(struct bridge_dev_context *dev_context) |
231 | { | |
232 | if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION || | |
233 | dev_context->dw_brd_state == BRD_HIBERNATION) | |
234 | wake_dsp(dev_context, NULL); | |
235 | ||
236 | hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base); | |
237 | } | |
238 | ||
239 | static void bad_page_dump(u32 pa, struct page *pg) | |
240 | { | |
241 | pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa); | |
242 | pr_emerg("Bad page state in process '%s'\n" | |
243 | "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" | |
244 | "Backtrace:\n", | |
245 | current->comm, pg, (int)(2 * sizeof(unsigned long)), | |
246 | (unsigned long)pg->flags, pg->mapping, | |
247 | page_mapcount(pg), page_count(pg)); | |
248 | dump_stack(); | |
249 | } | |
250 | ||
999e07d6 ORL |
251 | /* |
252 | * ======== bridge_drv_entry ======== | |
253 | * purpose: | |
254 | * Bridge Driver entry point. | |
255 | */ | |
e6bf74f0 | 256 | void bridge_drv_entry(struct bridge_drv_interface **drv_intf, |
9d7d0a52 | 257 | const char *driver_file_name) |
999e07d6 ORL |
258 | { |
259 | ||
260 | DBC_REQUIRE(driver_file_name != NULL); | |
261 | ||
999e07d6 | 262 | if (strcmp(driver_file_name, "UMA") == 0) |
fb6aabb7 | 263 | *drv_intf = &drv_interface_fxns; |
999e07d6 ORL |
264 | else |
265 | dev_dbg(bridge, "%s Unknown Bridge file name", __func__); | |
266 | ||
267 | } | |
268 | ||
269 | /* | |
270 | * ======== bridge_brd_monitor ======== | |
271 | * purpose: | |
272 | * This bridge_brd_monitor puts DSP into a Loadable state. | |
273 | * i.e Application can load and start the device. | |
274 | * | |
275 | * Preconditions: | |
276 | * Device in 'OFF' state. | |
277 | */ | |
e6890692 | 278 | static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt) |
999e07d6 | 279 | { |
e6890692 | 280 | struct bridge_dev_context *dev_context = dev_ctxt; |
999e07d6 | 281 | u32 temp; |
82d4b477 FC |
282 | struct omap_dsp_platform_data *pdata = |
283 | omap_dspbridge_dev->dev.platform_data; | |
999e07d6 ORL |
284 | |
285 | temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) & | |
286 | OMAP_POWERSTATEST_MASK; | |
287 | if (!(temp & 0x02)) { | |
288 | /* IVA2 is not in ON state */ | |
289 | /* Read and set PM_PWSTCTRL_IVA2 to ON */ | |
290 | (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK, | |
291 | PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL); | |
292 | /* Set the SW supervised state transition */ | |
293 | (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP, | |
294 | OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); | |
295 | ||
296 | /* Wait until the state has moved to ON */ | |
297 | while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) & | |
298 | OMAP_INTRANSITION_MASK) | |
299 | ; | |
300 | /* Disable Automatic transition */ | |
301 | (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, | |
302 | OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); | |
303 | } | |
1cf3fb2d FC |
304 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, |
305 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | |
999e07d6 ORL |
306 | dsp_clk_enable(DSP_CLK_IVA2); |
307 | ||
e6486d8c ER |
308 | /* set the device state to IDLE */ |
309 | dev_context->dw_brd_state = BRD_IDLE; | |
310 | ||
311 | return 0; | |
999e07d6 ORL |
312 | } |
313 | ||
314 | /* | |
315 | * ======== bridge_brd_read ======== | |
316 | * purpose: | |
317 | * Reads buffers for DSP memory. | |
318 | */ | |
e6890692 | 319 | static int bridge_brd_read(struct bridge_dev_context *dev_ctxt, |
e6bf74f0 | 320 | u8 *host_buff, u32 dsp_addr, |
5e2eae57 | 321 | u32 ul_num_bytes, u32 mem_type) |
999e07d6 ORL |
322 | { |
323 | int status = 0; | |
e6890692 | 324 | struct bridge_dev_context *dev_context = dev_ctxt; |
999e07d6 | 325 | u32 offset; |
e6890692 | 326 | u32 dsp_base_addr = dev_ctxt->dw_dsp_base_addr; |
999e07d6 | 327 | |
b301c858 | 328 | if (dsp_addr < dev_context->dw_dsp_start_add) { |
999e07d6 ORL |
329 | status = -EPERM; |
330 | return status; | |
331 | } | |
332 | /* change here to account for the 3 bands of the DSP internal memory */ | |
b301c858 | 333 | if ((dsp_addr - dev_context->dw_dsp_start_add) < |
999e07d6 | 334 | dev_context->dw_internal_size) { |
b301c858 | 335 | offset = dsp_addr - dev_context->dw_dsp_start_add; |
999e07d6 | 336 | } else { |
aa09b091 | 337 | status = read_ext_dsp_data(dev_context, host_buff, dsp_addr, |
5e2eae57 | 338 | ul_num_bytes, mem_type); |
999e07d6 ORL |
339 | return status; |
340 | } | |
341 | /* copy the data from DSP memory, */ | |
aa09b091 | 342 | memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes); |
999e07d6 ORL |
343 | return status; |
344 | } | |
345 | ||
346 | /* | |
347 | * ======== bridge_brd_set_state ======== | |
348 | * purpose: | |
349 | * This routine updates the Board status. | |
350 | */ | |
e6890692 | 351 | static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt, |
5e2eae57 | 352 | u32 brd_state) |
999e07d6 ORL |
353 | { |
354 | int status = 0; | |
e6890692 | 355 | struct bridge_dev_context *dev_context = dev_ctxt; |
999e07d6 | 356 | |
5e2eae57 | 357 | dev_context->dw_brd_state = brd_state; |
999e07d6 ORL |
358 | return status; |
359 | } | |
360 | ||
361 | /* | |
362 | * ======== bridge_brd_start ======== | |
363 | * purpose: | |
364 | * Initializes DSP MMU and Starts DSP. | |
365 | * | |
366 | * Preconditions: | |
367 | * a) DSP domain is 'ACTIVE'. | |
368 | * b) DSP_RST1 is asserted. | |
369 | * b) DSP_RST2 is released. | |
370 | */ | |
e6890692 | 371 | static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, |
b301c858 | 372 | u32 dsp_addr) |
999e07d6 ORL |
373 | { |
374 | int status = 0; | |
e6890692 | 375 | struct bridge_dev_context *dev_context = dev_ctxt; |
999e07d6 ORL |
376 | u32 dw_sync_addr = 0; |
377 | u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */ | |
378 | u32 ul_shm_base_virt; /* Dsp Virt SM base addr */ | |
379 | u32 ul_tlb_base_virt; /* Base of MMU TLB entry */ | |
380 | /* Offset of shm_base_virt from tlb_base_virt */ | |
381 | u32 ul_shm_offset_virt; | |
1cf3fb2d FC |
382 | s32 entry_ndx; |
383 | s32 itmp_entry_ndx = 0; /* DSP-MMU TLB entry base address */ | |
999e07d6 ORL |
384 | struct cfg_hostres *resources = NULL; |
385 | u32 temp; | |
386 | u32 ul_dsp_clk_rate; | |
387 | u32 ul_dsp_clk_addr; | |
388 | u32 ul_bios_gp_timer; | |
389 | u32 clk_cmd; | |
390 | struct io_mgr *hio_mgr; | |
391 | u32 ul_load_monitor_timer; | |
82d4b477 FC |
392 | struct omap_dsp_platform_data *pdata = |
393 | omap_dspbridge_dev->dev.platform_data; | |
999e07d6 ORL |
394 | |
395 | /* The device context contains all the mmu setup info from when the | |
396 | * last dsp base image was loaded. The first entry is always | |
397 | * SHMMEM base. */ | |
398 | /* Get SHM_BEG - convert to byte address */ | |
399 | (void)dev_get_symbol(dev_context->hdev_obj, SHMBASENAME, | |
400 | &ul_shm_base_virt); | |
401 | ul_shm_base_virt *= DSPWORDSIZE; | |
402 | DBC_ASSERT(ul_shm_base_virt != 0); | |
403 | /* DSP Virtual address */ | |
1cf3fb2d | 404 | ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va; |
999e07d6 ORL |
405 | DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); |
406 | ul_shm_offset_virt = | |
407 | ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE); | |
408 | /* Kernel logical address */ | |
1cf3fb2d | 409 | ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt; |
999e07d6 ORL |
410 | |
411 | DBC_ASSERT(ul_shm_base != 0); | |
412 | /* 2nd wd is used as sync field */ | |
413 | dw_sync_addr = ul_shm_base + SHMSYNCOFFSET; | |
414 | /* Write a signature into the shm base + offset; this will | |
415 | * get cleared when the DSP program starts. */ | |
416 | if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) { | |
417 | pr_err("%s: Illegal SM base\n", __func__); | |
418 | status = -EPERM; | |
419 | } else | |
b3c8aef0 | 420 | __raw_writel(0xffffffff, dw_sync_addr); |
999e07d6 | 421 | |
e6486d8c | 422 | if (!status) { |
999e07d6 ORL |
423 | resources = dev_context->resources; |
424 | if (!resources) | |
425 | status = -EPERM; | |
426 | ||
427 | /* Assert RST1 i.e only the RST only for DSP megacell */ | |
e6486d8c | 428 | if (!status) { |
999e07d6 ORL |
429 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, |
430 | OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, | |
431 | OMAP2_RM_RSTCTRL); | |
432 | /* Mask address with 1K for compatibility */ | |
b301c858 | 433 | __raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK, |
999e07d6 ORL |
434 | OMAP343X_CTRL_REGADDR( |
435 | OMAP343X_CONTROL_IVA2_BOOTADDR)); | |
436 | /* | |
437 | * Set bootmode to self loop if dsp_debug flag is true | |
438 | */ | |
439 | __raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0, | |
440 | OMAP343X_CTRL_REGADDR( | |
441 | OMAP343X_CONTROL_IVA2_BOOTMOD)); | |
442 | } | |
443 | } | |
0c10e91b | 444 | if (!status) { |
50ad26f4 FC |
445 | /* Reset and Unreset the RST2, so that BOOTADDR is copied to |
446 | * IVA2 SYSC register */ | |
447 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, | |
448 | OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | |
449 | udelay(100); | |
450 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, | |
451 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | |
452 | udelay(100); | |
453 | ||
454 | /* Disbale the DSP MMU */ | |
455 | hw_mmu_disable(resources->dw_dmmu_base); | |
456 | /* Disable TWL */ | |
457 | hw_mmu_twl_disable(resources->dw_dmmu_base); | |
458 | ||
1cf3fb2d FC |
459 | /* Only make TLB entry if both addresses are non-zero */ |
460 | for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; | |
461 | entry_ndx++) { | |
462 | struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx]; | |
50ad26f4 FC |
463 | struct hw_mmu_map_attrs_t map_attrs = { |
464 | .endianism = e->endianism, | |
465 | .element_size = e->elem_size, | |
466 | .mixed_size = e->mixed_mode, | |
467 | }; | |
1cf3fb2d FC |
468 | |
469 | if (!e->ul_gpp_pa || !e->ul_dsp_va) | |
999e07d6 ORL |
470 | continue; |
471 | ||
1cf3fb2d FC |
472 | dev_dbg(bridge, |
473 | "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x", | |
474 | itmp_entry_ndx, | |
475 | e->ul_gpp_pa, | |
476 | e->ul_dsp_va, | |
477 | e->ul_size); | |
999e07d6 | 478 | |
50ad26f4 FC |
479 | hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base, |
480 | e->ul_gpp_pa, | |
481 | e->ul_dsp_va, | |
482 | e->ul_size, | |
483 | itmp_entry_ndx, | |
484 | &map_attrs, 1, 1); | |
485 | ||
1cf3fb2d | 486 | itmp_entry_ndx++; |
999e07d6 ORL |
487 | } |
488 | } | |
489 | ||
490 | /* Lock the above TLB entries and get the BIOS and load monitor timer | |
491 | * information */ | |
e6486d8c | 492 | if (!status) { |
50ad26f4 FC |
493 | hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx); |
494 | hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx); | |
495 | hw_mmu_ttb_set(resources->dw_dmmu_base, | |
496 | dev_context->pt_attrs->l1_base_pa); | |
497 | hw_mmu_twl_enable(resources->dw_dmmu_base); | |
498 | /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */ | |
499 | ||
500 | temp = __raw_readl((resources->dw_dmmu_base) + 0x10); | |
501 | temp = (temp & 0xFFFFFFEF) | 0x11; | |
502 | __raw_writel(temp, (resources->dw_dmmu_base) + 0x10); | |
503 | ||
504 | /* Let the DSP MMU run */ | |
505 | hw_mmu_enable(resources->dw_dmmu_base); | |
506 | ||
999e07d6 ORL |
507 | /* Enable the BIOS clock */ |
508 | (void)dev_get_symbol(dev_context->hdev_obj, | |
509 | BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer); | |
510 | (void)dev_get_symbol(dev_context->hdev_obj, | |
511 | BRIDGEINIT_LOADMON_GPTIMER, | |
512 | &ul_load_monitor_timer); | |
50ad26f4 | 513 | } |
999e07d6 | 514 | |
50ad26f4 | 515 | if (!status) { |
999e07d6 ORL |
516 | if (ul_load_monitor_timer != 0xFFFF) { |
517 | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | | |
518 | ul_load_monitor_timer; | |
519 | dsp_peripheral_clk_ctrl(dev_context, &clk_cmd); | |
520 | } else { | |
521 | dev_dbg(bridge, "Not able to get the symbol for Load " | |
522 | "Monitor Timer\n"); | |
523 | } | |
50ad26f4 | 524 | } |
999e07d6 | 525 | |
50ad26f4 | 526 | if (!status) { |
999e07d6 ORL |
527 | if (ul_bios_gp_timer != 0xFFFF) { |
528 | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | | |
529 | ul_bios_gp_timer; | |
530 | dsp_peripheral_clk_ctrl(dev_context, &clk_cmd); | |
531 | } else { | |
532 | dev_dbg(bridge, | |
533 | "Not able to get the symbol for BIOS Timer\n"); | |
534 | } | |
50ad26f4 | 535 | } |
999e07d6 | 536 | |
50ad26f4 | 537 | if (!status) { |
999e07d6 ORL |
538 | /* Set the DSP clock rate */ |
539 | (void)dev_get_symbol(dev_context->hdev_obj, | |
540 | "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr); | |
541 | /*Set Autoidle Mode for IVA2 PLL */ | |
542 | (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT, | |
543 | OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL); | |
544 | ||
545 | if ((unsigned int *)ul_dsp_clk_addr != NULL) { | |
546 | /* Get the clock rate */ | |
547 | ul_dsp_clk_rate = dsp_clk_get_iva2_rate(); | |
548 | dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x \n", | |
549 | __func__, ul_dsp_clk_rate); | |
550 | (void)bridge_brd_write(dev_context, | |
551 | (u8 *) &ul_dsp_clk_rate, | |
552 | ul_dsp_clk_addr, sizeof(u32), 0); | |
553 | } | |
554 | /* | |
555 | * Enable Mailbox events and also drain any pending | |
556 | * stale messages. | |
557 | */ | |
85d139c9 | 558 | dev_context->mbox = omap_mbox_get("dsp", &dsp_mbox_notifier); |
999e07d6 ORL |
559 | if (IS_ERR(dev_context->mbox)) { |
560 | dev_context->mbox = NULL; | |
561 | pr_err("%s: Failed to get dsp mailbox handle\n", | |
562 | __func__); | |
563 | status = -EPERM; | |
564 | } | |
565 | ||
566 | } | |
e6486d8c | 567 | if (!status) { |
999e07d6 | 568 | /*PM_IVA2GRPSEL_PER = 0xC0;*/ |
7124cb17 | 569 | temp = readl(resources->dw_per_pm_base + 0xA8); |
999e07d6 | 570 | temp = (temp & 0xFFFFFF30) | 0xC0; |
7124cb17 | 571 | writel(temp, resources->dw_per_pm_base + 0xA8); |
999e07d6 ORL |
572 | |
573 | /*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */ | |
7124cb17 | 574 | temp = readl(resources->dw_per_pm_base + 0xA4); |
999e07d6 | 575 | temp = (temp & 0xFFFFFF3F); |
7124cb17 | 576 | writel(temp, resources->dw_per_pm_base + 0xA4); |
999e07d6 | 577 | /*CM_SLEEPDEP_PER |= 0x04; */ |
7124cb17 | 578 | temp = readl(resources->dw_per_base + 0x44); |
999e07d6 | 579 | temp = (temp & 0xFFFFFFFB) | 0x04; |
7124cb17 | 580 | writel(temp, resources->dw_per_base + 0x44); |
999e07d6 ORL |
581 | |
582 | /*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions */ | |
583 | (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, | |
584 | OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); | |
585 | ||
586 | /* Let DSP go */ | |
587 | dev_dbg(bridge, "%s Unreset\n", __func__); | |
50ad26f4 FC |
588 | /* Enable DSP MMU Interrupts */ |
589 | hw_mmu_event_enable(resources->dw_dmmu_base, | |
590 | HW_MMU_ALL_INTERRUPTS); | |
999e07d6 ORL |
591 | /* release the RST1, DSP starts executing now .. */ |
592 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0, | |
593 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | |
594 | ||
595 | dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr); | |
b301c858 | 596 | dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr); |
999e07d6 | 597 | if (dsp_debug) |
b3c8aef0 | 598 | while (__raw_readw(dw_sync_addr)) |
859171ca | 599 | ; |
999e07d6 ORL |
600 | |
601 | /* Wait for DSP to clear word in shared memory */ | |
602 | /* Read the Location */ | |
603 | if (!wait_for_start(dev_context, dw_sync_addr)) | |
604 | status = -ETIMEDOUT; | |
605 | ||
606 | /* Start wdt */ | |
607 | dsp_wdt_sm_set((void *)ul_shm_base); | |
608 | dsp_wdt_enable(true); | |
609 | ||
610 | status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr); | |
611 | if (hio_mgr) { | |
612 | io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL); | |
613 | /* Write the synchronization bit to indicate the | |
614 | * completion of OPP table update to DSP | |
615 | */ | |
b3c8aef0 | 616 | __raw_writel(0XCAFECAFE, dw_sync_addr); |
999e07d6 ORL |
617 | |
618 | /* update board state */ | |
619 | dev_context->dw_brd_state = BRD_RUNNING; | |
1cf3fb2d | 620 | /* (void)chnlsm_enable_interrupt(dev_context); */ |
999e07d6 ORL |
621 | } else { |
622 | dev_context->dw_brd_state = BRD_UNKNOWN; | |
623 | } | |
624 | } | |
625 | return status; | |
626 | } | |
627 | ||
628 | /* | |
629 | * ======== bridge_brd_stop ======== | |
630 | * purpose: | |
631 | * Puts DSP in self loop. | |
632 | * | |
633 | * Preconditions : | |
634 | * a) None | |
635 | */ | |
e6890692 | 636 | static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt) |
999e07d6 ORL |
637 | { |
638 | int status = 0; | |
e6890692 | 639 | struct bridge_dev_context *dev_context = dev_ctxt; |
ac8a139a | 640 | struct pg_table_attrs *pt_attrs; |
999e07d6 | 641 | u32 dsp_pwr_state; |
82d4b477 FC |
642 | struct omap_dsp_platform_data *pdata = |
643 | omap_dspbridge_dev->dev.platform_data; | |
999e07d6 ORL |
644 | |
645 | if (dev_context->dw_brd_state == BRD_STOPPED) | |
646 | return status; | |
647 | ||
648 | /* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode, | |
649 | * before turning off the clocks.. This is to ensure that there are no | |
650 | * pending L3 or other transactons from IVA2 */ | |
651 | dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) & | |
652 | OMAP_POWERSTATEST_MASK; | |
653 | if (dsp_pwr_state != PWRDM_POWER_OFF) { | |
a2c22721 ER |
654 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, |
655 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | |
999e07d6 ORL |
656 | sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE); |
657 | mdelay(10); | |
658 | ||
999e07d6 ORL |
659 | /* IVA2 is not in OFF state */ |
660 | /* Set PM_PWSTCTRL_IVA2 to OFF */ | |
661 | (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK, | |
662 | PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL); | |
663 | /* Set the SW supervised state transition for Sleep */ | |
664 | (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP, | |
665 | OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); | |
999e07d6 ORL |
666 | } |
667 | udelay(10); | |
668 | /* Release the Ext Base virtual Address as the next DSP Program | |
669 | * may have a different load address */ | |
670 | if (dev_context->dw_dsp_ext_base_addr) | |
671 | dev_context->dw_dsp_ext_base_addr = 0; | |
672 | ||
673 | dev_context->dw_brd_state = BRD_STOPPED; /* update board state */ | |
674 | ||
675 | dsp_wdt_enable(false); | |
676 | ||
ac8a139a FC |
677 | /* This is a good place to clear the MMU page tables as well */ |
678 | if (dev_context->pt_attrs) { | |
679 | pt_attrs = dev_context->pt_attrs; | |
680 | memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size); | |
681 | memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size); | |
682 | memset((u8 *) pt_attrs->pg_info, 0x00, | |
683 | (pt_attrs->l2_num_pages * sizeof(struct page_info))); | |
684 | } | |
999e07d6 ORL |
685 | /* Disable the mailbox interrupts */ |
686 | if (dev_context->mbox) { | |
687 | omap_mbox_disable_irq(dev_context->mbox, IRQ_RX); | |
85d139c9 | 688 | omap_mbox_put(dev_context->mbox, &dsp_mbox_notifier); |
999e07d6 ORL |
689 | dev_context->mbox = NULL; |
690 | } | |
1cf3fb2d FC |
691 | /* Reset IVA2 clocks*/ |
692 | (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK | | |
693 | OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | |
999e07d6 | 694 | |
0ee5ab30 ER |
695 | dsp_clock_disable_all(dev_context->dsp_per_clks); |
696 | dsp_clk_disable(DSP_CLK_IVA2); | |
a2c22721 | 697 | |
999e07d6 ORL |
698 | return status; |
699 | } | |
700 | ||
999e07d6 ORL |
701 | /* |
702 | * ======== bridge_brd_status ======== | |
703 | * Returns the board status. | |
704 | */ | |
e6890692 | 705 | static int bridge_brd_status(struct bridge_dev_context *dev_ctxt, |
a5120278 | 706 | int *board_state) |
999e07d6 | 707 | { |
e6890692 | 708 | struct bridge_dev_context *dev_context = dev_ctxt; |
a5120278 | 709 | *board_state = dev_context->dw_brd_state; |
999e07d6 ORL |
710 | return 0; |
711 | } | |
712 | ||
713 | /* | |
714 | * ======== bridge_brd_write ======== | |
715 | * Copies the buffers to DSP internal or external memory. | |
716 | */ | |
e6890692 | 717 | static int bridge_brd_write(struct bridge_dev_context *dev_ctxt, |
9d7d0a52 | 718 | u8 *host_buff, u32 dsp_addr, |
5e2eae57 | 719 | u32 ul_num_bytes, u32 mem_type) |
999e07d6 ORL |
720 | { |
721 | int status = 0; | |
e6890692 | 722 | struct bridge_dev_context *dev_context = dev_ctxt; |
999e07d6 | 723 | |
b301c858 | 724 | if (dsp_addr < dev_context->dw_dsp_start_add) { |
999e07d6 ORL |
725 | status = -EPERM; |
726 | return status; | |
727 | } | |
b301c858 | 728 | if ((dsp_addr - dev_context->dw_dsp_start_add) < |
999e07d6 | 729 | dev_context->dw_internal_size) { |
aa09b091 | 730 | status = write_dsp_data(dev_ctxt, host_buff, dsp_addr, |
5e2eae57 | 731 | ul_num_bytes, mem_type); |
999e07d6 | 732 | } else { |
aa09b091 | 733 | status = write_ext_dsp_data(dev_context, host_buff, dsp_addr, |
5e2eae57 | 734 | ul_num_bytes, mem_type, false); |
999e07d6 ORL |
735 | } |
736 | ||
737 | return status; | |
738 | } | |
739 | ||
740 | /* | |
741 | * ======== bridge_dev_create ======== | |
742 | * Creates a driver object. Puts DSP in self loop. | |
743 | */ | |
e6bf74f0 | 744 | static int bridge_dev_create(struct bridge_dev_context |
fb6aabb7 | 745 | **dev_cntxt, |
999e07d6 | 746 | struct dev_object *hdev_obj, |
9d7d0a52 | 747 | struct cfg_hostres *config_param) |
999e07d6 ORL |
748 | { |
749 | int status = 0; | |
750 | struct bridge_dev_context *dev_context = NULL; | |
751 | s32 entry_ndx; | |
aa09b091 | 752 | struct cfg_hostres *resources = config_param; |
ac8a139a FC |
753 | struct pg_table_attrs *pt_attrs; |
754 | u32 pg_tbl_pa; | |
755 | u32 pg_tbl_va; | |
756 | u32 align_size; | |
999e07d6 ORL |
757 | struct drv_data *drv_datap = dev_get_drvdata(bridge); |
758 | ||
759 | /* Allocate and initialize a data structure to contain the bridge driver | |
760 | * state, which becomes the context for later calls into this driver */ | |
761 | dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL); | |
762 | if (!dev_context) { | |
763 | status = -ENOMEM; | |
764 | goto func_end; | |
765 | } | |
766 | ||
767 | dev_context->dw_dsp_start_add = (u32) OMAP_GEM_BASE; | |
768 | dev_context->dw_self_loop = (u32) NULL; | |
769 | dev_context->dsp_per_clks = 0; | |
770 | dev_context->dw_internal_size = OMAP_DSP_SIZE; | |
771 | /* Clear dev context MMU table entries. | |
772 | * These get set on bridge_io_on_loaded() call after program loaded. */ | |
773 | for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) { | |
774 | dev_context->atlb_entry[entry_ndx].ul_gpp_pa = | |
775 | dev_context->atlb_entry[entry_ndx].ul_dsp_va = 0; | |
776 | } | |
999e07d6 | 777 | dev_context->dw_dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *) |
aa09b091 | 778 | (config_param-> |
999e07d6 ORL |
779 | dw_mem_base |
780 | [3]), | |
aa09b091 | 781 | config_param-> |
999e07d6 ORL |
782 | dw_mem_length |
783 | [3]); | |
784 | if (!dev_context->dw_dsp_base_addr) | |
785 | status = -EPERM; | |
786 | ||
ac8a139a FC |
787 | pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL); |
788 | if (pt_attrs != NULL) { | |
8e290fd4 | 789 | pt_attrs->l1_size = SZ_16K; /* 4096 entries of 32 bits */ |
ac8a139a FC |
790 | align_size = pt_attrs->l1_size; |
791 | /* Align sizes are expected to be power of 2 */ | |
792 | /* we like to get aligned on L1 table size */ | |
793 | pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size, | |
794 | align_size, &pg_tbl_pa); | |
795 | ||
796 | /* Check if the PA is aligned for us */ | |
797 | if ((pg_tbl_pa) & (align_size - 1)) { | |
798 | /* PA not aligned to page table size , | |
799 | * try with more allocation and align */ | |
800 | mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa, | |
801 | pt_attrs->l1_size); | |
802 | /* we like to get aligned on L1 table size */ | |
803 | pg_tbl_va = | |
804 | (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2, | |
805 | align_size, &pg_tbl_pa); | |
806 | /* We should be able to get aligned table now */ | |
807 | pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa; | |
808 | pt_attrs->l1_tbl_alloc_va = pg_tbl_va; | |
809 | pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2; | |
810 | /* Align the PA to the next 'align' boundary */ | |
811 | pt_attrs->l1_base_pa = | |
812 | ((pg_tbl_pa) + | |
813 | (align_size - 1)) & (~(align_size - 1)); | |
814 | pt_attrs->l1_base_va = | |
815 | pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa); | |
816 | } else { | |
817 | /* We got aligned PA, cool */ | |
818 | pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa; | |
819 | pt_attrs->l1_tbl_alloc_va = pg_tbl_va; | |
820 | pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size; | |
821 | pt_attrs->l1_base_pa = pg_tbl_pa; | |
822 | pt_attrs->l1_base_va = pg_tbl_va; | |
823 | } | |
824 | if (pt_attrs->l1_base_va) | |
825 | memset((u8 *) pt_attrs->l1_base_va, 0x00, | |
826 | pt_attrs->l1_size); | |
827 | ||
828 | /* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM + | |
829 | * L4 pages */ | |
830 | pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6); | |
831 | pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE * | |
832 | pt_attrs->l2_num_pages; | |
833 | align_size = 4; /* Make it u32 aligned */ | |
834 | /* we like to get aligned on L1 table size */ | |
835 | pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size, | |
836 | align_size, &pg_tbl_pa); | |
837 | pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa; | |
838 | pt_attrs->l2_tbl_alloc_va = pg_tbl_va; | |
839 | pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size; | |
840 | pt_attrs->l2_base_pa = pg_tbl_pa; | |
841 | pt_attrs->l2_base_va = pg_tbl_va; | |
842 | ||
843 | if (pt_attrs->l2_base_va) | |
844 | memset((u8 *) pt_attrs->l2_base_va, 0x00, | |
845 | pt_attrs->l2_size); | |
846 | ||
847 | pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages * | |
848 | sizeof(struct page_info), GFP_KERNEL); | |
849 | dev_dbg(bridge, | |
850 | "L1 pa %x, va %x, size %x\n L2 pa %x, va " | |
851 | "%x, size %x\n", pt_attrs->l1_base_pa, | |
852 | pt_attrs->l1_base_va, pt_attrs->l1_size, | |
853 | pt_attrs->l2_base_pa, pt_attrs->l2_base_va, | |
854 | pt_attrs->l2_size); | |
855 | dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n", | |
856 | pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info); | |
857 | } | |
858 | if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) && | |
859 | (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL)) | |
860 | dev_context->pt_attrs = pt_attrs; | |
861 | else | |
862 | status = -ENOMEM; | |
863 | ||
e6486d8c | 864 | if (!status) { |
ac8a139a | 865 | spin_lock_init(&pt_attrs->pg_lock); |
999e07d6 | 866 | dev_context->tc_word_swap_on = drv_datap->tc_wordswapon; |
ac8a139a FC |
867 | |
868 | /* Set the Clock Divisor for the DSP module */ | |
869 | udelay(5); | |
870 | /* MMU address is obtained from the host | |
871 | * resources struct */ | |
872 | dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base; | |
873 | } | |
874 | if (!status) { | |
999e07d6 | 875 | dev_context->hdev_obj = hdev_obj; |
999e07d6 | 876 | /* Store current board state. */ |
a2205e48 | 877 | dev_context->dw_brd_state = BRD_UNKNOWN; |
999e07d6 | 878 | dev_context->resources = resources; |
a2205e48 ER |
879 | dsp_clk_enable(DSP_CLK_IVA2); |
880 | bridge_brd_stop(dev_context); | |
999e07d6 | 881 | /* Return ptr to our device state to the DSP API for storage */ |
fb6aabb7 | 882 | *dev_cntxt = dev_context; |
999e07d6 | 883 | } else { |
ac8a139a FC |
884 | if (pt_attrs != NULL) { |
885 | kfree(pt_attrs->pg_info); | |
886 | ||
887 | if (pt_attrs->l2_tbl_alloc_va) { | |
888 | mem_free_phys_mem((void *) | |
889 | pt_attrs->l2_tbl_alloc_va, | |
890 | pt_attrs->l2_tbl_alloc_pa, | |
891 | pt_attrs->l2_tbl_alloc_sz); | |
892 | } | |
893 | if (pt_attrs->l1_tbl_alloc_va) { | |
894 | mem_free_phys_mem((void *) | |
895 | pt_attrs->l1_tbl_alloc_va, | |
896 | pt_attrs->l1_tbl_alloc_pa, | |
897 | pt_attrs->l1_tbl_alloc_sz); | |
898 | } | |
899 | } | |
900 | kfree(pt_attrs); | |
999e07d6 ORL |
901 | kfree(dev_context); |
902 | } | |
903 | func_end: | |
904 | return status; | |
905 | } | |
906 | ||
907 | /* | |
908 | * ======== bridge_dev_ctrl ======== | |
909 | * Receives device specific commands. | |
910 | */ | |
911 | static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, | |
e6bf74f0 | 912 | u32 dw_cmd, void *pargs) |
999e07d6 ORL |
913 | { |
914 | int status = 0; | |
915 | struct bridge_ioctl_extproc *pa_ext_proc = | |
916 | (struct bridge_ioctl_extproc *)pargs; | |
917 | s32 ndx; | |
918 | ||
919 | switch (dw_cmd) { | |
920 | case BRDIOCTL_CHNLREAD: | |
921 | break; | |
922 | case BRDIOCTL_CHNLWRITE: | |
923 | break; | |
924 | case BRDIOCTL_SETMMUCONFIG: | |
925 | /* store away dsp-mmu setup values for later use */ | |
926 | for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++) | |
927 | dev_context->atlb_entry[ndx] = *pa_ext_proc; | |
928 | break; | |
929 | case BRDIOCTL_DEEPSLEEP: | |
930 | case BRDIOCTL_EMERGENCYSLEEP: | |
931 | /* Currently only DSP Idle is supported Need to update for | |
932 | * later releases */ | |
933 | status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs); | |
934 | break; | |
935 | case BRDIOCTL_WAKEUP: | |
936 | status = wake_dsp(dev_context, pargs); | |
937 | break; | |
938 | case BRDIOCTL_CLK_CTRL: | |
939 | status = 0; | |
940 | /* Looking For Baseport Fix for Clocks */ | |
941 | status = dsp_peripheral_clk_ctrl(dev_context, pargs); | |
942 | break; | |
943 | case BRDIOCTL_PWR_HIBERNATE: | |
944 | status = handle_hibernation_from_dsp(dev_context); | |
945 | break; | |
946 | case BRDIOCTL_PRESCALE_NOTIFY: | |
947 | status = pre_scale_dsp(dev_context, pargs); | |
948 | break; | |
949 | case BRDIOCTL_POSTSCALE_NOTIFY: | |
950 | status = post_scale_dsp(dev_context, pargs); | |
951 | break; | |
952 | case BRDIOCTL_CONSTRAINT_REQUEST: | |
953 | status = handle_constraints_set(dev_context, pargs); | |
954 | break; | |
955 | default: | |
956 | status = -EPERM; | |
957 | break; | |
958 | } | |
959 | return status; | |
960 | } | |
961 | ||
962 | /* | |
963 | * ======== bridge_dev_destroy ======== | |
964 | * Destroys the driver object. | |
965 | */ | |
e6890692 | 966 | static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) |
999e07d6 | 967 | { |
ac8a139a | 968 | struct pg_table_attrs *pt_attrs; |
999e07d6 ORL |
969 | int status = 0; |
970 | struct bridge_dev_context *dev_context = (struct bridge_dev_context *) | |
e6890692 | 971 | dev_ctxt; |
999e07d6 ORL |
972 | struct cfg_hostres *host_res; |
973 | u32 shm_size; | |
974 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | |
975 | ||
976 | /* It should never happen */ | |
e6890692 | 977 | if (!dev_ctxt) |
999e07d6 ORL |
978 | return -EFAULT; |
979 | ||
980 | /* first put the device to stop state */ | |
7c9305b8 | 981 | bridge_brd_stop(dev_context); |
ac8a139a FC |
982 | if (dev_context->pt_attrs) { |
983 | pt_attrs = dev_context->pt_attrs; | |
984 | kfree(pt_attrs->pg_info); | |
985 | ||
986 | if (pt_attrs->l2_tbl_alloc_va) { | |
987 | mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va, | |
988 | pt_attrs->l2_tbl_alloc_pa, | |
989 | pt_attrs->l2_tbl_alloc_sz); | |
990 | } | |
991 | if (pt_attrs->l1_tbl_alloc_va) { | |
992 | mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va, | |
993 | pt_attrs->l1_tbl_alloc_pa, | |
994 | pt_attrs->l1_tbl_alloc_sz); | |
995 | } | |
996 | kfree(pt_attrs); | |
997 | ||
998 | } | |
999e07d6 ORL |
999 | |
1000 | if (dev_context->resources) { | |
1001 | host_res = dev_context->resources; | |
1002 | shm_size = drv_datap->shm_size; | |
1003 | if (shm_size >= 0x10000) { | |
1004 | if ((host_res->dw_mem_base[1]) && | |
1005 | (host_res->dw_mem_phys[1])) { | |
1006 | mem_free_phys_mem((void *) | |
1007 | host_res->dw_mem_base | |
1008 | [1], | |
1009 | host_res->dw_mem_phys | |
1010 | [1], shm_size); | |
1011 | } | |
1012 | } else { | |
1013 | dev_dbg(bridge, "%s: Error getting shm size " | |
1014 | "from registry: %x. Not calling " | |
1015 | "mem_free_phys_mem\n", __func__, | |
1016 | status); | |
1017 | } | |
1018 | host_res->dw_mem_base[1] = 0; | |
1019 | host_res->dw_mem_phys[1] = 0; | |
1020 | ||
1021 | if (host_res->dw_mem_base[0]) | |
1022 | iounmap((void *)host_res->dw_mem_base[0]); | |
1023 | if (host_res->dw_mem_base[2]) | |
1024 | iounmap((void *)host_res->dw_mem_base[2]); | |
1025 | if (host_res->dw_mem_base[3]) | |
1026 | iounmap((void *)host_res->dw_mem_base[3]); | |
1027 | if (host_res->dw_mem_base[4]) | |
1028 | iounmap((void *)host_res->dw_mem_base[4]); | |
9d4f81a7 FC |
1029 | if (host_res->dw_dmmu_base) |
1030 | iounmap(host_res->dw_dmmu_base); | |
999e07d6 ORL |
1031 | if (host_res->dw_per_base) |
1032 | iounmap(host_res->dw_per_base); | |
1033 | if (host_res->dw_per_pm_base) | |
1034 | iounmap((void *)host_res->dw_per_pm_base); | |
1035 | if (host_res->dw_core_pm_base) | |
1036 | iounmap((void *)host_res->dw_core_pm_base); | |
999e07d6 ORL |
1037 | |
1038 | host_res->dw_mem_base[0] = (u32) NULL; | |
1039 | host_res->dw_mem_base[2] = (u32) NULL; | |
1040 | host_res->dw_mem_base[3] = (u32) NULL; | |
1041 | host_res->dw_mem_base[4] = (u32) NULL; | |
9d4f81a7 | 1042 | host_res->dw_dmmu_base = NULL; |
999e07d6 ORL |
1043 | |
1044 | kfree(host_res); | |
1045 | } | |
1046 | ||
1047 | /* Free the driver's device context: */ | |
1048 | kfree(drv_datap->base_img); | |
1049 | kfree(drv_datap); | |
1050 | dev_set_drvdata(bridge, NULL); | |
e6890692 | 1051 | kfree((void *)dev_ctxt); |
999e07d6 ORL |
1052 | return status; |
1053 | } | |
1054 | ||
e6890692 | 1055 | static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt, |
5e2eae57 RS |
1056 | u32 dsp_dest_addr, u32 dsp_src_addr, |
1057 | u32 ul_num_bytes, u32 mem_type) | |
999e07d6 ORL |
1058 | { |
1059 | int status = 0; | |
5e2eae57 RS |
1060 | u32 src_addr = dsp_src_addr; |
1061 | u32 dest_addr = dsp_dest_addr; | |
999e07d6 ORL |
1062 | u32 copy_bytes = 0; |
1063 | u32 total_bytes = ul_num_bytes; | |
1064 | u8 host_buf[BUFFERSIZE]; | |
e6890692 | 1065 | struct bridge_dev_context *dev_context = dev_ctxt; |
e6486d8c | 1066 | while (total_bytes > 0 && !status) { |
999e07d6 ORL |
1067 | copy_bytes = |
1068 | total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes; | |
1069 | /* Read from External memory */ | |
e6890692 | 1070 | status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr, |
5e2eae57 | 1071 | copy_bytes, mem_type); |
e6486d8c | 1072 | if (!status) { |
999e07d6 ORL |
1073 | if (dest_addr < (dev_context->dw_dsp_start_add + |
1074 | dev_context->dw_internal_size)) { | |
1075 | /* Write to Internal memory */ | |
e6890692 | 1076 | status = write_dsp_data(dev_ctxt, host_buf, |
999e07d6 | 1077 | dest_addr, copy_bytes, |
5e2eae57 | 1078 | mem_type); |
999e07d6 ORL |
1079 | } else { |
1080 | /* Write to External memory */ | |
1081 | status = | |
e6890692 | 1082 | write_ext_dsp_data(dev_ctxt, host_buf, |
999e07d6 | 1083 | dest_addr, copy_bytes, |
5e2eae57 | 1084 | mem_type, false); |
999e07d6 ORL |
1085 | } |
1086 | } | |
1087 | total_bytes -= copy_bytes; | |
1088 | src_addr += copy_bytes; | |
1089 | dest_addr += copy_bytes; | |
1090 | } | |
1091 | return status; | |
1092 | } | |
1093 | ||
1094 | /* Mem Write does not halt the DSP to write unlike bridge_brd_write */ | |
e6890692 | 1095 | static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, |
9d7d0a52 | 1096 | u8 *host_buff, u32 dsp_addr, |
5e2eae57 | 1097 | u32 ul_num_bytes, u32 mem_type) |
999e07d6 ORL |
1098 | { |
1099 | int status = 0; | |
e6890692 | 1100 | struct bridge_dev_context *dev_context = dev_ctxt; |
999e07d6 ORL |
1101 | u32 ul_remain_bytes = 0; |
1102 | u32 ul_bytes = 0; | |
1103 | ul_remain_bytes = ul_num_bytes; | |
e6486d8c | 1104 | while (ul_remain_bytes > 0 && !status) { |
999e07d6 ORL |
1105 | ul_bytes = |
1106 | ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes; | |
b301c858 | 1107 | if (dsp_addr < (dev_context->dw_dsp_start_add + |
999e07d6 ORL |
1108 | dev_context->dw_internal_size)) { |
1109 | status = | |
aa09b091 | 1110 | write_dsp_data(dev_ctxt, host_buff, dsp_addr, |
5e2eae57 | 1111 | ul_bytes, mem_type); |
999e07d6 | 1112 | } else { |
aa09b091 | 1113 | status = write_ext_dsp_data(dev_ctxt, host_buff, |
b301c858 | 1114 | dsp_addr, ul_bytes, |
5e2eae57 | 1115 | mem_type, true); |
999e07d6 ORL |
1116 | } |
1117 | ul_remain_bytes -= ul_bytes; | |
b301c858 | 1118 | dsp_addr += ul_bytes; |
aa09b091 | 1119 | host_buff = host_buff + ul_bytes; |
999e07d6 ORL |
1120 | } |
1121 | return status; | |
1122 | } | |
1123 | ||
/*
 *  ======== bridge_brd_mem_map ========
 *      This function maps MPU buffer to the DSP address space. It performs
 *  linear to physical address translation if required. It translates each
 *  page since linear addresses can be physically non-contiguous
 *  All address & size arguments are assumed to be page aligned (in proc.c)
 *
 *  TODO: Disable MMU while updating the page tables (but that'll stall DSP)
 */
static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
				  u32 ul_mpu_addr, u32 virt_addr,
				  u32 ul_num_bytes, u32 ul_map_attr,
				  struct page **mapped_pages)
{
	u32 attrs;
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct hw_mmu_map_attrs_t hw_attrs;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	u32 write = 0;
	u32 num_usr_pgs = 0;
	struct page *mapped_page, *pg;
	s32 pg_num;
	u32 va = virt_addr;	/* running DSP VA, advanced per mapped page */
	struct task_struct *curr_task = current;
	u32 pg_i = 0;		/* pages mapped so far; used for rollback */
	u32 mpu_addr, pa;

	dev_dbg(bridge,
		"%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
		__func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
		ul_map_attr);
	if (ul_num_bytes == 0)
		return -EINVAL;

	if (ul_map_attr & DSP_MAP_DIR_MASK) {
		attrs = ul_map_attr;
	} else {
		/* Assign default attributes */
		attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
	}
	/* Take mapping properties: translate DSP_MAP* flags into the
	 * hardware MMU attribute structure. */
	if (attrs & DSP_MAPBIGENDIAN)
		hw_attrs.endianism = HW_BIG_ENDIAN;
	else
		hw_attrs.endianism = HW_LITTLE_ENDIAN;

	hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
	    ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
	/* Ignore element_size if mixed_size is enabled */
	if (hw_attrs.mixed_size == 0) {
		if (attrs & DSP_MAPELEMSIZE8) {
			/* Size is 8 bit */
			hw_attrs.element_size = HW_ELEM_SIZE8BIT;
		} else if (attrs & DSP_MAPELEMSIZE16) {
			/* Size is 16 bit */
			hw_attrs.element_size = HW_ELEM_SIZE16BIT;
		} else if (attrs & DSP_MAPELEMSIZE32) {
			/* Size is 32 bit */
			hw_attrs.element_size = HW_ELEM_SIZE32BIT;
		} else if (attrs & DSP_MAPELEMSIZE64) {
			/* Size is 64 bit */
			hw_attrs.element_size = HW_ELEM_SIZE64BIT;
		} else {
			/*
			 * Mixedsize isn't enabled, so size can't be
			 * zero here
			 */
			return -EINVAL;
		}
	}
	if (attrs & DSP_MAPDONOTLOCK)
		hw_attrs.donotlockmpupage = 1;
	else
		hw_attrs.donotlockmpupage = 0;

	/* vmalloc'ed buffers take a dedicated mapping path. */
	if (attrs & DSP_MAPVMALLOCADDR) {
		return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
				       ul_num_bytes, &hw_attrs);
	}
	/*
	 * Do OS-specific user-va to pa translation.
	 * Combine physically contiguous regions to reduce TLBs.
	 * Pass the translated pa to pte_update.
	 */
	if ((attrs & DSP_MAPPHYSICALADDR)) {
		status = pte_update(dev_context, ul_mpu_addr, virt_addr,
				    ul_num_bytes, &hw_attrs);
		goto func_cont;
	}

	/*
	 * Important Note: ul_mpu_addr is mapped from user application process
	 * to current process - it must lie completely within the current
	 * virtual memory address space in order to be of use to us here!
	 */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ul_mpu_addr);
	if (vma)
		dev_dbg(bridge,
			"VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
			ul_num_bytes, vma->vm_start, vma->vm_end,
			vma->vm_flags);

	/*
	 * It is observed that under some circumstances, the user buffer is
	 * spread across several VMAs. So loop through and check if the entire
	 * user buffer is covered
	 */
	while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
		/* jump to the next VMA region */
		vma = find_vma(mm, vma->vm_end + 1);
		dev_dbg(bridge,
			"VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
			ul_num_bytes, vma->vm_start, vma->vm_end,
			vma->vm_flags);
	}
	if (!vma) {
		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
		       __func__, ul_mpu_addr, ul_num_bytes);
		status = -EINVAL;
		up_read(&mm->mmap_sem);
		goto func_cont;
	}

	if (vma->vm_flags & VM_IO) {
		/* I/O-mapped VMA: walk the user page tables ourselves
		 * (get_user_pages() does not work on VM_IO regions). */
		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
		mpu_addr = ul_mpu_addr;

		/* Get the physical addresses for user buffer */
		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
			pa = user_va2_pa(mm, mpu_addr);
			if (!pa) {
				status = -EPERM;
				pr_err("DSPBRIDGE: VM_IO mapping physical"
				       "address is invalid\n");
				break;
			}
			/* Only pin the page when it belongs to a valid
			 * struct-page-backed frame; pure device memory
			 * has no struct page to get_page(). */
			if (pfn_valid(__phys_to_pfn(pa))) {
				pg = PHYS_TO_PAGE(pa);
				get_page(pg);
				if (page_count(pg) < 1) {
					pr_err("Bad page in VM_IO buffer\n");
					bad_page_dump(pa, pg);
				}
			}
			status = pte_set(dev_context->pt_attrs, pa,
					 va, HW_PAGE_SIZE4KB, &hw_attrs);
			if (status)
				break;

			va += HW_PAGE_SIZE4KB;
			mpu_addr += HW_PAGE_SIZE4KB;
			pa += HW_PAGE_SIZE4KB;
		}
	} else {
		/* Regular VMA: pin one user page at a time and map it.
		 * NOTE(review): this is the old 8-argument
		 * get_user_pages(task, mm, ...) signature — confirm against
		 * the kernel version this tree targets. */
		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
		if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
			write = 1;

		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
			pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
						write, 1, &mapped_page, NULL);
			if (pg_num > 0) {
				if (page_count(mapped_page) < 1) {
					pr_err("Bad page count after doing"
					       "get_user_pages on"
					       "user buffer\n");
					bad_page_dump(page_to_phys(mapped_page),
						      mapped_page);
				}
				status = pte_set(dev_context->pt_attrs,
						 page_to_phys(mapped_page), va,
						 HW_PAGE_SIZE4KB, &hw_attrs);
				if (status)
					break;

				if (mapped_pages)
					mapped_pages[pg_i] = mapped_page;

				va += HW_PAGE_SIZE4KB;
				ul_mpu_addr += HW_PAGE_SIZE4KB;
			} else {
				pr_err("DSPBRIDGE: get_user_pages FAILED,"
				       "MPU addr = 0x%x,"
				       "vma->vm_flags = 0x%lx,"
				       "get_user_pages Err"
				       "Value = %d, Buffer"
				       "size=0x%x\n", ul_mpu_addr,
				       vma->vm_flags, pg_num, ul_num_bytes);
				status = -EPERM;
				break;
			}
		}
	}
	up_read(&mm->mmap_sem);
func_cont:
	if (status) {
		/*
		 * Roll out the mapped pages incase it failed in middle of
		 * mapping
		 */
		if (pg_i) {
			bridge_brd_mem_un_map(dev_context, virt_addr,
					      (pg_i * PG_SIZE4K));
		}
		status = -EPERM;
	}
	/*
	 * In any case, flush the TLB
	 * This is called from here instead from pte_update to avoid unnecessary
	 * repetition while mapping non-contiguous physical regions of a virtual
	 * region
	 */
	flush_all(dev_context);
	dev_dbg(bridge, "%s status %x\n", __func__, status);
	return status;
}
1345 | ||
/*
 *  ======== bridge_brd_mem_un_map ========
 *      Invalidate the PTEs for the DSP VA block to be unmapped.
 *
 *      PTEs of a mapped memory block are contiguous in any page table
 *      So, instead of looking up the PTE address for every 4K block,
 *      we clear consecutive PTEs until we unmap all the bytes
 */
static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
				     u32 virt_addr, u32 ul_num_bytes)
{
	u32 l1_base_va;
	u32 l2_base_va;
	u32 l2_base_pa;
	u32 l2_page_num;
	u32 pte_val;
	u32 pte_size;
	u32 pte_count;
	u32 pte_addr_l1;
	u32 pte_addr_l2 = 0;
	u32 rem_bytes;		/* total bytes still to unmap */
	u32 rem_bytes_l2;	/* bytes still to unmap on current L2 page */
	u32 va_curr;
	struct page *pg = NULL;
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct pg_table_attrs *pt = dev_context->pt_attrs;
	u32 temp;
	u32 paddr;
	u32 numof4k_pages = 0;

	va_curr = virt_addr;
	rem_bytes = ul_num_bytes;
	rem_bytes_l2 = 0;
	l1_base_va = pt->l1_base_va;
	pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
	dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
		"pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
		ul_num_bytes, l1_base_va, pte_addr_l1);

	while (rem_bytes && !status) {
		u32 va_curr_orig = va_curr;
		/* Find whether the L1 PTE points to a valid L2 PT */
		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
		pte_val = *(u32 *) pte_addr_l1;
		pte_size = hw_mmu_pte_size_l1(pte_val);

		/* Section/supersection entries live in the L1 table only;
		 * handle them in the fall-through path below. */
		if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
			goto skip_coarse_page;

		/*
		 * Get the L2 PA from the L1 PTE, and find
		 * corresponding L2 VA
		 */
		l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
		l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
		l2_page_num =
		    (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
		/*
		 * Find the L2 PTE address from which we will start
		 * clearing, the number of PTEs to be cleared on this
		 * page, and the size of VA space that needs to be
		 * cleared on this L2 page
		 */
		pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
		pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
		pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
		if (rem_bytes < (pte_count * PG_SIZE4K))
			pte_count = rem_bytes / PG_SIZE4K;
		rem_bytes_l2 = pte_count * PG_SIZE4K;

		/*
		 * Unmap the VA space on this L2 PT. A quicker way
		 * would be to clear pte_count entries starting from
		 * pte_addr_l2. However, below code checks that we don't
		 * clear invalid entries or less than 64KB for a 64KB
		 * entry. Similar checking is done for L1 PTEs too
		 * below
		 */
		while (rem_bytes_l2 && !status) {
			pte_val = *(u32 *) pte_addr_l2;
			pte_size = hw_mmu_pte_size_l2(pte_val);
			/* va_curr aligned to pte_size? */
			if (pte_size == 0 || rem_bytes_l2 < pte_size ||
			    va_curr & (pte_size - 1)) {
				status = -EPERM;
				break;
			}

			/* Collect Physical addresses from VA */
			paddr = (pte_val & ~(pte_size - 1));
			if (pte_size == HW_PAGE_SIZE64KB)
				numof4k_pages = 16;
			else
				numof4k_pages = 1;
			temp = 0;
			/* Drop the refcount taken at map time on every
			 * struct-page-backed 4K frame of this entry. */
			while (temp++ < numof4k_pages) {
				if (!pfn_valid(__phys_to_pfn(paddr))) {
					paddr += HW_PAGE_SIZE4KB;
					continue;
				}
				pg = PHYS_TO_PAGE(paddr);
				if (page_count(pg) < 1) {
					pr_info("DSPBRIDGE: UNMAP function: "
						"COUNT 0 FOR PA 0x%x, size = "
						"0x%x\n", paddr, ul_num_bytes);
					bad_page_dump(paddr, pg);
				} else {
					set_page_dirty(pg);
					page_cache_release(pg);
				}
				paddr += HW_PAGE_SIZE4KB;
			}
			if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
				status = -EPERM;
				goto EXIT_LOOP;
			}

			status = 0;
			rem_bytes_l2 -= pte_size;
			va_curr += pte_size;
			pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
		}
		/* Book-keeping for the L2 page is done under pg_lock; when
		 * an L2 page runs out of entries its L1 pointer is cleared
		 * as well. */
		spin_lock(&pt->pg_lock);
		if (rem_bytes_l2 == 0) {
			pt->pg_info[l2_page_num].num_entries -= pte_count;
			if (pt->pg_info[l2_page_num].num_entries == 0) {
				/*
				 * Clear the L1 PTE pointing to the L2 PT
				 */
				if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
						      HW_MMU_COARSE_PAGE_SIZE))
					status = 0;
				else {
					status = -EPERM;
					spin_unlock(&pt->pg_lock);
					goto EXIT_LOOP;
				}
			}
			rem_bytes -= pte_count * PG_SIZE4K;
		} else
			status = -EPERM;

		spin_unlock(&pt->pg_lock);
		continue;
skip_coarse_page:
		/* va_curr aligned to pte_size? */
		/* pte_size = 1 MB or 16 MB */
		if (pte_size == 0 || rem_bytes < pte_size ||
		    va_curr & (pte_size - 1)) {
			status = -EPERM;
			break;
		}

		if (pte_size == HW_PAGE_SIZE1MB)
			numof4k_pages = 256;
		else
			numof4k_pages = 4096;
		temp = 0;
		/* Collect Physical addresses from VA */
		paddr = (pte_val & ~(pte_size - 1));
		while (temp++ < numof4k_pages) {
			if (pfn_valid(__phys_to_pfn(paddr))) {
				pg = PHYS_TO_PAGE(paddr);
				if (page_count(pg) < 1) {
					pr_info("DSPBRIDGE: UNMAP function: "
						"COUNT 0 FOR PA 0x%x, size = "
						"0x%x\n", paddr, ul_num_bytes);
					bad_page_dump(paddr, pg);
				} else {
					set_page_dirty(pg);
					page_cache_release(pg);
				}
			}
			paddr += HW_PAGE_SIZE4KB;
		}
		if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
			status = 0;
			rem_bytes -= pte_size;
			va_curr += pte_size;
		} else {
			status = -EPERM;
			goto EXIT_LOOP;
		}
	}
	/*
	 * It is better to flush the TLB here, so that any stale old entries
	 * get flushed
	 */
EXIT_LOOP:
	flush_all(dev_context);
	dev_dbg(bridge,
		"%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
		" rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
		pte_addr_l2, rem_bytes, rem_bytes_l2, status);
	return status;
}
1543 | ||
1544 | /* | |
1545 | * ======== user_va2_pa ======== | |
1546 | * Purpose: | |
1547 | * This function walks through the page tables to convert a userland | |
1548 | * virtual address to physical address | |
1549 | */ | |
1550 | static u32 user_va2_pa(struct mm_struct *mm, u32 address) | |
1551 | { | |
1552 | pgd_t *pgd; | |
1553 | pmd_t *pmd; | |
1554 | pte_t *ptep, pte; | |
1555 | ||
1556 | pgd = pgd_offset(mm, address); | |
1557 | if (!(pgd_none(*pgd) || pgd_bad(*pgd))) { | |
1558 | pmd = pmd_offset(pgd, address); | |
1559 | if (!(pmd_none(*pmd) || pmd_bad(*pmd))) { | |
1560 | ptep = pte_offset_map(pmd, address); | |
1561 | if (ptep) { | |
1562 | pte = *ptep; | |
1563 | if (pte_present(pte)) | |
1564 | return pte & PAGE_MASK; | |
1565 | } | |
1566 | } | |
1567 | } | |
1568 | ||
1569 | return 0; | |
1570 | } | |
1571 | ||
ac8a139a FC |
1572 | /* |
1573 | * ======== pte_update ======== | |
1574 | * This function calculates the optimum page-aligned addresses and sizes | |
1575 | * Caller must pass page-aligned values | |
1576 | */ | |
1577 | static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa, | |
1578 | u32 va, u32 size, | |
1579 | struct hw_mmu_map_attrs_t *map_attrs) | |
1580 | { | |
1581 | u32 i; | |
1582 | u32 all_bits; | |
1583 | u32 pa_curr = pa; | |
1584 | u32 va_curr = va; | |
1585 | u32 num_bytes = size; | |
1586 | struct bridge_dev_context *dev_context = dev_ctxt; | |
1587 | int status = 0; | |
1588 | u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB, | |
1589 | HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB | |
1590 | }; | |
1591 | ||
1592 | while (num_bytes && !status) { | |
1593 | /* To find the max. page size with which both PA & VA are | |
1594 | * aligned */ | |
1595 | all_bits = pa_curr | va_curr; | |
1596 | ||
1597 | for (i = 0; i < 4; i++) { | |
1598 | if ((num_bytes >= page_size[i]) && ((all_bits & | |
1599 | (page_size[i] - | |
1600 | 1)) == 0)) { | |
1601 | status = | |
1602 | pte_set(dev_context->pt_attrs, pa_curr, | |
1603 | va_curr, page_size[i], map_attrs); | |
1604 | pa_curr += page_size[i]; | |
1605 | va_curr += page_size[i]; | |
1606 | num_bytes -= page_size[i]; | |
1607 | /* Don't try smaller sizes. Hopefully we have | |
1608 | * reached an address aligned to a bigger page | |
1609 | * size */ | |
1610 | break; | |
1611 | } | |
1612 | } | |
1613 | } | |
1614 | ||
1615 | return status; | |
1616 | } | |
1617 | ||
1618 | /* | |
1619 | * ======== pte_set ======== | |
1620 | * This function calculates PTE address (MPU virtual) to be updated | |
1621 | * It also manages the L2 page tables | |
1622 | */ | |
1623 | static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va, | |
1624 | u32 size, struct hw_mmu_map_attrs_t *attrs) | |
1625 | { | |
1626 | u32 i; | |
1627 | u32 pte_val; | |
1628 | u32 pte_addr_l1; | |
1629 | u32 pte_size; | |
1630 | /* Base address of the PT that will be updated */ | |
1631 | u32 pg_tbl_va; | |
1632 | u32 l1_base_va; | |
1633 | /* Compiler warns that the next three variables might be used | |
1634 | * uninitialized in this function. Doesn't seem so. Working around, | |
1635 | * anyways. */ | |
1636 | u32 l2_base_va = 0; | |
1637 | u32 l2_base_pa = 0; | |
1638 | u32 l2_page_num = 0; | |
1639 | int status = 0; | |
1640 | ||
1641 | l1_base_va = pt->l1_base_va; | |
1642 | pg_tbl_va = l1_base_va; | |
1643 | if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) { | |
1644 | /* Find whether the L1 PTE points to a valid L2 PT */ | |
1645 | pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va); | |
1646 | if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) { | |
1647 | pte_val = *(u32 *) pte_addr_l1; | |
1648 | pte_size = hw_mmu_pte_size_l1(pte_val); | |
1649 | } else { | |
1650 | return -EPERM; | |
1651 | } | |
1652 | spin_lock(&pt->pg_lock); | |
1653 | if (pte_size == HW_MMU_COARSE_PAGE_SIZE) { | |
1654 | /* Get the L2 PA from the L1 PTE, and find | |
1655 | * corresponding L2 VA */ | |
1656 | l2_base_pa = hw_mmu_pte_coarse_l1(pte_val); | |
1657 | l2_base_va = | |
1658 | l2_base_pa - pt->l2_base_pa + pt->l2_base_va; | |
1659 | l2_page_num = | |
1660 | (l2_base_pa - | |
1661 | pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE; | |
1662 | } else if (pte_size == 0) { | |
1663 | /* L1 PTE is invalid. Allocate a L2 PT and | |
1664 | * point the L1 PTE to it */ | |
1665 | /* Find a free L2 PT. */ | |
1666 | for (i = 0; (i < pt->l2_num_pages) && | |
1667 | (pt->pg_info[i].num_entries != 0); i++) | |
859171ca | 1668 | ; |
ac8a139a FC |
1669 | if (i < pt->l2_num_pages) { |
1670 | l2_page_num = i; | |
1671 | l2_base_pa = pt->l2_base_pa + (l2_page_num * | |
1672 | HW_MMU_COARSE_PAGE_SIZE); | |
1673 | l2_base_va = pt->l2_base_va + (l2_page_num * | |
1674 | HW_MMU_COARSE_PAGE_SIZE); | |
1675 | /* Endianness attributes are ignored for | |
1676 | * HW_MMU_COARSE_PAGE_SIZE */ | |
1677 | status = | |
1678 | hw_mmu_pte_set(l1_base_va, l2_base_pa, va, | |
1679 | HW_MMU_COARSE_PAGE_SIZE, | |
1680 | attrs); | |
1681 | } else { | |
1682 | status = -ENOMEM; | |
1683 | } | |
1684 | } else { | |
1685 | /* Found valid L1 PTE of another size. | |
1686 | * Should not overwrite it. */ | |
1687 | status = -EPERM; | |
1688 | } | |
1689 | if (!status) { | |
1690 | pg_tbl_va = l2_base_va; | |
1691 | if (size == HW_PAGE_SIZE64KB) | |
1692 | pt->pg_info[l2_page_num].num_entries += 16; | |
1693 | else | |
1694 | pt->pg_info[l2_page_num].num_entries++; | |
1695 | dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum " | |
1696 | "%x, num_entries %x\n", l2_base_va, | |
1697 | l2_base_pa, l2_page_num, | |
1698 | pt->pg_info[l2_page_num].num_entries); | |
1699 | } | |
1700 | spin_unlock(&pt->pg_lock); | |
1701 | } | |
1702 | if (!status) { | |
1703 | dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n", | |
1704 | pg_tbl_va, pa, va, size); | |
1705 | dev_dbg(bridge, "PTE: endianism %x, element_size %x, " | |
1706 | "mixed_size %x\n", attrs->endianism, | |
1707 | attrs->element_size, attrs->mixed_size); | |
1708 | status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs); | |
1709 | } | |
1710 | ||
1711 | return status; | |
1712 | } | |
1713 | ||
1714 | /* Memory map kernel VA -- memory allocated with vmalloc */ | |
1715 | static int mem_map_vmalloc(struct bridge_dev_context *dev_context, | |
1716 | u32 ul_mpu_addr, u32 virt_addr, | |
1717 | u32 ul_num_bytes, | |
1718 | struct hw_mmu_map_attrs_t *hw_attrs) | |
1719 | { | |
1720 | int status = 0; | |
1721 | struct page *page[1]; | |
1722 | u32 i; | |
1723 | u32 pa_curr; | |
1724 | u32 pa_next; | |
1725 | u32 va_curr; | |
1726 | u32 size_curr; | |
1727 | u32 num_pages; | |
1728 | u32 pa; | |
1729 | u32 num_of4k_pages; | |
1730 | u32 temp = 0; | |
1731 | ||
1732 | /* | |
1733 | * Do Kernel va to pa translation. | |
1734 | * Combine physically contiguous regions to reduce TLBs. | |
1735 | * Pass the translated pa to pte_update. | |
1736 | */ | |
1737 | num_pages = ul_num_bytes / PAGE_SIZE; /* PAGE_SIZE = OS page size */ | |
1738 | i = 0; | |
1739 | va_curr = ul_mpu_addr; | |
1740 | page[0] = vmalloc_to_page((void *)va_curr); | |
1741 | pa_next = page_to_phys(page[0]); | |
1742 | while (!status && (i < num_pages)) { | |
1743 | /* | |
1744 | * Reuse pa_next from the previous iteraion to avoid | |
1745 | * an extra va2pa call | |
1746 | */ | |
1747 | pa_curr = pa_next; | |
1748 | size_curr = PAGE_SIZE; | |
1749 | /* | |
1750 | * If the next page is physically contiguous, | |
1751 | * map it with the current one by increasing | |
1752 | * the size of the region to be mapped | |
1753 | */ | |
1754 | while (++i < num_pages) { | |
1755 | page[0] = | |
1756 | vmalloc_to_page((void *)(va_curr + size_curr)); | |
1757 | pa_next = page_to_phys(page[0]); | |
1758 | ||
1759 | if (pa_next == (pa_curr + size_curr)) | |
1760 | size_curr += PAGE_SIZE; | |
1761 | else | |
1762 | break; | |
1763 | ||
1764 | } | |
1765 | if (pa_next == 0) { | |
1766 | status = -ENOMEM; | |
1767 | break; | |
1768 | } | |
1769 | pa = pa_curr; | |
1770 | num_of4k_pages = size_curr / HW_PAGE_SIZE4KB; | |
1771 | while (temp++ < num_of4k_pages) { | |
1772 | get_page(PHYS_TO_PAGE(pa)); | |
1773 | pa += HW_PAGE_SIZE4KB; | |
1774 | } | |
1775 | status = pte_update(dev_context, pa_curr, virt_addr + | |
1776 | (va_curr - ul_mpu_addr), size_curr, | |
1777 | hw_attrs); | |
1778 | va_curr += size_curr; | |
1779 | } | |
1780 | /* | |
1781 | * In any case, flush the TLB | |
1782 | * This is called from here instead from pte_update to avoid unnecessary | |
1783 | * repetition while mapping non-contiguous physical regions of a virtual | |
1784 | * region | |
1785 | */ | |
1786 | flush_all(dev_context); | |
1787 | dev_dbg(bridge, "%s status %x\n", __func__, status); | |
1788 | return status; | |
1789 | } | |
1790 | ||
999e07d6 ORL |
1791 | /* |
1792 | * ======== wait_for_start ======== | |
1793 | * Wait for the singal from DSP that it has started, or time out. | |
1794 | */ | |
1795 | bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr) | |
1796 | { | |
1797 | u16 timeout = TIHELEN_ACKTIMEOUT; | |
1798 | ||
1799 | /* Wait for response from board */ | |
b3c8aef0 | 1800 | while (__raw_readw(dw_sync_addr) && --timeout) |
999e07d6 ORL |
1801 | udelay(10); |
1802 | ||
5e768067 | 1803 | /* If timed out: return false */ |
999e07d6 ORL |
1804 | if (!timeout) { |
1805 | pr_err("%s: Timed out waiting DSP to Start\n", __func__); | |
5e768067 | 1806 | return false; |
999e07d6 | 1807 | } |
5e768067 | 1808 | return true; |
999e07d6 | 1809 | } |