Commit | Line | Data |
---|---|---|
999e07d6 ORL |
1 | /* |
2 | * tiomap.c | |
3 | * | |
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | |
5 | * | |
6 | * Processor Manager Driver for TI OMAP3430 EVM. | |
7 | * | |
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | |
9 | * | |
10 | * This package is free software; you can redistribute it and/or modify | |
11 | * it under the terms of the GNU General Public License version 2 as | |
12 | * published by the Free Software Foundation. | |
13 | * | |
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | |
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | |
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | |
17 | */ | |
18 | ||
82d4b477 FC |
19 | #include <plat/dsp.h> |
20 | ||
2094f12d | 21 | #include <linux/types.h> |
999e07d6 ORL |
22 | /* ----------------------------------- Host OS */ |
23 | #include <dspbridge/host_os.h> | |
24 | #include <linux/mm.h> | |
25 | #include <linux/mmzone.h> | |
999e07d6 ORL |
26 | |
27 | /* ----------------------------------- DSP/BIOS Bridge */ | |
999e07d6 ORL |
28 | #include <dspbridge/dbdefs.h> |
29 | ||
999e07d6 | 30 | /* ----------------------------------- OS Adaptation Layer */ |
999e07d6 ORL |
31 | #include <dspbridge/drv.h> |
32 | #include <dspbridge/sync.h> | |
33 | ||
58c1ceb1 FC |
34 | /* ------------------------------------ Hardware Abstraction Layer */ |
35 | #include <hw_defs.h> | |
36 | #include <hw_mmu.h> | |
37 | ||
999e07d6 ORL |
38 | /* ----------------------------------- Link Driver */ |
39 | #include <dspbridge/dspdefs.h> | |
40 | #include <dspbridge/dspchnl.h> | |
41 | #include <dspbridge/dspdeh.h> | |
42 | #include <dspbridge/dspio.h> | |
43 | #include <dspbridge/dspmsg.h> | |
44 | #include <dspbridge/pwr.h> | |
45 | #include <dspbridge/io_sm.h> | |
46 | ||
47 | /* ----------------------------------- Platform Manager */ | |
48 | #include <dspbridge/dev.h> | |
49 | #include <dspbridge/dspapi.h> | |
677f2ded | 50 | #include <dspbridge/dmm.h> |
999e07d6 ORL |
51 | #include <dspbridge/wdt.h> |
52 | ||
53 | /* ----------------------------------- Local */ | |
54 | #include "_tiomap.h" | |
55 | #include "_tiomap_pwr.h" | |
56 | #include "tiomap_io.h" | |
57 | ||
/* Offset in shared mem to write to in order to synchronize start with DSP */
#define SHMSYNCOFFSET 4		/* GPP byte offset */

#define BUFFERSIZE 1024

/* Loop count used while waiting for the DSP to acknowledge start-up */
#define TIHELEN_ACKTIMEOUT 10000

/* Address masks selecting the page-aligned base for each DSP MMU page size */
#define MMU_SECTION_ADDR_MASK    0xFFF00000	/* 1 MB section */
#define MMU_SSECTION_ADDR_MASK   0xFF000000	/* 16 MB supersection */
#define MMU_LARGE_PAGE_MASK      0xFFFF0000	/* 64 KB large page */
#define MMU_SMALL_PAGE_MASK      0xFFFFF000	/* 4 KB small page */
/* IVA2 boot address register only holds bits [31:10] (1 KB aligned) */
#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
#define PAGES_II_LVL_TABLE   512
#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT)

/*
 * This is a totally ugly layer violation, but needed until
 * omap_ctrl_set_dsp_boot*() are provided.
 */
#define OMAP3_IVA2_BOOTMOD_IDLE 1
#define OMAP2_CONTROL_GENERAL 0x270
#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)

/* Map a SCM register offset to its kernel virtual address */
#define OMAP343X_CTRL_REGADDR(reg) \
	OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
84 | ||
85 | ||
999e07d6 | 86 | /* Forward Declarations: */ |
c8c1ad8c RS |
87 | static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt); |
88 | static int bridge_brd_read(struct bridge_dev_context *dev_ctxt, | |
e6bf74f0 | 89 | u8 *host_buff, |
b301c858 | 90 | u32 dsp_addr, u32 ul_num_bytes, |
5e2eae57 | 91 | u32 mem_type); |
c8c1ad8c | 92 | static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, |
b301c858 | 93 | u32 dsp_addr); |
c8c1ad8c | 94 | static int bridge_brd_status(struct bridge_dev_context *dev_ctxt, |
a5120278 | 95 | int *board_state); |
c8c1ad8c RS |
96 | static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt); |
97 | static int bridge_brd_write(struct bridge_dev_context *dev_ctxt, | |
9d7d0a52 | 98 | u8 *host_buff, |
b301c858 | 99 | u32 dsp_addr, u32 ul_num_bytes, |
5e2eae57 | 100 | u32 mem_type); |
e6890692 | 101 | static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt, |
5e2eae57 | 102 | u32 brd_state); |
e6890692 | 103 | static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt, |
5e2eae57 RS |
104 | u32 dsp_dest_addr, u32 dsp_src_addr, |
105 | u32 ul_num_bytes, u32 mem_type); | |
c8c1ad8c | 106 | static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, |
9d7d0a52 | 107 | u8 *host_buff, u32 dsp_addr, |
5e2eae57 | 108 | u32 ul_num_bytes, u32 mem_type); |
d0b345f3 FC |
109 | static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt, |
110 | u32 ul_mpu_addr, u32 virt_addr, | |
111 | u32 ul_num_bytes, u32 ul_map_attr, | |
112 | struct page **mapped_pages); | |
113 | static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt, | |
50ad26f4 | 114 | u32 virt_addr, u32 ul_num_bytes); |
e6bf74f0 | 115 | static int bridge_dev_create(struct bridge_dev_context |
fb6aabb7 | 116 | **dev_cntxt, |
999e07d6 | 117 | struct dev_object *hdev_obj, |
9d7d0a52 | 118 | struct cfg_hostres *config_param); |
999e07d6 | 119 | static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, |
e6bf74f0 | 120 | u32 dw_cmd, void *pargs); |
c8c1ad8c | 121 | static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt); |
d0b345f3 | 122 | static u32 user_va2_pa(struct mm_struct *mm, u32 address); |
ac8a139a FC |
123 | static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa, |
124 | u32 va, u32 size, | |
125 | struct hw_mmu_map_attrs_t *map_attrs); | |
126 | static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va, | |
127 | u32 size, struct hw_mmu_map_attrs_t *attrs); | |
128 | static int mem_map_vmalloc(struct bridge_dev_context *dev_context, | |
129 | u32 ul_mpu_addr, u32 virt_addr, | |
130 | u32 ul_num_bytes, | |
131 | struct hw_mmu_map_attrs_t *hw_attrs); | |
132 | ||
999e07d6 ORL |
133 | bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr); |
134 | ||
ac8a139a FC |
/* ----------------------------------- Globals */

/* Attributes of L2 page tables for DSP MMU */
struct page_info {
	u32 num_entries;	/* Number of valid PTEs in the L2 PT */
};

/*
 * Attributes used to manage the DSP MMU page tables.
 * Both levels keep the raw (possibly unaligned) allocation alongside the
 * aligned base actually programmed into the MMU, so the original block can
 * be freed correctly later.
 */
struct pg_table_attrs {
	spinlock_t pg_lock;	/* Critical section object handle */

	u32 l1_base_pa;		/* Physical address of the L1 PT */
	u32 l1_base_va;		/* Virtual address of the L1 PT */
	u32 l1_size;		/* Size of the L1 PT */
	u32 l1_tbl_alloc_pa;
	/* Physical address of Allocated mem for L1 table. May not be aligned */
	u32 l1_tbl_alloc_va;
	/* Virtual address of Allocated mem for L1 table. May not be aligned */
	u32 l1_tbl_alloc_sz;
	/* Size of consistent memory allocated for L1 table.
	 * May not be aligned */

	u32 l2_base_pa;		/* Physical address of the L2 PT */
	u32 l2_base_va;		/* Virtual address of the L2 PT */
	u32 l2_size;		/* Size of the L2 PT */
	u32 l2_tbl_alloc_pa;
	/* Physical address of Allocated mem for L2 table. May not be aligned */
	u32 l2_tbl_alloc_va;
	/* Virtual address of Allocated mem for L2 table. May not be aligned */
	u32 l2_tbl_alloc_sz;
	/* Size of consistent memory allocated for L2 table.
	 * May not be aligned */

	u32 l2_num_pages;	/* Number of allocated L2 PT */
	/* Array [l2_num_pages] of L2 PT info structs */
	struct page_info *pg_info;
};
172 | ||
999e07d6 ORL |
/*
 * This Bridge driver's function interface table.
 * Handed to the Bridge API layer by bridge_drv_entry(); entry order must
 * match struct bridge_drv_interface.
 */
static struct bridge_drv_interface drv_interface_fxns = {
	/* Bridge API ver. for which this bridge driver is built. */
	BRD_API_MAJOR_VERSION,
	BRD_API_MINOR_VERSION,
	/* Device create/destroy/control (defined in this file) */
	bridge_dev_create,
	bridge_dev_destroy,
	bridge_dev_ctrl,
	/* Board control: load-state, start, stop, status, memory access */
	bridge_brd_monitor,
	bridge_brd_start,
	bridge_brd_stop,
	bridge_brd_status,
	bridge_brd_read,
	bridge_brd_write,
	bridge_brd_set_state,
	bridge_brd_mem_copy,
	bridge_brd_mem_write,
	bridge_brd_mem_map,
	bridge_brd_mem_un_map,
	/* The following CHNL functions are provided by chnl_io.lib: */
	bridge_chnl_create,
	bridge_chnl_destroy,
	bridge_chnl_open,
	bridge_chnl_close,
	bridge_chnl_add_io_req,
	bridge_chnl_get_ioc,
	bridge_chnl_cancel_io,
	bridge_chnl_flush_io,
	bridge_chnl_get_info,
	bridge_chnl_get_mgr_info,
	bridge_chnl_idle,
	bridge_chnl_register_notify,
	/* The following IO functions are provided by chnl_io.lib: */
	bridge_io_create,
	bridge_io_destroy,
	bridge_io_on_loaded,
	bridge_io_get_proc_load,
	/* The following msg_ctrl functions are provided by chnl_io.lib: */
	bridge_msg_create,
	bridge_msg_create_queue,
	bridge_msg_delete,
	bridge_msg_delete_queue,
	bridge_msg_get,
	bridge_msg_put,
	bridge_msg_register_notify,
	bridge_msg_set_queue_id,
};
222 | ||
85d139c9 ORL |
/* Notifier used to deliver incoming DSP mailbox messages to io_mbox_msg() */
static struct notifier_block dsp_mbox_notifier = {
	.notifier_call = io_mbox_msg,
};
226 | ||
ac8a139a FC |
/*
 * Flush every entry from the DSP MMU TLB.
 * A hibernating DSP cannot service the flush, so wake it up first.
 */
static inline void flush_all(struct bridge_dev_context *dev_context)
{
	if (dev_context->brd_state == BRD_DSP_HIBERNATION ||
	    dev_context->brd_state == BRD_HIBERNATION)
		wake_dsp(dev_context, NULL);

	hw_mmu_tlb_flush_all(dev_context->dsp_mmu_base);
}
235 | ||
/*
 * Report a struct page with an unexpected zero refcount seen while mapping
 * physical address @pa, then dump a backtrace for post-mortem analysis.
 */
static void bad_page_dump(u32 pa, struct page *pg)
{
	pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
	pr_emerg("Bad page state in process '%s'\n"
		 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		 "Backtrace:\n",
		 current->comm, pg, (int)(2 * sizeof(unsigned long)),
		 (unsigned long)pg->flags, pg->mapping,
		 page_mapcount(pg), page_count(pg));
	dump_stack();
}
247 | ||
999e07d6 ORL |
248 | /* |
249 | * ======== bridge_drv_entry ======== | |
250 | * purpose: | |
251 | * Bridge Driver entry point. | |
252 | */ | |
e6bf74f0 | 253 | void bridge_drv_entry(struct bridge_drv_interface **drv_intf, |
9d7d0a52 | 254 | const char *driver_file_name) |
999e07d6 | 255 | { |
999e07d6 | 256 | if (strcmp(driver_file_name, "UMA") == 0) |
fb6aabb7 | 257 | *drv_intf = &drv_interface_fxns; |
999e07d6 ORL |
258 | else |
259 | dev_dbg(bridge, "%s Unknown Bridge file name", __func__); | |
260 | ||
261 | } | |
262 | ||
/*
 * ======== bridge_brd_monitor ========
 * purpose:
 *      This bridge_brd_monitor puts DSP into a Loadable state.
 *      i.e Application can load and start the device.
 *
 * Preconditions:
 *      Device in 'OFF' state.
 */
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
{
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 temp;
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	/* Check the IVA2 power-domain state (bit 1 set => domain is ON) */
	temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
					OMAP_POWERSTATEST_MASK;
	if (!(temp & 0x02)) {
		/* IVA2 is not in ON state */
		/* Read and set PM_PWSTCTRL_IVA2  to ON */
		(*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
			PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
		/* Set the SW supervised state transition */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);

		/* Wait until the state has moved to ON.
		 * NOTE(review): unbounded busy-wait; relies on the PRCM
		 * completing the transition - confirm no timeout is needed. */
		while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
						OMAP_INTRANSITION_MASK)
			;
		/* Disable Automatic transition */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
	}
	/* Release RST2 so the DSP subsystem peripherals come out of reset */
	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
	dsp_clk_enable(DSP_CLK_IVA2);

	/* set the device state to IDLE */
	dev_context->brd_state = BRD_IDLE;

	return 0;
}
307 | ||
308 | /* | |
309 | * ======== bridge_brd_read ======== | |
310 | * purpose: | |
311 | * Reads buffers for DSP memory. | |
312 | */ | |
e6890692 | 313 | static int bridge_brd_read(struct bridge_dev_context *dev_ctxt, |
e6bf74f0 | 314 | u8 *host_buff, u32 dsp_addr, |
5e2eae57 | 315 | u32 ul_num_bytes, u32 mem_type) |
999e07d6 ORL |
316 | { |
317 | int status = 0; | |
e6890692 | 318 | struct bridge_dev_context *dev_context = dev_ctxt; |
999e07d6 | 319 | u32 offset; |
b4da7fc3 | 320 | u32 dsp_base_addr = dev_ctxt->dsp_base_addr; |
999e07d6 | 321 | |
b4da7fc3 | 322 | if (dsp_addr < dev_context->dsp_start_add) { |
999e07d6 ORL |
323 | status = -EPERM; |
324 | return status; | |
325 | } | |
326 | /* change here to account for the 3 bands of the DSP internal memory */ | |
b4da7fc3 | 327 | if ((dsp_addr - dev_context->dsp_start_add) < |
5108de0a | 328 | dev_context->internal_size) { |
b4da7fc3 | 329 | offset = dsp_addr - dev_context->dsp_start_add; |
999e07d6 | 330 | } else { |
aa09b091 | 331 | status = read_ext_dsp_data(dev_context, host_buff, dsp_addr, |
5e2eae57 | 332 | ul_num_bytes, mem_type); |
999e07d6 ORL |
333 | return status; |
334 | } | |
335 | /* copy the data from DSP memory, */ | |
aa09b091 | 336 | memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes); |
999e07d6 ORL |
337 | return status; |
338 | } | |
339 | ||
340 | /* | |
341 | * ======== bridge_brd_set_state ======== | |
342 | * purpose: | |
343 | * This routine updates the Board status. | |
344 | */ | |
e6890692 | 345 | static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt, |
5e2eae57 | 346 | u32 brd_state) |
999e07d6 ORL |
347 | { |
348 | int status = 0; | |
e6890692 | 349 | struct bridge_dev_context *dev_context = dev_ctxt; |
999e07d6 | 350 | |
b4da7fc3 | 351 | dev_context->brd_state = brd_state; |
999e07d6 ORL |
352 | return status; |
353 | } | |
354 | ||
/*
 * ======== bridge_brd_start ========
 * purpose:
 *      Initializes DSP MMU and Starts DSP.
 *
 * Programs the boot address/mode, pulses RST2, sets up the DSP MMU
 * (static TLB entries, TTB, TWL), enables BIOS/load-monitor timer clocks,
 * publishes the DSP clock rate, acquires the mailbox, releases RST1 and
 * waits for the DSP to clear the shared-memory sync word.
 *
 * Preconditions:
 *  a) DSP domain is 'ACTIVE'.
 *  b) DSP_RST1 is asserted.
 *  b) DSP_RST2 is released.
 */
static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
			    u32 dsp_addr)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 dw_sync_addr = 0;
	u32 ul_shm_base;	/* Gpp Phys SM base addr(byte) */
	u32 ul_shm_base_virt;	/* Dsp Virt SM base addr */
	u32 ul_tlb_base_virt;	/* Base of MMU TLB entry */
	/* Offset of shm_base_virt from tlb_base_virt */
	u32 ul_shm_offset_virt;
	s32 entry_ndx;
	s32 itmp_entry_ndx = 0;	/* DSP-MMU TLB entry base address */
	struct cfg_hostres *resources = NULL;
	u32 temp;
	u32 ul_dsp_clk_rate;
	u32 ul_dsp_clk_addr;
	u32 ul_bios_gp_timer;
	u32 clk_cmd;
	struct io_mgr *hio_mgr;
	u32 ul_load_monitor_timer;
	u32 wdt_en = 0;
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	/* The device context contains all the mmu setup info from when the
	 * last dsp base image was loaded. The first entry is always
	 * SHMMEM base. */
	/* Get SHM_BEG - convert to byte address */
	(void)dev_get_symbol(dev_context->dev_obj, SHMBASENAME,
			     &ul_shm_base_virt);
	ul_shm_base_virt *= DSPWORDSIZE;
	/* DSP Virtual address */
	ul_tlb_base_virt = dev_context->atlb_entry[0].dsp_va;
	ul_shm_offset_virt =
	    ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
	/* Kernel logical address */
	ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt;

	/* 2nd wd is used as sync field */
	dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
	/* Write a signature into the shm base + offset; this will
	 * get cleared when the DSP program starts. */
	if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
		pr_err("%s: Illegal SM base\n", __func__);
		status = -EPERM;
	} else
		__raw_writel(0xffffffff, dw_sync_addr);

	if (!status) {
		resources = dev_context->resources;
		if (!resources)
			status = -EPERM;

		/* Assert RST1 i.e only the RST only for DSP megacell */
		if (!status) {
			(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
					OMAP3430_RST1_IVA2_MASK,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
			/* Mask address with 1K for compatibility */
			__raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
					OMAP343X_CTRL_REGADDR(
					OMAP343X_CONTROL_IVA2_BOOTADDR));
			/*
			 * Set bootmode to self loop if dsp_debug flag is true
			 */
			__raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
					OMAP343X_CTRL_REGADDR(
					OMAP343X_CONTROL_IVA2_BOOTMOD));
		}
	}
	if (!status) {
		/* Reset and Unreset the RST2, so that BOOTADDR is copied to
		 * IVA2 SYSC register */
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
			OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD,
			OMAP2_RM_RSTCTRL);
		udelay(100);
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
		udelay(100);

		/* Disable the DSP MMU */
		hw_mmu_disable(resources->dmmu_base);
		/* Disable TWL */
		hw_mmu_twl_disable(resources->dmmu_base);

		/* Only make TLB entry if both addresses are non-zero */
		for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
		     entry_ndx++) {
			struct bridge_ioctl_extproc *e =
					&dev_context->atlb_entry[entry_ndx];
			struct hw_mmu_map_attrs_t map_attrs = {
				.endianism = e->endianism,
				.element_size = e->elem_size,
				.mixed_size = e->mixed_mode,
			};

			if (!e->gpp_pa || !e->dsp_va)
				continue;

			dev_dbg(bridge,
				"MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
				itmp_entry_ndx,
				e->gpp_pa,
				e->dsp_va,
				e->size);

			hw_mmu_tlb_add(dev_context->dsp_mmu_base,
				       e->gpp_pa,
				       e->dsp_va,
				       e->size,
				       itmp_entry_ndx,
				       &map_attrs, 1, 1);

			itmp_entry_ndx++;
		}
	}

	/* Lock the above TLB entries and get the BIOS and load monitor timer
	 * information */
	if (!status) {
		hw_mmu_num_locked_set(resources->dmmu_base, itmp_entry_ndx);
		hw_mmu_victim_num_set(resources->dmmu_base, itmp_entry_ndx);
		hw_mmu_ttb_set(resources->dmmu_base,
			       dev_context->pt_attrs->l1_base_pa);
		hw_mmu_twl_enable(resources->dmmu_base);
		/* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */

		temp = __raw_readl((resources->dmmu_base) + 0x10);
		temp = (temp & 0xFFFFFFEF) | 0x11;
		__raw_writel(temp, (resources->dmmu_base) + 0x10);

		/* Let the DSP MMU run */
		hw_mmu_enable(resources->dmmu_base);

		/* Enable the BIOS clock.
		 * NOTE(review): if these symbol lookups fail the timer
		 * variables may stay uninitialized, yet they are compared
		 * against the 0xFFFF sentinel below - confirm dev_get_symbol
		 * always writes its output on the supported images. */
		(void)dev_get_symbol(dev_context->dev_obj,
				     BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
		(void)dev_get_symbol(dev_context->dev_obj,
				     BRIDGEINIT_LOADMON_GPTIMER,
				     &ul_load_monitor_timer);
	}

	if (!status) {
		if (ul_load_monitor_timer != 0xFFFF) {
			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
			    ul_load_monitor_timer;
			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
		} else {
			dev_dbg(bridge, "Not able to get the symbol for Load "
				"Monitor Timer\n");
		}
	}

	if (!status) {
		if (ul_bios_gp_timer != 0xFFFF) {
			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
			    ul_bios_gp_timer;
			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
		} else {
			dev_dbg(bridge,
				"Not able to get the symbol for BIOS Timer\n");
		}
	}

	if (!status) {
		/* Set the DSP clock rate */
		(void)dev_get_symbol(dev_context->dev_obj,
				     "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
		/*Set Autoidle Mode for IVA2 PLL */
		(*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);

		if ((unsigned int *)ul_dsp_clk_addr != NULL) {
			/* Get the clock rate */
			ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
			dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x \n",
				__func__, ul_dsp_clk_rate);
			(void)bridge_brd_write(dev_context,
					       (u8 *) &ul_dsp_clk_rate,
					       ul_dsp_clk_addr, sizeof(u32), 0);
		}
		/*
		 * Enable Mailbox events and also drain any pending
		 * stale messages.
		 */
		dev_context->mbox = omap_mbox_get("dsp", &dsp_mbox_notifier);
		if (IS_ERR(dev_context->mbox)) {
			dev_context->mbox = NULL;
			pr_err("%s: Failed to get dsp mailbox handle\n",
			       __func__);
			status = -EPERM;
		}

	}
	if (!status) {
		/*PM_IVA2GRPSEL_PER = 0xC0;*/
		temp = readl(resources->per_pm_base + 0xA8);
		temp = (temp & 0xFFFFFF30) | 0xC0;
		writel(temp, resources->per_pm_base + 0xA8);

		/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
		temp = readl(resources->per_pm_base + 0xA4);
		temp = (temp & 0xFFFFFF3F);
		writel(temp, resources->per_pm_base + 0xA4);
		/*CM_SLEEPDEP_PER |= 0x04; */
		temp = readl(resources->per_base + 0x44);
		temp = (temp & 0xFFFFFFFB) | 0x04;
		writel(temp, resources->per_base + 0x44);

		/*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);

		/* Let DSP go */
		dev_dbg(bridge, "%s Unreset\n", __func__);
		/* Enable DSP MMU Interrupts */
		hw_mmu_event_enable(resources->dmmu_base,
				    HW_MMU_ALL_INTERRUPTS);
		/* release the RST1, DSP starts executing now .. */
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

		dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
		dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr);
		/* In debug mode spin until the DSP clears the signature */
		if (dsp_debug)
			while (__raw_readw(dw_sync_addr))
				;

		/* Wait for DSP to clear word in shared memory */
		/* Read the Location */
		if (!wait_for_start(dev_context, dw_sync_addr))
			status = -ETIMEDOUT;

		dev_get_symbol(dev_context->dev_obj, "_WDT_enable", &wdt_en);
		if (wdt_en) {
			/* Start wdt */
			dsp_wdt_sm_set((void *)ul_shm_base);
			dsp_wdt_enable(true);
		}

		status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
		if (hio_mgr) {
			io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
			/* Write the synchronization bit to indicate the
			 * completion of OPP table update to DSP
			 */
			__raw_writel(0XCAFECAFE, dw_sync_addr);

			/* update board state */
			dev_context->brd_state = BRD_RUNNING;
			/* (void)chnlsm_enable_interrupt(dev_context); */
		} else {
			dev_context->brd_state = BRD_UNKNOWN;
		}
	}
	return status;
}
622 | ||
/*
 * ======== bridge_brd_stop ========
 * purpose:
 *      Puts DSP in self loop.
 *
 * Drives IVA2 to standby, asserts its resets, clears the MMU page tables,
 * releases the mailbox and disables the DSP clocks.
 *
 * Preconditions :
 *  a) None
 */
static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct pg_table_attrs *pt_attrs;
	u32 dsp_pwr_state;
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	/* Nothing to do if the board is already stopped */
	if (dev_context->brd_state == BRD_STOPPED)
		return status;

	/* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode,
	 * before turning off the clocks.. This is to ensure that there are no
	 * pending L3 or other transactions from IVA2 */
	dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
					OMAP2_PM_PWSTST) &
					OMAP_POWERSTATEST_MASK;
	if (dsp_pwr_state != PWRDM_POWER_OFF) {
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
		/* Ask the DSP (via mailbox) to go idle, then give it time */
		sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
		mdelay(10);

		/* IVA2 is not in OFF state */
		/* Set PM_PWSTCTRL_IVA2 to OFF */
		(*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
			PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
		/* Set the SW supervised state transition for Sleep */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
	}
	udelay(10);
	/* Release the Ext Base virtual Address as the next DSP Program
	 * may have a different load address */
	if (dev_context->dsp_ext_base_addr)
		dev_context->dsp_ext_base_addr = 0;

	dev_context->brd_state = BRD_STOPPED;	/* update board state */

	dsp_wdt_enable(false);

	/* This is a good place to clear the MMU page tables as well */
	if (dev_context->pt_attrs) {
		pt_attrs = dev_context->pt_attrs;
		memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
		memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
		memset((u8 *) pt_attrs->pg_info, 0x00,
		       (pt_attrs->l2_num_pages * sizeof(struct page_info)));
	}
	/* Disable the mailbox interrupts */
	if (dev_context->mbox) {
		omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
		omap_mbox_put(dev_context->mbox, &dsp_mbox_notifier);
		dev_context->mbox = NULL;
	}
	/* Reset IVA2 clocks*/
	(*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK |
			OMAP3430_RST2_IVA2_MASK | OMAP3430_RST3_IVA2_MASK,
			OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	dsp_clock_disable_all(dev_context->dsp_per_clks);
	dsp_clk_disable(DSP_CLK_IVA2);

	return status;
}
695 | ||
999e07d6 ORL |
696 | /* |
697 | * ======== bridge_brd_status ======== | |
698 | * Returns the board status. | |
699 | */ | |
e6890692 | 700 | static int bridge_brd_status(struct bridge_dev_context *dev_ctxt, |
a5120278 | 701 | int *board_state) |
999e07d6 | 702 | { |
e6890692 | 703 | struct bridge_dev_context *dev_context = dev_ctxt; |
b4da7fc3 | 704 | *board_state = dev_context->brd_state; |
999e07d6 ORL |
705 | return 0; |
706 | } | |
707 | ||
708 | /* | |
709 | * ======== bridge_brd_write ======== | |
710 | * Copies the buffers to DSP internal or external memory. | |
711 | */ | |
e6890692 | 712 | static int bridge_brd_write(struct bridge_dev_context *dev_ctxt, |
9d7d0a52 | 713 | u8 *host_buff, u32 dsp_addr, |
5e2eae57 | 714 | u32 ul_num_bytes, u32 mem_type) |
999e07d6 ORL |
715 | { |
716 | int status = 0; | |
e6890692 | 717 | struct bridge_dev_context *dev_context = dev_ctxt; |
999e07d6 | 718 | |
b4da7fc3 | 719 | if (dsp_addr < dev_context->dsp_start_add) { |
999e07d6 ORL |
720 | status = -EPERM; |
721 | return status; | |
722 | } | |
b4da7fc3 | 723 | if ((dsp_addr - dev_context->dsp_start_add) < |
5108de0a | 724 | dev_context->internal_size) { |
aa09b091 | 725 | status = write_dsp_data(dev_ctxt, host_buff, dsp_addr, |
5e2eae57 | 726 | ul_num_bytes, mem_type); |
999e07d6 | 727 | } else { |
aa09b091 | 728 | status = write_ext_dsp_data(dev_context, host_buff, dsp_addr, |
5e2eae57 | 729 | ul_num_bytes, mem_type, false); |
999e07d6 ORL |
730 | } |
731 | ||
732 | return status; | |
733 | } | |
734 | ||
/*
 * ======== bridge_dev_create ========
 * Creates a driver object. Puts DSP in self loop.
 *
 * Allocates the bridge_dev_context, builds the L1/L2 MMU page tables
 * (physically contiguous, L1 aligned to its own size), and stops the board.
 * On success *dev_cntxt receives the new context; on failure all partial
 * allocations are released and a negative errno is returned.
 */
static int bridge_dev_create(struct bridge_dev_context
			     **dev_cntxt,
			     struct dev_object *hdev_obj,
			     struct cfg_hostres *config_param)
{
	int status = 0;
	struct bridge_dev_context *dev_context = NULL;
	s32 entry_ndx;
	struct cfg_hostres *resources = config_param;
	struct pg_table_attrs *pt_attrs;
	u32 pg_tbl_pa;
	u32 pg_tbl_va;
	u32 align_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/* Allocate and initialize a data structure to contain the bridge driver
	 * state, which becomes the context for later calls into this driver */
	dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
	if (!dev_context) {
		status = -ENOMEM;
		goto func_end;
	}

	dev_context->dsp_start_add = (u32) OMAP_GEM_BASE;
	dev_context->self_loop = (u32) NULL;
	dev_context->dsp_per_clks = 0;
	dev_context->internal_size = OMAP_DSP_SIZE;
	/* Clear dev context MMU table entries.
	 * These get set on bridge_io_on_loaded() call after program loaded. */
	for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
		dev_context->atlb_entry[entry_ndx].gpp_pa =
		    dev_context->atlb_entry[entry_ndx].dsp_va = 0;
	}
	dev_context->dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
							      (config_param->
							       mem_base
							       [3]),
							      config_param->
							      mem_length
							      [3]);
	/* NOTE(review): -EPERM set here is not returned immediately; the
	 * page-table setup below still runs before status is honored. */
	if (!dev_context->dsp_base_addr)
		status = -EPERM;

	pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
	if (pt_attrs != NULL) {
		pt_attrs->l1_size = SZ_16K;	/* 4096 entries of 32 bits */
		align_size = pt_attrs->l1_size;
		/* Align sizes are expected to be power of 2 */
		/* we like to get aligned on L1 table size */
		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
						     align_size, &pg_tbl_pa);

		/* Check if the PA is aligned for us */
		if ((pg_tbl_pa) & (align_size - 1)) {
			/* PA not aligned to page table size ,
			 * try with more allocation and align */
			mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
					  pt_attrs->l1_size);
			/* we like to get aligned on L1 table size */
			pg_tbl_va =
			    (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
						     align_size, &pg_tbl_pa);
			/* We should be able to get aligned table now */
			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
			/* Align the PA to the next 'align' boundary */
			pt_attrs->l1_base_pa =
			    ((pg_tbl_pa) +
			     (align_size - 1)) & (~(align_size - 1));
			pt_attrs->l1_base_va =
			    pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
		} else {
			/* We got aligned PA, cool */
			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
			pt_attrs->l1_base_pa = pg_tbl_pa;
			pt_attrs->l1_base_va = pg_tbl_va;
		}
		if (pt_attrs->l1_base_va)
			memset((u8 *) pt_attrs->l1_base_va, 0x00,
			       pt_attrs->l1_size);

		/* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM +
		 * L4 pages */
		pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
		pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
		    pt_attrs->l2_num_pages;
		align_size = 4;	/* Make it u32 aligned */
		/* we like to get aligned on L1 table size */
		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
						     align_size, &pg_tbl_pa);
		pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
		pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
		pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
		pt_attrs->l2_base_pa = pg_tbl_pa;
		pt_attrs->l2_base_va = pg_tbl_va;

		if (pt_attrs->l2_base_va)
			memset((u8 *) pt_attrs->l2_base_va, 0x00,
			       pt_attrs->l2_size);

		/* Per-L2-page bookkeeping (entry counts), checked below. */
		pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
					    sizeof(struct page_info), GFP_KERNEL);
		dev_dbg(bridge,
			"L1 pa %x, va %x, size %x\n L2 pa %x, va "
			"%x, size %x\n", pt_attrs->l1_base_pa,
			pt_attrs->l1_base_va, pt_attrs->l1_size,
			pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
			pt_attrs->l2_size);
		dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
			pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
	}
	/* All three page-table allocations must have succeeded. */
	if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
	    (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
		dev_context->pt_attrs = pt_attrs;
	else
		status = -ENOMEM;

	if (!status) {
		spin_lock_init(&pt_attrs->pg_lock);
		/* NOTE(review): drv_datap is dereferenced without a NULL
		 * check — confirm dev_get_drvdata(bridge) cannot fail here. */
		dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;

		/* Set the Clock Divisor for the DSP module */
		udelay(5);
		/* MMU address is obtained from the host
		 * resources struct */
		dev_context->dsp_mmu_base = resources->dmmu_base;
	}
	if (!status) {
		dev_context->dev_obj = hdev_obj;
		/* Store current board state. */
		dev_context->brd_state = BRD_UNKNOWN;
		dev_context->resources = resources;
		dsp_clk_enable(DSP_CLK_IVA2);
		bridge_brd_stop(dev_context);
		/* Return ptr to our device state to the DSP API for storage */
		*dev_cntxt = dev_context;
	} else {
		/* Failure: unwind page tables and the context allocation. */
		if (pt_attrs != NULL) {
			kfree(pt_attrs->pg_info);

			if (pt_attrs->l2_tbl_alloc_va) {
				mem_free_phys_mem((void *)
						  pt_attrs->l2_tbl_alloc_va,
						  pt_attrs->l2_tbl_alloc_pa,
						  pt_attrs->l2_tbl_alloc_sz);
			}
			if (pt_attrs->l1_tbl_alloc_va) {
				mem_free_phys_mem((void *)
						  pt_attrs->l1_tbl_alloc_va,
						  pt_attrs->l1_tbl_alloc_pa,
						  pt_attrs->l1_tbl_alloc_sz);
			}
		}
		kfree(pt_attrs);
		kfree(dev_context);
	}
func_end:
	return status;
}
901 | ||
902 | /* | |
903 | * ======== bridge_dev_ctrl ======== | |
904 | * Receives device specific commands. | |
905 | */ | |
906 | static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, | |
e6bf74f0 | 907 | u32 dw_cmd, void *pargs) |
999e07d6 ORL |
908 | { |
909 | int status = 0; | |
910 | struct bridge_ioctl_extproc *pa_ext_proc = | |
911 | (struct bridge_ioctl_extproc *)pargs; | |
912 | s32 ndx; | |
913 | ||
914 | switch (dw_cmd) { | |
915 | case BRDIOCTL_CHNLREAD: | |
916 | break; | |
917 | case BRDIOCTL_CHNLWRITE: | |
918 | break; | |
919 | case BRDIOCTL_SETMMUCONFIG: | |
920 | /* store away dsp-mmu setup values for later use */ | |
921 | for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++) | |
922 | dev_context->atlb_entry[ndx] = *pa_ext_proc; | |
923 | break; | |
924 | case BRDIOCTL_DEEPSLEEP: | |
925 | case BRDIOCTL_EMERGENCYSLEEP: | |
926 | /* Currently only DSP Idle is supported Need to update for | |
927 | * later releases */ | |
928 | status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs); | |
929 | break; | |
930 | case BRDIOCTL_WAKEUP: | |
931 | status = wake_dsp(dev_context, pargs); | |
932 | break; | |
933 | case BRDIOCTL_CLK_CTRL: | |
934 | status = 0; | |
935 | /* Looking For Baseport Fix for Clocks */ | |
936 | status = dsp_peripheral_clk_ctrl(dev_context, pargs); | |
937 | break; | |
938 | case BRDIOCTL_PWR_HIBERNATE: | |
939 | status = handle_hibernation_from_dsp(dev_context); | |
940 | break; | |
941 | case BRDIOCTL_PRESCALE_NOTIFY: | |
942 | status = pre_scale_dsp(dev_context, pargs); | |
943 | break; | |
944 | case BRDIOCTL_POSTSCALE_NOTIFY: | |
945 | status = post_scale_dsp(dev_context, pargs); | |
946 | break; | |
947 | case BRDIOCTL_CONSTRAINT_REQUEST: | |
948 | status = handle_constraints_set(dev_context, pargs); | |
949 | break; | |
950 | default: | |
951 | status = -EPERM; | |
952 | break; | |
953 | } | |
954 | return status; | |
955 | } | |
956 | ||
/*
 * ======== bridge_dev_destroy ========
 * Destroys the driver object.
 *
 * Stops the board, frees the MMU page tables, releases/unmaps every host
 * resource region, and finally frees the device context itself.
 */
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
{
	struct pg_table_attrs *pt_attrs;
	int status = 0;
	struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
	    dev_ctxt;
	struct cfg_hostres *host_res;
	u32 shm_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/* It should never happen */
	if (!dev_ctxt)
		return -EFAULT;

	/* first put the device to stop state */
	bridge_brd_stop(dev_context);
	/* Release L1/L2 page-table backing memory allocated in
	 * bridge_dev_create(). */
	if (dev_context->pt_attrs) {
		pt_attrs = dev_context->pt_attrs;
		kfree(pt_attrs->pg_info);

		if (pt_attrs->l2_tbl_alloc_va) {
			mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
					  pt_attrs->l2_tbl_alloc_pa,
					  pt_attrs->l2_tbl_alloc_sz);
		}
		if (pt_attrs->l1_tbl_alloc_va) {
			mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
					  pt_attrs->l1_tbl_alloc_pa,
					  pt_attrs->l1_tbl_alloc_sz);
		}
		kfree(pt_attrs);

	}

	if (dev_context->resources) {
		host_res = dev_context->resources;
		/* NOTE(review): drv_datap is dereferenced without a NULL
		 * check — confirm dev_get_drvdata(bridge) cannot fail here. */
		shm_size = drv_datap->shm_size;
		if (shm_size >= 0x10000) {
			if ((host_res->mem_base[1]) &&
			    (host_res->mem_phys[1])) {
				mem_free_phys_mem((void *)
						  host_res->mem_base
						  [1],
						  host_res->mem_phys
						  [1], shm_size);
			}
		} else {
			/* NOTE(review): status is still 0 at this point, so
			 * the %x in this message is always 0 and carries no
			 * information. */
			dev_dbg(bridge, "%s: Error getting shm size "
				"from registry: %x. Not calling "
				"mem_free_phys_mem\n", __func__,
				status);
		}
		host_res->mem_base[1] = 0;
		host_res->mem_phys[1] = 0;

		/* Unmap every ioremapped region that was set up. */
		if (host_res->mem_base[0])
			iounmap((void *)host_res->mem_base[0]);
		if (host_res->mem_base[2])
			iounmap((void *)host_res->mem_base[2]);
		if (host_res->mem_base[3])
			iounmap((void *)host_res->mem_base[3]);
		if (host_res->mem_base[4])
			iounmap((void *)host_res->mem_base[4]);
		if (host_res->dmmu_base)
			iounmap(host_res->dmmu_base);
		if (host_res->per_base)
			iounmap(host_res->per_base);
		if (host_res->per_pm_base)
			iounmap((void *)host_res->per_pm_base);
		if (host_res->core_pm_base)
			iounmap((void *)host_res->core_pm_base);

		host_res->mem_base[0] = (u32) NULL;
		host_res->mem_base[2] = (u32) NULL;
		host_res->mem_base[3] = (u32) NULL;
		host_res->mem_base[4] = (u32) NULL;
		host_res->dmmu_base = NULL;

		kfree(host_res);
	}

	/* Free the driver's device context: */
	kfree(drv_datap->base_img);
	kfree((void *)dev_ctxt);
	return status;
}
1047 | ||
e6890692 | 1048 | static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt, |
5e2eae57 RS |
1049 | u32 dsp_dest_addr, u32 dsp_src_addr, |
1050 | u32 ul_num_bytes, u32 mem_type) | |
999e07d6 ORL |
1051 | { |
1052 | int status = 0; | |
5e2eae57 RS |
1053 | u32 src_addr = dsp_src_addr; |
1054 | u32 dest_addr = dsp_dest_addr; | |
999e07d6 ORL |
1055 | u32 copy_bytes = 0; |
1056 | u32 total_bytes = ul_num_bytes; | |
1057 | u8 host_buf[BUFFERSIZE]; | |
e6890692 | 1058 | struct bridge_dev_context *dev_context = dev_ctxt; |
e6486d8c | 1059 | while (total_bytes > 0 && !status) { |
999e07d6 ORL |
1060 | copy_bytes = |
1061 | total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes; | |
1062 | /* Read from External memory */ | |
e6890692 | 1063 | status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr, |
5e2eae57 | 1064 | copy_bytes, mem_type); |
e6486d8c | 1065 | if (!status) { |
b4da7fc3 | 1066 | if (dest_addr < (dev_context->dsp_start_add + |
5108de0a | 1067 | dev_context->internal_size)) { |
999e07d6 | 1068 | /* Write to Internal memory */ |
e6890692 | 1069 | status = write_dsp_data(dev_ctxt, host_buf, |
999e07d6 | 1070 | dest_addr, copy_bytes, |
5e2eae57 | 1071 | mem_type); |
999e07d6 ORL |
1072 | } else { |
1073 | /* Write to External memory */ | |
1074 | status = | |
e6890692 | 1075 | write_ext_dsp_data(dev_ctxt, host_buf, |
999e07d6 | 1076 | dest_addr, copy_bytes, |
5e2eae57 | 1077 | mem_type, false); |
999e07d6 ORL |
1078 | } |
1079 | } | |
1080 | total_bytes -= copy_bytes; | |
1081 | src_addr += copy_bytes; | |
1082 | dest_addr += copy_bytes; | |
1083 | } | |
1084 | return status; | |
1085 | } | |
1086 | ||
1087 | /* Mem Write does not halt the DSP to write unlike bridge_brd_write */ | |
e6890692 | 1088 | static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, |
9d7d0a52 | 1089 | u8 *host_buff, u32 dsp_addr, |
5e2eae57 | 1090 | u32 ul_num_bytes, u32 mem_type) |
999e07d6 ORL |
1091 | { |
1092 | int status = 0; | |
e6890692 | 1093 | struct bridge_dev_context *dev_context = dev_ctxt; |
999e07d6 ORL |
1094 | u32 ul_remain_bytes = 0; |
1095 | u32 ul_bytes = 0; | |
1096 | ul_remain_bytes = ul_num_bytes; | |
e6486d8c | 1097 | while (ul_remain_bytes > 0 && !status) { |
999e07d6 ORL |
1098 | ul_bytes = |
1099 | ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes; | |
b4da7fc3 | 1100 | if (dsp_addr < (dev_context->dsp_start_add + |
5108de0a | 1101 | dev_context->internal_size)) { |
999e07d6 | 1102 | status = |
aa09b091 | 1103 | write_dsp_data(dev_ctxt, host_buff, dsp_addr, |
5e2eae57 | 1104 | ul_bytes, mem_type); |
999e07d6 | 1105 | } else { |
aa09b091 | 1106 | status = write_ext_dsp_data(dev_ctxt, host_buff, |
b301c858 | 1107 | dsp_addr, ul_bytes, |
5e2eae57 | 1108 | mem_type, true); |
999e07d6 ORL |
1109 | } |
1110 | ul_remain_bytes -= ul_bytes; | |
b301c858 | 1111 | dsp_addr += ul_bytes; |
aa09b091 | 1112 | host_buff = host_buff + ul_bytes; |
999e07d6 ORL |
1113 | } |
1114 | return status; | |
1115 | } | |
1116 | ||
/*
 * ======== bridge_brd_mem_map ========
 * This function maps MPU buffer to the DSP address space. It performs
 * linear to physical address translation if required. It translates each
 * page since linear addresses can be physically non-contiguous
 * All address & size arguments are assumed to be page aligned (in proc.c)
 *
 * TODO: Disable MMU while updating the page tables (but that'll stall DSP)
 */
static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
			      u32 ul_mpu_addr, u32 virt_addr,
			      u32 ul_num_bytes, u32 ul_map_attr,
			      struct page **mapped_pages)
{
	u32 attrs;
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct hw_mmu_map_attrs_t hw_attrs;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	u32 write = 0;
	u32 num_usr_pgs = 0;
	struct page *mapped_page, *pg;
	s32 pg_num;
	u32 va = virt_addr;
	struct task_struct *curr_task = current;
	u32 pg_i = 0;
	u32 mpu_addr, pa;

	dev_dbg(bridge,
		"%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
		__func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
		ul_map_attr);
	if (ul_num_bytes == 0)
		return -EINVAL;

	if (ul_map_attr & DSP_MAP_DIR_MASK) {
		attrs = ul_map_attr;
	} else {
		/* Assign default attributes */
		attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
	}
	/* Take mapping properties */
	if (attrs & DSP_MAPBIGENDIAN)
		hw_attrs.endianism = HW_BIG_ENDIAN;
	else
		hw_attrs.endianism = HW_LITTLE_ENDIAN;

	hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
	    ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
	/* Ignore element_size if mixed_size is enabled */
	if (hw_attrs.mixed_size == 0) {
		if (attrs & DSP_MAPELEMSIZE8) {
			/* Size is 8 bit */
			hw_attrs.element_size = HW_ELEM_SIZE8BIT;
		} else if (attrs & DSP_MAPELEMSIZE16) {
			/* Size is 16 bit */
			hw_attrs.element_size = HW_ELEM_SIZE16BIT;
		} else if (attrs & DSP_MAPELEMSIZE32) {
			/* Size is 32 bit */
			hw_attrs.element_size = HW_ELEM_SIZE32BIT;
		} else if (attrs & DSP_MAPELEMSIZE64) {
			/* Size is 64 bit */
			hw_attrs.element_size = HW_ELEM_SIZE64BIT;
		} else {
			/*
			 * Mixedsize isn't enabled, so size can't be
			 * zero here
			 */
			return -EINVAL;
		}
	}
	if (attrs & DSP_MAPDONOTLOCK)
		hw_attrs.donotlockmpupage = 1;
	else
		hw_attrs.donotlockmpupage = 0;

	/* vmalloc-backed buffers take a dedicated path. */
	if (attrs & DSP_MAPVMALLOCADDR) {
		return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
				       ul_num_bytes, &hw_attrs);
	}
	/*
	 * Do OS-specific user-va to pa translation.
	 * Combine physically contiguous regions to reduce TLBs.
	 * Pass the translated pa to pte_update.
	 */
	if ((attrs & DSP_MAPPHYSICALADDR)) {
		status = pte_update(dev_context, ul_mpu_addr, virt_addr,
				    ul_num_bytes, &hw_attrs);
		goto func_cont;
	}

	/*
	 * Important Note: ul_mpu_addr is mapped from user application process
	 * to current process - it must lie completely within the current
	 * virtual memory address space in order to be of use to us here!
	 */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ul_mpu_addr);
	if (vma)
		dev_dbg(bridge,
			"VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
			ul_num_bytes, vma->vm_start, vma->vm_end,
			vma->vm_flags);

	/*
	 * It is observed that under some circumstances, the user buffer is
	 * spread across several VMAs. So loop through and check if the entire
	 * user buffer is covered
	 */
	while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
		/* jump to the next VMA region */
		vma = find_vma(mm, vma->vm_end + 1);
		dev_dbg(bridge,
			"VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
			ul_num_bytes, vma->vm_start, vma->vm_end,
			vma->vm_flags);
	}
	if (!vma) {
		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
		       __func__, ul_mpu_addr, ul_num_bytes);
		status = -EINVAL;
		up_read(&mm->mmap_sem);
		goto func_cont;
	}

	if (vma->vm_flags & VM_IO) {
		/* Memory-mapped I/O region: translate each page manually
		 * instead of using get_user_pages(). */
		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
		mpu_addr = ul_mpu_addr;

		/* Get the physical addresses for user buffer */
		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
			pa = user_va2_pa(mm, mpu_addr);
			if (!pa) {
				status = -EPERM;
				pr_err("DSPBRIDGE: VM_IO mapping physical"
				       "address is invalid\n");
				break;
			}
			if (pfn_valid(__phys_to_pfn(pa))) {
				pg = PHYS_TO_PAGE(pa);
				get_page(pg);
				if (page_count(pg) < 1) {
					pr_err("Bad page in VM_IO buffer\n");
					bad_page_dump(pa, pg);
				}
			}
			status = pte_set(dev_context->pt_attrs, pa,
					 va, HW_PAGE_SIZE4KB, &hw_attrs);
			if (status)
				break;

			va += HW_PAGE_SIZE4KB;
			mpu_addr += HW_PAGE_SIZE4KB;
			pa += HW_PAGE_SIZE4KB;
		}
	} else {
		/* Regular user memory: pin pages one at a time and install a
		 * 4 KB DSP PTE for each. */
		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
		if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
			write = 1;

		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
			pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
						write, 1, &mapped_page, NULL);
			if (pg_num > 0) {
				if (page_count(mapped_page) < 1) {
					pr_err("Bad page count after doing"
					       "get_user_pages on"
					       "user buffer\n");
					bad_page_dump(page_to_phys(mapped_page),
						      mapped_page);
				}
				status = pte_set(dev_context->pt_attrs,
						 page_to_phys(mapped_page), va,
						 HW_PAGE_SIZE4KB, &hw_attrs);
				if (status)
					break;

				if (mapped_pages)
					mapped_pages[pg_i] = mapped_page;

				va += HW_PAGE_SIZE4KB;
				ul_mpu_addr += HW_PAGE_SIZE4KB;
			} else {
				pr_err("DSPBRIDGE: get_user_pages FAILED,"
				       "MPU addr = 0x%x,"
				       "vma->vm_flags = 0x%lx,"
				       "get_user_pages Err"
				       "Value = %d, Buffer"
				       "size=0x%x\n", ul_mpu_addr,
				       vma->vm_flags, pg_num, ul_num_bytes);
				status = -EPERM;
				break;
			}
		}
	}
	up_read(&mm->mmap_sem);
func_cont:
	if (status) {
		/*
		 * Roll out the mapped pages incase it failed in middle of
		 * mapping
		 */
		if (pg_i) {
			bridge_brd_mem_un_map(dev_context, virt_addr,
					      (pg_i * PG_SIZE4K));
		}
		/* NOTE(review): any failure is squashed to -EPERM here, so
		 * callers cannot distinguish -EINVAL from mapping errors. */
		status = -EPERM;
	}
	/*
	 * In any case, flush the TLB
	 * This is called from here instead from pte_update to avoid unnecessary
	 * repetition while mapping non-contiguous physical regions of a virtual
	 * region
	 */
	flush_all(dev_context);
	dev_dbg(bridge, "%s status %x\n", __func__, status);
	return status;
}
1338 | ||
/*
 * ======== bridge_brd_mem_un_map ========
 * Invalidate the PTEs for the DSP VA block to be unmapped.
 *
 * PTEs of a mapped memory block are contiguous in any page table
 * So, instead of looking up the PTE address for every 4K block,
 * we clear consecutive PTEs until we unmap all the bytes
 */
static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
				 u32 virt_addr, u32 ul_num_bytes)
{
	u32 l1_base_va;
	u32 l2_base_va;
	u32 l2_base_pa;
	u32 l2_page_num;
	u32 pte_val;
	u32 pte_size;
	u32 pte_count;
	u32 pte_addr_l1;
	u32 pte_addr_l2 = 0;
	u32 rem_bytes;
	u32 rem_bytes_l2;
	u32 va_curr;
	struct page *pg = NULL;
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct pg_table_attrs *pt = dev_context->pt_attrs;
	u32 temp;
	u32 paddr;
	u32 numof4k_pages = 0;

	va_curr = virt_addr;
	rem_bytes = ul_num_bytes;
	rem_bytes_l2 = 0;
	l1_base_va = pt->l1_base_va;
	pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
	dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
		"pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
		ul_num_bytes, l1_base_va, pte_addr_l1);

	while (rem_bytes && !status) {
		u32 va_curr_orig = va_curr;
		/* Find whether the L1 PTE points to a valid L2 PT */
		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
		pte_val = *(u32 *) pte_addr_l1;
		pte_size = hw_mmu_pte_size_l1(pte_val);

		/* Section/supersection entries (1 MB / 16 MB) are handled
		 * directly at L1 below. */
		if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
			goto skip_coarse_page;

		/*
		 * Get the L2 PA from the L1 PTE, and find
		 * corresponding L2 VA
		 */
		l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
		l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
		l2_page_num =
		    (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
		/*
		 * Find the L2 PTE address from which we will start
		 * clearing, the number of PTEs to be cleared on this
		 * page, and the size of VA space that needs to be
		 * cleared on this L2 page
		 */
		pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
		pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
		pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
		if (rem_bytes < (pte_count * PG_SIZE4K))
			pte_count = rem_bytes / PG_SIZE4K;
		rem_bytes_l2 = pte_count * PG_SIZE4K;

		/*
		 * Unmap the VA space on this L2 PT. A quicker way
		 * would be to clear pte_count entries starting from
		 * pte_addr_l2. However, below code checks that we don't
		 * clear invalid entries or less than 64KB for a 64KB
		 * entry. Similar checking is done for L1 PTEs too
		 * below
		 */
		while (rem_bytes_l2 && !status) {
			pte_val = *(u32 *) pte_addr_l2;
			pte_size = hw_mmu_pte_size_l2(pte_val);
			/* va_curr aligned to pte_size? */
			if (pte_size == 0 || rem_bytes_l2 < pte_size ||
			    va_curr & (pte_size - 1)) {
				status = -EPERM;
				break;
			}

			/* Collect Physical addresses from VA */
			paddr = (pte_val & ~(pte_size - 1));
			if (pte_size == HW_PAGE_SIZE64KB)
				numof4k_pages = 16;
			else
				numof4k_pages = 1;
			temp = 0;
			/* Release the pinned pages backing this PTE so the
			 * refcounts taken at map time are dropped. */
			while (temp++ < numof4k_pages) {
				if (!pfn_valid(__phys_to_pfn(paddr))) {
					paddr += HW_PAGE_SIZE4KB;
					continue;
				}
				pg = PHYS_TO_PAGE(paddr);
				if (page_count(pg) < 1) {
					pr_info("DSPBRIDGE: UNMAP function: "
						"COUNT 0 FOR PA 0x%x, size = "
						"0x%x\n", paddr, ul_num_bytes);
					bad_page_dump(paddr, pg);
				} else {
					set_page_dirty(pg);
					page_cache_release(pg);
				}
				paddr += HW_PAGE_SIZE4KB;
			}
			if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
				status = -EPERM;
				goto EXIT_LOOP;
			}

			status = 0;
			rem_bytes_l2 -= pte_size;
			va_curr += pte_size;
			pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
		}
		/* Update the per-L2-page entry count under the page-table
		 * lock; free the L1 entry when the L2 page empties. */
		spin_lock(&pt->pg_lock);
		if (rem_bytes_l2 == 0) {
			pt->pg_info[l2_page_num].num_entries -= pte_count;
			if (pt->pg_info[l2_page_num].num_entries == 0) {
				/*
				 * Clear the L1 PTE pointing to the L2 PT
				 */
				if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
						      HW_MMU_COARSE_PAGE_SIZE))
					status = 0;
				else {
					status = -EPERM;
					spin_unlock(&pt->pg_lock);
					goto EXIT_LOOP;
				}
			}
			rem_bytes -= pte_count * PG_SIZE4K;
		} else
			status = -EPERM;

		spin_unlock(&pt->pg_lock);
		continue;
skip_coarse_page:
		/* va_curr aligned to pte_size? */
		/* pte_size = 1 MB or 16 MB */
		if (pte_size == 0 || rem_bytes < pte_size ||
		    va_curr & (pte_size - 1)) {
			status = -EPERM;
			break;
		}

		if (pte_size == HW_PAGE_SIZE1MB)
			numof4k_pages = 256;
		else
			numof4k_pages = 4096;
		temp = 0;
		/* Collect Physical addresses from VA */
		paddr = (pte_val & ~(pte_size - 1));
		while (temp++ < numof4k_pages) {
			if (pfn_valid(__phys_to_pfn(paddr))) {
				pg = PHYS_TO_PAGE(paddr);
				if (page_count(pg) < 1) {
					pr_info("DSPBRIDGE: UNMAP function: "
						"COUNT 0 FOR PA 0x%x, size = "
						"0x%x\n", paddr, ul_num_bytes);
					bad_page_dump(paddr, pg);
				} else {
					set_page_dirty(pg);
					page_cache_release(pg);
				}
			}
			paddr += HW_PAGE_SIZE4KB;
		}
		if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
			status = 0;
			rem_bytes -= pte_size;
			va_curr += pte_size;
		} else {
			status = -EPERM;
			goto EXIT_LOOP;
		}
	}
	/*
	 * It is better to flush the TLB here, so that any stale old entries
	 * get flushed
	 */
EXIT_LOOP:
	flush_all(dev_context);
	dev_dbg(bridge,
		"%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
		" rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
		pte_addr_l2, rem_bytes, rem_bytes_l2, status);
	return status;
}
1536 | ||
1537 | /* | |
1538 | * ======== user_va2_pa ======== | |
1539 | * Purpose: | |
1540 | * This function walks through the page tables to convert a userland | |
1541 | * virtual address to physical address | |
1542 | */ | |
1543 | static u32 user_va2_pa(struct mm_struct *mm, u32 address) | |
1544 | { | |
1545 | pgd_t *pgd; | |
1546 | pmd_t *pmd; | |
1547 | pte_t *ptep, pte; | |
1548 | ||
1549 | pgd = pgd_offset(mm, address); | |
1550 | if (!(pgd_none(*pgd) || pgd_bad(*pgd))) { | |
1551 | pmd = pmd_offset(pgd, address); | |
1552 | if (!(pmd_none(*pmd) || pmd_bad(*pmd))) { | |
1553 | ptep = pte_offset_map(pmd, address); | |
1554 | if (ptep) { | |
1555 | pte = *ptep; | |
1556 | if (pte_present(pte)) | |
1557 | return pte & PAGE_MASK; | |
1558 | } | |
1559 | } | |
1560 | } | |
1561 | ||
1562 | return 0; | |
1563 | } | |
1564 | ||
ac8a139a FC |
1565 | /* |
1566 | * ======== pte_update ======== | |
1567 | * This function calculates the optimum page-aligned addresses and sizes | |
1568 | * Caller must pass page-aligned values | |
1569 | */ | |
1570 | static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa, | |
1571 | u32 va, u32 size, | |
1572 | struct hw_mmu_map_attrs_t *map_attrs) | |
1573 | { | |
1574 | u32 i; | |
1575 | u32 all_bits; | |
1576 | u32 pa_curr = pa; | |
1577 | u32 va_curr = va; | |
1578 | u32 num_bytes = size; | |
1579 | struct bridge_dev_context *dev_context = dev_ctxt; | |
1580 | int status = 0; | |
1581 | u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB, | |
1582 | HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB | |
1583 | }; | |
1584 | ||
1585 | while (num_bytes && !status) { | |
1586 | /* To find the max. page size with which both PA & VA are | |
1587 | * aligned */ | |
1588 | all_bits = pa_curr | va_curr; | |
1589 | ||
1590 | for (i = 0; i < 4; i++) { | |
1591 | if ((num_bytes >= page_size[i]) && ((all_bits & | |
1592 | (page_size[i] - | |
1593 | 1)) == 0)) { | |
1594 | status = | |
1595 | pte_set(dev_context->pt_attrs, pa_curr, | |
1596 | va_curr, page_size[i], map_attrs); | |
1597 | pa_curr += page_size[i]; | |
1598 | va_curr += page_size[i]; | |
1599 | num_bytes -= page_size[i]; | |
1600 | /* Don't try smaller sizes. Hopefully we have | |
1601 | * reached an address aligned to a bigger page | |
1602 | * size */ | |
1603 | break; | |
1604 | } | |
1605 | } | |
1606 | } | |
1607 | ||
1608 | return status; | |
1609 | } | |
1610 | ||
1611 | /* | |
1612 | * ======== pte_set ======== | |
1613 | * This function calculates PTE address (MPU virtual) to be updated | |
1614 | * It also manages the L2 page tables | |
1615 | */ | |
1616 | static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va, | |
1617 | u32 size, struct hw_mmu_map_attrs_t *attrs) | |
1618 | { | |
1619 | u32 i; | |
1620 | u32 pte_val; | |
1621 | u32 pte_addr_l1; | |
1622 | u32 pte_size; | |
1623 | /* Base address of the PT that will be updated */ | |
1624 | u32 pg_tbl_va; | |
1625 | u32 l1_base_va; | |
1626 | /* Compiler warns that the next three variables might be used | |
1627 | * uninitialized in this function. Doesn't seem so. Working around, | |
1628 | * anyways. */ | |
1629 | u32 l2_base_va = 0; | |
1630 | u32 l2_base_pa = 0; | |
1631 | u32 l2_page_num = 0; | |
1632 | int status = 0; | |
1633 | ||
1634 | l1_base_va = pt->l1_base_va; | |
1635 | pg_tbl_va = l1_base_va; | |
1636 | if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) { | |
1637 | /* Find whether the L1 PTE points to a valid L2 PT */ | |
1638 | pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va); | |
1639 | if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) { | |
1640 | pte_val = *(u32 *) pte_addr_l1; | |
1641 | pte_size = hw_mmu_pte_size_l1(pte_val); | |
1642 | } else { | |
1643 | return -EPERM; | |
1644 | } | |
1645 | spin_lock(&pt->pg_lock); | |
1646 | if (pte_size == HW_MMU_COARSE_PAGE_SIZE) { | |
1647 | /* Get the L2 PA from the L1 PTE, and find | |
1648 | * corresponding L2 VA */ | |
1649 | l2_base_pa = hw_mmu_pte_coarse_l1(pte_val); | |
1650 | l2_base_va = | |
1651 | l2_base_pa - pt->l2_base_pa + pt->l2_base_va; | |
1652 | l2_page_num = | |
1653 | (l2_base_pa - | |
1654 | pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE; | |
1655 | } else if (pte_size == 0) { | |
1656 | /* L1 PTE is invalid. Allocate a L2 PT and | |
1657 | * point the L1 PTE to it */ | |
1658 | /* Find a free L2 PT. */ | |
1659 | for (i = 0; (i < pt->l2_num_pages) && | |
1660 | (pt->pg_info[i].num_entries != 0); i++) | |
859171ca | 1661 | ; |
ac8a139a FC |
1662 | if (i < pt->l2_num_pages) { |
1663 | l2_page_num = i; | |
1664 | l2_base_pa = pt->l2_base_pa + (l2_page_num * | |
1665 | HW_MMU_COARSE_PAGE_SIZE); | |
1666 | l2_base_va = pt->l2_base_va + (l2_page_num * | |
1667 | HW_MMU_COARSE_PAGE_SIZE); | |
1668 | /* Endianness attributes are ignored for | |
1669 | * HW_MMU_COARSE_PAGE_SIZE */ | |
1670 | status = | |
1671 | hw_mmu_pte_set(l1_base_va, l2_base_pa, va, | |
1672 | HW_MMU_COARSE_PAGE_SIZE, | |
1673 | attrs); | |
1674 | } else { | |
1675 | status = -ENOMEM; | |
1676 | } | |
1677 | } else { | |
1678 | /* Found valid L1 PTE of another size. | |
1679 | * Should not overwrite it. */ | |
1680 | status = -EPERM; | |
1681 | } | |
1682 | if (!status) { | |
1683 | pg_tbl_va = l2_base_va; | |
1684 | if (size == HW_PAGE_SIZE64KB) | |
1685 | pt->pg_info[l2_page_num].num_entries += 16; | |
1686 | else | |
1687 | pt->pg_info[l2_page_num].num_entries++; | |
1688 | dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum " | |
1689 | "%x, num_entries %x\n", l2_base_va, | |
1690 | l2_base_pa, l2_page_num, | |
1691 | pt->pg_info[l2_page_num].num_entries); | |
1692 | } | |
1693 | spin_unlock(&pt->pg_lock); | |
1694 | } | |
1695 | if (!status) { | |
1696 | dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n", | |
1697 | pg_tbl_va, pa, va, size); | |
1698 | dev_dbg(bridge, "PTE: endianism %x, element_size %x, " | |
1699 | "mixed_size %x\n", attrs->endianism, | |
1700 | attrs->element_size, attrs->mixed_size); | |
1701 | status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs); | |
1702 | } | |
1703 | ||
1704 | return status; | |
1705 | } | |
1706 | ||
1707 | /* Memory map kernel VA -- memory allocated with vmalloc */ | |
1708 | static int mem_map_vmalloc(struct bridge_dev_context *dev_context, | |
1709 | u32 ul_mpu_addr, u32 virt_addr, | |
1710 | u32 ul_num_bytes, | |
1711 | struct hw_mmu_map_attrs_t *hw_attrs) | |
1712 | { | |
1713 | int status = 0; | |
1714 | struct page *page[1]; | |
1715 | u32 i; | |
1716 | u32 pa_curr; | |
1717 | u32 pa_next; | |
1718 | u32 va_curr; | |
1719 | u32 size_curr; | |
1720 | u32 num_pages; | |
1721 | u32 pa; | |
1722 | u32 num_of4k_pages; | |
1723 | u32 temp = 0; | |
1724 | ||
1725 | /* | |
1726 | * Do Kernel va to pa translation. | |
1727 | * Combine physically contiguous regions to reduce TLBs. | |
1728 | * Pass the translated pa to pte_update. | |
1729 | */ | |
1730 | num_pages = ul_num_bytes / PAGE_SIZE; /* PAGE_SIZE = OS page size */ | |
1731 | i = 0; | |
1732 | va_curr = ul_mpu_addr; | |
1733 | page[0] = vmalloc_to_page((void *)va_curr); | |
1734 | pa_next = page_to_phys(page[0]); | |
1735 | while (!status && (i < num_pages)) { | |
1736 | /* | |
1737 | * Reuse pa_next from the previous iteraion to avoid | |
1738 | * an extra va2pa call | |
1739 | */ | |
1740 | pa_curr = pa_next; | |
1741 | size_curr = PAGE_SIZE; | |
1742 | /* | |
1743 | * If the next page is physically contiguous, | |
1744 | * map it with the current one by increasing | |
1745 | * the size of the region to be mapped | |
1746 | */ | |
1747 | while (++i < num_pages) { | |
1748 | page[0] = | |
1749 | vmalloc_to_page((void *)(va_curr + size_curr)); | |
1750 | pa_next = page_to_phys(page[0]); | |
1751 | ||
1752 | if (pa_next == (pa_curr + size_curr)) | |
1753 | size_curr += PAGE_SIZE; | |
1754 | else | |
1755 | break; | |
1756 | ||
1757 | } | |
1758 | if (pa_next == 0) { | |
1759 | status = -ENOMEM; | |
1760 | break; | |
1761 | } | |
1762 | pa = pa_curr; | |
1763 | num_of4k_pages = size_curr / HW_PAGE_SIZE4KB; | |
1764 | while (temp++ < num_of4k_pages) { | |
1765 | get_page(PHYS_TO_PAGE(pa)); | |
1766 | pa += HW_PAGE_SIZE4KB; | |
1767 | } | |
1768 | status = pte_update(dev_context, pa_curr, virt_addr + | |
1769 | (va_curr - ul_mpu_addr), size_curr, | |
1770 | hw_attrs); | |
1771 | va_curr += size_curr; | |
1772 | } | |
1773 | /* | |
1774 | * In any case, flush the TLB | |
1775 | * This is called from here instead from pte_update to avoid unnecessary | |
1776 | * repetition while mapping non-contiguous physical regions of a virtual | |
1777 | * region | |
1778 | */ | |
1779 | flush_all(dev_context); | |
1780 | dev_dbg(bridge, "%s status %x\n", __func__, status); | |
1781 | return status; | |
1782 | } | |
1783 | ||
999e07d6 ORL |
1784 | /* |
1785 | * ======== wait_for_start ======== | |
1786 | * Wait for the singal from DSP that it has started, or time out. | |
1787 | */ | |
1788 | bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr) | |
1789 | { | |
1790 | u16 timeout = TIHELEN_ACKTIMEOUT; | |
1791 | ||
1792 | /* Wait for response from board */ | |
b3c8aef0 | 1793 | while (__raw_readw(dw_sync_addr) && --timeout) |
999e07d6 ORL |
1794 | udelay(10); |
1795 | ||
5e768067 | 1796 | /* If timed out: return false */ |
999e07d6 ORL |
1797 | if (!timeout) { |
1798 | pr_err("%s: Timed out waiting DSP to Start\n", __func__); | |
5e768067 | 1799 | return false; |
999e07d6 | 1800 | } |
5e768067 | 1801 | return true; |
999e07d6 | 1802 | } |