drivers/gpu/drm/etnaviv/etnaviv_buffer.c
/*
 * Copyright (C) 2014 Etnaviv Project
 * Author: Christian Gmeiner <christian.gmeiner@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#include "common.xml.h"
#include "state.xml.h"
#include "cmdstream.xml.h"

/*
 * Command Buffer helper:
 */

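/*
 * The helpers below emit raw Vivante front-end (FE) command words into a
 * kernel-owned command buffer: OUT() stores a single 32-bit word at the
 * current user_size offset, and each CMD_*() helper first rounds user_size
 * up to an 8-byte boundary, as the FE fetches commands in 64-bit units.
 */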
static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
        u32 *vaddr = (u32 *)buffer->vaddr;

        BUG_ON(buffer->user_size >= buffer->size);

        vaddr[buffer->user_size / 4] = data;
        buffer->user_size += 4;
}

static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
        u32 reg, u32 value)
{
        u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

        buffer->user_size = ALIGN(buffer->user_size, 8);

        /* write a register via cmd stream */
        OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
                    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
                    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
        OUT(buffer, value);
}

static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_END_HEADER_OP_END);
}

static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
}

static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
        u16 prefetch, u32 address)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
                    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
        OUT(buffer, address);
}

static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
        u32 from, u32 to)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
        OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}

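/*
 * Flush the caches of the pipe we are leaving, stall until the flush has
 * completed (the FE waits on a semaphore token signalled by the PE), and
 * only then select the new pipe.
 */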
static void etnaviv_cmd_select_pipe(struct etnaviv_cmdbuf *buffer, u8 pipe)
{
        u32 flush;
        u32 stall;

        /*
         * This assumes that if we're switching to 2D, we're switching
         * away from 3D, and vice versa.  Hence, if we're switching to
         * the 2D core, we need to flush the 3D depth and color caches,
         * otherwise we need to flush the 2D pixel engine cache.
         */
        if (pipe == ETNA_PIPE_2D)
                flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
        else
                flush = VIVS_GL_FLUSH_CACHE_PE2D;

        stall = VIVS_GL_SEMAPHORE_TOKEN_FROM(SYNC_RECIPIENT_FE) |
                VIVS_GL_SEMAPHORE_TOKEN_TO(SYNC_RECIPIENT_PE);

        CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
        CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN, stall);

        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

        CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}

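/*
 * Command buffers are expected to sit inside the GPU's linear address
 * window, so the GPU-visible address is simply the buffer's physical
 * address minus the base of that window.
 */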
static u32 gpu_va(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf)
{
        return buf->paddr - gpu->memory_base;
}

static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
        u32 size = buf->size;
        u32 *ptr = buf->vaddr + off;

        dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
                        ptr, gpu_va(gpu, buf) + off, size - len * 4 - off);

        print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
                        ptr, len * 4, 0);
}

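/*
 * Initialize the ring buffer with a WAIT/LINK loop: the FE executes the
 * WAIT, then follows the LINK straight back to the WAIT, so the GPU spins
 * harmlessly until etnaviv_buffer_queue() patches the WAIT into a LINK to
 * real work.  The return value is the prefetch size (in 64-bit words)
 * used when starting the FE on this buffer.
 */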
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = gpu->buffer;

        /* initialize buffer */
        buffer->user_size = 0;

        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + buffer->user_size - 4);

        return buffer->user_size / 8;
}

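/*
 * Stop the FE: rewind over the final WAIT/LINK pair (two 64-bit commands,
 * 16 bytes) and emit an END in its place.
 */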
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = gpu->buffer;

        /* Replace the last WAIT/LINK pair with an END */
        buffer->user_size -= 16;

        CMD_END(buffer);
        mb();
}

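/*
 * Append a user command buffer to the ring buffer and kick the FE at it.
 * The ring buffer gains an EVENT for completion signalling plus a fresh
 * WAIT/LINK loop; if an MMU flush or pipe switch is needed, those commands
 * and a LINK into the user buffer are emitted first.  The user buffer gains
 * a LINK back into the ring, and finally the old WAIT is patched into a
 * LINK so that the spinning FE picks up the new chain.
 */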
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
        struct etnaviv_cmdbuf *cmdbuf)
{
        struct etnaviv_cmdbuf *buffer = gpu->buffer;
        u32 *lw = buffer->vaddr + buffer->user_size - 16;
        u32 back, link_target, link_size, reserve_size, extra_size = 0;

        if (drm_debug & DRM_UT_DRIVER)
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

        /*
         * If we need to flush the MMU prior to submitting this buffer, we
         * will need to append a mmu flush load state, followed by a new
         * link to this buffer - a total of four additional words.  A
         * pipe switch needs another eight words on top of that.
         */
        if (gpu->mmu->need_flush || gpu->switch_context) {
                /* link command */
                extra_size += 2;
                /* flush command */
                if (gpu->mmu->need_flush)
                        extra_size += 2;
                /* pipe switch commands */
                if (gpu->switch_context)
                        extra_size += 8;
        }

        reserve_size = (6 + extra_size) * 4;

        /*
         * If we are going to completely overflow the buffer, we need to wrap.
         */
        if (buffer->user_size + reserve_size > buffer->size)
                buffer->user_size = 0;

        /* save offset back into main buffer */
        back = buffer->user_size + reserve_size - 6 * 4;
        link_target = gpu_va(gpu, buffer) + buffer->user_size;
        link_size = 6;

        /* Skip over any extra instructions */
        link_target += extra_size * sizeof(u32);

        if (drm_debug & DRM_UT_DRIVER)
                pr_info("stream link to 0x%08x @ 0x%08x %p\n",
                        link_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);

        /* jump back from cmd to main buffer */
        CMD_LINK(cmdbuf, link_size, link_target);

        link_target = gpu_va(gpu, cmdbuf);
        link_size = cmdbuf->size / 8;

        if (drm_debug & DRM_UT_DRIVER) {
                print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
                               cmdbuf->vaddr, cmdbuf->size, 0);

                pr_info("link op: %p\n", lw);
                pr_info("link addr: %p\n", lw + 1);
                pr_info("addr: 0x%08x\n", link_target);
                pr_info("back: 0x%08x\n", gpu_va(gpu, buffer) + back);
                pr_info("event: %d\n", event);
        }

        if (gpu->mmu->need_flush || gpu->switch_context) {
                u32 new_target = gpu_va(gpu, buffer) + buffer->user_size;

                if (gpu->mmu->need_flush) {
                        /* Add the MMU flush */
                        CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
                                       VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
                                       VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
                                       VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
                                       VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
                                       VIVS_GL_FLUSH_MMU_FLUSH_UNK4);

                        gpu->mmu->need_flush = false;
                }

                if (gpu->switch_context) {
                        etnaviv_cmd_select_pipe(buffer, cmdbuf->exec_state);
                        gpu->switch_context = false;
                }

                /* And the link to the first buffer */
                CMD_LINK(buffer, link_size, link_target);

                /* Update the link target to point to above instructions */
                link_target = new_target;
                link_size = extra_size;
        }

        /* trigger event */
        CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
                       VIVS_GL_EVENT_FROM_PE);

        /* append WAIT/LINK to main buffer */
        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + (buffer->user_size - 4));

        /*
         * Change the last WAIT into a LINK command; write the address first
         * and make it visible with a barrier, so the FE can never follow the
         * LINK with a stale target.  A second barrier orders the opcode
         * write against anything that follows.
         */
        *(lw + 1) = link_target;
        mb();
        *(lw) = VIV_FE_LINK_HEADER_OP_LINK |
                VIV_FE_LINK_HEADER_PREFETCH(link_size);
        mb();

        if (drm_debug & DRM_UT_DRIVER)
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}