Merge branch 'fix/hda' into for-linus
[linux-2.6-block.git] / drivers / gpu / drm / radeon / r300_cmdbuf.c
CommitLineData
414ed537
DA
1/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
2 *
3 * Copyright (C) The Weather Channel, Inc. 2002.
4 * Copyright (C) 2004 Nicolai Haehnle.
5 * All Rights Reserved.
6 *
7 * The Weather Channel (TM) funded Tungsten Graphics to develop the
8 * initial release of the Radeon 8500 driver under the XFree86 license.
9 * This notice must be preserved.
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a
12 * copy of this software and associated documentation files (the "Software"),
13 * to deal in the Software without restriction, including without limitation
14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15 * and/or sell copies of the Software, and to permit persons to whom the
16 * Software is furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the next
19 * paragraph) shall be included in all copies or substantial portions of the
20 * Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
28 * DEALINGS IN THE SOFTWARE.
29 *
30 * Authors:
31 * Nicolai Haehnle <prefect_@gmx.net>
32 */
33
34#include "drmP.h"
35#include "drm.h"
36#include "radeon_drm.h"
37#include "radeon_drv.h"
38#include "r300_reg.h"
39
958a6f8c
DM
40#include <asm/unaligned.h>
41
414ed537
DA
#define R300_SIMULTANEOUS_CLIPRECTS		4

/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
 * (index is number-of-cliprects - 1; presumably each value selects which
 * cliprect combinations pass — TODO confirm against R300 register docs).
 */
static const int r300_cliprect_cntl[4] = {
	0xAAAA,
	0xEEEE,
	0xFEFE,
	0xFFFE
};
52
/**
 * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
 * buffer, starting with index n.
 *
 * Cliprect boxes are copied from the userspace-supplied cmdbuf->boxes
 * array; coordinates are clamped/offset per chip family before being
 * written to R300_RE_CLIPRECT_TL_0..  With zero cliprects the engine is
 * instead set up so that no fragments can be produced (see comment in the
 * else branch).  Returns 0 on success or -EFAULT if the user copy faults.
 */
static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
			       drm_radeon_kcmd_buffer_t *cmdbuf, int n)
{
	struct drm_clip_rect box;
	int nr;
	int i;
	RING_LOCALS;

	nr = cmdbuf->nbox - n;
	if (nr > R300_SIMULTANEOUS_CLIPRECTS)
		nr = R300_SIMULTANEOUS_CLIPRECTS;

	DRM_DEBUG("%i cliprects\n", nr);

	if (nr) {
		BEGIN_RING(6 + nr * 2);
		OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));

		for (i = 0; i < nr; ++i) {
			if (DRM_COPY_FROM_USER_UNCHECKED
			    (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
				DRM_ERROR("copy cliprect faulted\n");
				return -EFAULT;
			}

			box.x2--;	/* Hardware expects inclusive bottom-right corner */
			box.y2--;

			/* RV515 and newer take raw coordinates; older chips
			 * need the bias applied before masking. */
			if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
				box.x1 = (box.x1) &
					R300_CLIPRECT_MASK;
				box.y1 = (box.y1) &
					R300_CLIPRECT_MASK;
				box.x2 = (box.x2) &
					R300_CLIPRECT_MASK;
				box.y2 = (box.y2) &
					R300_CLIPRECT_MASK;
			} else {
				box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
					R300_CLIPRECT_MASK;
				box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
					R300_CLIPRECT_MASK;
				box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
					R300_CLIPRECT_MASK;
				box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
					R300_CLIPRECT_MASK;
			}

			OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
				 (box.y1 << R300_CLIPRECT_Y_SHIFT));
			OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
				 (box.y2 << R300_CLIPRECT_Y_SHIFT));

		}

		OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);

		/* TODO/SECURITY: Force scissors to a safe value, otherwise the
		 * client might be able to trample over memory.
		 * The impact should be very limited, but I'd rather be safe than
		 * sorry.
		 */
		OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
		OUT_RING(0);
		OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
		ADVANCE_RING();
	} else {
		/* Why we allow zero cliprect rendering:
		 * There are some commands in a command buffer that must be submitted
		 * even when there are no cliprects, e.g. DMA buffer discard
		 * or state setting (though state setting could be avoided by
		 * simulating a loss of context).
		 *
		 * Now since the cmdbuf interface is so chaotic right now (and is
		 * bound to remain that way for a bit until things settle down),
		 * it is basically impossible to filter out the commands that are
		 * necessary and those that aren't.
		 *
		 * So I choose the safe way and don't do any filtering at all;
		 * instead, I simply set up the engine so that all rendering
		 * can't produce any fragments.
		 */
		BEGIN_RING(2);
		OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
		ADVANCE_RING();
	}

	/* flush cache and wait idle clean after cliprect change */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	OUT_RING(R300_RB3D_DC_FLUSH);
	ADVANCE_RING();
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
	ADVANCE_RING();
	/* set flush flag */
	dev_priv->track_flush |= RADEON_FLUSH_EMITED;

	return 0;
}
158
/* Per-register permission table, one byte per 32-bit register in the
 * 64K register space.  0 means the register may not be written from a
 * user command buffer; MARK_SAFE means it may be written freely;
 * MARK_CHECK_OFFSET means the written value must pass
 * radeon_check_offset() first (it is a GPU address).
 */
static u8 r300_reg_flags[0x10000 >> 2];

/*
 * Populate r300_reg_flags for the chip family of @dev.  Must be called
 * before any user command buffers are validated; the table is consulted
 * by r300_check_range() and r300_emit_carefully_checked_packet0().
 */
void r300_init_reg_flags(struct drm_device *dev)
{
	int i;
	drm_radeon_private_t *dev_priv = dev->dev_private;

	memset(r300_reg_flags, 0, 0x10000 >> 2);
#define ADD_RANGE_MARK(reg, count,mark) \
		for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
			r300_reg_flags[i]|=(mark);

#define MARK_SAFE		1
#define MARK_CHECK_OFFSET	2

#define ADD_RANGE(reg, count)	ADD_RANGE_MARK(reg, count, MARK_SAFE)

	/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
	ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
	ADD_RANGE(R300_VAP_CNTL, 1);
	ADD_RANGE(R300_SE_VTE_CNTL, 2);
	ADD_RANGE(0x2134, 2);
	ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
	ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
	ADD_RANGE(0x21DC, 1);
	ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
	ADD_RANGE(R300_VAP_CLIP_X_0, 4);
	ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
	ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
	ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
	ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
	ADD_RANGE(R300_GB_ENABLE, 1);
	ADD_RANGE(R300_GB_MSPOS0, 5);
	ADD_RANGE(R300_TX_INVALTAGS, 1);
	ADD_RANGE(R300_TX_ENABLE, 1);
	ADD_RANGE(0x4200, 4);
	ADD_RANGE(0x4214, 1);
	ADD_RANGE(R300_RE_POINTSIZE, 1);
	ADD_RANGE(0x4230, 3);
	ADD_RANGE(R300_RE_LINE_CNT, 1);
	ADD_RANGE(R300_RE_UNK4238, 1);
	ADD_RANGE(0x4260, 3);
	ADD_RANGE(R300_RE_SHADE, 4);
	ADD_RANGE(R300_RE_POLYGON_MODE, 5);
	ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
	ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
	ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
	ADD_RANGE(R300_RE_CULL_CNTL, 1);
	ADD_RANGE(0x42C0, 2);
	ADD_RANGE(R300_RS_CNTL_0, 2);

	ADD_RANGE(R300_SU_REG_DEST, 1);
	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530)
		ADD_RANGE(RV530_FG_ZBREG_DEST, 1);

	ADD_RANGE(R300_SC_HYPERZ, 2);
	ADD_RANGE(0x43E8, 1);

	ADD_RANGE(0x46A4, 5);

	ADD_RANGE(R300_RE_FOG_STATE, 1);
	ADD_RANGE(R300_FOG_COLOR_R, 3);
	ADD_RANGE(R300_PP_ALPHA_TEST, 2);
	ADD_RANGE(0x4BD8, 1);
	ADD_RANGE(R300_PFS_PARAM_0_X, 64);
	ADD_RANGE(0x4E00, 1);
	ADD_RANGE(R300_RB3D_CBLEND, 2);
	ADD_RANGE(R300_RB3D_COLORMASK, 1);
	ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
	ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET);	/* check offset */
	ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
	ADD_RANGE(0x4E50, 9);
	ADD_RANGE(0x4E88, 1);
	ADD_RANGE(0x4EA0, 2);
	ADD_RANGE(R300_ZB_CNTL, 3);
	ADD_RANGE(R300_ZB_FORMAT, 4);
	ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET);	/* check offset */
	ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
	ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
	ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);
	ADD_RANGE(R300_ZB_ZPASS_DATA, 2); /* ZB_ZPASS_DATA, ZB_ZPASS_ADDR */

	ADD_RANGE(R300_TX_FILTER_0, 16);
	ADD_RANGE(R300_TX_FILTER1_0, 16);
	ADD_RANGE(R300_TX_SIZE_0, 16);
	ADD_RANGE(R300_TX_FORMAT_0, 16);
	ADD_RANGE(R300_TX_PITCH_0, 16);
	/* Texture offset is dangerous and needs more checking */
	ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
	ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
	ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);

	/* Sporadic registers used as primitives are emitted */
	ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
	ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
	ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
	ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);

	/* R500 (>= RV515) has its own fragment-shader register file;
	 * older chips expose the R300 PFS registers instead. */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
		ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
		ADD_RANGE(R500_US_CONFIG, 2);
		ADD_RANGE(R500_US_CODE_ADDR, 3);
		ADD_RANGE(R500_US_FC_CTRL, 1);
		ADD_RANGE(R500_RS_IP_0, 16);
		ADD_RANGE(R500_RS_INST_0, 16);
		ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
		ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
		ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
	} else {
		ADD_RANGE(R300_PFS_CNTL_0, 3);
		ADD_RANGE(R300_PFS_NODE_0, 4);
		ADD_RANGE(R300_PFS_TEXI_0, 64);
		ADD_RANGE(R300_PFS_INSTR0_0, 64);
		ADD_RANGE(R300_PFS_INSTR1_0, 64);
		ADD_RANGE(R300_PFS_INSTR2_0, 64);
		ADD_RANGE(R300_PFS_INSTR3_0, 64);
		ADD_RANGE(R300_RS_INTERP_0, 8);
		ADD_RANGE(R300_RS_ROUTE_0, 8);

	}
}
280
b5e89ed5 281static __inline__ int r300_check_range(unsigned reg, int count)
414ed537
DA
282{
283 int i;
b5e89ed5
DA
284 if (reg & ~0xffff)
285 return -1;
286 for (i = (reg >> 2); i < (reg >> 2) + count; i++)
287 if (r300_reg_flags[i] != MARK_SAFE)
288 return 1;
414ed537
DA
289 return 0;
290}
291
/*
 * Emit a packet0 whose register span includes non-MARK_SAFE registers.
 * The payload is first copied into a local array so that the values which
 * are validated are exactly the values later written to the ring, then
 * each register's flag is checked: MARK_CHECK_OFFSET values must pass
 * radeon_check_offset(); any other non-safe flag rejects the packet.
 * Advances cmdbuf->buf/bufsz past the consumed payload on success.
 * Returns 0 on success or -EINVAL on any validation failure.
 */
static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
							  dev_priv,
							  drm_radeon_kcmd_buffer_t
							  * cmdbuf,
							  drm_r300_cmd_header_t
							  header)
{
	int reg;
	int sz;
	int i;
	int values[64];
	RING_LOCALS;

	sz = header.packet0.count;
	reg = (header.packet0.reghi << 8) | header.packet0.reglo;

	if ((sz > 64) || (sz < 0)) {
		DRM_ERROR
		    ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
		     reg, sz);
		return -EINVAL;
	}
	for (i = 0; i < sz; i++) {
		values[i] = ((int *)cmdbuf->buf)[i];
		switch (r300_reg_flags[(reg >> 2) + i]) {
		case MARK_SAFE:
			break;
		case MARK_CHECK_OFFSET:
			/* register holds a GPU address; validate it */
			if (!radeon_check_offset(dev_priv, (u32) values[i])) {
				DRM_ERROR
				    ("Offset failed range check (reg=%04x sz=%d)\n",
				     reg, sz);
				return -EINVAL;
			}
			break;
		default:
			DRM_ERROR("Register %04x failed check as flag=%02x\n",
				  reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
			return -EINVAL;
		}
	}

	BEGIN_RING(1 + sz);
	OUT_RING(CP_PACKET0(reg, sz - 1));
	OUT_RING_TABLE(values, sz);
	ADVANCE_RING();

	cmdbuf->buf += sz * 4;
	cmdbuf->bufsz -= sz * 4;

	return 0;
}
344
/**
 * Emits a packet0 setting arbitrary registers.
 * Called by r300_do_cp_cmdbuf.
 *
 * Note that checks are performed on contents and addresses of the registers
 *
 * Fast path: if r300_check_range() says the whole span is MARK_SAFE, the
 * user payload is streamed straight to the ring; otherwise the slow,
 * value-by-value checked path is taken.  Advances cmdbuf->buf/bufsz on
 * success; returns 0 or -EINVAL.
 */
static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
					drm_radeon_kcmd_buffer_t *cmdbuf,
					drm_r300_cmd_header_t header)
{
	int reg;
	int sz;
	RING_LOCALS;

	sz = header.packet0.count;
	reg = (header.packet0.reghi << 8) | header.packet0.reglo;

	if (!sz)
		return 0;

	/* payload must fit inside the remaining command buffer */
	if (sz * 4 > cmdbuf->bufsz)
		return -EINVAL;

	/* span must stay inside the 64K register aperture */
	if (reg + sz * 4 >= 0x10000) {
		DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
			  sz);
		return -EINVAL;
	}

	if (r300_check_range(reg, sz)) {
		/* go and check everything */
		return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
							   header);
	}
	/* the rest of the data is safe to emit, whatever the values the user passed */

	BEGIN_RING(1 + sz);
	OUT_RING(CP_PACKET0(reg, sz - 1));
	OUT_RING_TABLE((int *)cmdbuf->buf, sz);
	ADVANCE_RING();

	cmdbuf->buf += sz * 4;
	cmdbuf->bufsz -= sz * 4;

	return 0;
}
391
/**
 * Uploads user-supplied vertex program instructions or parameters onto
 * the graphics card.
 * Called by r300_do_cp_cmdbuf.
 *
 * Each unit of header.vpu.count is 16 bytes (4 dwords) of PVS upload
 * data; cmdbuf->buf/bufsz are advanced past it on success.
 * Returns 0 on success or -EINVAL if the payload exceeds the buffer.
 */
static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
				    drm_radeon_kcmd_buffer_t *cmdbuf,
				    drm_r300_cmd_header_t header)
{
	int sz;
	int addr;
	RING_LOCALS;

	sz = header.vpu.count;
	addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;

	if (!sz)
		return 0;
	if (sz * 16 > cmdbuf->bufsz)
		return -EINVAL;

	/* VAP is very sensitive so we purge cache before we program it
	 * and we also flush its state before & after */
	BEGIN_RING(6);
	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	OUT_RING(R300_RB3D_DC_FLUSH);
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
	OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
	OUT_RING(0);
	ADVANCE_RING();
	/* set flush flag */
	dev_priv->track_flush |= RADEON_FLUSH_EMITED;

	BEGIN_RING(3 + sz * 4);
	OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
	OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
	OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
	ADVANCE_RING();

	/* flush VAP state again after the upload */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
	OUT_RING(0);
	ADVANCE_RING();

	cmdbuf->buf += sz * 16;
	cmdbuf->bufsz -= sz * 16;

	return 0;
}
442
/**
 * Emit a clear packet from userspace.
 * Called by r300_emit_packet3.
 *
 * Consumes exactly 8 dwords (one point primitive's worth of vertex data)
 * from the command buffer, emits it as a 3D_DRAW_IMMD_2, then flushes the
 * destination cache and waits for 3D idle.  Returns 0 or -EINVAL if the
 * buffer holds fewer than 8 dwords.
 */
static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
				      drm_radeon_kcmd_buffer_t *cmdbuf)
{
	RING_LOCALS;

	if (8 * 4 > cmdbuf->bufsz)
		return -EINVAL;

	BEGIN_RING(10);
	OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
	OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
		 (1 << R300_PRIM_NUM_VERTICES_SHIFT));
	OUT_RING_TABLE((int *)cmdbuf->buf, 8);
	ADVANCE_RING();

	BEGIN_RING(4);
	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	OUT_RING(R300_RB3D_DC_FLUSH);
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
	ADVANCE_RING();
	/* set flush flag */
	dev_priv->track_flush |= RADEON_FLUSH_EMITED;

	cmdbuf->buf += 8 * 4;
	cmdbuf->bufsz -= 8 * 4;

	return 0;
}
476
/*
 * Validate and emit a 3D_LOAD_VBPNTR packet (vertex array pointers).
 *
 * Payload layout (after the packet header): payload[0] = narrays, then
 * per pair of arrays an attribute dword followed by one GPU offset each
 * (a lone trailing array has a single offset).  Every offset must pass
 * radeon_check_offset().  The payload is copied into a local buffer so
 * the data checked is the data emitted.  Returns 0 or -EINVAL on any
 * size, offset, or count mismatch.
 */
static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
					       drm_radeon_kcmd_buffer_t *cmdbuf,
					       u32 header)
{
	int count, i, k;
#define MAX_ARRAY_PACKET  64
	u32 payload[MAX_ARRAY_PACKET];
	u32 narrays;
	RING_LOCALS;

	count = (header >> 16) & 0x3fff;

	if ((count + 1) > MAX_ARRAY_PACKET) {
		DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
			  count);
		return -EINVAL;
	}
	memset(payload, 0, MAX_ARRAY_PACKET * 4);
	memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);

	/* carefully check packet contents */

	narrays = payload[0];
	k = 0;
	i = 1;
	while ((k < narrays) && (i < (count + 1))) {
		i++;		/* skip attribute field */
		if (!radeon_check_offset(dev_priv, payload[i])) {
			DRM_ERROR
			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
			     k, i);
			return -EINVAL;
		}
		k++;
		i++;
		if (k == narrays)
			break;
		/* have one more to process, they come in pairs */
		if (!radeon_check_offset(dev_priv, payload[i])) {
			DRM_ERROR
			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
			     k, i);
			return -EINVAL;
		}
		k++;
		i++;
	}
	/* do the counts match what we expect ? */
	if ((k != narrays) || (i != (count + 1))) {
		DRM_ERROR
		    ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
		     k, i, narrays, count + 1);
		return -EINVAL;
	}

	/* all clear, output packet */

	BEGIN_RING(count + 2);
	OUT_RING(header);
	OUT_RING_TABLE(payload, count + 1);
	ADVANCE_RING();

	cmdbuf->buf += (count + 2) * 4;
	cmdbuf->bufsz -= (count + 2) * 4;

	return 0;
}
d5ea702f 544
/*
 * Validate and emit a CNTL_BITBLT_MULTI packet3.
 *
 * When bit 15 of the header is set, the packet carries explicit
 * pitch/offset dwords: cmd[2] (and, if both src and dst pitch/offset
 * controls are present, cmd[3]) are GPU offsets in units of 1KB and must
 * pass radeon_check_offset().  Returns 0 or -EINVAL on a bad offset.
 */
static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
					     drm_radeon_kcmd_buffer_t *cmdbuf)
{
	u32 *cmd = (u32 *) cmdbuf->buf;
	int count, ret;
	RING_LOCALS;

	count=(cmd[0]>>16) & 0x3fff;

	if (cmd[0] & 0x8000) {
		u32 offset;

		if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
			      | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
			offset = cmd[2] << 10;
			ret = !radeon_check_offset(dev_priv, offset);
			if (ret) {
				DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
				return -EINVAL;
			}
		}

		if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
		    (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
			offset = cmd[3] << 10;
			ret = !radeon_check_offset(dev_priv, offset);
			if (ret) {
				DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
				return -EINVAL;
			}

		}
	}

	BEGIN_RING(count+2);
	OUT_RING(cmd[0]);
	OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
	ADVANCE_RING();

	cmdbuf->buf += (count+2)*4;
	cmdbuf->bufsz -= (count+2)*4;

	return 0;
}
414ed537 589
/*
 * Validate and emit a 3D_DRAW_INDX_2 packet3.
 *
 * The packet either carries its indices inline (count != 0, in which case
 * count must match the index count derived from cmd[1], halved for 16-bit
 * indices), or is immediately followed in the stream by a raw INDX_BUFFER
 * packet3 whose register address, GPU offset, and size are all validated
 * here before both packets are emitted.  Returns 0 or -EINVAL.
 */
static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
					    drm_radeon_kcmd_buffer_t *cmdbuf)
{
	u32 *cmd;
	int count;
	int expected_count;
	RING_LOCALS;

	cmd = (u32 *) cmdbuf->buf;
	count = (cmd[0]>>16) & 0x3fff;
	expected_count = cmd[1] >> 16;
	if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
		expected_count = (expected_count+1)/2;

	if (count && count != expected_count) {
		DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
			count, expected_count);
		return -EINVAL;
	}

	BEGIN_RING(count+2);
	OUT_RING(cmd[0]);
	OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
	ADVANCE_RING();

	cmdbuf->buf += (count+2)*4;
	cmdbuf->bufsz -= (count+2)*4;

	if (!count) {
		drm_r300_cmd_header_t header;

		if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
			DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
			return -EINVAL;
		}

		header.u = *(unsigned int *)cmdbuf->buf;

		cmdbuf->buf += sizeof(header);
		cmdbuf->bufsz -= sizeof(header);
		cmd = (u32 *) cmdbuf->buf;

		/* the follow-up must be a raw packet3 wrapping INDX_BUFFER */
		if (header.header.cmd_type != R300_CMD_PACKET3 ||
		    header.packet3.packet != R300_CMD_PACKET3_RAW ||
		    cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
			DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
			return -EINVAL;
		}

		if ((cmd[1] & 0x8000ffff) != 0x80000810) {
			DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
			return -EINVAL;
		}
		if (!radeon_check_offset(dev_priv, cmd[2])) {
			DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
			return -EINVAL;
		}
		if (cmd[3] != expected_count) {
			DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
				cmd[3], expected_count);
			return -EINVAL;
		}

		BEGIN_RING(4);
		OUT_RING(cmd[0]);
		OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
		ADVANCE_RING();

		cmdbuf->buf += 4*4;
		cmdbuf->bufsz -= 4*4;
	}

	return 0;
}
664
/*
 * Validate and emit a raw packet3 from the command buffer.
 *
 * Only a whitelist of packet3 opcodes is accepted; known draw packets
 * clear the flush/purge tracking flags, 3D_LOAD_VBPNTR / BITBLT_MULTI /
 * DRAW_INDX_2 are delegated to their dedicated validators, and a bare
 * INDX_BUFFER is rejected (it is only legal following 3D_DRAW_INDX_2).
 * Returns 0 or -EINVAL.
 */
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
					    drm_radeon_kcmd_buffer_t *cmdbuf)
{
	u32 header;
	int count;
	RING_LOCALS;

	if (4 > cmdbuf->bufsz)
		return -EINVAL;

	/* Fixme !! This simply emits a packet without much checking.
	   We need to be smarter. */

	/* obtain first word - actual packet3 header */
	header = *(u32 *) cmdbuf->buf;

	/* Is it packet 3 ? */
	if ((header >> 30) != 0x3) {
		DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
		return -EINVAL;
	}

	count = (header >> 16) & 0x3fff;

	/* Check again now that we know how much data to expect */
	if ((count + 2) * 4 > cmdbuf->bufsz) {
		DRM_ERROR
		    ("Expected packet3 of length %d but have only %d bytes left\n",
		     (count + 2) * 4, cmdbuf->bufsz);
		return -EINVAL;
	}

	/* Is it a packet type we know about ? */
	switch (header & 0xff00) {
	case RADEON_3D_LOAD_VBPNTR:	/* load vertex array pointers */
		return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);

	case RADEON_CNTL_BITBLT_MULTI:
		return r300_emit_bitblt_multi(dev_priv, cmdbuf);

	case RADEON_CP_INDX_BUFFER:
		DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
		return -EINVAL;
	case RADEON_CP_3D_DRAW_IMMD_2:
		/* triggers drawing using in-packet vertex data */
	case RADEON_CP_3D_DRAW_VBUF_2:
		/* triggers drawing of vertex buffers setup elsewhere */
		dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
					   RADEON_PURGE_EMITED);
		break;
	case RADEON_CP_3D_DRAW_INDX_2:
		/* triggers drawing using indices to vertex buffer */
		/* whenever we send vertex we clear flush & purge */
		dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
					   RADEON_PURGE_EMITED);
		return r300_emit_draw_indx_2(dev_priv, cmdbuf);
	case RADEON_WAIT_FOR_IDLE:
	case RADEON_CP_NOP:
		/* these packets are safe */
		break;
	default:
		DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
		return -EINVAL;
	}

	BEGIN_RING(count + 2);
	OUT_RING(header);
	OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
	ADVANCE_RING();

	cmdbuf->buf += (count + 2) * 4;
	cmdbuf->bufsz -= (count + 2) * 4;

	return 0;
}
740
/**
 * Emit a rendering packet3 from userspace.
 * Called by r300_do_cp_cmdbuf.
 *
 * When more cliprects exist than the hardware can take at once, the same
 * packet is re-emitted once per cliprect batch: the buffer pointer/size
 * are rewound to orig_buf/orig_bufsz before each repetition.
 */
static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
					drm_radeon_kcmd_buffer_t *cmdbuf,
					drm_r300_cmd_header_t header)
{
	int n;
	int ret;
	char *orig_buf = cmdbuf->buf;
	int orig_bufsz = cmdbuf->bufsz;

	/* This is a do-while-loop so that we run the interior at least once,
	 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
	 */
	n = 0;
	do {
		if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
			ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
			if (ret)
				return ret;

			/* rewind so the packet body is re-read this pass */
			cmdbuf->buf = orig_buf;
			cmdbuf->bufsz = orig_bufsz;
		}

		switch (header.packet3.packet) {
		case R300_CMD_PACKET3_CLEAR:
			DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
			ret = r300_emit_clear(dev_priv, cmdbuf);
			if (ret) {
				DRM_ERROR("r300_emit_clear failed\n");
				return ret;
			}
			break;

		case R300_CMD_PACKET3_RAW:
			DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
			ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
			if (ret) {
				DRM_ERROR("r300_emit_raw_packet3 failed\n");
				return ret;
			}
			break;

		default:
			DRM_ERROR("bad packet3 type %i at %p\n",
				  header.packet3.packet,
				  cmdbuf->buf - sizeof(header));
			return -EINVAL;
		}

		n += R300_SIMULTANEOUS_CLIPRECTS;
	} while (n < cmdbuf->nbox);

	return 0;
}
799
/* Some of the R300 chips seem to be extremely touchy about the two registers
 * that are configured in r300_pacify.
 * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
 * sends a command buffer that contains only state setting commands and a
 * vertex program/parameter upload sequence, this will eventually lead to a
 * lockup, unless the sequence is bracketed by calls to r300_pacify.
 * So we should take great care to *always* call r300_pacify before
 * *anything* 3D related, and again afterwards. This is what the
 * call bracket in r300_do_cp_cmdbuf is for.
 */

/**
 * Emit the sequence to pacify R300.
 *
 * Flushes (and, unless a purge was already emitted since the last draw,
 * also purges/frees) the Z, 3D and 2D caches, invalidates texture tags,
 * and waits for engine idle.  Records the fact via track_flush so
 * redundant flushes can be skipped elsewhere.
 */
static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
{
	uint32_t cache_z, cache_3d, cache_2d;
	RING_LOCALS;

	cache_z = R300_ZC_FLUSH;
	cache_2d = R300_RB2D_DC_FLUSH;
	cache_3d = R300_RB3D_DC_FLUSH;
	if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
		/* we can purge: primitives were drawn since the last purge */
		cache_z |= R300_ZC_FREE;
		cache_2d |= R300_RB2D_DC_FREE;
		cache_3d |= R300_RB3D_DC_FREE;
	}

	/* flush & purge zbuffer */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
	OUT_RING(cache_z);
	ADVANCE_RING();
	/* flush & purge 3d */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	OUT_RING(cache_3d);
	ADVANCE_RING();
	/* flush & purge texture */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
	OUT_RING(0);
	ADVANCE_RING();
	/* FIXME: is this one really needed ? */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
	OUT_RING(0);
	ADVANCE_RING();
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
	ADVANCE_RING();
	/* flush & purge 2d through E2 as RB2D will trigger lockup */
	BEGIN_RING(4);
	OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
	OUT_RING(cache_2d);
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
	OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
		 RADEON_WAIT_HOST_IDLECLEAN);
	ADVANCE_RING();
	/* set flush & purge flags */
	dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
}
864
414ed537
DA
865/**
866 * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
867 * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
868 * be careful about how this function is called.
869 */
7c1c2871 870static void r300_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
414ed537 871{
414ed537 872 drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
7c1c2871 873 struct drm_radeon_master_private *master_priv = master->driver_priv;
414ed537 874
7c1c2871 875 buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
414ed537
DA
876 buf->pending = 1;
877 buf->used = 0;
878}
879
0c76be35
DA
880static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
881 drm_r300_cmd_header_t header)
882{
883 u32 wait_until;
884 RING_LOCALS;
885
886 if (!header.wait.flags)
887 return;
888
889 wait_until = 0;
890
891 switch(header.wait.flags) {
892 case R300_WAIT_2D:
893 wait_until = RADEON_WAIT_2D_IDLE;
894 break;
895 case R300_WAIT_3D:
896 wait_until = RADEON_WAIT_3D_IDLE;
897 break;
898 case R300_NEW_WAIT_2D_3D:
899 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE;
900 break;
901 case R300_NEW_WAIT_2D_2D_CLEAN:
902 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
903 break;
904 case R300_NEW_WAIT_3D_3D_CLEAN:
905 wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
906 break;
907 case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
908 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
909 wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
910 break;
911 default:
912 return;
913 }
914
915 BEGIN_RING(2);
916 OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
917 OUT_RING(wait_until);
918 ADVANCE_RING();
919}
920
/*
 * Handle an R300_CMD_SCRATCH command: register a set of userspace
 * ref/pending slots to be stamped with a new scratch age.
 *
 * The command payload starts with a user-space pointer (read unaligned,
 * as 64-bit) to an array of {age, pending} u32 pairs, followed by
 * header.scratch.n_bufs u32 buffer indices.  For each index the new age
 * is written to the user slot and its pending count is decremented.
 * Finally the new age is written to the hardware scratch register.
 * Returns 0 or -EINVAL on short buffers, bad register index, zero
 * pending count, or faulting user copies.
 */
static int r300_scratch(drm_radeon_private_t *dev_priv,
			drm_radeon_kcmd_buffer_t *cmdbuf,
			drm_r300_cmd_header_t header)
{
	u32 *ref_age_base;
	u32 i, buf_idx, h_pending;
	u64 ptr_addr;
	RING_LOCALS;

	if (cmdbuf->bufsz <
	    (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
		return -EINVAL;
	}

	/* only scratch registers 0..4 are available */
	if (header.scratch.reg >= 5) {
		return -EINVAL;
	}

	dev_priv->scratch_ages[header.scratch.reg]++;

	/* user pointer may not be 8-byte aligned in the stream */
	ptr_addr = get_unaligned((u64 *)cmdbuf->buf);
	ref_age_base = (u32 *)(unsigned long)ptr_addr;

	cmdbuf->buf += sizeof(u64);
	cmdbuf->bufsz -= sizeof(u64);

	for (i=0; i < header.scratch.n_bufs; i++) {
		buf_idx = *(u32 *)cmdbuf->buf;
		buf_idx *= 2; /* 8 bytes per buf */

		if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
			return -EINVAL;
		}

		if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
			return -EINVAL;
		}

		if (h_pending == 0) {
			return -EINVAL;
		}

		h_pending--;

		if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
			return -EINVAL;
		}

		cmdbuf->buf += sizeof(buf_idx);
		cmdbuf->bufsz -= sizeof(buf_idx);
	}

	/* stamp the new age into the hardware scratch register */
	BEGIN_RING(2);
	OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
	OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
	ADVANCE_RING();

	return 0;
}
980
c0beb2a7
DA
981/**
982 * Uploads user-supplied vertex program instructions or parameters onto
983 * the graphics card.
984 * Called by r300_do_cp_cmdbuf.
985 */
986static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
987 drm_radeon_kcmd_buffer_t *cmdbuf,
988 drm_r300_cmd_header_t header)
989{
990 int sz;
991 int addr;
992 int type;
993 int clamp;
994 int stride;
995 RING_LOCALS;
996
997 sz = header.r500fp.count;
998 /* address is 9 bits 0 - 8, bit 1 of flags is part of address */
999 addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
1000
1001 type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
1002 clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
1003
1004 addr |= (type << 16);
1005 addr |= (clamp << 17);
1006
1007 stride = type ? 4 : 6;
1008
1009 DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
1010 if (!sz)
1011 return 0;
1012 if (sz * stride * 4 > cmdbuf->bufsz)
1013 return -EINVAL;
1014
1015 BEGIN_RING(3 + sz * stride);
1016 OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
1017 OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
1018 OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride);
1019
1020 ADVANCE_RING();
1021
1022 cmdbuf->buf += sz * stride * 4;
1023 cmdbuf->bufsz -= sz * stride * 4;
1024
1025 return 0;
1026}
1027
1028
414ed537
DA
/**
 * Parses and validates a user-supplied command buffer and emits appropriate
 * commands on the DMA ring buffer.
 * Called by the ioctl handler function radeon_cp_cmdbuf.
 *
 * Consumes cmdbuf one drm_r300_cmd_header_t at a time, dispatching each
 * command type to its emit helper.  Every exit path (success or error)
 * goes through the cleanup label so the engine is re-pacified and any
 * discarded vertex buffers get their dispatch age emitted.
 *
 * Returns 0 on success or a negative errno from the first failing command.
 */
int r300_do_cp_cmdbuf(struct drm_device *dev,
		      struct drm_file *file_priv,
		      drm_radeon_kcmd_buffer_t *cmdbuf)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf = NULL;
	int emit_dispatch_age = 0;	/* set once any DMA buffer is discarded */
	int ret = 0;

	DRM_DEBUG("\n");

	/* pacify */
	r300_pacify(dev_priv);

	/* With few cliprects we can emit them once up front; otherwise the
	 * per-packet emission paths handle them. */
	if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
		ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
		if (ret)
			goto cleanup;
	}

	/* Main dispatch loop: each iteration consumes one command header
	 * plus whatever payload its handler takes off cmdbuf. */
	while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
		int idx;
		drm_r300_cmd_header_t header;

		header.u = *(unsigned int *)cmdbuf->buf;

		cmdbuf->buf += sizeof(header);
		cmdbuf->bufsz -= sizeof(header);

		switch (header.header.cmd_type) {
		case R300_CMD_PACKET0:
			DRM_DEBUG("R300_CMD_PACKET0\n");
			ret = r300_emit_packet0(dev_priv, cmdbuf, header);
			if (ret) {
				DRM_ERROR("r300_emit_packet0 failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_VPU:
			DRM_DEBUG("R300_CMD_VPU\n");
			ret = r300_emit_vpu(dev_priv, cmdbuf, header);
			if (ret) {
				DRM_ERROR("r300_emit_vpu failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_PACKET3:
			DRM_DEBUG("R300_CMD_PACKET3\n");
			ret = r300_emit_packet3(dev_priv, cmdbuf, header);
			if (ret) {
				DRM_ERROR("r300_emit_packet3 failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_END3D:
			DRM_DEBUG("R300_CMD_END3D\n");
			/* TODO:
			   Ideally userspace driver should not need to issue this call,
			   i.e. the drm driver should issue it automatically and prevent
			   lockups.

			   In practice, we do not understand why this call is needed and what
			   it does (except for some vague guesses that it has to do with cache
			   coherence) and so the user space driver does it.

			   Once we are sure which uses prevent lockups the code could be moved
			   into the kernel and the userspace driver will not
			   need to use this command.

			   Note that issuing this command does not hurt anything
			   except, possibly, performance */
			r300_pacify(dev_priv);
			break;

		case R300_CMD_CP_DELAY:
			/* simple enough, we can do it here */
			DRM_DEBUG("R300_CMD_CP_DELAY\n");
			{
				int i;
				RING_LOCALS;

				/* Emit 'count' no-op packets as a CP delay. */
				BEGIN_RING(header.delay.count);
				for (i = 0; i < header.delay.count; i++)
					OUT_RING(RADEON_CP_PACKET2);
				ADVANCE_RING();
			}
			break;

		case R300_CMD_DMA_DISCARD:
			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
			idx = header.dma.buf_idx;
			if (idx < 0 || idx >= dma->buf_count) {
				DRM_ERROR("buffer index %d (of %d max)\n",
					  idx, dma->buf_count - 1);
				ret = -EINVAL;
				goto cleanup;
			}

			/* Only the owning client may discard a non-pending buffer. */
			buf = dma->buflist[idx];
			if (buf->file_priv != file_priv || buf->pending) {
				DRM_ERROR("bad buffer %p %p %d\n",
					  buf->file_priv, file_priv,
					  buf->pending);
				ret = -EINVAL;
				goto cleanup;
			}

			emit_dispatch_age = 1;
			r300_discard_buffer(dev, file_priv->master, buf);
			break;

		case R300_CMD_WAIT:
			DRM_DEBUG("R300_CMD_WAIT\n");
			r300_cmd_wait(dev_priv, header);
			break;

		case R300_CMD_SCRATCH:
			DRM_DEBUG("R300_CMD_SCRATCH\n");
			ret = r300_scratch(dev_priv, cmdbuf, header);
			if (ret) {
				DRM_ERROR("r300_scratch failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_R500FP:
			/* R500FP uploads only exist on RV515 and newer chips. */
			if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
				DRM_ERROR("Calling r500 command on r300 card\n");
				ret = -EINVAL;
				goto cleanup;
			}
			DRM_DEBUG("R300_CMD_R500FP\n");
			ret = r300_emit_r500fp(dev_priv, cmdbuf, header);
			if (ret) {
				DRM_ERROR("r300_emit_r500fp failed\n");
				goto cleanup;
			}
			break;
		default:
			DRM_ERROR("bad cmd_type %i at %p\n",
				  header.header.cmd_type,
				  cmdbuf->buf - sizeof(header));
			ret = -EINVAL;
			goto cleanup;
		}
	}

	DRM_DEBUG("END\n");

      cleanup:
	r300_pacify(dev_priv);

	/* We emit the vertex buffer age here, outside the pacifier "brackets"
	 * for two reasons:
	 * (1) This may coalesce multiple age emissions into a single one and
	 * (2) more importantly, some chips lock up hard when scratch registers
	 * are written inside the pacifier bracket.
	 */
	if (emit_dispatch_age) {
		RING_LOCALS;

		/* Emit the vertex buffer age */
		BEGIN_RING(2);
		RADEON_DISPATCH_AGE(master_priv->sarea_priv->last_dispatch);
		ADVANCE_RING();
	}

	COMMIT_RING();

	return ret;
}