/* hw_ops.c - query/set operations on active SPU context.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>
#include "spufs.h"

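/*
 * Layout of the mailbox status word as used by the masks below: the low
 * byte (0x0000ff) counts valid entries in the SPU-to-PPE (outbound)
 * mailbox, the second byte (0x00ff00) free space in the PPE-to-SPU (wbox)
 * mailbox, and the third byte (0xff0000) entries in the interrupting
 * (ibox) mailbox.
 */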
static int spu_hw_mbox_read(struct spu_context *ctx, u32 *data)
{
	struct spu *spu = ctx->spu;
	struct spu_problem __iomem *prob = spu->problem;
	u32 mbox_stat;
	int ret = 0;

	spin_lock_irq(&spu->register_lock);
	mbox_stat = in_be32(&prob->mb_stat_R);
	if (mbox_stat & 0x0000ff) {
		*data = in_be32(&prob->pu_mb_R);
		ret = 4;
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

static u32 spu_hw_mbox_stat_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->mb_stat_R);
}

static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
		unsigned int events)
{
	struct spu *spu = ctx->spu;
	int ret = 0;
	u32 stat;

	spin_lock_irq(&spu->register_lock);
	stat = in_be32(&spu->problem->mb_stat_R);

	/* if the requested event is there, return the poll
	   mask, otherwise enable the interrupt to get notified,
	   but first mark any pending interrupts as done so
	   we don't get woken up unnecessarily */

	if (events & (POLLIN | POLLRDNORM)) {
		if (stat & 0xff0000)
			ret |= POLLIN | POLLRDNORM;
		else {
			spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_INTR);
			spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
		}
	}
	if (events & (POLLOUT | POLLWRNORM)) {
		if (stat & 0x00ff00)
			ret |= POLLOUT | POLLWRNORM;
		else {
			spu_int_stat_clear(spu, 2,
					CLASS2_MAILBOX_THRESHOLD_INTR);
			spu_int_mask_or(spu, 2,
					CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
		}
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

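/*
 * The interrupting mailbox is read through the 64-bit priv2 register
 * puint_mb_R; the 32-bit mailbox word is expected in the low half, so the
 * in_be64() result is simply truncated into *data below.
 */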
static int spu_hw_ibox_read(struct spu_context *ctx, u32 *data)
{
	struct spu *spu = ctx->spu;
	struct spu_problem __iomem *prob = spu->problem;
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int ret;

	spin_lock_irq(&spu->register_lock);
	if (in_be32(&prob->mb_stat_R) & 0xff0000) {
		/* read the first available word */
		*data = in_be64(&priv2->puint_mb_R);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt */
		spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
		ret = 0;
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
{
	struct spu *spu = ctx->spu;
	struct spu_problem __iomem *prob = spu->problem;
	int ret;

	spin_lock_irq(&spu->register_lock);
	if (in_be32(&prob->mb_stat_R) & 0x00ff00) {
		/* we have space to write wbox_data to */
		out_be32(&prob->spu_mb_W, data);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt when space
		   becomes available */
		spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
		ret = 0;
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
{
	out_be32(&ctx->spu->problem->signal_notify1, data);
}

static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
{
	out_be32(&ctx->spu->problem->signal_notify2, data);
}

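/*
 * The signal notification "type" lives in the low two bits of spu_cfg_RW:
 * bit 0 configures signal notification 1 and bit 1 signal notification 2
 * (set is taken to mean logical-OR mode, clear overwrite mode).
 */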
static void spu_hw_signal1_type_set(struct spu_context *ctx, u64 val)
{
	struct spu *spu = ctx->spu;
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 tmp;

	spin_lock_irq(&spu->register_lock);
	tmp = in_be64(&priv2->spu_cfg_RW);
	if (val)
		tmp |= 1;
	else
		tmp &= ~1;
	out_be64(&priv2->spu_cfg_RW, tmp);
	spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal1_type_get(struct spu_context *ctx)
{
	return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0);
}

static void spu_hw_signal2_type_set(struct spu_context *ctx, u64 val)
{
	struct spu *spu = ctx->spu;
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 tmp;

	spin_lock_irq(&spu->register_lock);
	tmp = in_be64(&priv2->spu_cfg_RW);
	if (val)
		tmp |= 2;
	else
		tmp &= ~2;
	out_be64(&priv2->spu_cfg_RW, tmp);
	spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal2_type_get(struct spu_context *ctx)
{
	return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0);
}

static u32 spu_hw_npc_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->spu_npc_RW);
}

static void spu_hw_npc_write(struct spu_context *ctx, u32 val)
{
	out_be32(&ctx->spu->problem->spu_npc_RW, val);
}

static u32 spu_hw_status_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->spu_status_R);
}

static char *spu_hw_get_ls(struct spu_context *ctx)
{
	return ctx->spu->local_store;
}

static void spu_hw_privcntl_write(struct spu_context *ctx, u64 val)
{
	out_be64(&ctx->spu->priv2->spu_privcntl_RW, val);
}

static u32 spu_hw_runcntl_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->spu_runcntl_RW);
}

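/*
 * Starting the SPU in isolated mode requires the load-request enable bit
 * in the privileged control register to be set before run control is
 * written, hence the SPU_RUNCNTL_ISOLATE special case below.
 */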
static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
{
	spin_lock_irq(&ctx->spu->register_lock);
	if (val & SPU_RUNCNTL_ISOLATE)
		spu_hw_privcntl_write(ctx,
				SPU_PRIVCNT_LOAD_REQUEST_ENABLE_MASK);
	out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
	spin_unlock_irq(&ctx->spu->register_lock);
}

static void spu_hw_runcntl_stop(struct spu_context *ctx)
{
	spin_lock_irq(&ctx->spu->register_lock);
	out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
	while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
		cpu_relax();
	spin_unlock_irq(&ctx->spu->register_lock);
}

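/*
 * The master run control bit in MFC state register one gates execution of
 * the SPU as a whole: master_stop clears it to halt the SPU regardless of
 * the problem-state run control register, master_start sets it again.
 */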
static void spu_hw_master_start(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;
	u64 sr1;

	spin_lock_irq(&spu->register_lock);
	sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	spu_mfc_sr1_set(spu, sr1);
	spin_unlock_irq(&spu->register_lock);
}

static void spu_hw_master_stop(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;
	u64 sr1;

	spin_lock_irq(&spu->register_lock);
	sr1 = spu_mfc_sr1_get(spu) & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	spu_mfc_sr1_set(spu, sr1);
	spin_unlock_irq(&spu->register_lock);
}

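/*
 * Arm a proxy DMA tag-group query: mask selects the tag groups to watch
 * and mode the completion condition.  If a previous query is still
 * pending (dma_querytype_RW reads back non-zero), fail with -EAGAIN
 * instead of overwriting it.
 */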
static int spu_hw_set_mfc_query(struct spu_context *ctx, u32 mask, u32 mode)
{
	struct spu_problem __iomem *prob = ctx->spu->problem;
	int ret;

	spin_lock_irq(&ctx->spu->register_lock);
	ret = -EAGAIN;
	if (in_be32(&prob->dma_querytype_RW))
		goto out;
	ret = 0;
	out_be32(&prob->dma_querymask_RW, mask);
	out_be32(&prob->dma_querytype_RW, mode);
out:
	spin_unlock_irq(&ctx->spu->register_lock);
	return ret;
}

static u32 spu_hw_read_mfc_tagstatus(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->dma_tagstatus_R);
}

static u32 spu_hw_get_mfc_free_elements(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->dma_qstatus_R);
}

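/*
 * The low 16 bits of the command status read back after writing the
 * class/opcode word report the enqueue result: 0 means the command was
 * accepted, 2 that it could not be queued (queue full, caller should
 * retry), anything else is treated as an invalid command.
 */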
static int spu_hw_send_mfc_command(struct spu_context *ctx,
				   struct mfc_dma_command *cmd)
{
	u32 status;
	struct spu_problem __iomem *prob = ctx->spu->problem;

	spin_lock_irq(&ctx->spu->register_lock);
	out_be32(&prob->mfc_lsa_W, cmd->lsa);
	out_be64(&prob->mfc_ea_W, cmd->ea);
	out_be32(&prob->mfc_union_W.by32.mfc_size_tag32,
		 cmd->size << 16 | cmd->tag);
	out_be32(&prob->mfc_union_W.by32.mfc_class_cmd32,
		 cmd->class << 16 | cmd->cmd);
	status = in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
	spin_unlock_irq(&ctx->spu->register_lock);

	switch (status & 0xffff) {
	case 0:
		return 0;
	case 2:
		return -EAGAIN;
	default:
		return -EINVAL;
	}
}

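/*
 * Kick a stalled MFC DMA queue, but only when no context switch is in
 * flight; a pending switch is left to handle the MFC state on its own.
 */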
static void spu_hw_restart_dma(struct spu_context *ctx)
{
	struct spu_priv2 __iomem *priv2 = ctx->spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &ctx->spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

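/*
 * Method table used while the context is loaded on a physical SPU; the
 * spufs scheduler switches ctx->ops between this and spu_backing_ops
 * (which operates on the saved context image) when binding or unbinding.
 */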
struct spu_context_ops spu_hw_ops = {
	.mbox_read = spu_hw_mbox_read,
	.mbox_stat_read = spu_hw_mbox_stat_read,
	.mbox_stat_poll = spu_hw_mbox_stat_poll,
	.ibox_read = spu_hw_ibox_read,
	.wbox_write = spu_hw_wbox_write,
	.signal1_write = spu_hw_signal1_write,
	.signal2_write = spu_hw_signal2_write,
	.signal1_type_set = spu_hw_signal1_type_set,
	.signal1_type_get = spu_hw_signal1_type_get,
	.signal2_type_set = spu_hw_signal2_type_set,
	.signal2_type_get = spu_hw_signal2_type_get,
	.npc_read = spu_hw_npc_read,
	.npc_write = spu_hw_npc_write,
	.status_read = spu_hw_status_read,
	.get_ls = spu_hw_get_ls,
	.privcntl_write = spu_hw_privcntl_write,
	.runcntl_read = spu_hw_runcntl_read,
	.runcntl_write = spu_hw_runcntl_write,
	.runcntl_stop = spu_hw_runcntl_stop,
	.master_start = spu_hw_master_start,
	.master_stop = spu_hw_master_stop,
	.set_mfc_query = spu_hw_set_mfc_query,
	.read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
	.get_mfc_free_elements = spu_hw_get_mfc_free_elements,
	.send_mfc_command = spu_hw_send_mfc_command,
	.restart_dma = spu_hw_restart_dma,
};