mailbox: mtk-cmdq: add MT8186 support
drivers/mailbox/mtk-cmdq-mailbox.c
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/of_device.h>

#define CMDQ_OP_CODE_MASK               (0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t)                 (t->cmd_buf_size / CMDQ_INST_SIZE)
#define CMDQ_GCE_NUM_MAX                (2)

#define CMDQ_CURR_IRQ_STATUS            0x10
#define CMDQ_SYNC_TOKEN_UPDATE          0x68
#define CMDQ_THR_SLOT_CYCLES            0x30
#define CMDQ_THR_BASE                   0x100
#define CMDQ_THR_SIZE                   0x80
#define CMDQ_THR_WARM_RESET             0x00
#define CMDQ_THR_ENABLE_TASK            0x04
#define CMDQ_THR_SUSPEND_TASK           0x08
#define CMDQ_THR_CURR_STATUS            0x0c
#define CMDQ_THR_IRQ_STATUS             0x10
#define CMDQ_THR_IRQ_ENABLE             0x14
#define CMDQ_THR_CURR_ADDR              0x20
#define CMDQ_THR_END_ADDR               0x24
#define CMDQ_THR_WAIT_TOKEN             0x30
#define CMDQ_THR_PRIORITY               0x40

#define GCE_GCTL_VALUE                  0x48
#define GCE_CTRL_BY_SW                  GENMASK(2, 0)
#define GCE_DDR_EN                      GENMASK(18, 16)

#define CMDQ_THR_ACTIVE_SLOT_CYCLES     0x3200
#define CMDQ_THR_ENABLED                0x1
#define CMDQ_THR_DISABLED               0x0
#define CMDQ_THR_SUSPEND                0x1
#define CMDQ_THR_RESUME                 0x0
#define CMDQ_THR_STATUS_SUSPENDED       BIT(1)
#define CMDQ_THR_DO_WARM_RESET          BIT(0)
#define CMDQ_THR_IRQ_DONE               0x1
#define CMDQ_THR_IRQ_ERROR              0x12
#define CMDQ_THR_IRQ_EN                 (CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
#define CMDQ_THR_IS_WAITING             BIT(31)

#define CMDQ_JUMP_BY_OFFSET             0x10000000
#define CMDQ_JUMP_BY_PA                 0x10000001

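/*
 * One cmdq instance drives one GCE block. Each hardware GCE thread is
 * exposed as a mailbox channel (cmdq_thread), and every packet flushed on
 * a channel becomes a cmdq_task queued on that thread's task_busy_list.
 */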
struct cmdq_thread {
        struct mbox_chan        *chan;
        void __iomem            *base;
        struct list_head        task_busy_list;
        u32                     priority;
};

struct cmdq_task {
        struct cmdq             *cmdq;
        struct list_head        list_entry;
        dma_addr_t              pa_base;
        struct cmdq_thread      *thread;
        struct cmdq_pkt         *pkt; /* the packet sent from mailbox client */
};

struct cmdq {
        struct mbox_controller  mbox;
        void __iomem            *base;
        int                     irq;
        u32                     thread_nr;
        u32                     irq_mask;
        struct cmdq_thread      *thread;
        struct clk_bulk_data    clocks[CMDQ_GCE_NUM_MAX];
        bool                    suspended;
        u8                      shift_pa;
        bool                    control_by_sw;
        bool                    sw_ddr_en;
        u32                     gce_num;
};

struct gce_plat {
        u32 thread_nr;
        u8 shift;
        bool control_by_sw;
        bool sw_ddr_en;
        u32 gce_num;
};

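/*
 * On platforms with sw_ddr_en set, software must vote for GCE access to
 * DDR: with the GCE clocks briefly enabled, set or clear the GCE_DDR_EN
 * bits in GCE_GCTL_VALUE (keeping GCE_CTRL_BY_SW asserted either way).
 * The "DDR vote" description is inferred from the register names; the
 * exact hardware behaviour is not documented here.
 */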
static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
{
        WARN_ON(clk_bulk_enable(cmdq->gce_num, cmdq->clocks));

        if (enable)
                writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
        else
                writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);

        clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
}

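/*
 * Expose the per-SoC address shift to mailbox clients: physical buffer
 * addresses are right-shifted by shift_pa before being written to the
 * thread PC/END registers (non-zero on SoCs whose DMA addresses can
 * exceed 32 bits).
 */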
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
        struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);

        return cmdq->shift_pa;
}
EXPORT_SYMBOL(cmdq_get_shift_pa);

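/*
 * Ask the hardware to suspend a GCE thread and poll until it confirms.
 * A thread that is no longer enabled counts as already suspended. All
 * callers hold the channel spinlock, hence the atomic poll.
 */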
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        u32 status;

        writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);

        /* If the thread is already disabled, treat the suspend as successful. */
        if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
                return 0;

        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
                        status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
                dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
                        (u32)(thread->base - cmdq->base));
                return -EFAULT;
        }

        return 0;
}

static void cmdq_thread_resume(struct cmdq_thread *thread)
{
        writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
}

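/*
 * One-time controller setup: program the software-control bits the
 * platform requires, set the active slot cycles, and write every event
 * ID to CMDQ_SYNC_TOKEN_UPDATE to put all sync tokens into a known
 * (cleared) state.
 */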
static void cmdq_init(struct cmdq *cmdq)
{
        int i;
        u32 gctl_regval = 0;

        WARN_ON(clk_bulk_enable(cmdq->gce_num, cmdq->clocks));
        if (cmdq->control_by_sw)
                gctl_regval = GCE_CTRL_BY_SW;
        if (cmdq->sw_ddr_en)
                gctl_regval |= GCE_DDR_EN;

        if (gctl_regval)
                writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE);

        writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
        for (i = 0; i <= CMDQ_MAX_EVENT; i++)
                writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
        clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
}

static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        u32 warm_reset;

        writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
                        warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
                        0, 10)) {
                dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
                        (u32)(thread->base - cmdq->base));
                return -EFAULT;
        }

        return 0;
}

static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        cmdq_thread_reset(cmdq, thread);
        writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
}

/* notify GCE to re-fetch commands by setting GCE thread PC */
static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
{
        writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
               thread->base + CMDQ_THR_CURR_ADDR);
}

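/*
 * Append a task to a busy thread: rewrite the previous task's final
 * instruction (its end-of-buffer jump) into a CMDQ_JUMP_BY_PA targeting
 * the new task's buffer, with DMA syncs around the CPU write, then poke
 * the thread PC so the GCE re-fetches the patched instruction.
 */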
static void cmdq_task_insert_into_thread(struct cmdq_task *task)
{
        struct device *dev = task->cmdq->mbox.dev;
        struct cmdq_thread *thread = task->thread;
        struct cmdq_task *prev_task = list_last_entry(
                        &thread->task_busy_list, typeof(*task), list_entry);
        u64 *prev_task_base = prev_task->pkt->va_base;

        /* let previous task jump to this task */
        dma_sync_single_for_cpu(dev, prev_task->pa_base,
                                prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
        prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
                (u64)CMDQ_JUMP_BY_PA << 32 |
                (task->pa_base >> task->cmdq->shift_pa);
        dma_sync_single_for_device(dev, prev_task->pa_base,
                                   prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);

        cmdq_thread_invalidate_fetched_data(thread);
}

static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
{
        return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}

static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
{
        struct cmdq_cb_data data;

        data.sta = sta;
        data.pkt = task->pkt;
        mbox_chan_received_data(task->thread->chan, &data);

        list_del(&task->list_entry);
}

static void cmdq_task_handle_error(struct cmdq_task *task)
{
        struct cmdq_thread *thread = task->thread;
        struct cmdq_task *next_task;
        struct cmdq *cmdq = task->cmdq;

        dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
        next_task = list_first_entry_or_null(&thread->task_busy_list,
                        struct cmdq_task, list_entry);
        if (next_task)
                writel(next_task->pa_base >> cmdq->shift_pa,
                       thread->base + CMDQ_THR_CURR_ADDR);
        cmdq_thread_resume(thread);
}

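/*
 * Per-thread IRQ work, called with the channel lock held: ack the thread's
 * IRQ flags, locate the task containing the current PC, and complete every
 * task queued before it. On an error IRQ the current task is reported with
 * -ENOEXEC and execution restarts at the next task. Once the busy list
 * drains, the thread is disabled and its clock reference dropped.
 */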
static void cmdq_thread_irq_handler(struct cmdq *cmdq,
                                    struct cmdq_thread *thread)
{
        struct cmdq_task *task, *tmp, *curr_task = NULL;
        u32 curr_pa, irq_flag, task_end_pa;
        bool err;

        irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
        writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);

        /*
         * When the ISR calls this function, another CPU core could run
         * "release task" right before we acquire the spin lock, and thus
         * reset / disable this GCE thread, so we need to check the enable
         * bit of this GCE thread.
         */
        if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
                return;

        if (irq_flag & CMDQ_THR_IRQ_ERROR)
                err = true;
        else if (irq_flag & CMDQ_THR_IRQ_DONE)
                err = false;
        else
                return;

        curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->shift_pa;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
                if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
                        curr_task = task;

                if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
                        cmdq_task_exec_done(task, 0);
                        kfree(task);
                } else if (err) {
                        cmdq_task_exec_done(task, -ENOEXEC);
                        cmdq_task_handle_error(curr_task);
                        kfree(task);
                }

                if (curr_task)
                        break;
        }

        if (list_empty(&thread->task_busy_list)) {
                cmdq_thread_disable(cmdq, thread);
                clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
        }
}

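/*
 * Top-level ISR. CMDQ_CURR_IRQ_STATUS reports threads active-low: a set
 * bit means the thread is idle, so a status equal to the full irq_mask
 * means none of our threads fired and this (shared) interrupt belongs to
 * someone else. Service each thread whose bit reads back clear.
 */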
static irqreturn_t cmdq_irq_handler(int irq, void *dev)
{
        struct cmdq *cmdq = dev;
        unsigned long irq_status, flags = 0L;
        int bit;

        irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
        if (!(irq_status ^ cmdq->irq_mask))
                return IRQ_NONE;

        for_each_clear_bit(bit, &irq_status, cmdq->thread_nr) {
                struct cmdq_thread *thread = &cmdq->thread[bit];

                spin_lock_irqsave(&thread->chan->lock, flags);
                cmdq_thread_irq_handler(cmdq, thread);
                spin_unlock_irqrestore(&thread->chan->lock, flags);
        }

        return IRQ_HANDLED;
}

static int cmdq_suspend(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);
        struct cmdq_thread *thread;
        int i;
        bool task_running = false;

        cmdq->suspended = true;

        for (i = 0; i < cmdq->thread_nr; i++) {
                thread = &cmdq->thread[i];
                if (!list_empty(&thread->task_busy_list)) {
                        task_running = true;
                        break;
                }
        }

        if (task_running)
                dev_warn(dev, "task(s) still running on suspend\n");

        if (cmdq->sw_ddr_en)
                cmdq_sw_ddr_enable(cmdq, false);

        clk_bulk_unprepare(cmdq->gce_num, cmdq->clocks);

        return 0;
}

static int cmdq_resume(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);

        WARN_ON(clk_bulk_prepare(cmdq->gce_num, cmdq->clocks));
        cmdq->suspended = false;

        if (cmdq->sw_ddr_en)
                cmdq_sw_ddr_enable(cmdq, true);

        return 0;
}

static int cmdq_remove(struct platform_device *pdev)
{
        struct cmdq *cmdq = platform_get_drvdata(pdev);

        if (cmdq->sw_ddr_en)
                cmdq_sw_ddr_enable(cmdq, false);

        clk_bulk_unprepare(cmdq->gce_num, cmdq->clocks);
        return 0;
}

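/*
 * Mailbox .send_data hook, invoked by the mailbox core with the channel
 * lock held (hence GFP_ATOMIC). An idle thread is reset, programmed with
 * the packet's PC/END bounds and enabled; a busy thread is suspended,
 * the packet is either chained behind the current tail or, when the PC
 * already sits at the end boundary, installed as the new PC directly,
 * END is extended, and the thread resumed.
 */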
static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
{
        struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task;
        unsigned long curr_pa, end_pa;

        /* Client should not flush new tasks if suspended. */
        WARN_ON(cmdq->suspended);

        task = kzalloc(sizeof(*task), GFP_ATOMIC);
        if (!task)
                return -ENOMEM;

        task->cmdq = cmdq;
        INIT_LIST_HEAD(&task->list_entry);
        task->pa_base = pkt->pa_base;
        task->thread = thread;
        task->pkt = pkt;

        if (list_empty(&thread->task_busy_list)) {
                WARN_ON(clk_bulk_enable(cmdq->gce_num, cmdq->clocks));

                /*
                 * The thread reset will clear the thread-related registers to
                 * 0, including pc, end, priority, irq, suspend and enable.
                 * Thus writing CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK
                 * enables the thread and starts it running.
                 */
                WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);

                writel(task->pa_base >> cmdq->shift_pa,
                       thread->base + CMDQ_THR_CURR_ADDR);
                writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
                       thread->base + CMDQ_THR_END_ADDR);

                writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
                writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
                writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
        } else {
                WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
                curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
                        cmdq->shift_pa;
                end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
                        cmdq->shift_pa;
                /* check boundary */
                if (curr_pa == end_pa - CMDQ_INST_SIZE ||
                    curr_pa == end_pa) {
                        /* set to this task directly */
                        writel(task->pa_base >> cmdq->shift_pa,
                               thread->base + CMDQ_THR_CURR_ADDR);
                } else {
                        cmdq_task_insert_into_thread(task);
                        smp_mb(); /* modify jump before enable thread */
                }
                writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
                       thread->base + CMDQ_THR_END_ADDR);
                cmdq_thread_resume(thread);
        }
        list_move_tail(&task->list_entry, &thread->task_busy_list);

        return 0;
}

static int cmdq_mbox_startup(struct mbox_chan *chan)
{
        return 0;
}

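/*
 * Mailbox .shutdown hook: run the IRQ work once so tasks that already
 * finished get their success callback, abort whatever is still queued
 * with -ECONNABORTED, then disable the thread and drop its clock
 * reference.
 */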
static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&thread->chan->lock, flags);
        if (list_empty(&thread->task_busy_list))
                goto done;

        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);

        /* make sure executed tasks have success callback */
        cmdq_thread_irq_handler(cmdq, thread);
        if (list_empty(&thread->task_busy_list))
                goto done;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                cmdq_task_exec_done(task, -ECONNABORTED);
                kfree(task);
        }

        cmdq_thread_disable(cmdq, thread);
        clk_bulk_disable(cmdq->gce_num, cmdq->clocks);

done:
        /*
         * An empty thread->task_busy_list means the thread is already
         * disabled. cmdq_mbox_send_data() always resets the thread, which
         * clears the disable and suspend status, when the first packet is
         * sent to a channel, so there is no need to do anything here except
         * unlock and leave.
         */
        spin_unlock_irqrestore(&thread->chan->lock, flags);
}

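/*
 * Mailbox .flush hook: if the thread is parked in wait-for-event, its
 * pending tasks cannot make progress, so abort them all synchronously;
 * otherwise let the thread run and poll, up to @timeout, for it to
 * disable itself once the busy list drains.
 */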
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq_cb_data data;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task, *tmp;
        unsigned long flags;
        u32 enable;

        spin_lock_irqsave(&thread->chan->lock, flags);
        if (list_empty(&thread->task_busy_list))
                goto out;

        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
        if (!cmdq_thread_is_in_wfe(thread))
                goto wait;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                data.sta = -ECONNABORTED;
                data.pkt = task->pkt;
                mbox_chan_received_data(task->thread->chan, &data);
                list_del(&task->list_entry);
                kfree(task);
        }

        cmdq_thread_resume(thread);
        cmdq_thread_disable(cmdq, thread);
        clk_bulk_disable(cmdq->gce_num, cmdq->clocks);

out:
        spin_unlock_irqrestore(&thread->chan->lock, flags);
        return 0;

wait:
        cmdq_thread_resume(thread);
        spin_unlock_irqrestore(&thread->chan->lock, flags);
        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
                                      enable, enable == 0, 1, timeout)) {
                dev_err(cmdq->mbox.dev, "failed to wait for GCE thread 0x%x to finish\n",
                        (u32)(thread->base - cmdq->base));

                return -EFAULT;
        }
        return 0;
}

static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
        .send_data = cmdq_mbox_send_data,
        .startup = cmdq_mbox_startup,
        .shutdown = cmdq_mbox_shutdown,
        .flush = cmdq_mbox_flush,
};

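/*
 * Translate a two-cell mailbox specifier from the device tree: args[0]
 * selects the GCE thread (channel index), args[1] is the priority later
 * programmed into CMDQ_THR_PRIORITY.
 */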
static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
                const struct of_phandle_args *sp)
{
        int ind = sp->args[0];
        struct cmdq_thread *thread;

        if (ind >= mbox->num_chans)
                return ERR_PTR(-EINVAL);

        thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
        thread->priority = sp->args[1];
        thread->chan = &mbox->chans[ind];

        return &mbox->chans[ind];
}

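/*
 * Probe: map the GCE, pick up the per-SoC gce_plat data and resolve the
 * clocks. A single-GCE SoC takes one "gce" clock from its own node; on
 * dual-GCE SoCs (gce_num > 1) each GCE node is located through the
 * "gce0"/"gce1" aliases and contributes one clock to the bulk array.
 */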
static int cmdq_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct cmdq *cmdq;
        int err, i;
        struct gce_plat *plat_data;
        struct device_node *phandle = dev->of_node;
        struct device_node *node;
        int alias_id = 0;
        static const char * const clk_name = "gce";
        static const char * const clk_names[] = { "gce0", "gce1" };

        cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
        if (!cmdq)
                return -ENOMEM;

        cmdq->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(cmdq->base))
                return PTR_ERR(cmdq->base);

        cmdq->irq = platform_get_irq(pdev, 0);
        if (cmdq->irq < 0)
                return cmdq->irq;

        plat_data = (struct gce_plat *)of_device_get_match_data(dev);
        if (!plat_data) {
                dev_err(dev, "failed to get match data\n");
                return -EINVAL;
        }

        cmdq->thread_nr = plat_data->thread_nr;
        cmdq->shift_pa = plat_data->shift;
        cmdq->control_by_sw = plat_data->control_by_sw;
        cmdq->sw_ddr_en = plat_data->sw_ddr_en;
        cmdq->gce_num = plat_data->gce_num;
        cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
        err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
                               "mtk_cmdq", cmdq);
        if (err < 0) {
                dev_err(dev, "failed to register ISR (%d)\n", err);
                return err;
        }

        dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
                dev, cmdq->base, cmdq->irq);

        if (cmdq->gce_num > 1) {
                for_each_child_of_node(phandle->parent, node) {
                        alias_id = of_alias_get_id(node, clk_name);
                        if (alias_id >= 0 && alias_id < cmdq->gce_num) {
                                cmdq->clocks[alias_id].id = clk_names[alias_id];
                                cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
                                if (IS_ERR(cmdq->clocks[alias_id].clk)) {
                                        of_node_put(node);
                                        return dev_err_probe(dev,
                                                             PTR_ERR(cmdq->clocks[alias_id].clk),
                                                             "failed to get gce clk: %d\n",
                                                             alias_id);
                                }
                        }
                }
        } else {
                cmdq->clocks[alias_id].id = clk_name;
                cmdq->clocks[alias_id].clk = devm_clk_get(&pdev->dev, clk_name);
                if (IS_ERR(cmdq->clocks[alias_id].clk)) {
                        return dev_err_probe(dev, PTR_ERR(cmdq->clocks[alias_id].clk),
                                             "failed to get gce clk\n");
                }
        }

        cmdq->mbox.dev = dev;
        cmdq->mbox.chans = devm_kcalloc(dev, cmdq->thread_nr,
                                        sizeof(*cmdq->mbox.chans), GFP_KERNEL);
        if (!cmdq->mbox.chans)
                return -ENOMEM;

        cmdq->mbox.num_chans = cmdq->thread_nr;
        cmdq->mbox.ops = &cmdq_mbox_chan_ops;
        cmdq->mbox.of_xlate = cmdq_xlate;

        /* make use of TXDONE_BY_ACK */
        cmdq->mbox.txdone_irq = false;
        cmdq->mbox.txdone_poll = false;

        cmdq->thread = devm_kcalloc(dev, cmdq->thread_nr,
                                        sizeof(*cmdq->thread), GFP_KERNEL);
        if (!cmdq->thread)
                return -ENOMEM;

        for (i = 0; i < cmdq->thread_nr; i++) {
                cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
                                CMDQ_THR_SIZE * i;
                INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
                cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
        }

        err = devm_mbox_controller_register(dev, &cmdq->mbox);
        if (err < 0) {
                dev_err(dev, "failed to register mailbox: %d\n", err);
                return err;
        }

        platform_set_drvdata(pdev, cmdq);

        WARN_ON(clk_bulk_prepare(cmdq->gce_num, cmdq->clocks));

        cmdq_init(cmdq);

        return 0;
}

static const struct dev_pm_ops cmdq_pm_ops = {
        .suspend = cmdq_suspend,
        .resume = cmdq_resume,
};

static const struct gce_plat gce_plat_v2 = {
        .thread_nr = 16,
        .shift = 0,
        .control_by_sw = false,
        .gce_num = 1
};

static const struct gce_plat gce_plat_v3 = {
        .thread_nr = 24,
        .shift = 0,
        .control_by_sw = false,
        .gce_num = 1
};

static const struct gce_plat gce_plat_v4 = {
        .thread_nr = 24,
        .shift = 3,
        .control_by_sw = false,
        .gce_num = 1
};

static const struct gce_plat gce_plat_v5 = {
        .thread_nr = 24,
        .shift = 3,
        .control_by_sw = true,
        .gce_num = 1
};

static const struct gce_plat gce_plat_v6 = {
        .thread_nr = 24,
        .shift = 3,
        .control_by_sw = true,
        .gce_num = 2
};

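/* MT8186 (added by this change): the v5 configuration plus sw_ddr_en. */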
static const struct gce_plat gce_plat_v7 = {
        .thread_nr = 24,
        .shift = 3,
        .control_by_sw = true,
        .sw_ddr_en = true,
        .gce_num = 1
};

static const struct of_device_id cmdq_of_ids[] = {
        {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2},
        {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3},
        {.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_v7},
        {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4},
        {.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_v5},
        {.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_v6},
        {}
};

static struct platform_driver cmdq_drv = {
        .probe = cmdq_probe,
        .remove = cmdq_remove,
        .driver = {
                .name = "mtk_cmdq",
                .pm = &cmdq_pm_ops,
                .of_match_table = cmdq_of_ids,
        }
};

static int __init cmdq_drv_init(void)
{
        return platform_driver_register(&cmdq_drv);
}

static void __exit cmdq_drv_exit(void)
{
        platform_driver_unregister(&cmdq_drv);
}

subsys_initcall(cmdq_drv_init);
module_exit(cmdq_drv_exit);

MODULE_LICENSE("GPL v2");