media: venus: hfi_venus: Sanitize venus_halt_axi() per-VPU-version
drivers/media/platform/qcom/venus/hfi_venus.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2017 Linaro Ltd.
5  */
6
7 #include <linux/delay.h>
8 #include <linux/device.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/interrupt.h>
11 #include <linux/iopoll.h>
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14
15 #include "core.h"
16 #include "hfi_cmds.h"
17 #include "hfi_msgs.h"
18 #include "hfi_venus.h"
19 #include "hfi_venus_io.h"
20 #include "firmware.h"
21
22 #define HFI_MASK_QHDR_TX_TYPE           0xff000000
23 #define HFI_MASK_QHDR_RX_TYPE           0x00ff0000
24 #define HFI_MASK_QHDR_PRI_TYPE          0x0000ff00
25 #define HFI_MASK_QHDR_ID_TYPE           0x000000ff
26
27 #define HFI_HOST_TO_CTRL_CMD_Q          0
28 #define HFI_CTRL_TO_HOST_MSG_Q          1
29 #define HFI_CTRL_TO_HOST_DBG_Q          2
30 #define HFI_MASK_QHDR_STATUS            0x000000ff
31
32 #define IFACEQ_NUM                      3
33 #define IFACEQ_CMD_IDX                  0
34 #define IFACEQ_MSG_IDX                  1
35 #define IFACEQ_DBG_IDX                  2
36 #define IFACEQ_MAX_BUF_COUNT            50
37 #define IFACEQ_MAX_PARALLEL_CLNTS       16
38 #define IFACEQ_DFLT_QHDR                0x01010000
39
40 #define POLL_INTERVAL_US                50
41
42 #define IFACEQ_MAX_PKT_SIZE             1024
43 #define IFACEQ_MED_PKT_SIZE             768
44 #define IFACEQ_MIN_PKT_SIZE             8
45 #define IFACEQ_VAR_SMALL_PKT_SIZE       100
46 #define IFACEQ_VAR_LARGE_PKT_SIZE       512
47 #define IFACEQ_VAR_HUGE_PKT_SIZE        (1024 * 12)
48
49 struct hfi_queue_table_header {
50         u32 version;
51         u32 size;
52         u32 qhdr0_offset;
53         u32 qhdr_size;
54         u32 num_q;
55         u32 num_active_q;
56 };
57
58 struct hfi_queue_header {
59         u32 status;
60         u32 start_addr;
61         u32 type;
62         u32 q_size;
63         u32 pkt_size;
64         u32 pkt_drop_cnt;
65         u32 rx_wm;
66         u32 tx_wm;
67         u32 rx_req;
68         u32 tx_req;
69         u32 rx_irq_status;
70         u32 tx_irq_status;
71         u32 read_idx;
72         u32 write_idx;
73 };
74
75 #define IFACEQ_TABLE_SIZE       \
76         (sizeof(struct hfi_queue_table_header) +        \
77          sizeof(struct hfi_queue_header) * IFACEQ_NUM)
78
79 #define IFACEQ_QUEUE_SIZE       (IFACEQ_MAX_PKT_SIZE *  \
80         IFACEQ_MAX_BUF_COUNT * IFACEQ_MAX_PARALLEL_CLNTS)
81
82 #define IFACEQ_GET_QHDR_START_ADDR(ptr, i)      \
83         (void *)(((ptr) + sizeof(struct hfi_queue_table_header)) +      \
84                 ((i) * sizeof(struct hfi_queue_header)))
85
86 #define QDSS_SIZE               SZ_4K
87 #define SFR_SIZE                SZ_4K
88 #define QUEUE_SIZE              \
89         (IFACEQ_TABLE_SIZE + (IFACEQ_QUEUE_SIZE * IFACEQ_NUM))
90
91 #define ALIGNED_QDSS_SIZE       ALIGN(QDSS_SIZE, SZ_4K)
92 #define ALIGNED_SFR_SIZE        ALIGN(SFR_SIZE, SZ_4K)
93 #define ALIGNED_QUEUE_SIZE      ALIGN(QUEUE_SIZE, SZ_4K)
94 #define SHARED_QSIZE            ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
95                                       ALIGNED_QDSS_SIZE, SZ_1M)
96
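/*
 * Shared memory layout (programmed in venus_run() below): one contiguous
 * allocation of ALIGNED_QUEUE_SIZE holds the queue table header followed by
 * the three interface queues; its device address goes into UC_REGION_ADDR
 * and SHARED_QSIZE into UC_REGION_SIZE. The SFR buffer, used by
 * venus_sfr_print() to report firmware error strings, is allocated
 * separately and programmed into SFR_ADDR when present.
 */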
97 struct mem_desc {
98         dma_addr_t da;  /* device address */
99         void *kva;      /* kernel virtual address */
100         u32 size;
101         unsigned long attrs;
102 };
103
104 struct iface_queue {
105         struct hfi_queue_header *qhdr;
106         struct mem_desc qmem;
107 };
108
109 enum venus_state {
110         VENUS_STATE_DEINIT = 1,
111         VENUS_STATE_INIT,
112 };
113
114 struct venus_hfi_device {
115         struct venus_core *core;
116         u32 irq_status;
117         u32 last_packet_type;
118         bool power_enabled;
119         bool suspended;
120         enum venus_state state;
121         /* serialize read / write to the shared memory */
122         struct mutex lock;
123         struct completion pwr_collapse_prep;
124         struct completion release_resource;
125         struct mem_desc ifaceq_table;
126         struct mem_desc sfr;
127         struct iface_queue queues[IFACEQ_NUM];
128         u8 pkt_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
129         u8 dbg_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
130 };
131
132 static bool venus_pkt_debug;
133 int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL;
134 static bool venus_fw_low_power_mode = true;
135 static int venus_hw_rsp_timeout = 1000;
136 static bool venus_fw_coverage;
137
138 static void venus_set_state(struct venus_hfi_device *hdev,
139                             enum venus_state state)
140 {
141         mutex_lock(&hdev->lock);
142         hdev->state = state;
143         mutex_unlock(&hdev->lock);
144 }
145
146 static bool venus_is_valid_state(struct venus_hfi_device *hdev)
147 {
148         return hdev->state != VENUS_STATE_DEINIT;
149 }
150
151 static void venus_dump_packet(struct venus_hfi_device *hdev, const void *packet)
152 {
153         size_t pkt_size = *(u32 *)packet;
154
155         if (!venus_pkt_debug)
156                 return;
157
158         print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1, packet,
159                        pkt_size, true);
160 }
161
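/*
 * Write one packet into the host-to-firmware command ring. Queue indices and
 * q_size are in dwords and the first u32 of a packet holds its size in
 * bytes. A write that crosses the end of the ring wraps to the start; with
 * the arithmetic used below, qsize = 64, wr_idx = 60 and an 8-dword packet
 * give new_wr_idx = 68 - 64 = 4, so 4 dwords are copied at offset 60, the
 * remaining 4 at offset 0, and write_idx becomes 4.
 */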
162 static int venus_write_queue(struct venus_hfi_device *hdev,
163                              struct iface_queue *queue,
164                              void *packet, u32 *rx_req)
165 {
166         struct hfi_queue_header *qhdr;
167         u32 dwords, new_wr_idx;
168         u32 empty_space, rd_idx, wr_idx, qsize;
169         u32 *wr_ptr;
170
171         if (!queue->qmem.kva)
172                 return -EINVAL;
173
174         qhdr = queue->qhdr;
175         if (!qhdr)
176                 return -EINVAL;
177
178         venus_dump_packet(hdev, packet);
179
180         dwords = (*(u32 *)packet) >> 2;
181         if (!dwords)
182                 return -EINVAL;
183
184         rd_idx = qhdr->read_idx;
185         wr_idx = qhdr->write_idx;
186         qsize = qhdr->q_size;
187         /* ensure rd/wr indices are read from memory */
188         rmb();
189
190         if (wr_idx >= rd_idx)
191                 empty_space = qsize - (wr_idx - rd_idx);
192         else
193                 empty_space = rd_idx - wr_idx;
194
195         if (empty_space <= dwords) {
196                 qhdr->tx_req = 1;
197                 /* ensure tx_req is updated in memory */
198                 wmb();
199                 return -ENOSPC;
200         }
201
202         qhdr->tx_req = 0;
203         /* ensure tx_req is updated in memory */
204         wmb();
205
206         new_wr_idx = wr_idx + dwords;
207         wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
208         if (new_wr_idx < qsize) {
209                 memcpy(wr_ptr, packet, dwords << 2);
210         } else {
211                 size_t len;
212
213                 new_wr_idx -= qsize;
214                 len = (dwords - new_wr_idx) << 2;
215                 memcpy(wr_ptr, packet, len);
216                 memcpy(queue->qmem.kva, packet + len, new_wr_idx << 2);
217         }
218
219         /* make sure packet is written before updating the write index */
220         wmb();
221
222         qhdr->write_idx = new_wr_idx;
223         *rx_req = qhdr->rx_req ? 1 : 0;
224
225         /* make sure write index is updated before an interrupt is raised */
226         mb();
227
228         return 0;
229 }
230
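/*
 * Read one packet from a firmware-to-host ring (message or debug queue).
 * The receive request is re-armed only for the message queue, so firmware
 * interrupts the host for new responses; packets larger than
 * IFACEQ_VAR_HUGE_PKT_SIZE are dropped and reported as -EBADMSG.
 */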
231 static int venus_read_queue(struct venus_hfi_device *hdev,
232                             struct iface_queue *queue, void *pkt, u32 *tx_req)
233 {
234         struct hfi_queue_header *qhdr;
235         u32 dwords, new_rd_idx;
236         u32 rd_idx, wr_idx, type, qsize;
237         u32 *rd_ptr;
238         u32 recv_request = 0;
239         int ret = 0;
240
241         if (!queue->qmem.kva)
242                 return -EINVAL;
243
244         qhdr = queue->qhdr;
245         if (!qhdr)
246                 return -EINVAL;
247
248         type = qhdr->type;
249         rd_idx = qhdr->read_idx;
250         wr_idx = qhdr->write_idx;
251         qsize = qhdr->q_size;
252
253         /* make sure data is valid before using it */
254         rmb();
255
256         /*
257          * Do not set the receive request for the debug queue: if set, Venus
258          * generates an interrupt for debug messages even when there is no
259          * response message available. In general the debug queue will not
260          * become full, since it is emptied on every interrupt from Venus,
261          * and Venus generates an interrupt anyway if it does fill up.
262          */
263         if (type & HFI_CTRL_TO_HOST_MSG_Q)
264                 recv_request = 1;
265
266         if (rd_idx == wr_idx) {
267                 qhdr->rx_req = recv_request;
268                 *tx_req = 0;
269                 /* update rx_req field in memory */
270                 wmb();
271                 return -ENODATA;
272         }
273
274         rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
275         dwords = *rd_ptr >> 2;
276         if (!dwords)
277                 return -EINVAL;
278
279         new_rd_idx = rd_idx + dwords;
280         if (((dwords << 2) <= IFACEQ_VAR_HUGE_PKT_SIZE) && rd_idx <= qsize) {
281                 if (new_rd_idx < qsize) {
282                         memcpy(pkt, rd_ptr, dwords << 2);
283                 } else {
284                         size_t len;
285
286                         new_rd_idx -= qsize;
287                         len = (dwords - new_rd_idx) << 2;
288                         memcpy(pkt, rd_ptr, len);
289                         memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
290                 }
291         } else {
292                 /* bad packet received, dropping */
293                 new_rd_idx = qhdr->write_idx;
294                 ret = -EBADMSG;
295         }
296
297         /* ensure the packet is read before updating read index */
298         rmb();
299
300         qhdr->read_idx = new_rd_idx;
301         /* ensure the read index is updated in memory */
302         wmb();
303
304         rd_idx = qhdr->read_idx;
305         wr_idx = qhdr->write_idx;
306         /* ensure rd/wr indices are read from memory */
307         rmb();
308
309         if (rd_idx != wr_idx)
310                 qhdr->rx_req = 0;
311         else
312                 qhdr->rx_req = recv_request;
313
314         *tx_req = qhdr->tx_req ? 1 : 0;
315
316         /* ensure rx_req is stored to memory and tx_req is loaded from memory */
317         mb();
318
319         venus_dump_packet(hdev, pkt);
320
321         return ret;
322 }
323
324 static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
325                        u32 size)
326 {
327         struct device *dev = hdev->core->dev;
328
329         desc->attrs = DMA_ATTR_WRITE_COMBINE;
330         desc->size = ALIGN(size, SZ_4K);
331
332         desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
333                                     desc->attrs);
334         if (!desc->kva)
335                 return -ENOMEM;
336
337         return 0;
338 }
339
340 static void venus_free(struct venus_hfi_device *hdev, struct mem_desc *mem)
341 {
342         struct device *dev = hdev->core->dev;
343
344         dma_free_attrs(dev, mem->size, mem->kva, mem->da, mem->attrs);
345 }
346
347 static void venus_set_registers(struct venus_hfi_device *hdev)
348 {
349         const struct venus_resources *res = hdev->core->res;
350         const struct reg_val *tbl = res->reg_tbl;
351         unsigned int count = res->reg_tbl_size;
352         unsigned int i;
353
354         for (i = 0; i < count; i++)
355                 writel(tbl[i].value, hdev->core->base + tbl[i].reg);
356 }
357
358 static void venus_soft_int(struct venus_hfi_device *hdev)
359 {
360         void __iomem *cpu_ic_base = hdev->core->cpu_ic_base;
361         u32 clear_bit;
362
363         if (IS_V6(hdev->core))
364                 clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT_V6);
365         else
366                 clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT);
367
368         writel(clear_bit, cpu_ic_base + CPU_IC_SOFTINT);
369 }
370
371 static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev,
372                                          void *pkt, bool sync)
373 {
374         struct device *dev = hdev->core->dev;
375         struct hfi_pkt_hdr *cmd_packet;
376         struct iface_queue *queue;
377         u32 rx_req;
378         int ret;
379
380         if (!venus_is_valid_state(hdev))
381                 return -EINVAL;
382
383         cmd_packet = (struct hfi_pkt_hdr *)pkt;
384         hdev->last_packet_type = cmd_packet->pkt_type;
385
386         queue = &hdev->queues[IFACEQ_CMD_IDX];
387
388         ret = venus_write_queue(hdev, queue, pkt, &rx_req);
389         if (ret) {
390                 dev_err(dev, "write to iface cmd queue failed (%d)\n", ret);
391                 return ret;
392         }
393
394         if (sync) {
395                 /*
396                  * Ask the video hardware to raise an interrupt for
397                  * synchronous commands
398                  */
399                 queue = &hdev->queues[IFACEQ_MSG_IDX];
400                 queue->qhdr->rx_req = 1;
401                 /* ensure rx_req is updated in memory */
402                 wmb();
403         }
404
405         if (rx_req)
406                 venus_soft_int(hdev);
407
408         return 0;
409 }
410
411 static int venus_iface_cmdq_write(struct venus_hfi_device *hdev, void *pkt, bool sync)
412 {
413         int ret;
414
415         mutex_lock(&hdev->lock);
416         ret = venus_iface_cmdq_write_nolock(hdev, pkt, sync);
417         mutex_unlock(&hdev->lock);
418
419         return ret;
420 }
421
422 static int venus_hfi_core_set_resource(struct venus_core *core, u32 id,
423                                        u32 size, u32 addr, void *cookie)
424 {
425         struct venus_hfi_device *hdev = to_hfi_priv(core);
426         struct hfi_sys_set_resource_pkt *pkt;
427         u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
428         int ret;
429
430         if (id == VIDC_RESOURCE_NONE)
431                 return 0;
432
433         pkt = (struct hfi_sys_set_resource_pkt *)packet;
434
435         ret = pkt_sys_set_resource(pkt, id, size, addr, cookie);
436         if (ret)
437                 return ret;
438
439         ret = venus_iface_cmdq_write(hdev, pkt, false);
440         if (ret)
441                 return ret;
442
443         return 0;
444 }
445
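/*
 * Bring the firmware CPU out of reset: program WRAPPER_INTR_MASK (the value
 * differs on IRIS2/IRIS2_1), kick VIDC_CTRL_INIT and poll CPU_CS_SCIACMDARG0
 * (up to 100 iterations of 500-1000 us) until firmware reports a non-zero
 * control status. An error status of 4 means the UC_REGION programmed by
 * venus_run() is invalid.
 */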
446 static int venus_boot_core(struct venus_hfi_device *hdev)
447 {
448         struct device *dev = hdev->core->dev;
449         static const unsigned int max_tries = 100;
450         u32 ctrl_status = 0, mask_val = 0;
451         unsigned int count = 0;
452         void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
453         void __iomem *wrapper_base = hdev->core->wrapper_base;
454         int ret = 0;
455
456         if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
457                 mask_val = readl(wrapper_base + WRAPPER_INTR_MASK);
458                 mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BASK_V6 |
459                               WRAPPER_INTR_MASK_A2HCPU_MASK);
460         } else {
461                 mask_val = WRAPPER_INTR_MASK_A2HVCODEC_MASK;
462         }
463
464         writel(mask_val, wrapper_base + WRAPPER_INTR_MASK);
465         writel(1, cpu_cs_base + CPU_CS_SCIACMDARG3);
466
467         writel(BIT(VIDC_CTRL_INIT_CTRL_SHIFT), cpu_cs_base + VIDC_CTRL_INIT);
468         while (!ctrl_status && count < max_tries) {
469                 ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
470                 if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) {
471                         dev_err(dev, "invalid setting for UC_REGION\n");
472                         ret = -EINVAL;
473                         break;
474                 }
475
476                 usleep_range(500, 1000);
477                 count++;
478         }
479
480         if (count >= max_tries)
481                 ret = -ETIMEDOUT;
482
483         if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
484                 writel(0x1, cpu_cs_base + CPU_CS_H2XSOFTINTEN_V6);
485                 writel(0x0, cpu_cs_base + CPU_CS_X2RPMH_V6);
486         }
487
488         return ret;
489 }
490
491 static u32 venus_hwversion(struct venus_hfi_device *hdev)
492 {
493         struct device *dev = hdev->core->dev;
494         void __iomem *wrapper_base = hdev->core->wrapper_base;
495         u32 ver;
496         u32 major, minor, step;
497
498         ver = readl(wrapper_base + WRAPPER_HW_VERSION);
499         major = ver & WRAPPER_HW_VERSION_MAJOR_VERSION_MASK;
500         major = major >> WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT;
501         minor = ver & WRAPPER_HW_VERSION_MINOR_VERSION_MASK;
502         minor = minor >> WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT;
503         step = ver & WRAPPER_HW_VERSION_STEP_VERSION_MASK;
504
505         dev_dbg(dev, VDBGL "venus hw version %x.%x.%x\n", major, minor, step);
506
507         return major;
508 }
509
510 static int venus_run(struct venus_hfi_device *hdev)
511 {
512         struct device *dev = hdev->core->dev;
513         void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
514         int ret;
515
516         /*
517          * Re-program all of the registers that get reset as a result of
518          * regulator_disable() and _enable()
519          */
520         venus_set_registers(hdev);
521
522         writel(hdev->ifaceq_table.da, cpu_cs_base + UC_REGION_ADDR);
523         writel(SHARED_QSIZE, cpu_cs_base + UC_REGION_SIZE);
524         writel(hdev->ifaceq_table.da, cpu_cs_base + CPU_CS_SCIACMDARG2);
525         writel(0x01, cpu_cs_base + CPU_CS_SCIACMDARG1);
526         if (hdev->sfr.da)
527                 writel(hdev->sfr.da, cpu_cs_base + SFR_ADDR);
528
529         ret = venus_boot_core(hdev);
530         if (ret) {
531                 dev_err(dev, "failed to reset venus core\n");
532                 return ret;
533         }
534
535         venus_hwversion(hdev);
536
537         return 0;
538 }
539
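/*
 * Halt AXI traffic before powering the core down, using the sequence that
 * matches the VPU version: IRIS2/IRIS2_1 use CPU_CS_X2RPMH and the LPI
 * handshakes (the AON MVP NOC step is skipped on IRIS2_1), V4 uses
 * WRAPPER_CPU_AXI_HALT/STATUS, and older cores use the VBIF halt
 * control/ack registers.
 */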
540 static int venus_halt_axi(struct venus_hfi_device *hdev)
541 {
542         void __iomem *wrapper_base = hdev->core->wrapper_base;
543         void __iomem *vbif_base = hdev->core->vbif_base;
544         void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
545         void __iomem *aon_base = hdev->core->aon_base;
546         struct device *dev = hdev->core->dev;
547         u32 val;
548         u32 mask_val;
549         int ret;
550
551         if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
552                 writel(0x3, cpu_cs_base + CPU_CS_X2RPMH_V6);
553
554                 if (IS_IRIS2_1(hdev->core))
555                         goto skip_aon_mvp_noc;
556
557                 writel(0x1, aon_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
558                 ret = readl_poll_timeout(aon_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
559                                          val,
560                                          val & BIT(0),
561                                          POLL_INTERVAL_US,
562                                          VBIF_AXI_HALT_ACK_TIMEOUT_US);
563                 if (ret)
564                         return -ETIMEDOUT;
565
566 skip_aon_mvp_noc:
567                 mask_val = (BIT(2) | BIT(1) | BIT(0));
568                 writel(mask_val, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6);
569
570                 writel(0x00, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6);
571                 ret = readl_poll_timeout(wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS_V6,
572                                          val,
573                                          val == 0,
574                                          POLL_INTERVAL_US,
575                                          VBIF_AXI_HALT_ACK_TIMEOUT_US);
576
577                 if (ret) {
578                         dev_err(dev, "DBLP Release: lpi_status %x\n", val);
579                         return -ETIMEDOUT;
580                 }
581                 return 0;
582         }
583
584         if (IS_V4(hdev->core)) {
585                 val = readl(wrapper_base + WRAPPER_CPU_AXI_HALT);
586                 val |= WRAPPER_CPU_AXI_HALT_HALT;
587                 writel(val, wrapper_base + WRAPPER_CPU_AXI_HALT);
588
589                 ret = readl_poll_timeout(wrapper_base + WRAPPER_CPU_AXI_HALT_STATUS,
590                                          val,
591                                          val & WRAPPER_CPU_AXI_HALT_STATUS_IDLE,
592                                          POLL_INTERVAL_US,
593                                          VBIF_AXI_HALT_ACK_TIMEOUT_US);
594                 if (ret) {
595                         dev_err(dev, "AXI bus port halt timeout\n");
596                         return ret;
597                 }
598
599                 return 0;
600         }
601
602         /* Halt AXI and AXI IMEM VBIF Access */
603         val = readl(vbif_base + VBIF_AXI_HALT_CTRL0);
604         val |= VBIF_AXI_HALT_CTRL0_HALT_REQ;
605         writel(val, vbif_base + VBIF_AXI_HALT_CTRL0);
606
607         /* Wait for AXI bus port halt ack */
608         ret = readl_poll_timeout(vbif_base + VBIF_AXI_HALT_CTRL1, val,
609                                  val & VBIF_AXI_HALT_CTRL1_HALT_ACK,
610                                  POLL_INTERVAL_US,
611                                  VBIF_AXI_HALT_ACK_TIMEOUT_US);
612         if (ret) {
613                 dev_err(dev, "AXI bus port halt timeout\n");
614                 return ret;
615         }
616
617         return 0;
618 }
619
620 static int venus_power_off(struct venus_hfi_device *hdev)
621 {
622         int ret;
623
624         if (!hdev->power_enabled)
625                 return 0;
626
627         ret = venus_set_hw_state_suspend(hdev->core);
628         if (ret)
629                 return ret;
630
631         ret = venus_halt_axi(hdev);
632         if (ret)
633                 return ret;
634
635         hdev->power_enabled = false;
636
637         return 0;
638 }
639
640 static int venus_power_on(struct venus_hfi_device *hdev)
641 {
642         int ret;
643
644         if (hdev->power_enabled)
645                 return 0;
646
647         ret = venus_set_hw_state_resume(hdev->core);
648         if (ret)
649                 goto err;
650
651         ret = venus_run(hdev);
652         if (ret)
653                 goto err_suspend;
654
655         hdev->power_enabled = true;
656
657         return 0;
658
659 err_suspend:
660         venus_set_hw_state_suspend(hdev->core);
661 err:
662         hdev->power_enabled = false;
663         return ret;
664 }
665
666 static int venus_iface_msgq_read_nolock(struct venus_hfi_device *hdev,
667                                         void *pkt)
668 {
669         struct iface_queue *queue;
670         u32 tx_req;
671         int ret;
672
673         if (!venus_is_valid_state(hdev))
674                 return -EINVAL;
675
676         queue = &hdev->queues[IFACEQ_MSG_IDX];
677
678         ret = venus_read_queue(hdev, queue, pkt, &tx_req);
679         if (ret)
680                 return ret;
681
682         if (tx_req)
683                 venus_soft_int(hdev);
684
685         return 0;
686 }
687
688 static int venus_iface_msgq_read(struct venus_hfi_device *hdev, void *pkt)
689 {
690         int ret;
691
692         mutex_lock(&hdev->lock);
693         ret = venus_iface_msgq_read_nolock(hdev, pkt);
694         mutex_unlock(&hdev->lock);
695
696         return ret;
697 }
698
699 static int venus_iface_dbgq_read_nolock(struct venus_hfi_device *hdev,
700                                         void *pkt)
701 {
702         struct iface_queue *queue;
703         u32 tx_req;
704         int ret;
705
706         ret = venus_is_valid_state(hdev);
707         if (!ret)
708                 return -EINVAL;
709
710         queue = &hdev->queues[IFACEQ_DBG_IDX];
711
712         ret = venus_read_queue(hdev, queue, pkt, &tx_req);
713         if (ret)
714                 return ret;
715
716         if (tx_req)
717                 venus_soft_int(hdev);
718
719         return 0;
720 }
721
722 static int venus_iface_dbgq_read(struct venus_hfi_device *hdev, void *pkt)
723 {
724         int ret;
725
726         if (!pkt)
727                 return -EINVAL;
728
729         mutex_lock(&hdev->lock);
730         ret = venus_iface_dbgq_read_nolock(hdev, pkt);
731         mutex_unlock(&hdev->lock);
732
733         return ret;
734 }
735
736 static void venus_set_qhdr_defaults(struct hfi_queue_header *qhdr)
737 {
738         qhdr->status = 1;
739         qhdr->type = IFACEQ_DFLT_QHDR;
740         qhdr->q_size = IFACEQ_QUEUE_SIZE / 4;
741         qhdr->pkt_size = 0;
742         qhdr->rx_wm = 1;
743         qhdr->tx_wm = 1;
744         qhdr->rx_req = 1;
745         qhdr->tx_req = 0;
746         qhdr->rx_irq_status = 0;
747         qhdr->tx_irq_status = 0;
748         qhdr->read_idx = 0;
749         qhdr->write_idx = 0;
750 }
751
752 static void venus_interface_queues_release(struct venus_hfi_device *hdev)
753 {
754         mutex_lock(&hdev->lock);
755
756         venus_free(hdev, &hdev->ifaceq_table);
757         venus_free(hdev, &hdev->sfr);
758
759         memset(hdev->queues, 0, sizeof(hdev->queues));
760         memset(&hdev->ifaceq_table, 0, sizeof(hdev->ifaceq_table));
761         memset(&hdev->sfr, 0, sizeof(hdev->sfr));
762
763         mutex_unlock(&hdev->lock);
764 }
765
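/*
 * Allocate the shared interface memory: a single write-combined DMA buffer
 * containing the queue table header and the three queues, plus a separate,
 * optional SFR buffer. Each queue header gets default values and is typed
 * as command, message or debug queue.
 */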
766 static int venus_interface_queues_init(struct venus_hfi_device *hdev)
767 {
768         struct hfi_queue_table_header *tbl_hdr;
769         struct iface_queue *queue;
770         struct hfi_sfr *sfr;
771         struct mem_desc desc = {0};
772         unsigned int offset;
773         unsigned int i;
774         int ret;
775
776         ret = venus_alloc(hdev, &desc, ALIGNED_QUEUE_SIZE);
777         if (ret)
778                 return ret;
779
780         hdev->ifaceq_table = desc;
781         offset = IFACEQ_TABLE_SIZE;
782
783         for (i = 0; i < IFACEQ_NUM; i++) {
784                 queue = &hdev->queues[i];
785                 queue->qmem.da = desc.da + offset;
786                 queue->qmem.kva = desc.kva + offset;
787                 queue->qmem.size = IFACEQ_QUEUE_SIZE;
788                 offset += queue->qmem.size;
789                 queue->qhdr =
790                         IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);
791
792                 venus_set_qhdr_defaults(queue->qhdr);
793
794                 queue->qhdr->start_addr = queue->qmem.da;
795
796                 if (i == IFACEQ_CMD_IDX)
797                         queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
798                 else if (i == IFACEQ_MSG_IDX)
799                         queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
800                 else if (i == IFACEQ_DBG_IDX)
801                         queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
802         }
803
804         tbl_hdr = hdev->ifaceq_table.kva;
805         tbl_hdr->version = 0;
806         tbl_hdr->size = IFACEQ_TABLE_SIZE;
807         tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
808         tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
809         tbl_hdr->num_q = IFACEQ_NUM;
810         tbl_hdr->num_active_q = IFACEQ_NUM;
811
812         /*
813          * Set the receive request to zero on the debug queue as there is no
814          * need for an interrupt from the video hardware for debug messages
815          */
816         queue = &hdev->queues[IFACEQ_DBG_IDX];
817         queue->qhdr->rx_req = 0;
818
819         ret = venus_alloc(hdev, &desc, ALIGNED_SFR_SIZE);
820         if (ret) {
821                 hdev->sfr.da = 0;
822         } else {
823                 hdev->sfr = desc;
824                 sfr = hdev->sfr.kva;
825                 sfr->buf_size = ALIGNED_SFR_SIZE;
826         }
827
828         /* ensure table and queue header structs are settled in memory */
829         wmb();
830
831         return 0;
832 }
833
834 static int venus_sys_set_debug(struct venus_hfi_device *hdev, u32 debug)
835 {
836         struct hfi_sys_set_property_pkt *pkt;
837         u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
838
839         pkt = (struct hfi_sys_set_property_pkt *)packet;
840
841         pkt_sys_debug_config(pkt, HFI_DEBUG_MODE_QUEUE, debug);
842
843         return venus_iface_cmdq_write(hdev, pkt, false);
844 }
845
846 static int venus_sys_set_coverage(struct venus_hfi_device *hdev, u32 mode)
847 {
848         struct hfi_sys_set_property_pkt *pkt;
849         u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
850
851         pkt = (struct hfi_sys_set_property_pkt *)packet;
852
853         pkt_sys_coverage_config(pkt, mode);
854
855         return venus_iface_cmdq_write(hdev, pkt, false);
856 }
857
858 static int venus_sys_set_idle_message(struct venus_hfi_device *hdev,
859                                       bool enable)
860 {
861         struct hfi_sys_set_property_pkt *pkt;
862         u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
863
864         if (!enable)
865                 return 0;
866
867         pkt = (struct hfi_sys_set_property_pkt *)packet;
868
869         pkt_sys_idle_indicator(pkt, enable);
870
871         return venus_iface_cmdq_write(hdev, pkt, false);
872 }
873
874 static int venus_sys_set_power_control(struct venus_hfi_device *hdev,
875                                        bool enable)
876 {
877         struct hfi_sys_set_property_pkt *pkt;
878         u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
879
880         pkt = (struct hfi_sys_set_property_pkt *)packet;
881
882         pkt_sys_power_control(pkt, enable);
883
884         return venus_iface_cmdq_write(hdev, pkt, false);
885 }
886
887 static int venus_sys_set_ubwc_config(struct venus_hfi_device *hdev)
888 {
889         struct hfi_sys_set_property_pkt *pkt;
890         u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
891         const struct venus_resources *res = hdev->core->res;
892         int ret;
893
894         pkt = (struct hfi_sys_set_property_pkt *)packet;
895
896         pkt_sys_ubwc_config(pkt, res->ubwc_conf);
897
898         ret = venus_iface_cmdq_write(hdev, pkt, false);
899         if (ret)
900                 return ret;
901
902         return 0;
903 }
904
905 static int venus_get_queue_size(struct venus_hfi_device *hdev,
906                                 unsigned int index)
907 {
908         struct hfi_queue_header *qhdr;
909
910         if (index >= IFACEQ_NUM)
911                 return -EINVAL;
912
913         qhdr = hdev->queues[index].qhdr;
914         if (!qhdr)
915                 return -EINVAL;
916
917         return abs(qhdr->read_idx - qhdr->write_idx);
918 }
919
920 static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
921 {
922         struct device *dev = hdev->core->dev;
923         const struct venus_resources *res = hdev->core->res;
924         int ret;
925
926         ret = venus_sys_set_debug(hdev, venus_fw_debug);
927         if (ret)
928                 dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret);
929
930         /* HFI_PROPERTY_SYS_IDLE_INDICATOR is not supported beyond 8916 (HFI V1) */
931         if (IS_V1(hdev->core)) {
932                 ret = venus_sys_set_idle_message(hdev, false);
933                 if (ret)
934                         dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
935         }
936
937         ret = venus_sys_set_power_control(hdev, venus_fw_low_power_mode);
938         if (ret)
939                 dev_warn(dev, "setting hw power collapse ON failed (%d)\n",
940                          ret);
941
942         /* For certain Venus cores, setting the UBWC configuration is mandatory */
943         if (res->ubwc_conf) {
944                 ret = venus_sys_set_ubwc_config(hdev);
945                 if (ret)
946                         dev_warn(dev, "setting ubwc config failed (%d)\n", ret);
947         }
948
949         return ret;
950 }
951
952 static int venus_session_cmd(struct venus_inst *inst, u32 pkt_type, bool sync)
953 {
954         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
955         struct hfi_session_pkt pkt;
956
957         pkt_session_cmd(&pkt, pkt_type, inst);
958
959         return venus_iface_cmdq_write(hdev, &pkt, sync);
960 }
961
962 static void venus_flush_debug_queue(struct venus_hfi_device *hdev)
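/*
 * Drain the debug queue and print firmware debug strings; coverage packets
 * (HFI_MSG_SYS_COV) are read but not logged here.
 */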
963 {
964         struct device *dev = hdev->core->dev;
965         void *packet = hdev->dbg_buf;
966
967         while (!venus_iface_dbgq_read(hdev, packet)) {
968                 struct hfi_msg_sys_coverage_pkt *pkt = packet;
969
970                 if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) {
971                         struct hfi_msg_sys_debug_pkt *pkt = packet;
972
973                         dev_dbg(dev, VDBGFW "%s", pkt->msg_data);
974                 }
975         }
976 }
977
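/*
 * Ask firmware to prepare for power collapse (SYS_PC_PREP). When @wait is
 * true, block until the PC_PREP response arrives (completed from the ISR
 * thread) or venus_hw_rsp_timeout milliseconds elapse.
 */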
978 static int venus_prepare_power_collapse(struct venus_hfi_device *hdev,
979                                         bool wait)
980 {
981         unsigned long timeout = msecs_to_jiffies(venus_hw_rsp_timeout);
982         struct hfi_sys_pc_prep_pkt pkt;
983         int ret;
984
985         init_completion(&hdev->pwr_collapse_prep);
986
987         pkt_sys_pc_prep(&pkt);
988
989         ret = venus_iface_cmdq_write(hdev, &pkt, false);
990         if (ret)
991                 return ret;
992
993         if (!wait)
994                 return 0;
995
996         ret = wait_for_completion_timeout(&hdev->pwr_collapse_prep, timeout);
997         if (!ret) {
998                 venus_flush_debug_queue(hdev);
999                 return -ETIMEDOUT;
1000         }
1001
1002         return 0;
1003 }
1004
1005 static int venus_are_queues_empty(struct venus_hfi_device *hdev)
1006 {
1007         int ret1, ret2;
1008
1009         ret1 = venus_get_queue_size(hdev, IFACEQ_MSG_IDX);
1010         if (ret1 < 0)
1011                 return ret1;
1012
1013         ret2 = venus_get_queue_size(hdev, IFACEQ_CMD_IDX);
1014         if (ret2 < 0)
1015                 return ret2;
1016
1017         if (!ret1 && !ret2)
1018                 return 1;
1019
1020         return 0;
1021 }
1022
1023 static void venus_sfr_print(struct venus_hfi_device *hdev)
1024 {
1025         struct device *dev = hdev->core->dev;
1026         struct hfi_sfr *sfr = hdev->sfr.kva;
1027         void *p;
1028
1029         if (!sfr)
1030                 return;
1031
1032         p = memchr(sfr->data, '\0', sfr->buf_size);
1033         /*
1034          * SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates
1035          * that Venus is in the process of crashing.
1036          */
1037         if (!p)
1038                 sfr->data[sfr->buf_size - 1] = '\0';
1039
1040         dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data);
1041 }
1042
1043 static void venus_process_msg_sys_error(struct venus_hfi_device *hdev,
1044                                         void *packet)
1045 {
1046         struct hfi_msg_event_notify_pkt *event_pkt = packet;
1047
1048         if (event_pkt->event_id != HFI_EVENT_SYS_ERROR)
1049                 return;
1050
1051         venus_set_state(hdev, VENUS_STATE_DEINIT);
1052
1053         venus_sfr_print(hdev);
1054 }
1055
1056 static irqreturn_t venus_isr_thread(struct venus_core *core)
1057 {
1058         struct venus_hfi_device *hdev = to_hfi_priv(core);
1059         const struct venus_resources *res;
1060         void *pkt;
1061         u32 msg_ret;
1062
1063         if (!hdev)
1064                 return IRQ_NONE;
1065
1066         res = hdev->core->res;
1067         pkt = hdev->pkt_buf;
1068
1069
1070         while (!venus_iface_msgq_read(hdev, pkt)) {
1071                 msg_ret = hfi_process_msg_packet(core, pkt);
1072                 switch (msg_ret) {
1073                 case HFI_MSG_EVENT_NOTIFY:
1074                         venus_process_msg_sys_error(hdev, pkt);
1075                         break;
1076                 case HFI_MSG_SYS_INIT:
1077                         venus_hfi_core_set_resource(core, res->vmem_id,
1078                                                     res->vmem_size,
1079                                                     res->vmem_addr,
1080                                                     hdev);
1081                         break;
1082                 case HFI_MSG_SYS_RELEASE_RESOURCE:
1083                         complete(&hdev->release_resource);
1084                         break;
1085                 case HFI_MSG_SYS_PC_PREP:
1086                         complete(&hdev->pwr_collapse_prep);
1087                         break;
1088                 default:
1089                         break;
1090                 }
1091         }
1092
1093         venus_flush_debug_queue(hdev);
1094
1095         return IRQ_HANDLED;
1096 }
1097
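/*
 * Hard IRQ half: latch WRAPPER_INTR_STATUS for the threaded handler, clear
 * the firmware-to-host soft interrupt and, on pre-V6 hardware, the wrapper
 * interrupt status, then wake the ISR thread.
 */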
1098 static irqreturn_t venus_isr(struct venus_core *core)
1099 {
1100         struct venus_hfi_device *hdev = to_hfi_priv(core);
1101         u32 status;
1102         void __iomem *cpu_cs_base;
1103         void __iomem *wrapper_base;
1104
1105         if (!hdev)
1106                 return IRQ_NONE;
1107
1108         cpu_cs_base = hdev->core->cpu_cs_base;
1109         wrapper_base = hdev->core->wrapper_base;
1110
1111         status = readl(wrapper_base + WRAPPER_INTR_STATUS);
1112         if (IS_V6(core)) {
1113                 if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
1114                     status & WRAPPER_INTR_STATUS_A2HWD_MASK_V6 ||
1115                     status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
1116                         hdev->irq_status = status;
1117         } else {
1118                 if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
1119                     status & WRAPPER_INTR_STATUS_A2HWD_MASK ||
1120                     status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
1121                         hdev->irq_status = status;
1122         }
1123         writel(1, cpu_cs_base + CPU_CS_A2HSOFTINTCLR);
1124         if (!IS_V6(core))
1125                 writel(status, wrapper_base + WRAPPER_INTR_CLEAR);
1126
1127         return IRQ_WAKE_THREAD;
1128 }
1129
1130 static int venus_core_init(struct venus_core *core)
1131 {
1132         struct venus_hfi_device *hdev = to_hfi_priv(core);
1133         struct device *dev = core->dev;
1134         struct hfi_sys_get_property_pkt version_pkt;
1135         struct hfi_sys_init_pkt pkt;
1136         int ret;
1137
1138         pkt_sys_init(&pkt, HFI_VIDEO_ARCH_OX);
1139
1140         venus_set_state(hdev, VENUS_STATE_INIT);
1141
1142         ret = venus_iface_cmdq_write(hdev, &pkt, false);
1143         if (ret)
1144                 return ret;
1145
1146         pkt_sys_image_version(&version_pkt);
1147
1148         ret = venus_iface_cmdq_write(hdev, &version_pkt, false);
1149         if (ret)
1150                 dev_warn(dev, "failed to send image version pkt to fw\n");
1151
1152         ret = venus_sys_set_default_properties(hdev);
1153         if (ret)
1154                 return ret;
1155
1156         return 0;
1157 }
1158
1159 static int venus_core_deinit(struct venus_core *core)
1160 {
1161         struct venus_hfi_device *hdev = to_hfi_priv(core);
1162
1163         venus_set_state(hdev, VENUS_STATE_DEINIT);
1164         hdev->suspended = true;
1165         hdev->power_enabled = false;
1166
1167         return 0;
1168 }
1169
1170 static int venus_core_ping(struct venus_core *core, u32 cookie)
1171 {
1172         struct venus_hfi_device *hdev = to_hfi_priv(core);
1173         struct hfi_sys_ping_pkt pkt;
1174
1175         pkt_sys_ping(&pkt, cookie);
1176
1177         return venus_iface_cmdq_write(hdev, &pkt, false);
1178 }
1179
1180 static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type)
1181 {
1182         struct venus_hfi_device *hdev = to_hfi_priv(core);
1183         struct hfi_sys_test_ssr_pkt pkt;
1184         int ret;
1185
1186         ret = pkt_sys_ssr_cmd(&pkt, trigger_type);
1187         if (ret)
1188                 return ret;
1189
1190         return venus_iface_cmdq_write(hdev, &pkt, false);
1191 }
1192
1193 static int venus_session_init(struct venus_inst *inst, u32 session_type,
1194                               u32 codec)
1195 {
1196         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1197         struct hfi_session_init_pkt pkt;
1198         int ret;
1199
1200         ret = venus_sys_set_debug(hdev, venus_fw_debug);
1201         if (ret)
1202                 goto err;
1203
1204         ret = pkt_session_init(&pkt, inst, session_type, codec);
1205         if (ret)
1206                 goto err;
1207
1208         ret = venus_iface_cmdq_write(hdev, &pkt, true);
1209         if (ret)
1210                 goto err;
1211
1212         return 0;
1213
1214 err:
1215         venus_flush_debug_queue(hdev);
1216         return ret;
1217 }
1218
1219 static int venus_session_end(struct venus_inst *inst)
1220 {
1221         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1222         struct device *dev = hdev->core->dev;
1223
1224         if (venus_fw_coverage) {
1225                 if (venus_sys_set_coverage(hdev, venus_fw_coverage))
1226                         dev_warn(dev, "fw coverage msg ON failed\n");
1227         }
1228
1229         return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_END, true);
1230 }
1231
1232 static int venus_session_abort(struct venus_inst *inst)
1233 {
1234         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1235
1236         venus_flush_debug_queue(hdev);
1237
1238         return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_ABORT, true);
1239 }
1240
1241 static int venus_session_flush(struct venus_inst *inst, u32 flush_mode)
1242 {
1243         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1244         struct hfi_session_flush_pkt pkt;
1245         int ret;
1246
1247         ret = pkt_session_flush(&pkt, inst, flush_mode);
1248         if (ret)
1249                 return ret;
1250
1251         return venus_iface_cmdq_write(hdev, &pkt, true);
1252 }
1253
1254 static int venus_session_start(struct venus_inst *inst)
1255 {
1256         return venus_session_cmd(inst, HFI_CMD_SESSION_START, true);
1257 }
1258
1259 static int venus_session_stop(struct venus_inst *inst)
1260 {
1261         return venus_session_cmd(inst, HFI_CMD_SESSION_STOP, true);
1262 }
1263
1264 static int venus_session_continue(struct venus_inst *inst)
1265 {
1266         return venus_session_cmd(inst, HFI_CMD_SESSION_CONTINUE, false);
1267 }
1268
1269 static int venus_session_etb(struct venus_inst *inst,
1270                              struct hfi_frame_data *in_frame)
1271 {
1272         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1273         u32 session_type = inst->session_type;
1274         int ret;
1275
1276         if (session_type == VIDC_SESSION_TYPE_DEC) {
1277                 struct hfi_session_empty_buffer_compressed_pkt pkt;
1278
1279                 ret = pkt_session_etb_decoder(&pkt, inst, in_frame);
1280                 if (ret)
1281                         return ret;
1282
1283                 ret = venus_iface_cmdq_write(hdev, &pkt, false);
1284         } else if (session_type == VIDC_SESSION_TYPE_ENC) {
1285                 struct hfi_session_empty_buffer_uncompressed_plane0_pkt pkt;
1286
1287                 ret = pkt_session_etb_encoder(&pkt, inst, in_frame);
1288                 if (ret)
1289                         return ret;
1290
1291                 ret = venus_iface_cmdq_write(hdev, &pkt, false);
1292         } else {
1293                 ret = -EINVAL;
1294         }
1295
1296         return ret;
1297 }
1298
1299 static int venus_session_ftb(struct venus_inst *inst,
1300                              struct hfi_frame_data *out_frame)
1301 {
1302         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1303         struct hfi_session_fill_buffer_pkt pkt;
1304         int ret;
1305
1306         ret = pkt_session_ftb(&pkt, inst, out_frame);
1307         if (ret)
1308                 return ret;
1309
1310         return venus_iface_cmdq_write(hdev, &pkt, false);
1311 }
1312
1313 static int venus_session_set_buffers(struct venus_inst *inst,
1314                                      struct hfi_buffer_desc *bd)
1315 {
1316         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1317         struct hfi_session_set_buffers_pkt *pkt;
1318         u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1319         int ret;
1320
1321         if (bd->buffer_type == HFI_BUFFER_INPUT)
1322                 return 0;
1323
1324         pkt = (struct hfi_session_set_buffers_pkt *)packet;
1325
1326         ret = pkt_session_set_buffers(pkt, inst, bd);
1327         if (ret)
1328                 return ret;
1329
1330         return venus_iface_cmdq_write(hdev, pkt, false);
1331 }
1332
1333 static int venus_session_unset_buffers(struct venus_inst *inst,
1334                                        struct hfi_buffer_desc *bd)
1335 {
1336         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1337         struct hfi_session_release_buffer_pkt *pkt;
1338         u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1339         int ret;
1340
1341         if (bd->buffer_type == HFI_BUFFER_INPUT)
1342                 return 0;
1343
1344         pkt = (struct hfi_session_release_buffer_pkt *)packet;
1345
1346         ret = pkt_session_unset_buffers(pkt, inst, bd);
1347         if (ret)
1348                 return ret;
1349
1350         return venus_iface_cmdq_write(hdev, pkt, true);
1351 }
1352
1353 static int venus_session_load_res(struct venus_inst *inst)
1354 {
1355         return venus_session_cmd(inst, HFI_CMD_SESSION_LOAD_RESOURCES, true);
1356 }
1357
1358 static int venus_session_release_res(struct venus_inst *inst)
1359 {
1360         return venus_session_cmd(inst, HFI_CMD_SESSION_RELEASE_RESOURCES, true);
1361 }
1362
1363 static int venus_session_parse_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
1364                                        u32 seq_hdr_len)
1365 {
1366         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1367         struct hfi_session_parse_sequence_header_pkt *pkt;
1368         u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
1369         int ret;
1370
1371         pkt = (struct hfi_session_parse_sequence_header_pkt *)packet;
1372
1373         ret = pkt_session_parse_seq_header(pkt, inst, seq_hdr, seq_hdr_len);
1374         if (ret)
1375                 return ret;
1376
1377         ret = venus_iface_cmdq_write(hdev, pkt, false);
1378         if (ret)
1379                 return ret;
1380
1381         return 0;
1382 }
1383
1384 static int venus_session_get_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
1385                                      u32 seq_hdr_len)
1386 {
1387         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1388         struct hfi_session_get_sequence_header_pkt *pkt;
1389         u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
1390         int ret;
1391
1392         pkt = (struct hfi_session_get_sequence_header_pkt *)packet;
1393
1394         ret = pkt_session_get_seq_hdr(pkt, inst, seq_hdr, seq_hdr_len);
1395         if (ret)
1396                 return ret;
1397
1398         return venus_iface_cmdq_write(hdev, pkt, false);
1399 }
1400
1401 static int venus_session_set_property(struct venus_inst *inst, u32 ptype,
1402                                       void *pdata)
1403 {
1404         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1405         struct hfi_session_set_property_pkt *pkt;
1406         u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1407         int ret;
1408
1409         pkt = (struct hfi_session_set_property_pkt *)packet;
1410
1411         ret = pkt_session_set_property(pkt, inst, ptype, pdata);
1412         if (ret == -ENOTSUPP)
1413                 return 0;
1414         if (ret)
1415                 return ret;
1416
1417         return venus_iface_cmdq_write(hdev, pkt, false);
1418 }
1419
1420 static int venus_session_get_property(struct venus_inst *inst, u32 ptype)
1421 {
1422         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1423         struct hfi_session_get_property_pkt pkt;
1424         int ret;
1425
1426         ret = pkt_session_get_property(&pkt, inst, ptype);
1427         if (ret)
1428                 return ret;
1429
1430         return venus_iface_cmdq_write(hdev, &pkt, true);
1431 }
1432
1433 static int venus_resume(struct venus_core *core)
1434 {
1435         struct venus_hfi_device *hdev = to_hfi_priv(core);
1436         int ret = 0;
1437
1438         mutex_lock(&hdev->lock);
1439
1440         if (!hdev->suspended)
1441                 goto unlock;
1442
1443         ret = venus_power_on(hdev);
1444
1445 unlock:
1446         if (!ret)
1447                 hdev->suspended = false;
1448
1449         mutex_unlock(&hdev->lock);
1450
1451         return ret;
1452 }
1453
1454 static int venus_suspend_1xx(struct venus_core *core)
1455 {
1456         struct venus_hfi_device *hdev = to_hfi_priv(core);
1457         struct device *dev = core->dev;
1458         void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1459         u32 ctrl_status;
1460         int ret;
1461
1462         if (!hdev->power_enabled || hdev->suspended)
1463                 return 0;
1464
1465         mutex_lock(&hdev->lock);
1466         ret = venus_is_valid_state(hdev);
1467         mutex_unlock(&hdev->lock);
1468
1469         if (!ret) {
1470                 dev_err(dev, "bad state, cannot suspend\n");
1471                 return -EINVAL;
1472         }
1473
1474         ret = venus_prepare_power_collapse(hdev, true);
1475         if (ret) {
1476                 dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
1477                 return ret;
1478         }
1479
1480         mutex_lock(&hdev->lock);
1481
1482         if (hdev->last_packet_type != HFI_CMD_SYS_PC_PREP) {
1483                 mutex_unlock(&hdev->lock);
1484                 return -EINVAL;
1485         }
1486
1487         ret = venus_are_queues_empty(hdev);
1488         if (ret < 0 || !ret) {
1489                 mutex_unlock(&hdev->lock);
1490                 return -EINVAL;
1491         }
1492
1493         ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1494         if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) {
1495                 mutex_unlock(&hdev->lock);
1496                 return -EINVAL;
1497         }
1498
1499         ret = venus_power_off(hdev);
1500         if (ret) {
1501                 mutex_unlock(&hdev->lock);
1502                 return ret;
1503         }
1504
1505         hdev->suspended = true;
1506
1507         mutex_unlock(&hdev->lock);
1508
1509         return 0;
1510 }
1511
1512 static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev)
1513 {
1514         void __iomem *wrapper_base = hdev->core->wrapper_base;
1515         void __iomem *wrapper_tz_base = hdev->core->wrapper_tz_base;
1516         void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1517         u32 ctrl_status, cpu_status;
1518
1519         if (IS_V6(hdev->core))
1520                 cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
1521         else
1522                 cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
1523         ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1524
1525         if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
1526             ctrl_status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
1527                 return true;
1528
1529         return false;
1530 }
1531
1532 static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev)
1533 {
1534         void __iomem *wrapper_base = hdev->core->wrapper_base;
1535         void __iomem *wrapper_tz_base = hdev->core->wrapper_tz_base;
1536         void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1537         u32 ctrl_status, cpu_status;
1538
1539         if (IS_V6(hdev->core))
1540                 cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
1541         else
1542                 cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
1543         ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1544
1545         if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
1546             ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
1547                 return true;
1548
1549         return false;
1550 }
1551
1552 static int venus_suspend_3xx(struct venus_core *core)
1553 {
1554         struct venus_hfi_device *hdev = to_hfi_priv(core);
1555         struct device *dev = core->dev;
1556         void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1557         u32 ctrl_status;
1558         bool val;
1559         int ret;
1560
1561         if (!hdev->power_enabled || hdev->suspended)
1562                 return 0;
1563
1564         mutex_lock(&hdev->lock);
1565         ret = venus_is_valid_state(hdev);
1566         mutex_unlock(&hdev->lock);
1567
1568         if (!ret) {
1569                 dev_err(dev, "bad state, cannot suspend\n");
1570                 return -EINVAL;
1571         }
1572
1573         ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1574         if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
1575                 goto power_off;
1576
1577         /*
1578          * Power collapse sequence for Venus 3xx and 4xx versions:
1579          * 1. Check for ARM9 and video core to be idle by checking WFI bit
1580          *    (bit 0) in CPU status register and by checking Idle (bit 30) in
1581          *    Control status register for video core.
1582          * 2. Send a command to prepare for power collapse.
1583          * 3. Check for WFI and PC_READY bits.
1584          */
1585         ret = readx_poll_timeout(venus_cpu_and_video_core_idle, hdev, val, val,
1586                                  1500, 100 * 1500);
1587         if (ret) {
1588                 dev_err(dev, "wait for cpu and video core idle fail (%d)\n", ret);
1589                 return ret;
1590         }
1591
1592         ret = venus_prepare_power_collapse(hdev, false);
1593         if (ret) {
1594                 dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
1595                 return ret;
1596         }
1597
1598         ret = readx_poll_timeout(venus_cpu_idle_and_pc_ready, hdev, val, val,
1599                                  1500, 100 * 1500);
1600         if (ret)
1601                 return ret;
1602
1603 power_off:
1604         mutex_lock(&hdev->lock);
1605
1606         ret = venus_power_off(hdev);
1607         if (ret) {
1608                 dev_err(dev, "venus_power_off (%d)\n", ret);
1609                 mutex_unlock(&hdev->lock);
1610                 return ret;
1611         }
1612
1613         hdev->suspended = true;
1614
1615         mutex_unlock(&hdev->lock);
1616
1617         return 0;
1618 }
1619
1620 static int venus_suspend(struct venus_core *core)
1621 {
1622         if (IS_V3(core) || IS_V4(core) || IS_V6(core))
1623                 return venus_suspend_3xx(core);
1624
1625         return venus_suspend_1xx(core);
1626 }
1627
1628 static const struct hfi_ops venus_hfi_ops = {
1629         .core_init                      = venus_core_init,
1630         .core_deinit                    = venus_core_deinit,
1631         .core_ping                      = venus_core_ping,
1632         .core_trigger_ssr               = venus_core_trigger_ssr,
1633
1634         .session_init                   = venus_session_init,
1635         .session_end                    = venus_session_end,
1636         .session_abort                  = venus_session_abort,
1637         .session_flush                  = venus_session_flush,
1638         .session_start                  = venus_session_start,
1639         .session_stop                   = venus_session_stop,
1640         .session_continue               = venus_session_continue,
1641         .session_etb                    = venus_session_etb,
1642         .session_ftb                    = venus_session_ftb,
1643         .session_set_buffers            = venus_session_set_buffers,
1644         .session_unset_buffers          = venus_session_unset_buffers,
1645         .session_load_res               = venus_session_load_res,
1646         .session_release_res            = venus_session_release_res,
1647         .session_parse_seq_hdr          = venus_session_parse_seq_hdr,
1648         .session_get_seq_hdr            = venus_session_get_seq_hdr,
1649         .session_set_property           = venus_session_set_property,
1650         .session_get_property           = venus_session_get_property,
1651
1652         .resume                         = venus_resume,
1653         .suspend                        = venus_suspend,
1654
1655         .isr                            = venus_isr,
1656         .isr_thread                     = venus_isr_thread,
1657 };
1658
1659 void venus_hfi_destroy(struct venus_core *core)
1660 {
1661         struct venus_hfi_device *hdev = to_hfi_priv(core);
1662
1663         core->priv = NULL;
1664         venus_interface_queues_release(hdev);
1665         mutex_destroy(&hdev->lock);
1666         kfree(hdev);
1667         core->ops = NULL;
1668 }
1669
1670 int venus_hfi_create(struct venus_core *core)
1671 {
1672         struct venus_hfi_device *hdev;
1673         int ret;
1674
1675         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
1676         if (!hdev)
1677                 return -ENOMEM;
1678
1679         mutex_init(&hdev->lock);
1680
1681         hdev->core = core;
1682         hdev->suspended = true;
1683         core->priv = hdev;
1684         core->ops = &venus_hfi_ops;
1685
1686         ret = venus_interface_queues_init(hdev);
1687         if (ret)
1688                 goto err_kfree;
1689
1690         return 0;
1691
1692 err_kfree:
1693         kfree(hdev);
1694         core->priv = NULL;
1695         core->ops = NULL;
1696         return ret;
1697 }
1698
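/*
 * Re-initialize the queue table, queue headers and SFR in the already
 * allocated shared memory, e.g. after a firmware reload, without touching
 * the underlying DMA allocations.
 */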
1699 void venus_hfi_queues_reinit(struct venus_core *core)
1700 {
1701         struct venus_hfi_device *hdev = to_hfi_priv(core);
1702         struct hfi_queue_table_header *tbl_hdr;
1703         struct iface_queue *queue;
1704         struct hfi_sfr *sfr;
1705         unsigned int i;
1706
1707         mutex_lock(&hdev->lock);
1708
1709         for (i = 0; i < IFACEQ_NUM; i++) {
1710                 queue = &hdev->queues[i];
1711                 queue->qhdr =
1712                         IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);
1713
1714                 venus_set_qhdr_defaults(queue->qhdr);
1715
1716                 queue->qhdr->start_addr = queue->qmem.da;
1717
1718                 if (i == IFACEQ_CMD_IDX)
1719                         queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
1720                 else if (i == IFACEQ_MSG_IDX)
1721                         queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
1722                 else if (i == IFACEQ_DBG_IDX)
1723                         queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
1724         }
1725
1726         tbl_hdr = hdev->ifaceq_table.kva;
1727         tbl_hdr->version = 0;
1728         tbl_hdr->size = IFACEQ_TABLE_SIZE;
1729         tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
1730         tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
1731         tbl_hdr->num_q = IFACEQ_NUM;
1732         tbl_hdr->num_active_q = IFACEQ_NUM;
1733
1734         /*
1735          * Set the receive request to zero on the debug queue as there is no
1736          * need for an interrupt from the video hardware for debug messages
1737          */
1738         queue = &hdev->queues[IFACEQ_DBG_IDX];
1739         queue->qhdr->rx_req = 0;
1740
1741         sfr = hdev->sfr.kva;
1742         sfr->buf_size = ALIGNED_SFR_SIZE;
1743
1744         /* ensure table and queue header structs are settled in memory */
1745         wmb();
1746
1747         mutex_unlock(&hdev->lock);
1748 }