// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */

#include <linux/moduleparam.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"
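
/*
 * Module parameters, both passed through to drm_sched_init() below:
 * job_hang_limit is the number of timeouts a job may trigger before its
 * context is considered guilty, hw_job_limit caps the number of jobs
 * queued to the hardware at any one time.
 */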
static int etnaviv_job_hang_limit = 0;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);
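
/*
 * Called by the DRM scheduler once all dependencies of a job have been
 * met. Hand the job to the hardware, unless a dependency signaled an
 * error, in which case the job is skipped and the error propagates
 * through the scheduler fence.
 */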
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence = NULL;

	if (likely(!sched_job->s_fence->finished.error))
		fence = etnaviv_gpu_submit(submit);
	else
		dev_dbg(submit->gpu->dev, "skipping bad job\n");

	return fence;
}
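
/*
 * Timeout handling: called by the scheduler when a job did not finish
 * within the scheduler timeout. Check for spurious timeouts and forward
 * progress first; only if the GPU is really stuck do a core dump and a
 * full GPU recovery with job resubmission.
 */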
static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
							  *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr;
	int change;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(submit->out_fence))
		goto out_no_timeout;

	/*
	 * If the GPU is still making forward progress on the front-end (which
	 * should never loop) we shift out the timeout to give it a chance to
	 * finish the job.
	 */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
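	/*
	 * Consider the GPU stuck only if no new fence completed and the FE
	 * DMA address moved by at most 16 bytes since the last check, i.e.
	 * the front-end did not fetch a meaningful amount of new commands.
	 */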
	if (gpu->state == ETNA_GPU_STATE_RUNNING &&
	    (gpu->completed_fence != gpu->hangcheck_fence ||
	     change < 0 || change > 16)) {
		gpu->hangcheck_dma_addr = dma_addr;
		gpu->hangcheck_fence = gpu->completed_fence;
		goto out_no_timeout;
	}

	/* block scheduler */
	drm_sched_stop(&gpu->sched, sched_job);

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	etnaviv_core_dump(submit);
	etnaviv_gpu_recover_hang(submit);

	drm_sched_resubmit_jobs(&gpu->sched);

	drm_sched_start(&gpu->sched);

	return DRM_GPU_SCHED_STAT_NOMINAL;

out_no_timeout:
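	/*
	 * The scheduler removed the job from the pending list before calling
	 * this handler; since the timeout turned out to be spurious, put it
	 * back so normal completion processing picks it up again.
	 */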
	list_add(&sched_job->list, &sched_job->sched->pending_list);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}
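
/*
 * Called once the scheduler is done with the job; drop the reference
 * the scheduler took in etnaviv_sched_push_job().
 */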
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

	drm_sched_job_cleanup(sched_job);

	etnaviv_submit_put(submit);
}

static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};

int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	int ret = 0;

	/*
	 * Hold the sched lock across the whole operation to avoid jobs being
	 * pushed out of order with regard to their sched fence seqnos as
	 * allocated in drm_sched_job_arm.
	 */
	mutex_lock(&gpu->sched_lock);

	drm_sched_job_arm(&submit->sched_job);

	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
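	/*
	 * Allocate a cyclic 32-bit id for the finished fence; this is the
	 * handle userspace uses to wait on the submit.
	 */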
	ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
			      submit->out_fence, xa_limit_32b,
			      &gpu->next_user_fence, GFP_KERNEL);
	if (ret < 0) {
		drm_sched_job_cleanup(&submit->sched_job);
		goto out_unlock;
	}

	/* the scheduler holds on to the job now */
	kref_get(&submit->refcount);

	drm_sched_entity_push_job(&submit->sched_job);

out_unlock:
	mutex_unlock(&gpu->sched_lock);

	return ret;
}
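
/*
 * One scheduler instance per GPU core. Jobs that do not complete within
 * 500 ms trigger etnaviv_sched_timedout_job() above.
 */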
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL,
			     DRM_SCHED_PRIORITY_COUNT,
			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
			     msecs_to_jiffies(500), NULL, NULL,
			     dev_name(gpu->dev), gpu->dev);
	if (ret)
		return ret;

	return 0;
}

void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}