/*
 * drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
 *
 * amdgpu job management: allocation, submission to the GPU scheduler,
 * and the scheduler dependency/run callbacks.
 */
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
e472d258
ML
31static void amdgpu_job_free_handler(struct work_struct *ws)
32{
33 struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job);
34 kfree(job);
35}
36
50838c8c
CK
37int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
38 struct amdgpu_job **job)
39{
40 size_t size = sizeof(struct amdgpu_job);
41
42 if (num_ibs == 0)
43 return -EINVAL;
44
45 size += sizeof(struct amdgpu_ib) * num_ibs;
46
47 *job = kzalloc(size, GFP_KERNEL);
48 if (!*job)
49 return -ENOMEM;
50
51 (*job)->adev = adev;
52 (*job)->ibs = (void *)&(*job)[1];
53 (*job)->num_ibs = num_ibs;
e472d258 54 INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);
50838c8c 55
e86f9cee
CK
56 amdgpu_sync_create(&(*job)->sync);
57
50838c8c
CK
58 return 0;
59}
60
d71518b5
CK
61int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
62 struct amdgpu_job **job)
63{
64 int r;
65
66 r = amdgpu_job_alloc(adev, 1, job);
67 if (r)
68 return r;
69
70 r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
71 if (r)
72 kfree(*job);
73
74 return r;
75}
76
50838c8c
CK
/**
 * amdgpu_job_free - release all resources held by a job
 * @job: job to tear down
 *
 * Frees the job's IB suballocations, drops the fence reference, unrefs
 * the user-fence BO and releases the sync object.  The job structure
 * itself is only freed here when the scheduler is not in use; otherwise
 * the scheduler's free-job work item (amdgpu_job_free_handler) does the
 * final kfree.
 */
void amdgpu_job_free(struct amdgpu_job *job)
{
	unsigned i;
	struct fence *f;
	/* use sched fence if available */
	f = (job->base.s_fence)? &job->base.s_fence->base : job->fence;

	/* Each IB's suballocation is kept alive until @f signals. */
	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
	fence_put(job->fence);

	amdgpu_bo_unref(&job->uf.bo);
	amdgpu_sync_free(&job->sync);

	/* When scheduled through the GPU scheduler the structure is freed
	 * later by the work item installed in amdgpu_job_alloc().
	 */
	if (!job->base.use_sched)
		kfree(job);
}
94
/**
 * amdgpu_job_submit - hand a job to the GPU scheduler
 * @job: job to submit
 * @ring: ring the job should run on
 * @entity: scheduler entity to queue the job on
 * @owner: opaque owner tag recorded in the job
 * @f: out parameter for the scheduler fence (must be non-NULL)
 *
 * Initializes the scheduler-side part of the job and pushes it onto the
 * entity's queue.  On success *f holds a new reference to the job's
 * scheduler fence (caller must fence_put() it).  Returns 0 on success
 * or a negative error code; on error the caller still owns the job.
 */
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct amd_sched_entity *entity, void *owner,
		      struct fence **f)
{
	struct fence *fence;
	int r;
	job->ring = ring;

	/* A result fence pointer is mandatory. */
	if (!f)
		return -EINVAL;

	r = amd_sched_job_init(&job->base, &ring->sched, entity, owner, &fence);
	if (r)
		return r;

	job->owner = owner;
	/* Extra reference for the caller; the scheduler keeps its own. */
	*f = fence_get(fence);
	amd_sched_entity_push_job(&job->base);

	return 0;
}
116
/**
 * amdgpu_job_dependency - scheduler callback: next fence to wait for
 * @sched_job: scheduler job being queried
 *
 * Pops the next unsignaled fence from the job's sync object.  Once all
 * explicit dependencies are satisfied (NULL returned by the sync object)
 * and the job uses a VM that has no id assigned yet, a VM id and page
 * directory address are grabbed here; amdgpu_vm_grab_id() may add
 * further fences to the sync object, so it is queried again afterwards.
 *
 * Returns the fence the scheduler must wait on, or NULL when the job is
 * ready to run.
 */
static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->ibs->vm;

	struct fence *fence = amdgpu_sync_get_fence(&job->sync);

	if (fence == NULL && vm && !job->ibs->vm_id) {
		struct amdgpu_ring *ring = job->ring;
		unsigned i, vm_id;
		uint64_t vm_pd_addr;
		int r;

		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
				      &job->base.s_fence->base,
				      &vm_id, &vm_pd_addr);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);
		else {
			/* Propagate the grabbed id/PD address to every IB. */
			for (i = 0; i < job->num_ibs; ++i) {
				job->ibs[i].vm_id = vm_id;
				job->ibs[i].vm_pd_addr = vm_pd_addr;
			}
		}

		/* grab_id may have queued new dependencies — re-check. */
		fence = amdgpu_sync_get_fence(&job->sync);
	}

	return fence;
}
147
/**
 * amdgpu_job_run - scheduler callback: execute a job on its ring
 * @sched_job: scheduler job to run
 *
 * Waits for any remaining sync dependencies, then schedules the job's
 * IBs on its ring.  Returns the hardware fence for the submission, or
 * NULL when the job could not be run.  The job's resources are released
 * via amdgpu_job_free() in either case.
 */
static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
	struct fence *fence = NULL;
	struct amdgpu_job *job;
	int r;

	if (!sched_job) {
		DRM_ERROR("job is null\n");
		return NULL;
	}
	job = to_amdgpu_job(sched_job);

	r = amdgpu_sync_wait(&job->sync);
	if (r) {
		DRM_ERROR("failed to sync wait (%d)\n", r);
		return NULL;
	}

	trace_amdgpu_sched_run_job(job);
	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
			       job->sync.last_vm_update, &fence);
	if (r) {
		DRM_ERROR("Error scheduling IBs (%d)\n", r);
		goto err;
	}

	/* Success and error paths converge here on purpose: the job is
	 * freed either way, and on error @fence is still NULL.
	 */
err:
	job->fence = fence;
	amdgpu_job_free(job);
	return fence;
}
179
/* Backend callbacks amdgpu registers with the GPU scheduler. */
struct amd_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
};