drm/i915: split out vlv sideband to a separate file
[linux-block.git] / drivers / gpu / drm / i915 / intel_sideband.c
CommitLineData
59de0813
JN
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
a75d035f
CW
25#include <asm/iosf_mbi.h>
26
59de0813 27#include "i915_drv.h"
1d455f8d 28#include "intel_sideband.h"
59de0813 29
59de0813 30/* SBI access */
75319428
CW
31static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
32 enum intel_sbi_destination destination,
33 u32 *val, bool is_read)
59de0813 34{
75319428
CW
35 struct intel_uncore *uncore = &i915->uncore;
36 u32 cmd;
221c7862 37
75319428 38 lockdep_assert_held(&i915->sb_lock);
59de0813 39
75319428
CW
40 if (intel_wait_for_register_fw(uncore,
41 SBI_CTL_STAT, SBI_BUSY, 0,
42 100)) {
b908af55
WK
43 drm_err(&i915->drm,
44 "timeout waiting for SBI to become ready\n");
75319428 45 return -EBUSY;
59de0813
JN
46 }
47
75319428
CW
48 intel_uncore_write_fw(uncore, SBI_ADDR, (u32)reg << 16);
49 intel_uncore_write_fw(uncore, SBI_DATA, is_read ? 0 : *val);
59de0813
JN
50
51 if (destination == SBI_ICLK)
75319428 52 cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
59de0813 53 else
75319428
CW
54 cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
55 if (!is_read)
56 cmd |= BIT(8);
57 intel_uncore_write_fw(uncore, SBI_CTL_STAT, cmd | SBI_BUSY);
58
59 if (__intel_wait_for_register_fw(uncore,
60 SBI_CTL_STAT, SBI_BUSY, 0,
61 100, 100, &cmd)) {
b908af55
WK
62 drm_err(&i915->drm,
63 "timeout waiting for SBI to complete read\n");
75319428 64 return -ETIMEDOUT;
b0734f77
CW
65 }
66
75319428 67 if (cmd & SBI_RESPONSE_FAIL) {
b908af55 68 drm_err(&i915->drm, "error during SBI read of reg %x\n", reg);
75319428 69 return -ENXIO;
59de0813
JN
70 }
71
75319428
CW
72 if (is_read)
73 *val = intel_uncore_read_fw(uncore, SBI_DATA);
74
75 return 0;
59de0813
JN
76}
77
75319428
CW
78u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
79 enum intel_sbi_destination destination)
59de0813 80{
75319428 81 u32 result = 0;
59de0813 82
75319428 83 intel_sbi_rw(i915, reg, destination, &result, true);
59de0813 84
75319428
CW
85 return result;
86}
b0734f77 87
75319428
CW
88void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
89 enum intel_sbi_destination destination)
90{
91 intel_sbi_rw(i915, reg, destination, &value, false);
59de0813 92}
e0516e83 93
81b55ef1 94static int gen6_check_mailbox_status(u32 mbox)
e0516e83
CW
95{
96 switch (mbox & GEN6_PCODE_ERROR_MASK) {
97 case GEN6_PCODE_SUCCESS:
98 return 0;
99 case GEN6_PCODE_UNIMPLEMENTED_CMD:
100 return -ENODEV;
101 case GEN6_PCODE_ILLEGAL_CMD:
102 return -ENXIO;
103 case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
104 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
105 return -EOVERFLOW;
106 case GEN6_PCODE_TIMEOUT:
107 return -ETIMEDOUT;
108 default:
109 MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
110 return 0;
111 }
112}
113
81b55ef1 114static int gen7_check_mailbox_status(u32 mbox)
e0516e83
CW
115{
116 switch (mbox & GEN6_PCODE_ERROR_MASK) {
117 case GEN6_PCODE_SUCCESS:
118 return 0;
119 case GEN6_PCODE_ILLEGAL_CMD:
120 return -ENXIO;
121 case GEN7_PCODE_TIMEOUT:
122 return -ETIMEDOUT;
123 case GEN7_PCODE_ILLEGAL_DATA:
124 return -EINVAL;
f22fd334
MR
125 case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
126 return -ENXIO;
127 case GEN11_PCODE_LOCKED:
128 return -EBUSY;
f136c58a
SL
129 case GEN11_PCODE_REJECTED:
130 return -EACCES;
e0516e83
CW
131 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
132 return -EOVERFLOW;
133 default:
134 MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
135 return 0;
136 }
137}
138
/*
 * One pcode mailbox transaction, with i915->sb_lock already held.
 *
 * Loads @*val (and @*val1 if non-NULL) into the pcode data registers,
 * issues the @mbox command and polls for GEN6_PCODE_READY to clear,
 * first busy-waiting for @fast_timeout_us then sleeping for up to
 * @slow_timeout_ms.  For reads the data registers are copied back into
 * @*val/@*val1 on completion.
 *
 * Returns 0 on success, -EAGAIN if the mailbox was already busy (caller
 * should retry), -ETIMEDOUT if the command never completed, or an errno
 * decoded from the gen-specific completion status.
 */
static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
				  u32 mbox, u32 *val, u32 *val1,
				  int fast_timeout_us,
				  int slow_timeout_ms,
				  bool is_read)
{
	struct intel_uncore *uncore = &i915->uncore;

	lockdep_assert_held(&i915->sb_lock);

	/*
	 * GEN6_PCODE_* are outside of the forcewake domain, we can use
	 * intel_uncore_read/write_fw variants to reduce the amount of work
	 * required when reading/writing.
	 */

	/* A previous transaction is still in flight; let the caller retry. */
	if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		return -EAGAIN;

	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
	intel_uncore_write_fw(uncore,
			      GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	/* Completion clears GEN6_PCODE_READY; the final value lands in mbox. */
	if (__intel_wait_for_register_fw(uncore,
					 GEN6_PCODE_MAILBOX,
					 GEN6_PCODE_READY, 0,
					 fast_timeout_us,
					 slow_timeout_ms,
					 &mbox))
		return -ETIMEDOUT;

	if (is_read)
		*val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
	if (is_read && val1)
		*val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);

	/* The mailbox error-field encoding changed between gen6 and gen7+. */
	if (GRAPHICS_VER(i915) > 6)
		return gen7_check_mailbox_status(mbox);
	else
		return gen6_check_mailbox_status(mbox);
}
181
d284d514
VS
182int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
183 u32 *val, u32 *val1)
e0516e83
CW
184{
185 int err;
186
187 mutex_lock(&i915->sb_lock);
d284d514 188 err = __sandybridge_pcode_rw(i915, mbox, val, val1,
378974f7 189 500, 20,
e0516e83
CW
190 true);
191 mutex_unlock(&i915->sb_lock);
192
193 if (err) {
b908af55
WK
194 drm_dbg(&i915->drm,
195 "warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
196 mbox, __builtin_return_address(0), err);
e0516e83
CW
197 }
198
199 return err;
200}
201
202int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
203 u32 mbox, u32 val,
204 int fast_timeout_us,
205 int slow_timeout_ms)
206{
207 int err;
208
209 mutex_lock(&i915->sb_lock);
d284d514 210 err = __sandybridge_pcode_rw(i915, mbox, &val, NULL,
e0516e83
CW
211 fast_timeout_us, slow_timeout_ms,
212 false);
213 mutex_unlock(&i915->sb_lock);
214
215 if (err) {
b908af55
WK
216 drm_dbg(&i915->drm,
217 "warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
218 val, mbox, __builtin_return_address(0), err);
e0516e83
CW
219 }
220
221 return err;
222}
223
224static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
225 u32 request, u32 reply_mask, u32 reply,
226 u32 *status)
227{
d284d514 228 *status = __sandybridge_pcode_rw(i915, mbox, &request, NULL,
e0516e83
CW
229 500, 0,
230 true);
231
232 return *status || ((request & reply_mask) == reply);
233}
234
/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @i915: device private
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and if this times out for another 50 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	mutex_lock(&i915->sb_lock);

#define COND \
	skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * first, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delays
	 * the request completion.
	 */
	drm_dbg_kms(&i915->drm,
		    "PCODE timeout, retrying with preemption disabled\n");
	/* Atomic polling below is only tolerable for a short base timeout. */
	drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:
	mutex_unlock(&i915->sb_lock);
	/* On acknowledgment, report any PCODE-side error from the last try. */
	return ret ? ret : status;
#undef COND
}
f9c730ed 301
41c791fc 302int intel_pcode_init(struct drm_i915_private *i915)
f9c730ed 303{
41c791fc 304 int ret = 0;
f9c730ed
MR
305
306 if (!IS_DGFX(i915))
41c791fc 307 return ret;
f9c730ed
MR
308
309 ret = skl_pcode_request(i915, DG1_PCODE_STATUS,
310 DG1_UNCORE_GET_INIT_STATUS,
311 DG1_UNCORE_INIT_STATUS_COMPLETE,
41c791fc
BN
312 DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
313
314 drm_dbg(&i915->drm, "PCODE init status %d\n", ret);
315
f9c730ed
MR
316 if (ret)
317 drm_err(&i915->drm, "Pcode did not report uncore initialization completion!\n");
41c791fc
BN
318
319 return ret;
f9c730ed 320}