Commit | Line | Data |
---|---|---|
f8a58d63 MW |
1 | /* |
2 | * Copyright © 2016-2017 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
21 | * IN THE SOFTWARE. | |
22 | */ | |
23 | ||
24 | #include "i915_drv.h" | |
25 | #include "intel_guc_ct.h" | |
26 | ||
/* Indices of the send/recv buffers within intel_guc_ct_channel.ctbs[] */
enum {
	CTB_SEND = 0,
	CTB_RECV = 1
};

/* Channel owners are assigned statically; the host uses owner 0 */
enum {
	CTB_OWNER_HOST = 0
};
30 | ||
31 | void intel_guc_ct_init_early(struct intel_guc_ct *ct) | |
32 | { | |
33 | /* we're using static channel owners */ | |
34 | ct->host_channel.owner = CTB_OWNER_HOST; | |
35 | } | |
36 | ||
37 | static inline const char *guc_ct_buffer_type_to_str(u32 type) | |
38 | { | |
39 | switch (type) { | |
40 | case INTEL_GUC_CT_BUFFER_TYPE_SEND: | |
41 | return "SEND"; | |
42 | case INTEL_GUC_CT_BUFFER_TYPE_RECV: | |
43 | return "RECV"; | |
44 | default: | |
45 | return "<invalid>"; | |
46 | } | |
47 | } | |
48 | ||
49 | static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc, | |
50 | u32 cmds_addr, u32 size, u32 owner) | |
51 | { | |
52 | DRM_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n", | |
53 | desc, cmds_addr, size, owner); | |
54 | memset(desc, 0, sizeof(*desc)); | |
55 | desc->addr = cmds_addr; | |
56 | desc->size = size; | |
57 | desc->owner = owner; | |
58 | } | |
59 | ||
60 | static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc) | |
61 | { | |
62 | DRM_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n", | |
63 | desc, desc->head, desc->tail); | |
64 | desc->head = 0; | |
65 | desc->tail = 0; | |
66 | desc->is_in_error = 0; | |
67 | } | |
68 | ||
69 | static int guc_action_register_ct_buffer(struct intel_guc *guc, | |
70 | u32 desc_addr, | |
71 | u32 type) | |
72 | { | |
73 | u32 action[] = { | |
74 | INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER, | |
75 | desc_addr, | |
76 | sizeof(struct guc_ct_buffer_desc), | |
77 | type | |
78 | }; | |
79 | int err; | |
80 | ||
81 | /* Can't use generic send(), CT registration must go over MMIO */ | |
82 | err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action)); | |
83 | if (err) | |
84 | DRM_ERROR("CT: register %s buffer failed; err=%d\n", | |
85 | guc_ct_buffer_type_to_str(type), err); | |
86 | return err; | |
87 | } | |
88 | ||
89 | static int guc_action_deregister_ct_buffer(struct intel_guc *guc, | |
90 | u32 owner, | |
91 | u32 type) | |
92 | { | |
93 | u32 action[] = { | |
94 | INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER, | |
95 | owner, | |
96 | type | |
97 | }; | |
98 | int err; | |
99 | ||
100 | /* Can't use generic send(), CT deregistration must go over MMIO */ | |
101 | err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action)); | |
102 | if (err) | |
103 | DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n", | |
104 | guc_ct_buffer_type_to_str(type), owner, err); | |
105 | return err; | |
106 | } | |
107 | ||
108 | static bool ctch_is_open(struct intel_guc_ct_channel *ctch) | |
109 | { | |
110 | return ctch->vma != NULL; | |
111 | } | |
112 | ||
113 | static int ctch_init(struct intel_guc *guc, | |
114 | struct intel_guc_ct_channel *ctch) | |
115 | { | |
116 | struct i915_vma *vma; | |
117 | void *blob; | |
118 | int err; | |
119 | int i; | |
120 | ||
121 | GEM_BUG_ON(ctch->vma); | |
122 | ||
123 | /* We allocate 1 page to hold both descriptors and both buffers. | |
124 | * ___________..................... | |
125 | * |desc (SEND)| : | |
126 | * |___________| PAGE/4 | |
127 | * :___________....................: | |
128 | * |desc (RECV)| : | |
129 | * |___________| PAGE/4 | |
130 | * :_______________________________: | |
131 | * |cmds (SEND) | | |
132 | * | PAGE/4 | |
133 | * |_______________________________| | |
134 | * |cmds (RECV) | | |
135 | * | PAGE/4 | |
136 | * |_______________________________| | |
137 | * | |
138 | * Each message can use a maximum of 32 dwords and we don't expect to | |
139 | * have more than 1 in flight at any time, so we have enough space. | |
140 | * Some logic further ahead will rely on the fact that there is only 1 | |
141 | * page and that it is always mapped, so if the size is changed the | |
142 | * other code will need updating as well. | |
143 | */ | |
144 | ||
145 | /* allocate vma */ | |
146 | vma = intel_guc_allocate_vma(guc, PAGE_SIZE); | |
147 | if (IS_ERR(vma)) { | |
148 | err = PTR_ERR(vma); | |
149 | goto err_out; | |
150 | } | |
151 | ctch->vma = vma; | |
152 | ||
153 | /* map first page */ | |
154 | blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); | |
155 | if (IS_ERR(blob)) { | |
156 | err = PTR_ERR(blob); | |
157 | goto err_vma; | |
158 | } | |
159 | DRM_DEBUG_DRIVER("CT: vma base=%#x\n", guc_ggtt_offset(ctch->vma)); | |
160 | ||
161 | /* store pointers to desc and cmds */ | |
162 | for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) { | |
163 | GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV)); | |
164 | ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i; | |
165 | ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2; | |
166 | } | |
167 | ||
168 | return 0; | |
169 | ||
170 | err_vma: | |
171 | i915_vma_unpin_and_release(&ctch->vma); | |
172 | err_out: | |
173 | DRM_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n", | |
174 | ctch->owner, err); | |
175 | return err; | |
176 | } | |
177 | ||
178 | static void ctch_fini(struct intel_guc *guc, | |
179 | struct intel_guc_ct_channel *ctch) | |
180 | { | |
181 | GEM_BUG_ON(!ctch->vma); | |
182 | ||
183 | i915_gem_object_unpin_map(ctch->vma->obj); | |
184 | i915_vma_unpin_and_release(&ctch->vma); | |
185 | } | |
186 | ||
187 | static int ctch_open(struct intel_guc *guc, | |
188 | struct intel_guc_ct_channel *ctch) | |
189 | { | |
190 | u32 base; | |
191 | int err; | |
192 | int i; | |
193 | ||
194 | DRM_DEBUG_DRIVER("CT: channel %d reopen=%s\n", | |
195 | ctch->owner, yesno(ctch_is_open(ctch))); | |
196 | ||
197 | if (!ctch->vma) { | |
198 | err = ctch_init(guc, ctch); | |
199 | if (unlikely(err)) | |
200 | goto err_out; | |
1c5a9071 | 201 | GEM_BUG_ON(!ctch->vma); |
f8a58d63 MW |
202 | } |
203 | ||
204 | /* vma should be already allocated and map'ed */ | |
205 | base = guc_ggtt_offset(ctch->vma); | |
206 | ||
207 | /* (re)initialize descriptors | |
208 | * cmds buffers are in the second half of the blob page | |
209 | */ | |
210 | for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) { | |
211 | GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV)); | |
212 | guc_ct_buffer_desc_init(ctch->ctbs[i].desc, | |
213 | base + PAGE_SIZE/4 * i + PAGE_SIZE/2, | |
214 | PAGE_SIZE/4, | |
215 | ctch->owner); | |
216 | } | |
217 | ||
218 | /* register buffers, starting wirh RECV buffer | |
219 | * descriptors are in first half of the blob | |
220 | */ | |
221 | err = guc_action_register_ct_buffer(guc, | |
222 | base + PAGE_SIZE/4 * CTB_RECV, | |
223 | INTEL_GUC_CT_BUFFER_TYPE_RECV); | |
224 | if (unlikely(err)) | |
225 | goto err_fini; | |
226 | ||
227 | err = guc_action_register_ct_buffer(guc, | |
228 | base + PAGE_SIZE/4 * CTB_SEND, | |
229 | INTEL_GUC_CT_BUFFER_TYPE_SEND); | |
230 | if (unlikely(err)) | |
231 | goto err_deregister; | |
232 | ||
233 | return 0; | |
234 | ||
235 | err_deregister: | |
236 | guc_action_deregister_ct_buffer(guc, | |
237 | ctch->owner, | |
238 | INTEL_GUC_CT_BUFFER_TYPE_RECV); | |
239 | err_fini: | |
240 | ctch_fini(guc, ctch); | |
241 | err_out: | |
242 | DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err); | |
243 | return err; | |
244 | } | |
245 | ||
246 | static void ctch_close(struct intel_guc *guc, | |
247 | struct intel_guc_ct_channel *ctch) | |
248 | { | |
249 | GEM_BUG_ON(!ctch_is_open(ctch)); | |
250 | ||
251 | guc_action_deregister_ct_buffer(guc, | |
252 | ctch->owner, | |
253 | INTEL_GUC_CT_BUFFER_TYPE_SEND); | |
254 | guc_action_deregister_ct_buffer(guc, | |
255 | ctch->owner, | |
256 | INTEL_GUC_CT_BUFFER_TYPE_RECV); | |
257 | ctch_fini(guc, ctch); | |
258 | } | |
259 | ||
260 | static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch) | |
261 | { | |
262 | /* For now it's trivial */ | |
263 | return ++ctch->next_fence; | |
264 | } | |
265 | ||
266 | static int ctb_write(struct intel_guc_ct_buffer *ctb, | |
267 | const u32 *action, | |
268 | u32 len /* in dwords */, | |
269 | u32 fence) | |
270 | { | |
271 | struct guc_ct_buffer_desc *desc = ctb->desc; | |
272 | u32 head = desc->head / 4; /* in dwords */ | |
273 | u32 tail = desc->tail / 4; /* in dwords */ | |
274 | u32 size = desc->size / 4; /* in dwords */ | |
275 | u32 used; /* in dwords */ | |
276 | u32 header; | |
277 | u32 *cmds = ctb->cmds; | |
278 | unsigned int i; | |
279 | ||
280 | GEM_BUG_ON(desc->size % 4); | |
281 | GEM_BUG_ON(desc->head % 4); | |
282 | GEM_BUG_ON(desc->tail % 4); | |
283 | GEM_BUG_ON(tail >= size); | |
284 | ||
285 | /* | |
286 | * tail == head condition indicates empty. GuC FW does not support | |
287 | * using up the entire buffer to get tail == head meaning full. | |
288 | */ | |
289 | if (tail < head) | |
290 | used = (size - head) + tail; | |
291 | else | |
292 | used = tail - head; | |
293 | ||
294 | /* make sure there is a space including extra dw for the fence */ | |
295 | if (unlikely(used + len + 1 >= size)) | |
296 | return -ENOSPC; | |
297 | ||
298 | /* Write the message. The format is the following: | |
299 | * DW0: header (including action code) | |
300 | * DW1: fence | |
301 | * DW2+: action data | |
302 | */ | |
303 | header = (len << GUC_CT_MSG_LEN_SHIFT) | | |
304 | (GUC_CT_MSG_WRITE_FENCE_TO_DESC) | | |
305 | (action[0] << GUC_CT_MSG_ACTION_SHIFT); | |
306 | ||
307 | cmds[tail] = header; | |
308 | tail = (tail + 1) % size; | |
309 | ||
310 | cmds[tail] = fence; | |
311 | tail = (tail + 1) % size; | |
312 | ||
313 | for (i = 1; i < len; i++) { | |
314 | cmds[tail] = action[i]; | |
315 | tail = (tail + 1) % size; | |
316 | } | |
317 | ||
318 | /* now update desc tail (back in bytes) */ | |
319 | desc->tail = tail * 4; | |
320 | GEM_BUG_ON(desc->tail > desc->size); | |
321 | ||
322 | return 0; | |
323 | } | |
324 | ||
325 | /* Wait for the response from the GuC. | |
326 | * @fence: response fence | |
327 | * @status: placeholder for status | |
328 | * return: 0 response received (status is valid) | |
329 | * -ETIMEDOUT no response within hardcoded timeout | |
330 | * -EPROTO no response, ct buffer was in error | |
331 | */ | |
332 | static int wait_for_response(struct guc_ct_buffer_desc *desc, | |
333 | u32 fence, | |
334 | u32 *status) | |
335 | { | |
336 | int err; | |
337 | ||
338 | /* | |
339 | * Fast commands should complete in less than 10us, so sample quickly | |
340 | * up to that length of time, then switch to a slower sleep-wait loop. | |
341 | * No GuC command should ever take longer than 10ms. | |
342 | */ | |
343 | #define done (READ_ONCE(desc->fence) == fence) | |
344 | err = wait_for_us(done, 10); | |
345 | if (err) | |
346 | err = wait_for(done, 10); | |
347 | #undef done | |
348 | ||
349 | if (unlikely(err)) { | |
350 | DRM_ERROR("CT: fence %u failed; reported fence=%u\n", | |
351 | fence, desc->fence); | |
352 | ||
353 | if (WARN_ON(desc->is_in_error)) { | |
354 | /* Something went wrong with the messaging, try to reset | |
355 | * the buffer and hope for the best | |
356 | */ | |
357 | guc_ct_buffer_desc_reset(desc); | |
358 | err = -EPROTO; | |
359 | } | |
360 | } | |
361 | ||
362 | *status = desc->status; | |
363 | return err; | |
364 | } | |
365 | ||
366 | static int ctch_send(struct intel_guc *guc, | |
367 | struct intel_guc_ct_channel *ctch, | |
368 | const u32 *action, | |
369 | u32 len, | |
370 | u32 *status) | |
371 | { | |
372 | struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND]; | |
373 | struct guc_ct_buffer_desc *desc = ctb->desc; | |
374 | u32 fence; | |
375 | int err; | |
376 | ||
377 | GEM_BUG_ON(!ctch_is_open(ctch)); | |
378 | GEM_BUG_ON(!len); | |
379 | GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK); | |
380 | ||
381 | fence = ctch_get_next_fence(ctch); | |
382 | err = ctb_write(ctb, action, len, fence); | |
383 | if (unlikely(err)) | |
384 | return err; | |
385 | ||
386 | intel_guc_notify(guc); | |
387 | ||
388 | err = wait_for_response(desc, fence, status); | |
389 | if (unlikely(err)) | |
390 | return err; | |
391 | if (*status != INTEL_GUC_STATUS_SUCCESS) | |
392 | return -EIO; | |
393 | return 0; | |
394 | } | |
395 | ||
396 | /* | |
397 | * Command Transport (CT) buffer based GuC send function. | |
398 | */ | |
399 | static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len) | |
400 | { | |
401 | struct intel_guc_ct_channel *ctch = &guc->ct.host_channel; | |
402 | u32 status = ~0; /* undefined */ | |
403 | int err; | |
404 | ||
405 | mutex_lock(&guc->send_mutex); | |
406 | ||
407 | err = ctch_send(guc, ctch, action, len, &status); | |
408 | if (unlikely(err)) { | |
409 | DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n", | |
410 | action[0], err, status); | |
411 | } | |
412 | ||
413 | mutex_unlock(&guc->send_mutex); | |
414 | return err; | |
415 | } | |
416 | ||
417 | /** | |
418 | * Enable buffer based command transport | |
419 | * Shall only be called for platforms with HAS_GUC_CT. | |
420 | * @guc: the guc | |
421 | * return: 0 on success | |
422 | * non-zero on failure | |
423 | */ | |
424 | int intel_guc_enable_ct(struct intel_guc *guc) | |
425 | { | |
426 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | |
427 | struct intel_guc_ct_channel *ctch = &guc->ct.host_channel; | |
428 | int err; | |
429 | ||
430 | GEM_BUG_ON(!HAS_GUC_CT(dev_priv)); | |
431 | ||
432 | err = ctch_open(guc, ctch); | |
433 | if (unlikely(err)) | |
434 | return err; | |
435 | ||
436 | /* Switch into cmd transport buffer based send() */ | |
437 | guc->send = intel_guc_send_ct; | |
438 | DRM_INFO("CT: %s\n", enableddisabled(true)); | |
439 | return 0; | |
440 | } | |
441 | ||
442 | /** | |
443 | * Disable buffer based command transport. | |
444 | * Shall only be called for platforms with HAS_GUC_CT. | |
445 | * @guc: the guc | |
446 | */ | |
447 | void intel_guc_disable_ct(struct intel_guc *guc) | |
448 | { | |
449 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | |
450 | struct intel_guc_ct_channel *ctch = &guc->ct.host_channel; | |
451 | ||
452 | GEM_BUG_ON(!HAS_GUC_CT(dev_priv)); | |
453 | ||
454 | if (!ctch_is_open(ctch)) | |
455 | return; | |
456 | ||
457 | ctch_close(guc, ctch); | |
458 | ||
459 | /* Disable send */ | |
460 | guc->send = intel_guc_send_nop; | |
461 | DRM_INFO("CT: %s\n", enableddisabled(false)); | |
462 | } |