/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014,2017, 2019 The Linux Foundation. All rights reserved.
 */

#ifndef __ADRENO_GPU_H__
#define __ADRENO_GPU_H__

#include <linux/firmware.h>
#include <linux/iopoll.h>

#include "msm_gpu.h"

#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"

extern bool snapshot_debugbus;
extern bool allow_vram_carveout;

enum {
	ADRENO_FW_PM4 = 0,
	ADRENO_FW_SQE = 0, /* a6xx */
	ADRENO_FW_PFP = 1,
	ADRENO_FW_GMU = 1, /* a6xx */
	ADRENO_FW_GPMU = 2,
	ADRENO_FW_MAX,
};

#define ADRENO_QUIRK_TWO_PASS_USE_WFI		BIT(0)
#define ADRENO_QUIRK_FAULT_DETECT_MASK		BIT(1)
#define ADRENO_QUIRK_LMLOADKILL_DISABLE		BIT(2)

struct adreno_rev {
	uint8_t  core;
	uint8_t  major;
	uint8_t  minor;
	uint8_t  patchid;
};

#define ANY_ID 0xff

#define ADRENO_REV(core, major, minor, patchid) \
	((struct adreno_rev){ core, major, minor, patchid })

struct adreno_gpu_funcs {
	struct msm_gpu_funcs base;
	int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
};

/* A register/value pair, e.g. for hw clock-gating tables: */
struct adreno_reglist {
	u32 offset;
	u32 value;
};

extern const struct adreno_reglist a612_hwcg[], a615_hwcg[], a630_hwcg[], a640_hwcg[], a650_hwcg[];
extern const struct adreno_reglist a660_hwcg[], a690_hwcg[];

struct adreno_info {
	struct adreno_rev rev;
	uint32_t revn;
	const char *name;
	const char *fw[ADRENO_FW_MAX];
	uint32_t gmem;
	u64 quirks;
	struct msm_gpu *(*init)(struct drm_device *dev);
	const char *zapfw;
	u32 inactive_period;
	const struct adreno_reglist *hwcg;
	u64 address_space_size;
};

const struct adreno_info *adreno_info(struct adreno_rev rev);

struct adreno_gpu {
	struct msm_gpu base;
	struct adreno_rev rev;
	const struct adreno_info *info;
	uint32_t gmem;  /* actual gmem size */
	uint32_t revn;  /* numeric revision name */
	uint16_t speedbin;
	const struct adreno_gpu_funcs *funcs;

	/* interesting register offsets to dump: */
	const unsigned int *registers;

	/*
	 * Are we loading fw from legacy path?  Prior to addition
	 * of gpu firmware to linux-firmware, the fw files were
	 * placed in toplevel firmware directory, following qcom's
	 * android kernel.  But linux-firmware preferred they be
	 * placed in a 'qcom' subdirectory.
	 *
	 * For backwards compatibility, we try first to load from
	 * the new path, using request_firmware_direct() to avoid
	 * any potential timeout waiting for usermode helper, then
	 * fall back to the old path (with direct load).  And
	 * finally fall back to request_firmware() with the new
	 * path to allow the usermode helper.
	 */
	enum {
		FW_LOCATION_UNKNOWN = 0,
		FW_LOCATION_NEW,     /* /lib/firmware/qcom/$fwfile */
		FW_LOCATION_LEGACY,  /* /lib/firmware/$fwfile */
		FW_LOCATION_HELPER,
	} fwloc;
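
	/*
	 * A minimal sketch (not the driver's code verbatim) of the
	 * fallback order described above, assuming hypothetical
	 * newname/legacyname strings holding the two candidate paths;
	 * the real logic lives in adreno_request_fw():
	 *
	 *	ret = request_firmware_direct(&fw, newname, drm->dev);
	 *	if (!ret)
	 *		gpu->fwloc = FW_LOCATION_NEW;
	 *	else if (!request_firmware_direct(&fw, legacyname, drm->dev))
	 *		gpu->fwloc = FW_LOCATION_LEGACY;
	 *	else if (!request_firmware(&fw, newname, drm->dev))
	 *		gpu->fwloc = FW_LOCATION_HELPER;
	 */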

	/* firmware: */
	const struct firmware *fw[ADRENO_FW_MAX];

	/*
	 * Register offsets are different between some GPUs.
	 * GPU specific offsets will be exported by GPU specific
	 * code (a3xx_gpu.c) and stored in this common location.
	 */
	const unsigned int *reg_offsets;
	bool gmu_is_wrapper;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)

struct adreno_ocmem {
	struct ocmem *ocmem;
	unsigned long hdl;
	void *buf;
};

/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
	struct adreno_rev rev;
};

#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)

#define spin_until(X) ({                                   \
	int __ret = -ETIMEDOUT;                            \
	unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
	do {                                               \
		if (X) {                                   \
			__ret = 0;                         \
			break;                             \
		}                                          \
	} while (time_before(jiffies, __t));               \
	__ret;                                             \
})
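
/*
 * Example usage (illustrative, with placeholder register/bit names):
 *
 *	if (spin_until(gpu_read(gpu, REG_SOME_STATUS) & SOME_IDLE_BIT))
 *		DRM_ERROR("timeout waiting for GPU to idle\n");
 *
 * spin_until() evaluates to 0 if the condition became true, or -ETIMEDOUT
 * once ADRENO_IDLE_TIMEOUT has elapsed.
 */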

bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2);
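
/*
 * Note: ANY_ID fields act as wildcards in adreno_cmp_rev(); as the
 * comments in the helpers below say, the argument order matters, since
 * the wildcard is (apparently) only treated specially on the table side
 * (the first argument).  E.g.:
 *
 *	adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev)
 *
 * matches an a635 with any patch revision.
 */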

static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)
{
	/* revn can be zero, but if not is set at same time as info */
	WARN_ON_ONCE(!gpu->info);

	return gpu->revn == revn;
}

static inline bool adreno_has_gmu_wrapper(const struct adreno_gpu *gpu)
{
	return gpu->gmu_is_wrapper;
}

static inline bool adreno_is_a2xx(const struct adreno_gpu *gpu)
{
	/* revn can be zero, but if not is set at same time as info */
	WARN_ON_ONCE(!gpu->info);

	return (gpu->revn < 300);
}

static inline bool adreno_is_a20x(const struct adreno_gpu *gpu)
{
	/* revn can be zero, but if not is set at same time as info */
	WARN_ON_ONCE(!gpu->info);

	return (gpu->revn < 210);
}

static inline bool adreno_is_a225(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 225);
}

static inline bool adreno_is_a305(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 305);
}

static inline bool adreno_is_a306(const struct adreno_gpu *gpu)
{
	/* yes, 307, because a305c is 306 */
	return adreno_is_revn(gpu, 307);
}

static inline bool adreno_is_a320(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 320);
}

static inline bool adreno_is_a330(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 330);
}

static inline bool adreno_is_a330v2(const struct adreno_gpu *gpu)
{
	return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
}

static inline int adreno_is_a405(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 405);
}

static inline int adreno_is_a420(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 420);
}

static inline int adreno_is_a430(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 430);
}

static inline int adreno_is_a506(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 506);
}

static inline int adreno_is_a508(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 508);
}

static inline int adreno_is_a509(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 509);
}

static inline int adreno_is_a510(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 510);
}

static inline int adreno_is_a512(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 512);
}

static inline int adreno_is_a530(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 530);
}

static inline int adreno_is_a540(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 540);
}

static inline int adreno_is_a610(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 610);
}

static inline int adreno_is_a618(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 618);
}

static inline int adreno_is_a619(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 619);
}

static inline int adreno_is_a619_holi(const struct adreno_gpu *gpu)
{
	return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu);
}

static inline int adreno_is_a630(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 630);
}

static inline int adreno_is_a640(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 640);
}

static inline int adreno_is_a650(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 650);
}

static inline int adreno_is_7c3(const struct adreno_gpu *gpu)
{
	/* The order of args is important here to handle ANY_ID correctly */
	return adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev);
}

static inline int adreno_is_a660(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 660);
}

static inline int adreno_is_a680(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 680);
}

static inline int adreno_is_a690(const struct adreno_gpu *gpu)
{
	/* The order of args is important here to handle ANY_ID correctly */
	return adreno_cmp_rev(ADRENO_REV(6, 9, 0, ANY_ID), gpu->rev);
}

/* check for a615, a616, a618, a619 or any derivatives */
static inline int adreno_is_a615_family(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 615) ||
	       adreno_is_revn(gpu, 616) ||
	       adreno_is_revn(gpu, 618) ||
	       adreno_is_revn(gpu, 619);
}

static inline int adreno_is_a660_family(const struct adreno_gpu *gpu)
{
	return adreno_is_a660(gpu) || adreno_is_a690(gpu) || adreno_is_7c3(gpu);
}

/* check for a650, a660, or any derivatives */
static inline int adreno_is_a650_family(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 650) ||
	       adreno_is_revn(gpu, 620) ||
	       adreno_is_a660_family(gpu);
}

static inline int adreno_is_a640_family(const struct adreno_gpu *gpu)
{
	return adreno_is_a640(gpu) || adreno_is_a680(gpu);
}

u64 adreno_private_address_space_size(struct msm_gpu *gpu);
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
		     uint32_t param, uint64_t *value, uint32_t *len);
int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
		     uint32_t param, uint64_t value, uint32_t len);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
		const char *fwname);
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova);
int adreno_hw_init(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		struct drm_printer *p);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);

int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
			  struct adreno_ocmem *ocmem);
void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem);

int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
		int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
int adreno_load_fw(struct adreno_gpu *adreno_gpu);

void adreno_gpu_state_destroy(struct msm_gpu_state *state);

int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);
void adreno_show_object(struct drm_printer *p, void **ptr, int len,
		bool *dumped);

/*
 * Common helper function to initialize the default address space for arm-smmu
 * attached targets
 */
struct msm_gem_address_space *
adreno_create_address_space(struct msm_gpu *gpu,
			    struct platform_device *pdev);

struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
				  struct platform_device *pdev,
				  unsigned long quirks);

int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
			 struct adreno_smmu_fault_info *info, const char *block,
			 u32 scratch[4]);

int adreno_read_speedbin(struct device *dev, u32 *speedbin);

/*
 * For a5xx and a6xx targets load the zap shader that is used to pull the GPU
 * out of secure mode
 */
int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid);

/* ringbuffer helpers (the parts that are adreno specific) */

static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt+1);
	OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
}

/* no-op packet: */
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
	adreno_wait_ring(ring, 1);
	OUT_RING(ring, CP_TYPE2_PKT);
}

static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt+1);
	OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}
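
/*
 * Example (illustrative): a type3 packet is an opcode followed by cnt
 * payload dwords, e.g. waiting for CP idle on a2xx-a4xx:
 *
 *	OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
 *	OUT_RING(ring, 0x00000000);
 */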

static inline u32 PM4_PARITY(u32 val)
{
	return (0x9669 >> (0xF & (val ^
		(val >> 4) ^ (val >> 8) ^ (val >> 12) ^
		(val >> 16) ^ (val >> 20) ^ (val >> 24) ^
		(val >> 28)))) & 1;
}
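
/*
 * Informally: the value is XOR-folded down to one nibble (XOR folding
 * preserves the overall bit parity), and 0x9669 serves as a 16-entry
 * lookup table of per-nibble parity, so the result is 1 exactly when
 * val has an even number of set bits.
 */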

/* Maximum number of values that can be executed for one opcode */
#define TYPE4_MAX_PAYLOAD 127

#define PKT4(_reg, _cnt) \
	(CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
	 (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))

static inline void
OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, PKT4(regindx, cnt));
}

static inline void
OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
		((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
}
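
/*
 * Example (illustrative): type7 packets carry a CP opcode plus payload,
 * type4 packets write registers.  Using opcode/event names from
 * adreno_pm4.xml.h and a placeholder register offset:
 *
 *	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
 *	OUT_RING(ring, CACHE_FLUSH_TS);
 *
 *	OUT_PKT4(ring, REG_SOME_SCRATCH, 1);
 *	OUT_RING(ring, 0xdeadbeef);
 */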

struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);
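
/*
 * Note on units in get_wptr() below: ring->cur and ring->start are u32
 * pointers, so their difference is already a dword count, while
 * MSM_GPU_RINGBUFFER_SZ is in bytes, hence the >> 2 to convert it to
 * dwords.
 */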
static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
}

/*
 * Given a register and a count, return a value to program into
 * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
 * registers starting at _reg.
 *
 * The register base needs to be a multiple of the length. If it is not, the
 * hardware will quietly mask off the bits for you and shift the size. For
 * example, if you intend the protection to start at 0x07 for a length of 4
 * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
 * expose registers you intended to protect!
 */
#define ADRENO_PROTECT_RW(_reg, _len) \
	((1 << 30) | (1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
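
/*
 * Worked example of the pitfall described above: ADRENO_PROTECT_RW(0x07, 4)
 * encodes ilog2(4) == 2 as the length field and (0x07 << 2) as the base,
 * but since 0x07 is not 4-aligned the hardware masks the base down, and the
 * window that actually gets protected is 0x04-0x07 rather than 0x07-0x0A.
 */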

/*
 * Same as above, but allow reads over the range. For areas of mixed use (such
 * as performance counters) this allows us to protect a much larger range with a
 * single register
 */
#define ADRENO_PROTECT_RDONLY(_reg, _len) \
	((1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))

#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
		interval, timeout)
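
/*
 * Example usage (illustrative, with a placeholder register): poll a dword
 * register until a bit sets, checking every 100us for up to 10000us:
 *
 *	u32 status;
 *	int ret = gpu_poll_timeout(gpu, REG_SOME_STATUS, status,
 *				   status & BIT(0), 100, 10000);
 *
 * Like readl_poll_timeout(), this returns 0 on success or -ETIMEDOUT.
 */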

#endif /* __ADRENO_GPU_H__ */