Merge tag 'x86_core_for_v5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
[linux-2.6-block.git] / drivers / gpu / drm / amd / amdkfd / kfd_device_queue_manager.h
CommitLineData
ed6e6a34
BG
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef KFD_DEVICE_QUEUE_MANAGER_H_
25#define KFD_DEVICE_QUEUE_MANAGER_H_
26
27#include <linux/rwsem.h>
28#include <linux/list.h>
efeaed4d
FK
29#include <linux/mutex.h>
30#include <linux/sched/mm.h>
ed6e6a34
BG
31#include "kfd_priv.h"
32#include "kfd_mqd_manager.h"
33
ed6e6a34 34
d9d4623c
YZ
35#define VMID_NUM 16
36
ed6e6a34
BG
/*
 * Node that links one process's queue-control block (qpd) into the
 * device-wide list of processes that have queues on this device.
 */
struct device_process_node {
	struct qcm_process_device *qpd;
	struct list_head list;
};
41
64c7f8cf 42/**
45c9a5e4 43 * struct device_queue_manager_ops
64c7f8cf
BG
44 *
45 * @create_queue: Queue creation routine.
46 *
47 * @destroy_queue: Queue destruction routine.
48 *
49 * @update_queue: Queue update routine.
50 *
64c7f8cf
BG
 51 * @execute_queues: Dispatches the queues list to the H/W.
52 *
53 * @register_process: This routine associates a specific process with device.
54 *
55 * @unregister_process: destroys the associations between process to device.
56 *
57 * @initialize: Initializes the pipelines and memory module for that device.
58 *
 59 * @start: Initializes the resources/modules the device needs for queues
60 * execution. This function is called on device initialization and after the
61 * system woke up after suspension.
62 *
 63 * @stop: This routine stops execution of all the active queues running on the
 64 * H/W; basically this function is called on system suspend.
65 *
66 * @uninitialize: Destroys all the device queue manager resources allocated in
67 * initialize routine.
68 *
69 * @create_kernel_queue: Creates kernel queue. Used for debug queue.
70 *
71 * @destroy_kernel_queue: Destroys kernel queue. Used for debug queue.
72 *
73 * @set_cache_memory_policy: Sets memory policy (cached/ non cached) for the
74 * memory apertures.
75 *
9fd3f1bf
FK
 76 * @process_termination: Clears all process queues belonging to that device.
77 *
26103436
FK
78 * @evict_process_queues: Evict all active queues of a process
79 *
 80 * @restore_process_queues: Restore all evicted queues of a process
81 *
5df099e8
JC
82 * @get_wave_state: Retrieves context save state and optionally copies the
83 * control stack, if kept in the MQD, to the given userspace address.
64c7f8cf
BG
84 */
85
/* Scheduling-mode-specific vtable; see the kernel-doc block above. */
struct device_queue_manager_ops {
	int	(*create_queue)(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd);

	int	(*destroy_queue)(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q);

	int	(*update_queue)(struct device_queue_manager *dqm,
				struct queue *q);

	int	(*register_process)(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);

	int	(*unregister_process)(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);

	int	(*initialize)(struct device_queue_manager *dqm);
	int	(*start)(struct device_queue_manager *dqm);
	int	(*stop)(struct device_queue_manager *dqm);
	/* Called before a GPU reset; not documented above — see callers */
	void	(*pre_reset)(struct device_queue_manager *dqm);
	void	(*uninitialize)(struct device_queue_manager *dqm);
	int	(*create_kernel_queue)(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd);

	void	(*destroy_kernel_queue)(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd);

	bool	(*set_cache_memory_policy)(struct device_queue_manager *dqm,
					   struct qcm_process_device *qpd,
					   enum cache_policy default_policy,
					   enum cache_policy alternate_policy,
					   void __user *alternate_aperture_base,
					   uint64_t alternate_aperture_size);

	/* Installs trap-handler code (tba) / memory (tma) addresses for @qpd */
	int	(*set_trap_handler)(struct device_queue_manager *dqm,
				    struct qcm_process_device *qpd,
				    uint64_t tba_addr,
				    uint64_t tma_addr);

	int	(*process_termination)(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);

	int	(*evict_process_queues)(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);
	int	(*restore_process_queues)(struct device_queue_manager *dqm,
					  struct qcm_process_device *qpd);

	int	(*get_wave_state)(struct device_queue_manager *dqm,
				  struct queue *q,
				  void __user *ctl_stack,
				  u32 *ctl_stack_used_size,
				  u32 *save_area_used_size);
};
143
/*
 * Per-ASIC hooks, filled in by one of the
 * device_queue_manager_init_<asic>() constructors below.
 */
struct device_queue_manager_asic_ops {
	/* Apply ASIC-specific settings to a process's queue-control block */
	int	(*update_qpd)(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd);
	bool	(*set_cache_memory_policy)(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size);
	/* ASIC-specific initialization of an SDMA queue's VM state */
	void	(*init_sdma_vm)(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd);
	/* Returns the MQD manager for @type on this ASIC */
	struct mqd_manager *	(*mqd_manager_init)(enum KFD_MQD_TYPE type,
				 struct kfd_dev *dev);
};
159
45c9a5e4
OG
160/**
161 * struct device_queue_manager
162 *
163 * This struct is a base class for the kfd queues scheduler in the
164 * device level. The device base class should expose the basic operations
165 * for queue creation and queue destruction. This base class hides the
166 * scheduling mode of the driver and the specific implementation of the
167 * concrete device. This class is the only class in the queues scheduler
168 * that configures the H/W.
169 *
170 */
ed6e6a34 171
45c9a5e4
OG
struct device_queue_manager {
	struct device_queue_manager_ops ops;
	struct device_queue_manager_asic_ops asic_ops;

	struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX];
	struct packet_manager packets;
	struct kfd_dev *dev;
	struct mutex lock_hidden; /* use dqm_lock/unlock(dqm) */
	struct list_head queues;
	/* reclaim state saved by dqm_lock(), restored by dqm_unlock() */
	unsigned int saved_flags;
	unsigned int processes_count;
	unsigned int active_queue_count;
	unsigned int active_cp_queue_count;
	unsigned int gws_queue_count;
	unsigned int total_queue_count;
	unsigned int next_pipe_to_allocate;
	unsigned int *allocated_queues;
	/* NOTE(review): presumably one bit per available SDMA queue — confirm
	 * against the allocators in kfd_device_queue_manager.c
	 */
	uint64_t sdma_bitmap;
	uint64_t xgmi_sdma_bitmap;
	/* the pasid mapping for each kfd vmid */
	uint16_t vmid_pasid[VMID_NUM];
	uint64_t pipelines_addr;
	/* GPU and CPU views of the scheduler fence used by packet_manager */
	uint64_t fence_gpu_addr;
	uint64_t *fence_addr;
	struct kfd_mem_obj *fence_mem;
	bool active_runlist;
	int sched_policy;

	/* hw exception */
	bool is_hws_hang;
	bool is_resetting;
	struct work_struct hw_exception_work;
	struct kfd_mem_obj hiq_sdma_mqd;
	bool sched_running;
};
207
bfd5e378
YZ
/*
 * Per-ASIC constructors: each fills in @asic_ops with the callbacks
 * appropriate for that hardware generation.
 */
void device_queue_manager_init_cik(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_cik_hawaii(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_vi(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_vi_tonga(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v9(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v10_navi10(
		struct device_queue_manager_asic_ops *asic_ops);
/* Program per-process SH_MEM_* settings for @qpd (implemented per ASIC) */
void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);
/* Queue/pipe topology accessors for this device */
unsigned int get_cp_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm);
0b3674ae 227
a104299b 228static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
0b3674ae
OG
229{
230 return (pdd->lds_base >> 16) & 0xFF;
231}
232
a104299b 233static inline unsigned int
0b3674ae
OG
234get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
235{
236 return (pdd->lds_base >> 60) & 0x0E;
237}
238
efeaed4d
FK
239/* The DQM lock can be taken in MMU notifiers. Make sure no reclaim-FS
240 * happens while holding this lock anywhere to prevent deadlocks when
241 * an MMU notifier runs in reclaim-FS context.
242 */
/* Acquire the DQM lock and forbid reclaim-FS for the critical section. */
static inline void dqm_lock(struct device_queue_manager *dqm)
{
	mutex_lock(&dqm->lock_hidden);
	/* Save reclaim state AFTER taking the lock so dqm_unlock() restores
	 * it before release; see the deadlock rationale in the comment above.
	 */
	dqm->saved_flags = memalloc_noreclaim_save();
}
/* Restore the reclaim state saved by dqm_lock(), then drop the DQM lock. */
static inline void dqm_unlock(struct device_queue_manager *dqm)
{
	memalloc_noreclaim_restore(dqm->saved_flags);
	mutex_unlock(&dqm->lock_hidden);
}
253
818b0324
MJ
/*
 * Copy the SDMA activity counter from userspace into *val.
 * Returns 0 on success or -EFAULT from get_user() on a bad address.
 */
static inline int read_sdma_queue_counter(uint64_t __user *q_rptr, uint64_t *val)
{
	/*
	 * SDMA activity counter is stored at queue's RPTR + 0x8 location.
	 */
	return get_user(*val, q_rptr + 1);
}
ed6e6a34 261#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */