/*
 * Copyright 2016-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "v9_structs.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_sh_mask.h"

static inline struct v9_mqd *get_mqd(void *mqd)
{
        return (struct v9_mqd *)mqd;
}

static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
        return (struct v9_sdma_mqd *)mqd;
}

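/* Apply the queue's CU mask: map it symmetrically across shader engines and
 * store the result in the per-SE static thread management fields of the MQD.
 * A zero-length mask leaves the MQD defaults untouched.
 */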
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
                        struct queue_properties *q)
{
        struct v9_mqd *m;
        uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */

        if (q->cu_mask_count == 0)
                return;

        mqd_symmetrically_map_cu_mask(mm,
                q->cu_mask, q->cu_mask_count, se_mask);

        m = get_mqd(mqd);
        m->compute_static_thread_mgmt_se0 = se_mask[0];
        m->compute_static_thread_mgmt_se1 = se_mask[1];
        m->compute_static_thread_mgmt_se2 = se_mask[2];
        m->compute_static_thread_mgmt_se3 = se_mask[3];

        pr_debug("update cu mask to %#x %#x %#x %#x\n",
                m->compute_static_thread_mgmt_se0,
                m->compute_static_thread_mgmt_se1,
                m->compute_static_thread_mgmt_se2,
                m->compute_static_thread_mgmt_se3);
}

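/* Allocate a compute MQD and fill in HQD defaults (priorities, quantum, AQL
 * and CWSR state). Ring and doorbell parameters are programmed by the
 * update_mqd() call at the end.
 */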
static int init_mqd(struct mqd_manager *mm, void **mqd,
                struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
                struct queue_properties *q)
{
        int retval;
        uint64_t addr;
        struct v9_mqd *m;
        struct kfd_dev *kfd = mm->dev;

        /* From V9, for CWSR, the control stack is located on the next page
         * boundary after the MQD, so use the GTT allocation function
         * instead of the sub-allocator.
         */
        if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
                *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
                if (!*mqd_mem_obj)
                        return -ENOMEM;
                retval = kfd->kfd2kgd->init_gtt_mem_allocation(kfd->kgd,
                        ALIGN(q->ctl_stack_size, PAGE_SIZE) +
                                ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
                        &((*mqd_mem_obj)->gtt_mem),
                        &((*mqd_mem_obj)->gpu_addr),
                        (void *)&((*mqd_mem_obj)->cpu_ptr), true);
        } else
                retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
                                mqd_mem_obj);
        if (retval != 0)
                return -ENOMEM;

        m = (struct v9_mqd *) (*mqd_mem_obj)->cpu_ptr;
        addr = (*mqd_mem_obj)->gpu_addr;

        memset(m, 0, sizeof(struct v9_mqd));

        m->header = 0xC0310800;
        m->compute_pipelinestat_enable = 1;
        m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
        m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
        m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
        m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;

        m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
                        0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

        m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

        m->cp_mqd_base_addr_lo = lower_32_bits(addr);
        m->cp_mqd_base_addr_hi = upper_32_bits(addr);

        m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
                        1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
                        10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;

        m->cp_hqd_pipe_priority = 1;
        m->cp_hqd_queue_priority = 15;

        if (q->format == KFD_QUEUE_FORMAT_AQL) {
                m->cp_hqd_aql_control =
                        1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
        }

        if (q->tba_addr) {
                m->compute_pgm_rsrc2 |=
                        (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
        }

        if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
                m->cp_hqd_persistent_state |=
                        (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
                m->cp_hqd_ctx_save_base_addr_lo =
                        lower_32_bits(q->ctx_save_restore_area_address);
                m->cp_hqd_ctx_save_base_addr_hi =
                        upper_32_bits(q->ctx_save_restore_area_address);
                m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
                m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
                m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
                m->cp_hqd_wg_state_offset = q->ctl_stack_size;
        }

        *mqd = m;
        if (gart_addr)
                *gart_addr = addr;
        retval = mm->update_mqd(mm, m, q);

        return retval;
}

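/* Program the chosen HQD slot from this MQD and activate the queue through
 * the kfd2kgd hqd_load hook.
 */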
static int load_mqd(struct mqd_manager *mm, void *mqd,
                        uint32_t pipe_id, uint32_t queue_id,
                        struct queue_properties *p, struct mm_struct *mms)
{
        /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
        uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);

        return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
                                          (uint32_t __user *)p->write_ptr,
                                          wptr_shift, 0, mms);
}

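/* Refresh the MQD from the current queue properties: ring buffer address and
 * size, read/write pointer addresses, doorbell, EOP buffer, AQL settings and
 * CU mask, then recompute whether the queue is active.
 */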
static int update_mqd(struct mqd_manager *mm, void *mqd,
                      struct queue_properties *q)
{
        struct v9_mqd *m;

        m = get_mqd(mqd);

        m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
        m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
        pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

        m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
        m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);

        m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
        m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
        m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
        m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);

        m->cp_hqd_pq_doorbell_control =
                q->doorbell_off <<
                        CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
        pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
                        m->cp_hqd_pq_doorbell_control);

        m->cp_hqd_ib_control =
                3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
                1 << CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT;

        /*
         * HW does not clamp this field correctly. Maximum EOP queue size
         * is constrained by per-SE EOP done signal count, which is 8-bit.
         * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
         * more than (EOP entry count - 1) so a queue size of 0x800 dwords
         * is safe, giving a maximum field value of 0xA.
         */
        m->cp_hqd_eop_control = min(0xA,
                order_base_2(q->eop_ring_buffer_size / 4) - 1);
        m->cp_hqd_eop_base_addr_lo =
                        lower_32_bits(q->eop_ring_buffer_address >> 8);
        m->cp_hqd_eop_base_addr_hi =
                        upper_32_bits(q->eop_ring_buffer_address >> 8);

        m->cp_hqd_iq_timer = 0;

        m->cp_hqd_vmid = q->vmid;

        if (q->format == KFD_QUEUE_FORMAT_AQL) {
                m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
                        2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
                        1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT |
                        1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT;
                m->cp_hqd_pq_doorbell_control |= 1 <<
                        CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
        }
        if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
                m->cp_hqd_ctx_save_control = 0;

        update_cu_mask(mm, mqd, q);

        q->is_active = (q->queue_size > 0 &&
                        q->queue_address != 0 &&
                        q->queue_percent > 0 &&
                        !q->is_evicted);

        return 0;
}

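/* Preempt or reset the queue in the given pipe/queue slot; the preempt type
 * and timeout are passed through to the kfd2kgd hqd_destroy hook.
 */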
static int destroy_mqd(struct mqd_manager *mm, void *mqd,
                        enum kfd_preempt_type type,
                        unsigned int timeout, uint32_t pipe_id,
                        uint32_t queue_id)
{
        return mm->dev->kfd2kgd->hqd_destroy
                (mm->dev->kgd, mqd, type, timeout,
                 pipe_id, queue_id);
}

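/* Free the MQD allocation, matching the two allocation paths used in
 * init_mqd(): a dedicated GTT buffer for CWSR-capable compute queues, or the
 * GTT sub-allocator otherwise.
 */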
static void uninit_mqd(struct mqd_manager *mm, void *mqd,
                        struct kfd_mem_obj *mqd_mem_obj)
{
        struct kfd_dev *kfd = mm->dev;

        if (mqd_mem_obj->gtt_mem) {
                kfd->kfd2kgd->free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
                kfree(mqd_mem_obj);
        } else {
                kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
        }
}

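/* Check whether the given HQD slot is still occupied by this queue. */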
static bool is_occupied(struct mqd_manager *mm, void *mqd,
                        uint64_t queue_address, uint32_t pipe_id,
                        uint32_t queue_id)
{
        return mm->dev->kfd2kgd->hqd_is_occupied(
                mm->dev->kgd, queue_address,
                pipe_id, queue_id);
}

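/* The HIQ is a kernel-owned queue: reuse the compute MQD setup and mark the
 * queue as privileged and KMD-owned in CP_HQD_PQ_CONTROL.
 */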
static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
                        struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
                        struct queue_properties *q)
{
        struct v9_mqd *m;
        int retval = init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);

        if (retval != 0)
                return retval;

        m = get_mqd(*mqd);

        m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
                        1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;

        return retval;
}

static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
                        struct queue_properties *q)
{
        struct v9_mqd *m;
        int retval = update_mqd(mm, mqd, q);

        if (retval != 0)
                return retval;

        /* TODO: what's the point? update_mqd already does this. */
        m = get_mqd(mqd);
        m->cp_hqd_vmid = q->vmid;
        return retval;
}

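/* SDMA MQDs are always sub-allocated from GTT; as in the compute path, the
 * ring parameters are filled in by the update_mqd() call at the end.
 */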
static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
                struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
                struct queue_properties *q)
{
        int retval;
        struct v9_sdma_mqd *m;

        retval = kfd_gtt_sa_allocate(mm->dev,
                        sizeof(struct v9_sdma_mqd),
                        mqd_mem_obj);

        if (retval != 0)
                return -ENOMEM;

        m = (struct v9_sdma_mqd *) (*mqd_mem_obj)->cpu_ptr;

        memset(m, 0, sizeof(struct v9_sdma_mqd));

        *mqd = m;
        if (gart_addr)
                *gart_addr = (*mqd_mem_obj)->gpu_addr;

        retval = mm->update_mqd(mm, m, q);

        return retval;
}

static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
                struct kfd_mem_obj *mqd_mem_obj)
{
        kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}

static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
                uint32_t pipe_id, uint32_t queue_id,
                struct queue_properties *p, struct mm_struct *mms)
{
        return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
                                               (uint32_t __user *)p->write_ptr,
                                               mms);
}

#define SDMA_RLC_DUMMY_DEFAULT 0xf

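/* Program the SDMA RLC ring state in the MQD: ring buffer base and size,
 * read-pointer writeback, doorbell offset and engine/queue IDs, then
 * recompute whether the queue is active.
 */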
static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
                struct queue_properties *q)
{
        struct v9_sdma_mqd *m;

        m = get_sdma_mqd(mqd);
        m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4)
                << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
                q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
                1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
                6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;

        m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
        m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
        m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
        m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
        m->sdmax_rlcx_doorbell_offset =
                q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;

        m->sdma_engine_id = q->sdma_engine_id;
        m->sdma_queue_id = q->sdma_queue_id;
        m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;

        q->is_active = (q->queue_size > 0 &&
                        q->queue_address != 0 &&
                        q->queue_percent > 0 &&
                        !q->is_evicted);

        return 0;
}

/*
 * The preempt type here is ignored because there is only one way
 * to preempt an SDMA queue.
 */
static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
                enum kfd_preempt_type type,
                unsigned int timeout, uint32_t pipe_id,
                uint32_t queue_id)
{
        return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
}

static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
                uint64_t queue_address, uint32_t pipe_id,
                uint32_t queue_id)
{
        return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
}

#if defined(CONFIG_DEBUG_FS)

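/* Dump the raw MQD contents as a hex blob through debugfs for inspection. */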
static int debugfs_show_mqd(struct seq_file *m, void *data)
{
        seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
                     data, sizeof(struct v9_mqd), false);
        return 0;
}

static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
        seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
                     data, sizeof(struct v9_sdma_mqd), false);
        return 0;
}

#endif

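/* Create a v9 mqd_manager and hook up the callbacks for the requested queue
 * type: regular compute queues, the kernel HIQ, or SDMA queues.
 */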
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
                struct kfd_dev *dev)
{
        struct mqd_manager *mqd;

        if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
                return NULL;

        mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
        if (!mqd)
                return NULL;

        mqd->dev = dev;

        switch (type) {
        case KFD_MQD_TYPE_CP:
        case KFD_MQD_TYPE_COMPUTE:
                mqd->init_mqd = init_mqd;
                mqd->uninit_mqd = uninit_mqd;
                mqd->load_mqd = load_mqd;
                mqd->update_mqd = update_mqd;
                mqd->destroy_mqd = destroy_mqd;
                mqd->is_occupied = is_occupied;
#if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
                break;
        case KFD_MQD_TYPE_HIQ:
                mqd->init_mqd = init_mqd_hiq;
                mqd->uninit_mqd = uninit_mqd;
                mqd->load_mqd = load_mqd;
                mqd->update_mqd = update_mqd_hiq;
                mqd->destroy_mqd = destroy_mqd;
                mqd->is_occupied = is_occupied;
#if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
                break;
        case KFD_MQD_TYPE_SDMA:
                mqd->init_mqd = init_mqd_sdma;
                mqd->uninit_mqd = uninit_mqd_sdma;
                mqd->load_mqd = load_mqd_sdma;
                mqd->update_mqd = update_mqd_sdma;
                mqd->destroy_mqd = destroy_mqd_sdma;
                mqd->is_occupied = is_occupied_sdma;
#if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
                break;
        default:
                kfree(mqd);
                return NULL;
        }

        return mqd;
}