// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "goyaP.h"
#include "include/hw_ip/mmu/mmu_general.h"
#include "include/hw_ip/mmu/mmu_v1_0.h"
#include "include/goya/asic_reg/goya_masks.h"
#include "include/goya/goya_reg_map.h"

#include <linux/pci.h>
#include <linux/genalloc.h>
#include <linux/hwmon.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
/*
 * GOYA security scheme:
 *
 * 1. Host is protected by:
 *        - Range registers (when MMU is enabled, DMA RR does NOT protect host)
 *        - MMU
 *
 * 2. DRAM is protected by:
 *        - Range registers (protect the first 512MB)
 *        - MMU (isolation between users)
 *
 * 3. Configuration is protected by:
 *        - Range registers
 *        - Protection bits
 *
 * When MMU is disabled:
 *
 * QMAN DMA: PQ, CQ, CP and DMA are secured.
 * PQ, CB and the data are on the host.
 *
 * QMAN TPC/MME:
 * PQ, CQ and CP are not secured.
 * PQ, CB and the data are on the SRAM/DRAM.
 *
 * Since QMAN DMA is secured, the driver parses the DMA CB:
 *     - checks the DMA pointer
 *     - WREG, MSG_PROT are not allowed.
 *     - MSG_LONG/SHORT are allowed.
 *
 * A read/write transaction by the QMAN to a protected area will succeed if
 * and only if the QMAN's CP is secured and MSG_PROT is used.
 *
 * When MMU is enabled:
 *
 * QMAN DMA: PQ, CQ and CP are secured.
 * MMU is set to bypass on the Secure props register of the QMAN.
 * The reasons we don't enable MMU for PQ, CQ and CP are:
 *     - PQ entry is in kernel address space and the driver doesn't map it.
 *     - CP writes to the MSI-X register and to kernel address space
 *       (completion queue).
 *
 * DMA is not secured, but because CP is secured, the driver still needs to
 * parse the CB. It doesn't need to check the DMA addresses, though.
 *
 * For QMAN DMA 0, DMA is also secured because only the driver uses this DMA
 * and the driver doesn't map memory in the MMU.
 *
 * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled
 * mode).
 *
 * DMA RR does NOT protect host because DMA is not secured.
 */
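
/*
 * Illustrative sketch (not part of the driver): given the rules above, a CB
 * parser for a secured DMA QMAN would conceptually walk the packets and
 * reject the privileged ones, e.g.:
 *
 *	switch (pkt_id) {
 *	case PACKET_WREG_32:
 *	case PACKET_MSG_PROT:
 *		return -EPERM;			// not allowed in a user DMA CB
 *	case PACKET_LIN_DMA:
 *		return validate_dma_addr(pkt);	// hypothetical helper
 *	default:
 *		break;
 *	}
 *
 * The actual parsing logic lives in the driver's command-buffer parser; the
 * snippet above is only a reading aid for the scheme described here.
 */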
75 #define GOYA_BOOT_FIT_FILE "habanalabs/goya/goya-boot-fit.itb"
76 #define GOYA_LINUX_FW_FILE "habanalabs/goya/goya-fit.itb"
78 #define GOYA_MMU_REGS_NUM 63
80 #define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */
82 #define GOYA_RESET_TIMEOUT_MSEC 500 /* 500ms */
83 #define GOYA_PLDM_RESET_TIMEOUT_MSEC 20000 /* 20s */
84 #define GOYA_RESET_WAIT_MSEC 1 /* 1ms */
85 #define GOYA_CPU_RESET_WAIT_MSEC 100 /* 100ms */
86 #define GOYA_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
87 #define GOYA_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */
88 #define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
89 #define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
90 #define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
92 #define GOYA_QMAN0_FENCE_VAL 0xD169B243
94 #define GOYA_MAX_STRING_LEN 20
96 #define GOYA_CB_POOL_CB_CNT 512
97 #define GOYA_CB_POOL_CB_SIZE 0x20000 /* 128KB */
99 #define IS_QM_IDLE(engine, qm_glbl_sts0) \
100 (((qm_glbl_sts0) & engine##_QM_IDLE_MASK) == engine##_QM_IDLE_MASK)
101 #define IS_DMA_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(DMA, qm_glbl_sts0)
102 #define IS_TPC_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(TPC, qm_glbl_sts0)
103 #define IS_MME_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(MME, qm_glbl_sts0)
105 #define IS_CMDQ_IDLE(engine, cmdq_glbl_sts0) \
106 (((cmdq_glbl_sts0) & engine##_CMDQ_IDLE_MASK) == \
107 engine##_CMDQ_IDLE_MASK)
108 #define IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) \
109 IS_CMDQ_IDLE(TPC, cmdq_glbl_sts0)
110 #define IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) \
111 IS_CMDQ_IDLE(MME, cmdq_glbl_sts0)
113 #define IS_DMA_IDLE(dma_core_sts0) \
114 !((dma_core_sts0) & DMA_CH_0_STS0_DMA_BUSY_MASK)
116 #define IS_TPC_IDLE(tpc_cfg_sts) \
117 (((tpc_cfg_sts) & TPC_CFG_IDLE_MASK) == TPC_CFG_IDLE_MASK)
119 #define IS_MME_IDLE(mme_arch_sts) \
120 (((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
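
/*
 * A sketch of how these helpers compose in an idle check (not the actual
 * implementation): an engine is considered idle only when both its queue
 * status and its core status report idle, e.g.
 *
 *	is_idle = IS_DMA_QM_IDLE(RREG32(mmDMA_QM_0_GLBL_STS0)) &&
 *		  IS_DMA_IDLE(RREG32(mmDMA_CH_0_STS0));
 */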
123 static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
124 "goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
125 "goya cq 4", "goya cpu eq"
128 static u16 goya_packet_sizes[MAX_PACKET_ID] = {
129 [PACKET_WREG_32] = sizeof(struct packet_wreg32),
130 [PACKET_WREG_BULK] = sizeof(struct packet_wreg_bulk),
131 [PACKET_MSG_LONG] = sizeof(struct packet_msg_long),
132 [PACKET_MSG_SHORT] = sizeof(struct packet_msg_short),
133 [PACKET_CP_DMA] = sizeof(struct packet_cp_dma),
134 [PACKET_MSG_PROT] = sizeof(struct packet_msg_prot),
135 [PACKET_FENCE] = sizeof(struct packet_fence),
136 [PACKET_LIN_DMA] = sizeof(struct packet_lin_dma),
137 [PACKET_NOP] = sizeof(struct packet_nop),
138 [PACKET_STOP] = sizeof(struct packet_stop)
141 static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
142 mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
143 mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
144 mmDMA_QM_2_GLBL_NON_SECURE_PROPS,
145 mmDMA_QM_3_GLBL_NON_SECURE_PROPS,
146 mmDMA_QM_4_GLBL_NON_SECURE_PROPS,
147 mmTPC0_QM_GLBL_SECURE_PROPS,
148 mmTPC0_QM_GLBL_NON_SECURE_PROPS,
149 mmTPC0_CMDQ_GLBL_SECURE_PROPS,
150 mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS,
153 mmTPC1_QM_GLBL_SECURE_PROPS,
154 mmTPC1_QM_GLBL_NON_SECURE_PROPS,
155 mmTPC1_CMDQ_GLBL_SECURE_PROPS,
156 mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS,
159 mmTPC2_QM_GLBL_SECURE_PROPS,
160 mmTPC2_QM_GLBL_NON_SECURE_PROPS,
161 mmTPC2_CMDQ_GLBL_SECURE_PROPS,
162 mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS,
165 mmTPC3_QM_GLBL_SECURE_PROPS,
166 mmTPC3_QM_GLBL_NON_SECURE_PROPS,
167 mmTPC3_CMDQ_GLBL_SECURE_PROPS,
168 mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS,
171 mmTPC4_QM_GLBL_SECURE_PROPS,
172 mmTPC4_QM_GLBL_NON_SECURE_PROPS,
173 mmTPC4_CMDQ_GLBL_SECURE_PROPS,
174 mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS,
177 mmTPC5_QM_GLBL_SECURE_PROPS,
178 mmTPC5_QM_GLBL_NON_SECURE_PROPS,
179 mmTPC5_CMDQ_GLBL_SECURE_PROPS,
180 mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS,
183 mmTPC6_QM_GLBL_SECURE_PROPS,
184 mmTPC6_QM_GLBL_NON_SECURE_PROPS,
185 mmTPC6_CMDQ_GLBL_SECURE_PROPS,
186 mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS,
189 mmTPC7_QM_GLBL_SECURE_PROPS,
190 mmTPC7_QM_GLBL_NON_SECURE_PROPS,
191 mmTPC7_CMDQ_GLBL_SECURE_PROPS,
192 mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS,
195 mmMME_QM_GLBL_SECURE_PROPS,
196 mmMME_QM_GLBL_NON_SECURE_PROPS,
197 mmMME_CMDQ_GLBL_SECURE_PROPS,
198 mmMME_CMDQ_GLBL_NON_SECURE_PROPS,
199 mmMME_SBA_CONTROL_DATA,
200 mmMME_SBB_CONTROL_DATA,
201 mmMME_SBC_CONTROL_DATA,
202 mmMME_WBC_CONTROL_DATA,
203 mmPCIE_WRAP_PSOC_ARUSER,
204 mmPCIE_WRAP_PSOC_AWUSER
207 static u32 goya_all_events[] = {
208 GOYA_ASYNC_EVENT_ID_PCIE_IF,
209 GOYA_ASYNC_EVENT_ID_TPC0_ECC,
210 GOYA_ASYNC_EVENT_ID_TPC1_ECC,
211 GOYA_ASYNC_EVENT_ID_TPC2_ECC,
212 GOYA_ASYNC_EVENT_ID_TPC3_ECC,
213 GOYA_ASYNC_EVENT_ID_TPC4_ECC,
214 GOYA_ASYNC_EVENT_ID_TPC5_ECC,
215 GOYA_ASYNC_EVENT_ID_TPC6_ECC,
216 GOYA_ASYNC_EVENT_ID_TPC7_ECC,
217 GOYA_ASYNC_EVENT_ID_MME_ECC,
218 GOYA_ASYNC_EVENT_ID_MME_ECC_EXT,
219 GOYA_ASYNC_EVENT_ID_MMU_ECC,
220 GOYA_ASYNC_EVENT_ID_DMA_MACRO,
221 GOYA_ASYNC_EVENT_ID_DMA_ECC,
222 GOYA_ASYNC_EVENT_ID_CPU_IF_ECC,
223 GOYA_ASYNC_EVENT_ID_PSOC_MEM,
224 GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT,
225 GOYA_ASYNC_EVENT_ID_SRAM0,
226 GOYA_ASYNC_EVENT_ID_SRAM1,
227 GOYA_ASYNC_EVENT_ID_SRAM2,
228 GOYA_ASYNC_EVENT_ID_SRAM3,
229 GOYA_ASYNC_EVENT_ID_SRAM4,
230 GOYA_ASYNC_EVENT_ID_SRAM5,
231 GOYA_ASYNC_EVENT_ID_SRAM6,
232 GOYA_ASYNC_EVENT_ID_SRAM7,
233 GOYA_ASYNC_EVENT_ID_SRAM8,
234 GOYA_ASYNC_EVENT_ID_SRAM9,
235 GOYA_ASYNC_EVENT_ID_SRAM10,
236 GOYA_ASYNC_EVENT_ID_SRAM11,
237 GOYA_ASYNC_EVENT_ID_SRAM12,
238 GOYA_ASYNC_EVENT_ID_SRAM13,
239 GOYA_ASYNC_EVENT_ID_SRAM14,
240 GOYA_ASYNC_EVENT_ID_SRAM15,
241 GOYA_ASYNC_EVENT_ID_SRAM16,
242 GOYA_ASYNC_EVENT_ID_SRAM17,
243 GOYA_ASYNC_EVENT_ID_SRAM18,
244 GOYA_ASYNC_EVENT_ID_SRAM19,
245 GOYA_ASYNC_EVENT_ID_SRAM20,
246 GOYA_ASYNC_EVENT_ID_SRAM21,
247 GOYA_ASYNC_EVENT_ID_SRAM22,
248 GOYA_ASYNC_EVENT_ID_SRAM23,
249 GOYA_ASYNC_EVENT_ID_SRAM24,
250 GOYA_ASYNC_EVENT_ID_SRAM25,
251 GOYA_ASYNC_EVENT_ID_SRAM26,
252 GOYA_ASYNC_EVENT_ID_SRAM27,
253 GOYA_ASYNC_EVENT_ID_SRAM28,
254 GOYA_ASYNC_EVENT_ID_SRAM29,
255 GOYA_ASYNC_EVENT_ID_GIC500,
256 GOYA_ASYNC_EVENT_ID_PLL0,
257 GOYA_ASYNC_EVENT_ID_PLL1,
258 GOYA_ASYNC_EVENT_ID_PLL3,
259 GOYA_ASYNC_EVENT_ID_PLL4,
260 GOYA_ASYNC_EVENT_ID_PLL5,
261 GOYA_ASYNC_EVENT_ID_PLL6,
262 GOYA_ASYNC_EVENT_ID_AXI_ECC,
263 GOYA_ASYNC_EVENT_ID_L2_RAM_ECC,
264 GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET,
265 GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT,
266 GOYA_ASYNC_EVENT_ID_PCIE_DEC,
267 GOYA_ASYNC_EVENT_ID_TPC0_DEC,
268 GOYA_ASYNC_EVENT_ID_TPC1_DEC,
269 GOYA_ASYNC_EVENT_ID_TPC2_DEC,
270 GOYA_ASYNC_EVENT_ID_TPC3_DEC,
271 GOYA_ASYNC_EVENT_ID_TPC4_DEC,
272 GOYA_ASYNC_EVENT_ID_TPC5_DEC,
273 GOYA_ASYNC_EVENT_ID_TPC6_DEC,
274 GOYA_ASYNC_EVENT_ID_TPC7_DEC,
275 GOYA_ASYNC_EVENT_ID_MME_WACS,
276 GOYA_ASYNC_EVENT_ID_MME_WACSD,
277 GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER,
278 GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC,
279 GOYA_ASYNC_EVENT_ID_PSOC,
280 GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR,
281 GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR,
282 GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR,
283 GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR,
284 GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR,
285 GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR,
286 GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR,
287 GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR,
288 GOYA_ASYNC_EVENT_ID_TPC0_CMDQ,
289 GOYA_ASYNC_EVENT_ID_TPC1_CMDQ,
290 GOYA_ASYNC_EVENT_ID_TPC2_CMDQ,
291 GOYA_ASYNC_EVENT_ID_TPC3_CMDQ,
292 GOYA_ASYNC_EVENT_ID_TPC4_CMDQ,
293 GOYA_ASYNC_EVENT_ID_TPC5_CMDQ,
294 GOYA_ASYNC_EVENT_ID_TPC6_CMDQ,
295 GOYA_ASYNC_EVENT_ID_TPC7_CMDQ,
296 GOYA_ASYNC_EVENT_ID_TPC0_QM,
297 GOYA_ASYNC_EVENT_ID_TPC1_QM,
298 GOYA_ASYNC_EVENT_ID_TPC2_QM,
299 GOYA_ASYNC_EVENT_ID_TPC3_QM,
300 GOYA_ASYNC_EVENT_ID_TPC4_QM,
301 GOYA_ASYNC_EVENT_ID_TPC5_QM,
302 GOYA_ASYNC_EVENT_ID_TPC6_QM,
303 GOYA_ASYNC_EVENT_ID_TPC7_QM,
304 GOYA_ASYNC_EVENT_ID_MME_QM,
305 GOYA_ASYNC_EVENT_ID_MME_CMDQ,
306 GOYA_ASYNC_EVENT_ID_DMA0_QM,
307 GOYA_ASYNC_EVENT_ID_DMA1_QM,
308 GOYA_ASYNC_EVENT_ID_DMA2_QM,
309 GOYA_ASYNC_EVENT_ID_DMA3_QM,
310 GOYA_ASYNC_EVENT_ID_DMA4_QM,
311 GOYA_ASYNC_EVENT_ID_DMA0_CH,
312 GOYA_ASYNC_EVENT_ID_DMA1_CH,
313 GOYA_ASYNC_EVENT_ID_DMA2_CH,
314 GOYA_ASYNC_EVENT_ID_DMA3_CH,
315 GOYA_ASYNC_EVENT_ID_DMA4_CH,
316 GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU,
317 GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU,
318 GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU,
319 GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU,
320 GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU,
321 GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU,
322 GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU,
323 GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU,
324 GOYA_ASYNC_EVENT_ID_DMA_BM_CH0,
325 GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
326 GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
327 GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
328 GOYA_ASYNC_EVENT_ID_DMA_BM_CH4,
329 GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S,
330 GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E,
331 GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S,
332 GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E
335 static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
336 static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
337 static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev);
338 static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
340 void goya_get_fixed_properties(struct hl_device *hdev)
342 struct asic_fixed_properties *prop = &hdev->asic_prop;
345 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
346 prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
347 prop->hw_queues_props[i].driver_only = 0;
348 prop->hw_queues_props[i].requires_kernel_cb = 1;
351 for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
352 prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
353 prop->hw_queues_props[i].driver_only = 1;
354 prop->hw_queues_props[i].requires_kernel_cb = 0;
357 for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
358 NUMBER_OF_INT_HW_QUEUES; i++) {
359 prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
360 prop->hw_queues_props[i].driver_only = 0;
361 prop->hw_queues_props[i].requires_kernel_cb = 0;
364 for (; i < HL_MAX_QUEUES; i++)
365 prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
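
	/*
	 * Resulting queue map: external (DMA) queues first, then the
	 * driver-only CPU queue(s), then the internal (TPC/MME) queues; any
	 * remaining slots up to HL_MAX_QUEUES stay marked as unused (NA).
	 */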
367 prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
369 prop->dram_base_address = DRAM_PHYS_BASE;
370 prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
371 prop->dram_end_address = prop->dram_base_address + prop->dram_size;
372 prop->dram_user_base_address = DRAM_BASE_ADDR_USER;
374 prop->sram_base_address = SRAM_BASE_ADDR;
375 prop->sram_size = SRAM_SIZE;
376 prop->sram_end_address = prop->sram_base_address + prop->sram_size;
377 prop->sram_user_base_address = prop->sram_base_address +
378 SRAM_USER_BASE_OFFSET;
380 prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
381 prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR;
	if (hdev->pldm)
		prop->mmu_pgt_size = 0x800000; /* 8MB */
	else
		prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
386 prop->mmu_pte_size = HL_PTE_SIZE;
387 prop->mmu_hop_table_size = HOP_TABLE_SIZE;
388 prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
389 prop->dram_page_size = PAGE_SIZE_2MB;
391 prop->dmmu.hop0_shift = HOP0_SHIFT;
392 prop->dmmu.hop1_shift = HOP1_SHIFT;
393 prop->dmmu.hop2_shift = HOP2_SHIFT;
394 prop->dmmu.hop3_shift = HOP3_SHIFT;
395 prop->dmmu.hop4_shift = HOP4_SHIFT;
396 prop->dmmu.hop0_mask = HOP0_MASK;
397 prop->dmmu.hop1_mask = HOP1_MASK;
398 prop->dmmu.hop2_mask = HOP2_MASK;
399 prop->dmmu.hop3_mask = HOP3_MASK;
400 prop->dmmu.hop4_mask = HOP4_MASK;
401 prop->dmmu.start_addr = VA_DDR_SPACE_START;
402 prop->dmmu.end_addr = VA_DDR_SPACE_END;
403 prop->dmmu.page_size = PAGE_SIZE_2MB;
405 /* shifts and masks are the same in PMMU and DMMU */
406 memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
407 prop->pmmu.start_addr = VA_HOST_SPACE_START;
408 prop->pmmu.end_addr = VA_HOST_SPACE_END;
409 prop->pmmu.page_size = PAGE_SIZE_4KB;
	/* PMMU and HPMMU are the same except for the page size */
412 memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
413 prop->pmmu_huge.page_size = PAGE_SIZE_2MB;
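
	/*
	 * Summary of the address-space split set up above: the device DRAM VA
	 * range goes through the DMMU with 2MB pages, while the host VA range
	 * goes through the PMMU with 4KB pages (or 2MB pages via pmmu_huge).
	 */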
415 prop->dram_size_for_default_page_mapping = VA_DDR_SPACE_END;
416 prop->cfg_size = CFG_SIZE;
417 prop->max_asid = MAX_ASID;
418 prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
419 prop->high_pll = PLL_HIGH_DEFAULT;
420 prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
421 prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
422 prop->max_power_default = MAX_POWER_DEFAULT;
423 prop->tpc_enabled_mask = TPC_ENABLED_MASK;
424 prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
425 prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
427 strncpy(prop->armcp_info.card_name, GOYA_DEFAULT_CARD_NAME,
432 * goya_pci_bars_map - Map PCI BARS of Goya device
434 * @hdev: pointer to hl_device structure
436 * Request PCI regions and map them to kernel virtual addresses.
437 * Returns 0 on success
440 static int goya_pci_bars_map(struct hl_device *hdev)
442 static const char * const name[] = {"SRAM_CFG", "MSIX", "DDR"};
443 bool is_wc[3] = {false, false, true};
446 rc = hl_pci_bars_map(hdev, name, is_wc);
450 hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
451 (CFG_BASE - SRAM_BASE_ADDR);
456 static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
458 struct goya_device *goya = hdev->asic_specific;
462 if ((goya) && (goya->ddr_bar_cur_addr == addr))
465 /* Inbound Region 1 - Bar 4 - Point to DDR */
466 rc = hl_pci_set_dram_bar_base(hdev, 1, 4, addr);
471 old_addr = goya->ddr_bar_cur_addr;
472 goya->ddr_bar_cur_addr = addr;
479 * goya_init_iatu - Initialize the iATU unit inside the PCI controller
481 * @hdev: pointer to hl_device structure
483 * This is needed in case the firmware doesn't initialize the iATU
486 static int goya_init_iatu(struct hl_device *hdev)
488 return hl_pci_init_iatu(hdev, SRAM_BASE_ADDR, DRAM_PHYS_BASE,
489 HOST_PHYS_BASE, HOST_PHYS_SIZE);
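
/*
 * (The common hl_pci_init_iatu() helper is assumed to program the inbound
 * regions toward SRAM/CFG and DRAM and the outbound region toward host
 * memory, using the bases and size passed above.)
 */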
493 * goya_early_init - GOYA early initialization code
495 * @hdev: pointer to hl_device structure
499 * PCI controller initialization
503 static int goya_early_init(struct hl_device *hdev)
505 struct asic_fixed_properties *prop = &hdev->asic_prop;
506 struct pci_dev *pdev = hdev->pdev;
510 goya_get_fixed_properties(hdev);
512 /* Check BAR sizes */
513 if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) {
515 "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
517 (unsigned long long) pci_resource_len(pdev,
523 if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) {
525 "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
527 (unsigned long long) pci_resource_len(pdev,
533 prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
535 rc = hl_pci_init(hdev);
540 val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
541 if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
543 "PCI strap is not configured correctly, PCI bus errors may occur\n");
550 * goya_early_fini - GOYA early finalization code
552 * @hdev: pointer to hl_device structure
557 static int goya_early_fini(struct hl_device *hdev)
564 static void goya_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
566 /* mask to zero the MMBP and ASID bits */
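	/*
	 * The OR below then installs the new ASID with MMBP (MMU bypass)
	 * left cleared, so the engine's transactions are translated by the
	 * MMU under that ASID. 0x7FF is assumed to span the ASID field plus
	 * the MMBP bit in the *_PROPS registers listed in goya_mmu_regs.
	 */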
567 WREG32_AND(reg, ~0x7FF);
568 WREG32_OR(reg, asid);
571 static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
573 struct goya_device *goya = hdev->asic_specific;
575 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
579 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
581 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
583 RREG32(mmDMA_QM_0_GLBL_PROT);
587 * goya_fetch_psoc_frequency - Fetch PSOC frequency values
589 * @hdev: pointer to hl_device structure
592 static void goya_fetch_psoc_frequency(struct hl_device *hdev)
594 struct asic_fixed_properties *prop = &hdev->asic_prop;
596 prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
597 prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
598 prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
599 prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
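
	/*
	 * The raw PLL parameters (NR, NF, OD and the divider factor) are
	 * cached in the fixed properties so the PSOC PCI PLL frequency can
	 * be derived from them elsewhere in the driver; nothing is computed
	 * here.
	 */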
602 int goya_late_init(struct hl_device *hdev)
604 struct asic_fixed_properties *prop = &hdev->asic_prop;
607 goya_fetch_psoc_frequency(hdev);
609 rc = goya_mmu_clear_pgt_range(hdev);
612 "Failed to clear MMU page tables range %d\n", rc);
616 rc = goya_mmu_set_dram_default_page(hdev);
618 dev_err(hdev->dev, "Failed to set DRAM default page %d\n", rc);
622 rc = goya_mmu_add_mappings_for_device_cpu(hdev);
626 rc = goya_init_cpu_queues(hdev);
630 rc = goya_test_cpu_queue(hdev);
634 rc = goya_armcp_info_get(hdev);
636 dev_err(hdev->dev, "Failed to get armcp info %d\n", rc);
	/* Now that we have the DRAM size in ASIC prop, we need to check
	 * its size and configure the DMA_IF DDR wrap protection (which is in
	 * the MMU block) accordingly. The value is the log2 of the DRAM size.
	 */
	WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));
646 rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
649 "Failed to enable PCI access from CPU %d\n", rc);
653 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
654 GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
660 * goya_late_fini - GOYA late tear-down code
662 * @hdev: pointer to hl_device structure
 * Free the structures that were allocated for the sensors
666 void goya_late_fini(struct hl_device *hdev)
668 const struct hwmon_channel_info **channel_info_arr;
671 if (!hdev->hl_chip_info->info)
674 channel_info_arr = hdev->hl_chip_info->info;
676 while (channel_info_arr[i]) {
677 kfree(channel_info_arr[i]->config);
678 kfree(channel_info_arr[i]);
682 kfree(channel_info_arr);
684 hdev->hl_chip_info->info = NULL;
688 * goya_sw_init - Goya software initialization code
690 * @hdev: pointer to hl_device structure
693 static int goya_sw_init(struct hl_device *hdev)
695 struct goya_device *goya;
698 /* Allocate device structure */
699 goya = kzalloc(sizeof(*goya), GFP_KERNEL);
703 /* according to goya_init_iatu */
704 goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;
706 goya->mme_clk = GOYA_PLL_FREQ_LOW;
707 goya->tpc_clk = GOYA_PLL_FREQ_LOW;
708 goya->ic_clk = GOYA_PLL_FREQ_LOW;
710 hdev->asic_specific = goya;
712 /* Create DMA pool for small allocations */
713 hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
714 &hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
715 if (!hdev->dma_pool) {
716 dev_err(hdev->dev, "failed to create DMA pool\n");
718 goto free_goya_device;
721 hdev->cpu_accessible_dma_mem =
722 hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
723 HL_CPU_ACCESSIBLE_MEM_SIZE,
724 &hdev->cpu_accessible_dma_address,
725 GFP_KERNEL | __GFP_ZERO);
727 if (!hdev->cpu_accessible_dma_mem) {
732 dev_dbg(hdev->dev, "cpu accessible memory at bus address %pad\n",
733 &hdev->cpu_accessible_dma_address);
735 hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
736 if (!hdev->cpu_accessible_dma_pool) {
738 "Failed to create CPU accessible DMA pool\n");
740 goto free_cpu_dma_mem;
743 rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
744 (uintptr_t) hdev->cpu_accessible_dma_mem,
745 HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
748 "Failed to add memory to CPU accessible DMA pool\n");
750 goto free_cpu_accessible_dma_pool;
753 spin_lock_init(&goya->hw_queues_lock);
754 hdev->supports_coresight = true;
755 hdev->supports_soft_reset = true;
759 free_cpu_accessible_dma_pool:
760 gen_pool_destroy(hdev->cpu_accessible_dma_pool);
762 hdev->asic_funcs->asic_dma_free_coherent(hdev,
763 HL_CPU_ACCESSIBLE_MEM_SIZE,
764 hdev->cpu_accessible_dma_mem,
765 hdev->cpu_accessible_dma_address);
767 dma_pool_destroy(hdev->dma_pool);
775 * goya_sw_fini - Goya software tear-down code
777 * @hdev: pointer to hl_device structure
780 static int goya_sw_fini(struct hl_device *hdev)
782 struct goya_device *goya = hdev->asic_specific;
784 gen_pool_destroy(hdev->cpu_accessible_dma_pool);
786 hdev->asic_funcs->asic_dma_free_coherent(hdev,
787 HL_CPU_ACCESSIBLE_MEM_SIZE,
788 hdev->cpu_accessible_dma_mem,
789 hdev->cpu_accessible_dma_address);
791 dma_pool_destroy(hdev->dma_pool);
798 static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
799 dma_addr_t bus_address)
801 struct goya_device *goya = hdev->asic_specific;
802 u32 mtr_base_lo, mtr_base_hi;
803 u32 so_base_lo, so_base_hi;
804 u32 gic_base_lo, gic_base_hi;
805 u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);
806 u32 dma_err_cfg = QMAN_DMA_ERR_MSG_EN;
808 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
809 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
810 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
811 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
814 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
816 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
818 WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address));
819 WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address));
821 WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH));
822 WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0);
823 WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0);
825 WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
826 WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
827 WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
828 WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
829 WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
830 WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
831 WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off,
832 GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id);
	/* PQ has a buffer of 2 cache lines, while CQ has a buffer of 8 lines */
835 WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002);
836 WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008);
838 if (goya->hw_cap_initialized & HW_CAP_MMU)
839 WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED);
841 WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);
843 if (hdev->stop_on_err)
844 dma_err_cfg |= 1 << DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT;
846 WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, dma_err_cfg);
847 WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
850 static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
852 u32 gic_base_lo, gic_base_hi;
854 u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1);
857 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
859 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
861 WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo);
862 WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi);
863 WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off,
864 GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id);
867 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
870 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
872 WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr));
873 WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001);
877 * goya_init_dma_qmans - Initialize QMAN DMA registers
879 * @hdev: pointer to hl_device structure
881 * Initialize the H/W registers of the QMAN DMA channels
884 void goya_init_dma_qmans(struct hl_device *hdev)
886 struct goya_device *goya = hdev->asic_specific;
887 struct hl_hw_queue *q;
890 if (goya->hw_cap_initialized & HW_CAP_DMA)
893 q = &hdev->kernel_queues[0];
895 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
896 q->cq_id = q->msi_vec = i;
897 goya_init_dma_qman(hdev, i, q->bus_address);
898 goya_init_dma_ch(hdev, i);
901 goya->hw_cap_initialized |= HW_CAP_DMA;
905 * goya_disable_external_queues - Disable external queues
907 * @hdev: pointer to hl_device structure
910 static void goya_disable_external_queues(struct hl_device *hdev)
912 struct goya_device *goya = hdev->asic_specific;
914 if (!(goya->hw_cap_initialized & HW_CAP_DMA))
917 WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
918 WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
919 WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
920 WREG32(mmDMA_QM_3_GLBL_CFG0, 0);
921 WREG32(mmDMA_QM_4_GLBL_CFG0, 0);
924 static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg,
925 u32 cp_sts_reg, u32 glbl_sts0_reg)
	/* use the values of TPC0 as they are all the same */
932 WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
934 status = RREG32(cp_sts_reg);
935 if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) {
936 rc = hl_poll_timeout(
940 !(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK),
942 QMAN_FENCE_TIMEOUT_USEC);
		/* if the QMAN is stuck on a fence, no need to check for stop */
949 rc = hl_poll_timeout(
953 (status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK),
955 QMAN_STOP_TIMEOUT_USEC);
959 "Timeout while waiting for QMAN to stop\n");
967 * goya_stop_external_queues - Stop external queues
969 * @hdev: pointer to hl_device structure
971 * Returns 0 on success
974 static int goya_stop_external_queues(struct hl_device *hdev)
978 struct goya_device *goya = hdev->asic_specific;
980 if (!(goya->hw_cap_initialized & HW_CAP_DMA))
983 rc = goya_stop_queue(hdev,
984 mmDMA_QM_0_GLBL_CFG1,
986 mmDMA_QM_0_GLBL_STS0);
989 dev_err(hdev->dev, "failed to stop DMA QMAN 0\n");
993 rc = goya_stop_queue(hdev,
994 mmDMA_QM_1_GLBL_CFG1,
996 mmDMA_QM_1_GLBL_STS0);
999 dev_err(hdev->dev, "failed to stop DMA QMAN 1\n");
1003 rc = goya_stop_queue(hdev,
1004 mmDMA_QM_2_GLBL_CFG1,
1006 mmDMA_QM_2_GLBL_STS0);
1009 dev_err(hdev->dev, "failed to stop DMA QMAN 2\n");
1013 rc = goya_stop_queue(hdev,
1014 mmDMA_QM_3_GLBL_CFG1,
1016 mmDMA_QM_3_GLBL_STS0);
1019 dev_err(hdev->dev, "failed to stop DMA QMAN 3\n");
1023 rc = goya_stop_queue(hdev,
1024 mmDMA_QM_4_GLBL_CFG1,
1026 mmDMA_QM_4_GLBL_STS0);
1029 dev_err(hdev->dev, "failed to stop DMA QMAN 4\n");
1037 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
1039 * @hdev: pointer to hl_device structure
1041 * Returns 0 on success
1044 int goya_init_cpu_queues(struct hl_device *hdev)
1046 struct goya_device *goya = hdev->asic_specific;
1049 struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
1052 if (!hdev->cpu_queues_enable)
1055 if (goya->hw_cap_initialized & HW_CAP_CPU_Q)
1058 eq = &hdev->event_queue;
1060 WREG32(mmCPU_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
1061 WREG32(mmCPU_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));
1063 WREG32(mmCPU_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
1064 WREG32(mmCPU_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));
1066 WREG32(mmCPU_CQ_BASE_ADDR_LOW,
1067 lower_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
1068 WREG32(mmCPU_CQ_BASE_ADDR_HIGH,
1069 upper_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
1071 WREG32(mmCPU_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES);
1072 WREG32(mmCPU_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES);
1073 WREG32(mmCPU_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE);
1075 /* Used for EQ CI */
1076 WREG32(mmCPU_EQ_CI, 0);
1078 WREG32(mmCPU_IF_PF_PQ_PI, 0);
1080 WREG32(mmCPU_PQ_INIT_STATUS, PQ_INIT_STATUS_READY_FOR_CP);
1082 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
1083 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
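
	/*
	 * Handshake with the device CPU: the driver marks the PQ as ready
	 * for the CPU (READY_FOR_CP), pokes the CPU through the GIC, and
	 * then polls below until the CPU flips the status to READY_FOR_HOST.
	 */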
1085 err = hl_poll_timeout(
1087 mmCPU_PQ_INIT_STATUS,
1089 (status == PQ_INIT_STATUS_READY_FOR_HOST),
1091 GOYA_CPU_TIMEOUT_USEC);
1095 "Failed to setup communication with device CPU\n");
1099 goya->hw_cap_initialized |= HW_CAP_CPU_Q;
1103 static void goya_set_pll_refclk(struct hl_device *hdev)
1105 WREG32(mmCPU_PLL_DIV_SEL_0, 0x0);
1106 WREG32(mmCPU_PLL_DIV_SEL_1, 0x0);
1107 WREG32(mmCPU_PLL_DIV_SEL_2, 0x0);
1108 WREG32(mmCPU_PLL_DIV_SEL_3, 0x0);
1110 WREG32(mmIC_PLL_DIV_SEL_0, 0x0);
1111 WREG32(mmIC_PLL_DIV_SEL_1, 0x0);
1112 WREG32(mmIC_PLL_DIV_SEL_2, 0x0);
1113 WREG32(mmIC_PLL_DIV_SEL_3, 0x0);
1115 WREG32(mmMC_PLL_DIV_SEL_0, 0x0);
1116 WREG32(mmMC_PLL_DIV_SEL_1, 0x0);
1117 WREG32(mmMC_PLL_DIV_SEL_2, 0x0);
1118 WREG32(mmMC_PLL_DIV_SEL_3, 0x0);
1120 WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0);
1121 WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0);
1122 WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0);
1123 WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0);
1125 WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0);
1126 WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0);
1127 WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0);
1128 WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0);
1130 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0);
1131 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0);
1132 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0);
1133 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0);
1135 WREG32(mmTPC_PLL_DIV_SEL_0, 0x0);
1136 WREG32(mmTPC_PLL_DIV_SEL_1, 0x0);
1137 WREG32(mmTPC_PLL_DIV_SEL_2, 0x0);
1138 WREG32(mmTPC_PLL_DIV_SEL_3, 0x0);
1141 static void goya_disable_clk_rlx(struct hl_device *hdev)
1143 WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010);
1144 WREG32(mmIC_PLL_CLK_RLX_0, 0x100010);
1147 static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id)
1149 u64 tpc_eml_address;
1150 u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset;
1153 tpc_offset = tpc_id * 0x40000;
1154 tpc_eml_offset = tpc_id * 0x200000;
1155 tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE);
1156 tpc_slm_offset = tpc_eml_address + 0x100000;
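
	/*
	 * Per-TPC strides used above: each TPC's CFG block is 0x40000 apart
	 * and each EML debug block is 0x200000 apart; the SLM scratch area is
	 * assumed to sit 0x100000 into the EML block (addresses are relative
	 * to CFG_BASE).
	 */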
	/*
	 * Workaround for Bug H2 #2443:
	 * "TPC SB is not initialized on chip reset"
	 */
1163 val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset);
1164 if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK)
1165 dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n",
1168 WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000);
1170 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF);
1171 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F);
1172 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF);
1173 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF);
1174 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF);
1175 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF);
1176 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF);
1177 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + tpc_offset, 0x70FF);
1178 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF);
1179 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF);
1181 WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
1182 1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT);
1184 err = hl_poll_timeout(
1186 mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
1188 (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK),
1190 HL_DEVICE_TIMEOUT_USEC);
1194 "Timeout while waiting for TPC%d MBIST DONE\n", tpc_id);
1196 WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
1197 1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT);
1199 msleep(GOYA_RESET_WAIT_MSEC);
1201 WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
1202 ~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT));
1204 msleep(GOYA_RESET_WAIT_MSEC);
1206 for (slm_index = 0 ; slm_index < 256 ; slm_index++)
1207 WREG32(tpc_slm_offset + (slm_index << 2), 0);
1209 val = RREG32(tpc_slm_offset);
1212 static void goya_tpc_mbist_workaround(struct hl_device *hdev)
1214 struct goya_device *goya = hdev->asic_specific;
1220 if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST)
1223 /* Workaround for H2 #2443 */
1225 for (i = 0 ; i < TPC_MAX_NUM ; i++)
1226 _goya_tpc_mbist_workaround(hdev, i);
1228 goya->hw_cap_initialized |= HW_CAP_TPC_MBIST;
1232 * goya_init_golden_registers - Initialize golden registers
1234 * @hdev: pointer to hl_device structure
1236 * Initialize the H/W registers of the device
1239 static void goya_init_golden_registers(struct hl_device *hdev)
1241 struct goya_device *goya = hdev->asic_specific;
1242 u32 polynom[10], tpc_intr_mask, offset;
1245 if (goya->hw_cap_initialized & HW_CAP_GOLDEN)
1248 polynom[0] = 0x00020080;
1249 polynom[1] = 0x00401000;
1250 polynom[2] = 0x00200800;
1251 polynom[3] = 0x00002000;
1252 polynom[4] = 0x00080200;
1253 polynom[5] = 0x00040100;
1254 polynom[6] = 0x00100400;
1255 polynom[7] = 0x00004000;
1256 polynom[8] = 0x00010000;
1257 polynom[9] = 0x00008000;
1259 /* Mask all arithmetic interrupts from TPC */
1260 tpc_intr_mask = 0x7FFF;
1262 for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) {
1263 WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1264 WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1265 WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1266 WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1267 WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1269 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204);
1270 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204);
1271 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204);
1272 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204);
1273 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204);
1276 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206);
1277 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206);
1278 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206);
1279 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207);
1280 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + offset, 0x207);
1282 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207);
1283 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207);
1284 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206);
1285 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206);
1286 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206);
1288 WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101);
1289 WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102);
1290 WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103);
1291 WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 0x104);
1292 WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105);
1294 WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105);
1295 WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104);
1296 WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103);
1297 WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102);
1298 WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101);
1301 WREG32(mmMME_STORE_MAX_CREDIT, 0x21);
1302 WREG32(mmMME_AGU, 0x0f0f0f10);
1303 WREG32(mmMME_SEI_MASK, ~0x0);
1305 WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1306 WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1307 WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1308 WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1309 WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1310 WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701);
1311 WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401);
1312 WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401);
1313 WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301);
1314 WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1315 WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1316 WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105);
1317 WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1318 WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1319 WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301);
1320 WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401);
1321 WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101);
1322 WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101);
1323 WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202);
1324 WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101);
1325 WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201);
1326 WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701);
1327 WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101);
1328 WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1329 WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1330 WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1331 WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701);
1332 WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201);
1333 WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1334 WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102);
1335 WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1336 WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1337 WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707);
1338 WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201);
1339 WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1340 WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1341 WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1342 WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1343 WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102);
1344 WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102);
1345 WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102);
1346 WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107);
1347 WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106);
1348 WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102);
1349 WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102);
1350 WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102);
1351 WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1352 WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1353 WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1354 WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1355 WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602);
1356 WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402);
1357 WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202);
1358 WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102);
1359 WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1360 WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1361 WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1362 WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1363 WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1364 WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1365 WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1366 WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1367 WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1368 WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1369 WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1370 WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107);
1371 WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107);
1372 WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1373 WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1374 WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1375 WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1376 WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1377 WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1378 WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1379 WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301);
1380 WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401);
1381 WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101);
1382 WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101);
1383 WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1384 WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1385 WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1386 WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1387 WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1388 WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1390 WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1391 WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1392 WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101);
1393 WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102);
1394 WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1395 WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202);
1396 WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1397 WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1398 WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1399 WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1400 WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1401 WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101);
1403 WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1404 WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101);
1405 WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201);
1406 WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102);
1407 WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101);
1408 WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202);
1409 WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1410 WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1411 WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1412 WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1413 WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1414 WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101);
1416 WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1417 WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1418 WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301);
1419 WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102);
1420 WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101);
1421 WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301);
1422 WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201);
1423 WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201);
1424 WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402);
1425 WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1426 WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1427 WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401);
1429 WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1430 WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1431 WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401);
1432 WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1433 WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101);
1434 WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702);
1435 WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201);
1436 WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201);
1437 WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602);
1438 WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1439 WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1440 WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 0x01040301);
1442 WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101);
1443 WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101);
1444 WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501);
1445 WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1446 WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101);
1447 WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602);
1448 WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201);
1449 WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201);
1450 WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1451 WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1452 WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1453 WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1455 WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1456 WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1457 WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601);
1458 WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101);
1459 WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1460 WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702);
1461 WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101);
1462 WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101);
1463 WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1464 WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1465 WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1466 WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1468 for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) {
1469 WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1470 WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1471 WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1472 WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1473 WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1474 WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1476 WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1477 WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1478 WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1479 WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1480 WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1481 WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1482 WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1483 WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1485 WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1486 WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1489 for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) {
1490 WREG32(mmMME1_RTR_SCRAMB_EN + offset,
1491 1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT);
1492 WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset,
1493 1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT);
1496 for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) {
		/*
		 * Workaround for Bug H2 #2441:
		 * "ST.NOP set trace event illegal opcode"
		 */
1501 WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask);
1503 WREG32(mmTPC0_NRTR_SCRAMB_EN + offset,
1504 1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
1505 WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
1506 1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1508 WREG32_FIELD(TPC0_CFG_MSS_CONFIG, offset,
1509 ICACHE_FETCH_LINE_NUM, 2);
1512 WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
1513 WREG32(mmDMA_NRTR_NON_LIN_SCRAMB,
1514 1 << DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1516 WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT);
1517 WREG32(mmPCI_NRTR_NON_LIN_SCRAMB,
1518 1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
	/*
	 * Workaround for H2 #HW-23 bug:
	 * Set the DMA max outstanding read requests to 240 on DMA CH 1.
	 * This limitation is still large enough to not affect Gen4 bandwidth.
	 * We only need to limit that DMA channel because the user can only
	 * read from the host using DMA CH 1.
	 */
1527 WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
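	/* The 0xF0 low byte is the 240 outstanding-read limit mentioned above;
	 * the meaning of the remaining fields of CFG0 is assumed unchanged.
	 */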
1529 WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
1531 goya->hw_cap_initialized |= HW_CAP_GOLDEN;
1534 static void goya_init_mme_qman(struct hl_device *hdev)
1536 u32 mtr_base_lo, mtr_base_hi;
1537 u32 so_base_lo, so_base_hi;
1538 u32 gic_base_lo, gic_base_hi;
1541 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1542 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1543 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1544 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1547 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1549 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1551 qman_base_addr = hdev->asic_prop.sram_base_address +
1552 MME_QMAN_BASE_OFFSET;
1554 WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr));
1555 WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr));
1556 WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH));
1557 WREG32(mmMME_QM_PQ_PI, 0);
1558 WREG32(mmMME_QM_PQ_CI, 0);
1559 WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0);
1560 WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4);
1561 WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8);
1562 WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC);
1564 WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1565 WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1566 WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1567 WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1569 /* QMAN CQ has 8 cache lines */
1570 WREG32(mmMME_QM_CQ_CFG1, 0x00080008);
1572 WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo);
1573 WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi);
1575 WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM);
1577 WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN);
1579 WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT);
1581 WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE);
1584 static void goya_init_mme_cmdq(struct hl_device *hdev)
1586 u32 mtr_base_lo, mtr_base_hi;
1587 u32 so_base_lo, so_base_hi;
1588 u32 gic_base_lo, gic_base_hi;
1590 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1591 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1592 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1593 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1596 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1598 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1600 WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1601 WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1602 WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1603 WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1605 /* CMDQ CQ has 20 cache lines */
1606 WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014);
1608 WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo);
1609 WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi);
1611 WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ);
1613 WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN);
1615 WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT);
1617 WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
1620 void goya_init_mme_qmans(struct hl_device *hdev)
1622 struct goya_device *goya = hdev->asic_specific;
1623 u32 so_base_lo, so_base_hi;
1625 if (goya->hw_cap_initialized & HW_CAP_MME)
1628 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1629 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1631 WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo);
1632 WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi);
1634 goya_init_mme_qman(hdev);
1635 goya_init_mme_cmdq(hdev);
1637 goya->hw_cap_initialized |= HW_CAP_MME;
1640 static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id)
1642 u32 mtr_base_lo, mtr_base_hi;
1643 u32 so_base_lo, so_base_hi;
1644 u32 gic_base_lo, gic_base_hi;
1646 u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI);
1648 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1649 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1650 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1651 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1654 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1656 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1658 qman_base_addr = hdev->asic_prop.sram_base_address + base_off;
1660 WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr));
1661 WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr));
1662 WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH));
1663 WREG32(mmTPC0_QM_PQ_PI + reg_off, 0);
1664 WREG32(mmTPC0_QM_PQ_CI + reg_off, 0);
1665 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0);
1666 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4);
1667 WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8);
1668 WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC);
1670 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1671 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1672 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1673 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1675 WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008);
1677 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1678 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1680 WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off,
1681 GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id);
1683 WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN);
1685 WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT);
1687 WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE);
1690 static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
1692 u32 mtr_base_lo, mtr_base_hi;
1693 u32 so_base_lo, so_base_hi;
1694 u32 gic_base_lo, gic_base_hi;
1695 u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1);
1697 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1698 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1699 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1700 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1703 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1705 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1707 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1708 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1709 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1710 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1712 WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014);
1714 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1715 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1717 WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off,
1718 GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id);
1720 WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN);
1722 WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT);
1724 WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
1727 void goya_init_tpc_qmans(struct hl_device *hdev)
1729 struct goya_device *goya = hdev->asic_specific;
1730 u32 so_base_lo, so_base_hi;
1731 u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW -
1732 mmTPC0_CFG_SM_BASE_ADDRESS_LOW;
1735 if (goya->hw_cap_initialized & HW_CAP_TPC)
1738 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1739 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1741 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
1742 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off,
1744 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off,
1748 goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0);
1749 goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1);
1750 goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2);
1751 goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3);
1752 goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4);
1753 goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5);
1754 goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6);
1755 goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7);
1757 for (i = 0 ; i < TPC_MAX_NUM ; i++)
1758 goya_init_tpc_cmdq(hdev, i);
1760 goya->hw_cap_initialized |= HW_CAP_TPC;
1764 * goya_disable_internal_queues - Disable internal queues
1766 * @hdev: pointer to hl_device structure
1769 static void goya_disable_internal_queues(struct hl_device *hdev)
1771 struct goya_device *goya = hdev->asic_specific;
1773 if (!(goya->hw_cap_initialized & HW_CAP_MME))
1776 WREG32(mmMME_QM_GLBL_CFG0, 0);
1777 WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
1780 if (!(goya->hw_cap_initialized & HW_CAP_TPC))
1783 WREG32(mmTPC0_QM_GLBL_CFG0, 0);
1784 WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
1786 WREG32(mmTPC1_QM_GLBL_CFG0, 0);
1787 WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0);
1789 WREG32(mmTPC2_QM_GLBL_CFG0, 0);
1790 WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0);
1792 WREG32(mmTPC3_QM_GLBL_CFG0, 0);
1793 WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0);
1795 WREG32(mmTPC4_QM_GLBL_CFG0, 0);
1796 WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0);
1798 WREG32(mmTPC5_QM_GLBL_CFG0, 0);
1799 WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0);
1801 WREG32(mmTPC6_QM_GLBL_CFG0, 0);
1802 WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0);
1804 WREG32(mmTPC7_QM_GLBL_CFG0, 0);
1805 WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0);
1809 * goya_stop_internal_queues - Stop internal queues
1811 * @hdev: pointer to hl_device structure
1813 * Returns 0 on success
1816 static int goya_stop_internal_queues(struct hl_device *hdev)
1818 struct goya_device *goya = hdev->asic_specific;
1821 if (!(goya->hw_cap_initialized & HW_CAP_MME))
	/*
	 * Each queue (QMAN) is a separate H/W logic. That means that each
	 * QMAN can be stopped independently, and failure to stop one does
	 * NOT mean we should not try to stop the other QMANs.
	 */
1830 rc = goya_stop_queue(hdev,
1833 mmMME_QM_GLBL_STS0);
1836 dev_err(hdev->dev, "failed to stop MME QMAN\n");
1840 rc = goya_stop_queue(hdev,
1841 mmMME_CMDQ_GLBL_CFG1,
1843 mmMME_CMDQ_GLBL_STS0);
1846 dev_err(hdev->dev, "failed to stop MME CMDQ\n");
1851 if (!(goya->hw_cap_initialized & HW_CAP_TPC))
1854 rc = goya_stop_queue(hdev,
1855 mmTPC0_QM_GLBL_CFG1,
1857 mmTPC0_QM_GLBL_STS0);
1860 dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n");
1864 rc = goya_stop_queue(hdev,
1865 mmTPC0_CMDQ_GLBL_CFG1,
1867 mmTPC0_CMDQ_GLBL_STS0);
1870 dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n");
1874 rc = goya_stop_queue(hdev,
1875 mmTPC1_QM_GLBL_CFG1,
1877 mmTPC1_QM_GLBL_STS0);
1880 dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n");
1884 rc = goya_stop_queue(hdev,
1885 mmTPC1_CMDQ_GLBL_CFG1,
1887 mmTPC1_CMDQ_GLBL_STS0);
1890 dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n");
1894 rc = goya_stop_queue(hdev,
1895 mmTPC2_QM_GLBL_CFG1,
1897 mmTPC2_QM_GLBL_STS0);
1900 dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n");
1904 rc = goya_stop_queue(hdev,
1905 mmTPC2_CMDQ_GLBL_CFG1,
1907 mmTPC2_CMDQ_GLBL_STS0);
1910 dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n");
1914 rc = goya_stop_queue(hdev,
1915 mmTPC3_QM_GLBL_CFG1,
1917 mmTPC3_QM_GLBL_STS0);
1920 dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n");
1924 rc = goya_stop_queue(hdev,
1925 mmTPC3_CMDQ_GLBL_CFG1,
1927 mmTPC3_CMDQ_GLBL_STS0);
1930 dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n");
1934 rc = goya_stop_queue(hdev,
1935 mmTPC4_QM_GLBL_CFG1,
1937 mmTPC4_QM_GLBL_STS0);
1940 dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n");
1944 rc = goya_stop_queue(hdev,
1945 mmTPC4_CMDQ_GLBL_CFG1,
1947 mmTPC4_CMDQ_GLBL_STS0);
1950 dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n");
1954 rc = goya_stop_queue(hdev,
1955 mmTPC5_QM_GLBL_CFG1,
1957 mmTPC5_QM_GLBL_STS0);
1960 dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n");
1964 rc = goya_stop_queue(hdev,
1965 mmTPC5_CMDQ_GLBL_CFG1,
1967 mmTPC5_CMDQ_GLBL_STS0);
1970 dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n");
1974 rc = goya_stop_queue(hdev,
1975 mmTPC6_QM_GLBL_CFG1,
1977 mmTPC6_QM_GLBL_STS0);
1980 dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n");
1984 rc = goya_stop_queue(hdev,
1985 mmTPC6_CMDQ_GLBL_CFG1,
1987 mmTPC6_CMDQ_GLBL_STS0);
1990 dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n");
1994 rc = goya_stop_queue(hdev,
1995 mmTPC7_QM_GLBL_CFG1,
1997 mmTPC7_QM_GLBL_STS0);
2000 dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n");
2004 rc = goya_stop_queue(hdev,
2005 mmTPC7_CMDQ_GLBL_CFG1,
2007 mmTPC7_CMDQ_GLBL_STS0);
2010 dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n");
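/*
 * Engine stall helpers. Unlike the queue stop/disable routines above, these
 * act on the engines themselves: each one writes the engine's STOP/STALL bit
 * so outstanding work is frozen before the queues are finally disabled in
 * goya_halt_engines(). Each helper is a no-op if the matching HW_CAP bit was
 * never set.
 */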
2017 static void goya_dma_stall(struct hl_device *hdev)
2019 struct goya_device *goya = hdev->asic_specific;
2021 if (!(goya->hw_cap_initialized & HW_CAP_DMA))
2024 WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
2025 WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
2026 WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
2027 WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT);
2028 WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT);
2031 static void goya_tpc_stall(struct hl_device *hdev)
2033 struct goya_device *goya = hdev->asic_specific;
2035 if (!(goya->hw_cap_initialized & HW_CAP_TPC))
2038 WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
2039 WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
2040 WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
2041 WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT);
2042 WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT);
2043 WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT);
2044 WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT);
2045 WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT);
2048 static void goya_mme_stall(struct hl_device *hdev)
2050 struct goya_device *goya = hdev->asic_specific;
2052 if (!(goya->hw_cap_initialized & HW_CAP_MME))
2055 WREG32(mmMME_STALL, 0xFFFFFFFF);
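/*
 * Allocate exactly GOYA_MSIX_ENTRIES MSI-X vectors and request one IRQ per
 * completion queue plus a dedicated vector for the event queue
 * (GOYA_EVENT_QUEUE_MSIX_IDX). On failure, any IRQs already requested are
 * freed and the vectors are released.
 */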
2058 static int goya_enable_msix(struct hl_device *hdev)
2060 struct goya_device *goya = hdev->asic_specific;
2061 int cq_cnt = hdev->asic_prop.completion_queues_count;
2062 int rc, i, irq_cnt_init, irq;
2064 if (goya->hw_cap_initialized & HW_CAP_MSIX)
2067 rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES,
2068 GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX);
2071 "MSI-X: Failed to enable support -- %d/%d\n",
2072 GOYA_MSIX_ENTRIES, rc);
2076 for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
2077 irq = pci_irq_vector(hdev->pdev, i);
2078 rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i],
2079 &hdev->completion_queue[i]);
2081 dev_err(hdev->dev, "Failed to request IRQ %d", irq);
2086 irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
2088 rc = request_irq(irq, hl_irq_handler_eq, 0,
2089 goya_irq_name[GOYA_EVENT_QUEUE_MSIX_IDX],
2090 &hdev->event_queue);
2092 dev_err(hdev->dev, "Failed to request IRQ %d", irq);
2096 goya->hw_cap_initialized |= HW_CAP_MSIX;
2100 for (i = 0 ; i < irq_cnt_init ; i++)
2101 free_irq(pci_irq_vector(hdev->pdev, i),
2102 &hdev->completion_queue[i]);
2104 pci_free_irq_vectors(hdev->pdev);
2108 static void goya_sync_irqs(struct hl_device *hdev)
2110 struct goya_device *goya = hdev->asic_specific;
2113 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2116 /* Wait for all pending IRQs to be finished */
2117 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
2118 synchronize_irq(pci_irq_vector(hdev->pdev, i));
2120 synchronize_irq(pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX));
2123 static void goya_disable_msix(struct hl_device *hdev)
2125 struct goya_device *goya = hdev->asic_specific;
2128 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2131 goya_sync_irqs(hdev);
2133 irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
2134 free_irq(irq, &hdev->event_queue);
2136 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
2137 irq = pci_irq_vector(hdev->pdev, i);
2138 free_irq(irq, &hdev->completion_queue[i]);
2141 pci_free_irq_vectors(hdev->pdev);
2143 goya->hw_cap_initialized &= ~HW_CAP_MSIX;
2146 static void goya_enable_timestamp(struct hl_device *hdev)
2148 /* Disable the timestamp counter */
2149 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
2151 /* Zero the lower/upper parts of the 64-bit counter */
2152 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
2153 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
2155 /* Enable the counter */
2156 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
2159 static void goya_disable_timestamp(struct hl_device *hdev)
2161 /* Disable the timestamp counter */
2162 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
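/*
 * Bring all compute engines to a quiescent state before a reset. The exact
 * steps depend on whether this is a hard or soft reset, but the order is:
 * tell the embedded CPU to halt, stop the external and internal queues, stall
 * the DMA/TPC/MME engines, disable the queues and the timestamp counter, and
 * finally either tear down MSI-X and the device CPU MMU mappings (hard reset)
 * or just synchronize pending IRQs.
 */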
2165 static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
2167 u32 wait_timeout_ms, cpu_timeout_ms;
2170 "Halting compute engines and disabling interrupts\n");
2173 wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2174 cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2176 wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
2177 cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
2182	 * We don't know what state the CPU is in, so make sure it is
2183	 * stopped by any means necessary
2185 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
2186 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2187 GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
2188 msleep(cpu_timeout_ms);
2191 goya_stop_external_queues(hdev);
2192 goya_stop_internal_queues(hdev);
2194 msleep(wait_timeout_ms);
2196 goya_dma_stall(hdev);
2197 goya_tpc_stall(hdev);
2198 goya_mme_stall(hdev);
2200 msleep(wait_timeout_ms);
2202 goya_disable_external_queues(hdev);
2203 goya_disable_internal_queues(hdev);
2205 goya_disable_timestamp(hdev);
2208 goya_disable_msix(hdev);
2209 goya_mmu_remove_device_cpu_mappings(hdev);
2211 goya_sync_irqs(hdev);
2216 * goya_load_firmware_to_device() - Load LINUX FW code to device.
2217 * @hdev: Pointer to hl_device structure.
2219 * Copy LINUX fw code from firmware file to HBM BAR.
2221 * Return: 0 on success, non-zero for failure.
2223 static int goya_load_firmware_to_device(struct hl_device *hdev)
2227 dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
2229 return hl_fw_load_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst);
2233 * goya_load_boot_fit_to_device() - Load boot fit to device.
2234 * @hdev: Pointer to hl_device structure.
2236 * Copy boot fit file to SRAM BAR.
2238 * Return: 0 on success, non-zero for failure.
2240 static int goya_load_boot_fit_to_device(struct hl_device *hdev)
2244 dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + BOOT_FIT_SRAM_OFFSET;
2246 return hl_fw_load_fw_to_device(hdev, GOYA_BOOT_FIT_FILE, dst);
2250 * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx.
2251	 * The version string should be located at that offset.
2253 static void goya_read_device_fw_version(struct hl_device *hdev,
2254 enum hl_fw_component fwc)
2262 ver_off = RREG32(mmUBOOT_VER_OFFSET);
2263 dest = hdev->asic_prop.uboot_ver;
2266 case FW_COMP_PREBOOT:
2267 ver_off = RREG32(mmPREBOOT_VER_OFFSET);
2268 dest = hdev->asic_prop.preboot_ver;
2272 dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
2276 ver_off &= ~((u32)SRAM_BASE_ADDR);
2278 if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
2279 memcpy_fromio(dest, hdev->pcie_bar[SRAM_CFG_BAR_ID] + ver_off,
2282 dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
2284 strcpy(dest, "unavailable");
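/*
 * Boot the embedded CPU: the DDR BAR is first pointed at the DRAM base so the
 * firmware images can be copied in, and hl_fw_init_cpu() then drives the
 * boot-fit / Linux FW handshake using the boot-status, u-boot magic and
 * boot-error registers with the Goya-specific timeouts.
 */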
2288 static int goya_init_cpu(struct hl_device *hdev)
2290 struct goya_device *goya = hdev->asic_specific;
2293 if (!hdev->cpu_enable)
2296 if (goya->hw_cap_initialized & HW_CAP_CPU)
2300 * Before pushing u-boot/linux to device, need to set the ddr bar to
2301 * base address of dram
2303 if (goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
2305 "failed to map DDR bar to DRAM base address\n");
2309 rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
2310 mmPSOC_GLOBAL_CONF_UBOOT_MAGIC,
2311 mmCPU_CMD_STATUS_TO_HOST, mmCPU_BOOT_ERR0,
2312 false, GOYA_CPU_TIMEOUT_USEC,
2313 GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
2318 goya->hw_cap_initialized |= HW_CAP_CPU;
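/*
 * Program the hop-0 page-table physical address of a single ASID into the
 * MMU: the address is split across the PA43_12 and PA49_44 registers, the
 * ASID is written to MMU_ASID_BUSY with the busy bit (bit 31) set, and the
 * function polls until the hardware clears that bit or the timeout expires.
 */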
2323 static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
2326 u32 status, timeout_usec;
2330 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
2332 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
2334 WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
2335 WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
2336 WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
2338 rc = hl_poll_timeout(
2342 !(status & 0x80000000),
2348 "Timeout during MMU hop0 config of asid %d\n", asid);
2355 int goya_mmu_init(struct hl_device *hdev)
2357 struct asic_fixed_properties *prop = &hdev->asic_prop;
2358 struct goya_device *goya = hdev->asic_specific;
2362 if (!hdev->mmu_enable)
2365 if (goya->hw_cap_initialized & HW_CAP_MMU)
2368 hdev->dram_supports_virtual_memory = true;
2369 hdev->dram_default_page_mapping = true;
2371 for (i = 0 ; i < prop->max_asid ; i++) {
2372 hop0_addr = prop->mmu_pgt_addr +
2373 (i * prop->mmu_hop_table_size);
2375 rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
2378 "failed to set hop0 addr for asid %d\n", i);
2383 goya->hw_cap_initialized |= HW_CAP_MMU;
2385 /* init MMU cache manage page */
2386 WREG32(mmSTLB_CACHE_INV_BASE_39_8,
2387 lower_32_bits(MMU_CACHE_MNG_ADDR >> 8));
2388 WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
2390 /* Remove follower feature due to performance bug */
2391 WREG32_AND(mmSTLB_STLB_FEATURE_EN,
2392 (~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
2394 hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
2395 VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK);
2397 WREG32(mmMMU_MMU_ENABLE, 1);
2398 WREG32(mmMMU_SPI_MASK, 0xF);
2407 * goya_hw_init - Goya hardware initialization code
2409 * @hdev: pointer to hl_device structure
2411 * Returns 0 on success
2414 static int goya_hw_init(struct hl_device *hdev)
2416 struct asic_fixed_properties *prop = &hdev->asic_prop;
2419 dev_info(hdev->dev, "Starting initialization of H/W\n");
2421 /* Perform read from the device to make sure device is up */
2422 RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2425 * Let's mark in the H/W that we have reached this point. We check
2426 * this value in the reset_before_init function to understand whether
2427 * we need to reset the chip before doing H/W init. This register is
2428 * cleared by the H/W upon H/W reset
2430 WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
2432 rc = goya_init_cpu(hdev);
2434 dev_err(hdev->dev, "failed to initialize CPU\n");
2438 goya_tpc_mbist_workaround(hdev);
2440 goya_init_golden_registers(hdev);
2443 * After CPU initialization is finished, change DDR bar mapping inside
2444 * iATU to point to the start address of the MMU page tables
2446 if (goya_set_ddr_bar_base(hdev, (MMU_PAGE_TABLES_ADDR &
2447 ~(prop->dram_pci_bar_size - 0x1ull))) == U64_MAX) {
2449 "failed to map DDR bar to MMU page tables\n");
2453 rc = goya_mmu_init(hdev);
2457 goya_init_security(hdev);
2459 goya_init_dma_qmans(hdev);
2461 goya_init_mme_qmans(hdev);
2463 goya_init_tpc_qmans(hdev);
2465 goya_enable_timestamp(hdev);
2467 /* MSI-X must be enabled before CPU queues are initialized */
2468 rc = goya_enable_msix(hdev);
2470 goto disable_queues;
2472 /* Perform read from the device to flush all MSI-X configuration */
2473 RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2478 goya_disable_internal_queues(hdev);
2479 goya_disable_external_queues(hdev);
2485 * goya_hw_fini - Goya hardware tear-down code
2487 * @hdev: pointer to hl_device structure
2488 * @hard_reset: should we do hard reset to all engines or just reset the
2489 * compute/dma engines
2491 static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
2493 struct goya_device *goya = hdev->asic_specific;
2494 u32 reset_timeout_ms, status;
2497 reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
2499 reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
2502 goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
2503 goya_disable_clk_rlx(hdev);
2504 goya_set_pll_refclk(hdev);
2506 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
2508 "Issued HARD reset command, going to wait %dms\n",
2511 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
2513 "Issued SOFT reset command, going to wait %dms\n",
2518 * After hard reset, we can't poll the BTM_FSM register because the PSOC
2519	 * itself is in reset. In either reset we need to wait until the reset is deasserted
2522 msleep(reset_timeout_ms);
2524 status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
2525 if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
2527 "Timeout while waiting for device to reset 0x%x\n",
2531 goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
2532 HW_CAP_GOLDEN | HW_CAP_TPC);
2533 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2534 GOYA_ASYNC_EVENT_ID_SOFT_RESET);
2538 /* Chicken bit to re-initiate boot sequencer flow */
2539 WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START,
2540 1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT);
2541 /* Move boot manager FSM to pre boot sequencer init state */
2542 WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM,
2543 0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT);
2545 goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
2546 HW_CAP_DDR_0 | HW_CAP_DDR_1 |
2547 HW_CAP_DMA | HW_CAP_MME |
2548 HW_CAP_MMU | HW_CAP_TPC_MBIST |
2549 HW_CAP_GOLDEN | HW_CAP_TPC);
2550 memset(goya->events_stat, 0, sizeof(goya->events_stat));
2553 int goya_suspend(struct hl_device *hdev)
2557 rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
2559 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
2564 int goya_resume(struct hl_device *hdev)
2566 return goya_init_iatu(hdev);
2569 static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
2570 u64 kaddress, phys_addr_t paddress, u32 size)
2574 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
2575 VM_DONTCOPY | VM_NORESERVE;
2577 rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT,
2578 size, vma->vm_page_prot);
2580 dev_err(hdev->dev, "remap_pfn_range error %d", rc);
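/*
 * Ring a hardware queue doorbell: map the queue ID to its PQ_PI register,
 * write the new producer index, and for the CPU queue also raise a GIC event
 * so the embedded CPU notices the PI update.
 */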
2585 void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
2587 u32 db_reg_offset, db_value;
2589 switch (hw_queue_id) {
2590 case GOYA_QUEUE_ID_DMA_0:
2591 db_reg_offset = mmDMA_QM_0_PQ_PI;
2594 case GOYA_QUEUE_ID_DMA_1:
2595 db_reg_offset = mmDMA_QM_1_PQ_PI;
2598 case GOYA_QUEUE_ID_DMA_2:
2599 db_reg_offset = mmDMA_QM_2_PQ_PI;
2602 case GOYA_QUEUE_ID_DMA_3:
2603 db_reg_offset = mmDMA_QM_3_PQ_PI;
2606 case GOYA_QUEUE_ID_DMA_4:
2607 db_reg_offset = mmDMA_QM_4_PQ_PI;
2610 case GOYA_QUEUE_ID_CPU_PQ:
2611 db_reg_offset = mmCPU_IF_PF_PQ_PI;
2614 case GOYA_QUEUE_ID_MME:
2615 db_reg_offset = mmMME_QM_PQ_PI;
2618 case GOYA_QUEUE_ID_TPC0:
2619 db_reg_offset = mmTPC0_QM_PQ_PI;
2622 case GOYA_QUEUE_ID_TPC1:
2623 db_reg_offset = mmTPC1_QM_PQ_PI;
2626 case GOYA_QUEUE_ID_TPC2:
2627 db_reg_offset = mmTPC2_QM_PQ_PI;
2630 case GOYA_QUEUE_ID_TPC3:
2631 db_reg_offset = mmTPC3_QM_PQ_PI;
2634 case GOYA_QUEUE_ID_TPC4:
2635 db_reg_offset = mmTPC4_QM_PQ_PI;
2638 case GOYA_QUEUE_ID_TPC5:
2639 db_reg_offset = mmTPC5_QM_PQ_PI;
2642 case GOYA_QUEUE_ID_TPC6:
2643 db_reg_offset = mmTPC6_QM_PQ_PI;
2646 case GOYA_QUEUE_ID_TPC7:
2647 db_reg_offset = mmTPC7_QM_PQ_PI;
2651 /* Should never get here */
2652 dev_err(hdev->dev, "H/W queue %d is invalid. Can't set pi\n",
2659 /* ring the doorbell */
2660 WREG32(db_reg_offset, db_value);
2662 if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ)
2663 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2664 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
2667 void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
2669 /* The QMANs are on the SRAM so need to copy to IO space */
2670 memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd));
2673 static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
2674 dma_addr_t *dma_handle, gfp_t flags)
2676 void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size,
2679 /* Shift to the device's base physical address of host memory */
2681 *dma_handle += HOST_PHYS_BASE;
2686 static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
2687 void *cpu_addr, dma_addr_t dma_handle)
2689 /* Cancel the device's base physical address of host memory */
2690 dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE;
2692 dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
2695 void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
2696 dma_addr_t *dma_handle, u16 *queue_len)
2701 *dma_handle = hdev->asic_prop.sram_base_address;
2703 base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
2706 case GOYA_QUEUE_ID_MME:
2707 offset = MME_QMAN_BASE_OFFSET;
2708 *queue_len = MME_QMAN_LENGTH;
2710 case GOYA_QUEUE_ID_TPC0:
2711 offset = TPC0_QMAN_BASE_OFFSET;
2712 *queue_len = TPC_QMAN_LENGTH;
2714 case GOYA_QUEUE_ID_TPC1:
2715 offset = TPC1_QMAN_BASE_OFFSET;
2716 *queue_len = TPC_QMAN_LENGTH;
2718 case GOYA_QUEUE_ID_TPC2:
2719 offset = TPC2_QMAN_BASE_OFFSET;
2720 *queue_len = TPC_QMAN_LENGTH;
2722 case GOYA_QUEUE_ID_TPC3:
2723 offset = TPC3_QMAN_BASE_OFFSET;
2724 *queue_len = TPC_QMAN_LENGTH;
2726 case GOYA_QUEUE_ID_TPC4:
2727 offset = TPC4_QMAN_BASE_OFFSET;
2728 *queue_len = TPC_QMAN_LENGTH;
2730 case GOYA_QUEUE_ID_TPC5:
2731 offset = TPC5_QMAN_BASE_OFFSET;
2732 *queue_len = TPC_QMAN_LENGTH;
2734 case GOYA_QUEUE_ID_TPC6:
2735 offset = TPC6_QMAN_BASE_OFFSET;
2736 *queue_len = TPC_QMAN_LENGTH;
2738 case GOYA_QUEUE_ID_TPC7:
2739 offset = TPC7_QMAN_BASE_OFFSET;
2740 *queue_len = TPC_QMAN_LENGTH;
2743 dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
2748 *dma_handle += offset;
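/*
 * Submit a driver-generated job on QMAN DMA 0: the device must be idle,
 * QMAN0 is temporarily switched to secured mode, a MSG_PROT fence packet is
 * placed at the end of the patched CB, the CB is sent without a completion
 * entry, and the driver polls the fence in host memory until
 * GOYA_QMAN0_FENCE_VAL shows up or the timeout expires.
 */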
2753 static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
2755 struct packet_msg_prot *fence_pkt;
2757 dma_addr_t fence_dma_addr;
2763 timeout = GOYA_PLDM_QMAN0_TIMEOUT_USEC;
2765 timeout = HL_DEVICE_TIMEOUT_USEC;
2767 if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) {
2768 dev_err_ratelimited(hdev->dev,
2769 "Can't send driver job on QMAN0 because the device is not idle\n");
2773 fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
2777 "Failed to allocate fence memory for QMAN0\n");
2781 goya_qman0_set_security(hdev, true);
2783 cb = job->patched_cb;
2785 fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address +
2786 job->job_cb_size - sizeof(struct packet_msg_prot));
2788 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
2789 (1 << GOYA_PKT_CTL_EB_SHIFT) |
2790 (1 << GOYA_PKT_CTL_MB_SHIFT);
2791 fence_pkt->ctl = cpu_to_le32(tmp);
2792 fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL);
2793 fence_pkt->addr = cpu_to_le64(fence_dma_addr);
2795 rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
2796 job->job_cb_size, cb->bus_address);
2798 dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
2799 goto free_fence_ptr;
2802 rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp,
2803 (tmp == GOYA_QMAN0_FENCE_VAL), 1000,
2806 hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
2808 if (rc == -ETIMEDOUT) {
2809 dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp);
2810 goto free_fence_ptr;
2814 hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
2817 goya_qman0_set_security(hdev, false);
2822 int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
2823 u32 timeout, long *result)
2825 struct goya_device *goya = hdev->asic_specific;
2827 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
2833 return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
2837 int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
2839 struct packet_msg_prot *fence_pkt;
2840 dma_addr_t pkt_dma_addr;
2842 dma_addr_t fence_dma_addr;
2846 fence_val = GOYA_QMAN0_FENCE_VAL;
2848 fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
2852 "Failed to allocate memory for queue testing\n");
2858 fence_pkt = hdev->asic_funcs->asic_dma_pool_zalloc(hdev,
2859 sizeof(struct packet_msg_prot),
2860 GFP_KERNEL, &pkt_dma_addr);
2863 "Failed to allocate packet for queue testing\n");
2865 goto free_fence_ptr;
2868 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
2869 (1 << GOYA_PKT_CTL_EB_SHIFT) |
2870 (1 << GOYA_PKT_CTL_MB_SHIFT);
2871 fence_pkt->ctl = cpu_to_le32(tmp);
2872 fence_pkt->value = cpu_to_le32(fence_val);
2873 fence_pkt->addr = cpu_to_le64(fence_dma_addr);
2875 rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
2876 sizeof(struct packet_msg_prot),
2880 "Failed to send fence packet\n");
2884 rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
2885 1000, GOYA_TEST_QUEUE_WAIT_USEC, true);
2887 hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
2889 if (rc == -ETIMEDOUT) {
2891 "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
2892 hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
2897 hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_pkt,
2900 hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
2905 int goya_test_cpu_queue(struct hl_device *hdev)
2907 struct goya_device *goya = hdev->asic_specific;
2910 * check capability here as send_cpu_message() won't update the result
2911 * value if no capability
2913 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
2916 return hl_fw_test_cpu_queue(hdev);
2919 int goya_test_queues(struct hl_device *hdev)
2921 int i, rc, ret_val = 0;
2923 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
2924 rc = goya_test_queue(hdev, i);
2932 static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
2933 gfp_t mem_flags, dma_addr_t *dma_handle)
2937 if (size > GOYA_DMA_POOL_BLK_SIZE)
2940 kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
2942 /* Shift to the device's base physical address of host memory */
2944 *dma_handle += HOST_PHYS_BASE;
2949 static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
2950 dma_addr_t dma_addr)
2952 /* Cancel the device's base physical address of host memory */
2953 dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE;
2955 dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr);
2958 void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
2959 dma_addr_t *dma_handle)
2963 vaddr = hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
2964 *dma_handle = (*dma_handle) - hdev->cpu_accessible_dma_address +
2965 VA_CPU_ACCESSIBLE_MEM_ADDR;
2970 void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
2973 hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
2976 static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl,
2977 int nents, enum dma_data_direction dir)
2979 struct scatterlist *sg;
2982 if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir))
2985 /* Shift to the device's base physical address of host memory */
2986 for_each_sg(sgl, sg, nents, i)
2987 sg->dma_address += HOST_PHYS_BASE;
2992 static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl,
2993 int nents, enum dma_data_direction dir)
2995 struct scatterlist *sg;
2998 /* Cancel the device's base physical address of host memory */
2999 for_each_sg(sgl, sg, nents, i)
3000 sg->dma_address -= HOST_PHYS_BASE;
3002 dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir);
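/*
 * Compute how much space the patched CB needs for one host DMA transfer:
 * walk the SG table and merge entries that are physically contiguous as long
 * as the combined length stays within DMA_MAX_TRANSFER_SIZE, then return the
 * resulting descriptor count times the LIN_DMA packet size.
 */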
3005 u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
3007 struct scatterlist *sg, *sg_next_iter;
3008 u32 count, dma_desc_cnt;
3010 dma_addr_t addr, addr_next;
3014 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3016 len = sg_dma_len(sg);
3017 addr = sg_dma_address(sg);
3022 while ((count + 1) < sgt->nents) {
3023 sg_next_iter = sg_next(sg);
3024 len_next = sg_dma_len(sg_next_iter);
3025 addr_next = sg_dma_address(sg_next_iter);
3030 if ((addr + len == addr_next) &&
3031 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3043 return dma_desc_cnt * sizeof(struct packet_lin_dma);
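/*
 * Pin the user buffer referenced by a LIN_DMA packet before submission:
 * reuse an existing pin for this job if one already covers the range,
 * otherwise pin and DMA-map the memory, add it to the job's userptr list and
 * grow the patched CB size by the descriptor list this transfer will need.
 */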
3046 static int goya_pin_memory_before_cs(struct hl_device *hdev,
3047 struct hl_cs_parser *parser,
3048 struct packet_lin_dma *user_dma_pkt,
3049 u64 addr, enum dma_data_direction dir)
3051 struct hl_userptr *userptr;
3054 if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
3055 parser->job_userptr_list, &userptr))
3056 goto already_pinned;
3058 userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
3062 rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
3067 list_add_tail(&userptr->job_node, parser->job_userptr_list);
3069 rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
3070 userptr->sgt->nents, dir);
3072 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
3076 userptr->dma_mapped = true;
3080 parser->patched_cb_size +=
3081 goya_get_dma_desc_list_size(hdev, userptr->sgt);
3086 hl_unpin_host_memory(hdev, userptr);
3092 static int goya_validate_dma_pkt_host(struct hl_device *hdev,
3093 struct hl_cs_parser *parser,
3094 struct packet_lin_dma *user_dma_pkt)
3096 u64 device_memory_addr, addr;
3097 enum dma_data_direction dir;
3098 enum goya_dma_direction user_dir;
3099 bool sram_addr = true;
3100 bool skip_host_mem_pin = false;
3105 ctl = le32_to_cpu(user_dma_pkt->ctl);
3107 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3108 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3110 user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3111 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3114 case DMA_HOST_TO_DRAM:
3115 dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
3116 dir = DMA_TO_DEVICE;
3118 addr = le64_to_cpu(user_dma_pkt->src_addr);
3119 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3121 skip_host_mem_pin = true;
3124 case DMA_DRAM_TO_HOST:
3125 dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
3126 dir = DMA_FROM_DEVICE;
3128 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3129 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3132 case DMA_HOST_TO_SRAM:
3133 dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
3134 dir = DMA_TO_DEVICE;
3135 addr = le64_to_cpu(user_dma_pkt->src_addr);
3136 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3138 skip_host_mem_pin = true;
3141 case DMA_SRAM_TO_HOST:
3142 dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
3143 dir = DMA_FROM_DEVICE;
3144 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3145 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3148 dev_err(hdev->dev, "DMA direction is undefined\n");
3153 if (!hl_mem_area_inside_range(device_memory_addr,
3154 le32_to_cpu(user_dma_pkt->tsize),
3155 hdev->asic_prop.sram_user_base_address,
3156 hdev->asic_prop.sram_end_address)) {
3159 "SRAM address 0x%llx + 0x%x is invalid\n",
3161 user_dma_pkt->tsize);
3165 if (!hl_mem_area_inside_range(device_memory_addr,
3166 le32_to_cpu(user_dma_pkt->tsize),
3167 hdev->asic_prop.dram_user_base_address,
3168 hdev->asic_prop.dram_end_address)) {
3171 "DRAM address 0x%llx + 0x%x is invalid\n",
3173 user_dma_pkt->tsize);
3178 if (skip_host_mem_pin)
3179 parser->patched_cb_size += sizeof(*user_dma_pkt);
3181 if ((dir == DMA_TO_DEVICE) &&
3182 (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
3184			"Can't DMA from host on queue other than 1\n");
3188 rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
3195 static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
3196 struct hl_cs_parser *parser,
3197 struct packet_lin_dma *user_dma_pkt)
3199 u64 sram_memory_addr, dram_memory_addr;
3200 enum goya_dma_direction user_dir;
3203 ctl = le32_to_cpu(user_dma_pkt->ctl);
3204 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3205 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3207 if (user_dir == DMA_DRAM_TO_SRAM) {
3208 dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
3209 dram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3210 sram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3212 dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n");
3213 sram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3214 dram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3217 if (!hl_mem_area_inside_range(sram_memory_addr,
3218 le32_to_cpu(user_dma_pkt->tsize),
3219 hdev->asic_prop.sram_user_base_address,
3220 hdev->asic_prop.sram_end_address)) {
3221 dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n",
3222 sram_memory_addr, user_dma_pkt->tsize);
3226 if (!hl_mem_area_inside_range(dram_memory_addr,
3227 le32_to_cpu(user_dma_pkt->tsize),
3228 hdev->asic_prop.dram_user_base_address,
3229 hdev->asic_prop.dram_end_address)) {
3230 dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n",
3231 dram_memory_addr, user_dma_pkt->tsize);
3235 parser->patched_cb_size += sizeof(*user_dma_pkt);
3240 static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
3241 struct hl_cs_parser *parser,
3242 struct packet_lin_dma *user_dma_pkt)
3244 enum goya_dma_direction user_dir;
3248 dev_dbg(hdev->dev, "DMA packet details:\n");
3249 dev_dbg(hdev->dev, "source == 0x%llx\n",
3250 le64_to_cpu(user_dma_pkt->src_addr));
3251 dev_dbg(hdev->dev, "destination == 0x%llx\n",
3252 le64_to_cpu(user_dma_pkt->dst_addr));
3253 dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
3255 ctl = le32_to_cpu(user_dma_pkt->ctl);
3256 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3257 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3260 * Special handling for DMA with size 0. The H/W has a bug where
3261 * this can cause the QMAN DMA to get stuck, so block it here.
3263 if (user_dma_pkt->tsize == 0) {
3265 "Got DMA with size 0, might reset the device\n");
3269 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM))
3270 rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt);
3272 rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt);
3277 static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
3278 struct hl_cs_parser *parser,
3279 struct packet_lin_dma *user_dma_pkt)
3281 dev_dbg(hdev->dev, "DMA packet details:\n");
3282 dev_dbg(hdev->dev, "source == 0x%llx\n",
3283 le64_to_cpu(user_dma_pkt->src_addr));
3284 dev_dbg(hdev->dev, "destination == 0x%llx\n",
3285 le64_to_cpu(user_dma_pkt->dst_addr));
3286 dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
3290	 * We can't allow the user to read from Host using QMANs other than 1.
3291 * PMMU and HPMMU addresses are equal, check only one of them.
3293 if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
3294 hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
3295 le32_to_cpu(user_dma_pkt->tsize),
3296 hdev->asic_prop.pmmu.start_addr,
3297 hdev->asic_prop.pmmu.end_addr)) {
3299			"Can't DMA from host on queue other than 1\n");
3303 if (user_dma_pkt->tsize == 0) {
3305 "Got DMA with size 0, might reset the device\n");
3309 parser->patched_cb_size += sizeof(*user_dma_pkt);
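/*
 * Userspace may only use WREG32 to update the DMA channel 0 write-completion
 * address (mmDMA_CH_0_WR_COMP_ADDR_LO). When the MMU is disabled, the written
 * value must also fall inside the sync-object (SOB) range so the completion
 * write can't land on an arbitrary register.
 */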
3314 static int goya_validate_wreg32(struct hl_device *hdev,
3315 struct hl_cs_parser *parser,
3316 struct packet_wreg32 *wreg_pkt)
3318 struct goya_device *goya = hdev->asic_specific;
3319 u32 sob_start_addr, sob_end_addr;
3322 reg_offset = le32_to_cpu(wreg_pkt->ctl) &
3323 GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;
3325 dev_dbg(hdev->dev, "WREG32 packet details:\n");
3326 dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
3327 dev_dbg(hdev->dev, "value == 0x%x\n",
3328 le32_to_cpu(wreg_pkt->value));
3330 if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
3331 dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
3337 * With MMU, DMA channels are not secured, so it doesn't matter where
3338 * the WR COMP will be written to because it will go out with
3339 * non-secured property
3341 if (goya->hw_cap_initialized & HW_CAP_MMU)
3344 sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
3345 sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);
3347 if ((le32_to_cpu(wreg_pkt->value) < sob_start_addr) ||
3348 (le32_to_cpu(wreg_pkt->value) > sob_end_addr)) {
3350 dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
3358 static int goya_validate_cb(struct hl_device *hdev,
3359 struct hl_cs_parser *parser, bool is_mmu)
3361 u32 cb_parsed_length = 0;
3364 parser->patched_cb_size = 0;
3366	/* user_cb_size is more than 0 so the loop will always be executed */
3367 while (cb_parsed_length < parser->user_cb_size) {
3368 enum packet_id pkt_id;
3370 struct goya_packet *user_pkt;
3372 user_pkt = (struct goya_packet *) (uintptr_t)
3373 (parser->user_cb->kernel_address + cb_parsed_length);
3375 pkt_id = (enum packet_id) (
3376 (le64_to_cpu(user_pkt->header) &
3377 PACKET_HEADER_PACKET_ID_MASK) >>
3378 PACKET_HEADER_PACKET_ID_SHIFT);
3380 pkt_size = goya_packet_sizes[pkt_id];
3381 cb_parsed_length += pkt_size;
3382 if (cb_parsed_length > parser->user_cb_size) {
3384 "packet 0x%x is out of CB boundary\n", pkt_id);
3390 case PACKET_WREG_32:
3392 * Although it is validated after copy in patch_cb(),
3393 * need to validate here as well because patch_cb() is
3394 * not called in MMU path while this function is called
3396 rc = goya_validate_wreg32(hdev,
3397 parser, (struct packet_wreg32 *) user_pkt);
3398 parser->patched_cb_size += pkt_size;
3401 case PACKET_WREG_BULK:
3403 "User not allowed to use WREG_BULK\n");
3407 case PACKET_MSG_PROT:
3409 "User not allowed to use MSG_PROT\n");
3414 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3419 dev_err(hdev->dev, "User not allowed to use STOP\n");
3423 case PACKET_LIN_DMA:
3425 rc = goya_validate_dma_pkt_mmu(hdev, parser,
3426 (struct packet_lin_dma *) user_pkt);
3428 rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
3429 (struct packet_lin_dma *) user_pkt);
3432 case PACKET_MSG_LONG:
3433 case PACKET_MSG_SHORT:
3436 parser->patched_cb_size += pkt_size;
3440 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3451 * The new CB should have space at the end for two MSG_PROT packets:
3452 * 1. A packet that will act as a completion packet
3453 * 2. A packet that will generate MSI-X interrupt
3455 parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
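/*
 * Rewrite a single user LIN_DMA packet that targets host memory into one
 * packet per merged SG entry of the pinned buffer. The engine-barrier bit is
 * kept only on the first descriptor, RDCOMP/WRCOMP are cleared on every
 * descriptor and then restored on the last one, so completion behaves exactly
 * as the user requested.
 */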
3460 static int goya_patch_dma_packet(struct hl_device *hdev,
3461 struct hl_cs_parser *parser,
3462 struct packet_lin_dma *user_dma_pkt,
3463 struct packet_lin_dma *new_dma_pkt,
3464 u32 *new_dma_pkt_size)
3466 struct hl_userptr *userptr;
3467 struct scatterlist *sg, *sg_next_iter;
3468 u32 count, dma_desc_cnt;
3470 dma_addr_t dma_addr, dma_addr_next;
3471 enum goya_dma_direction user_dir;
3472 u64 device_memory_addr, addr;
3473 enum dma_data_direction dir;
3474 struct sg_table *sgt;
3475 bool skip_host_mem_pin = false;
3477 u32 user_rdcomp_mask, user_wrcomp_mask, ctl;
3479 ctl = le32_to_cpu(user_dma_pkt->ctl);
3481 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3482 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3484 user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3485 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3487 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM) ||
3488 (user_dma_pkt->tsize == 0)) {
3489 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt));
3490 *new_dma_pkt_size = sizeof(*new_dma_pkt);
3494 if ((user_dir == DMA_HOST_TO_DRAM) || (user_dir == DMA_HOST_TO_SRAM)) {
3495 addr = le64_to_cpu(user_dma_pkt->src_addr);
3496 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3497 dir = DMA_TO_DEVICE;
3499 skip_host_mem_pin = true;
3501 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3502 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3503 dir = DMA_FROM_DEVICE;
3506 if ((!skip_host_mem_pin) &&
3507 (hl_userptr_is_pinned(hdev, addr,
3508 le32_to_cpu(user_dma_pkt->tsize),
3509 parser->job_userptr_list, &userptr) == false)) {
3510 dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
3511 addr, user_dma_pkt->tsize);
3515 if ((user_memset) && (dir == DMA_TO_DEVICE)) {
3516 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
3517 *new_dma_pkt_size = sizeof(*user_dma_pkt);
3521 user_rdcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK;
3523 user_wrcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK;
3528 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3529 len = sg_dma_len(sg);
3530 dma_addr = sg_dma_address(sg);
3535 while ((count + 1) < sgt->nents) {
3536 sg_next_iter = sg_next(sg);
3537 len_next = sg_dma_len(sg_next_iter);
3538 dma_addr_next = sg_dma_address(sg_next_iter);
3543 if ((dma_addr + len == dma_addr_next) &&
3544 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3553 ctl = le32_to_cpu(user_dma_pkt->ctl);
3554 if (likely(dma_desc_cnt))
3555 ctl &= ~GOYA_PKT_CTL_EB_MASK;
3556 ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK |
3557 GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
3558 new_dma_pkt->ctl = cpu_to_le32(ctl);
3559 new_dma_pkt->tsize = cpu_to_le32((u32) len);
3561 if (dir == DMA_TO_DEVICE) {
3562 new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
3563 new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
3565 new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
3566 new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
3570 device_memory_addr += len;
3575 if (!dma_desc_cnt) {
3577 "Error of 0 SG entries when patching DMA packet\n");
3581 /* Fix the last dma packet - rdcomp/wrcomp must be as user set them */
3583 new_dma_pkt->ctl |= cpu_to_le32(user_rdcomp_mask | user_wrcomp_mask);
3585 *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
3590 static int goya_patch_cb(struct hl_device *hdev,
3591 struct hl_cs_parser *parser)
3593 u32 cb_parsed_length = 0;
3594 u32 cb_patched_cur_length = 0;
3597	/* user_cb_size is more than 0 so the loop will always be executed */
3598 while (cb_parsed_length < parser->user_cb_size) {
3599 enum packet_id pkt_id;
3601 u32 new_pkt_size = 0;
3602 struct goya_packet *user_pkt, *kernel_pkt;
3604 user_pkt = (struct goya_packet *) (uintptr_t)
3605 (parser->user_cb->kernel_address + cb_parsed_length);
3606 kernel_pkt = (struct goya_packet *) (uintptr_t)
3607 (parser->patched_cb->kernel_address +
3608 cb_patched_cur_length);
3610 pkt_id = (enum packet_id) (
3611 (le64_to_cpu(user_pkt->header) &
3612 PACKET_HEADER_PACKET_ID_MASK) >>
3613 PACKET_HEADER_PACKET_ID_SHIFT);
3615 pkt_size = goya_packet_sizes[pkt_id];
3616 cb_parsed_length += pkt_size;
3617 if (cb_parsed_length > parser->user_cb_size) {
3619 "packet 0x%x is out of CB boundary\n", pkt_id);
3625 case PACKET_LIN_DMA:
3626 rc = goya_patch_dma_packet(hdev, parser,
3627 (struct packet_lin_dma *) user_pkt,
3628 (struct packet_lin_dma *) kernel_pkt,
3630 cb_patched_cur_length += new_pkt_size;
3633 case PACKET_WREG_32:
3634 memcpy(kernel_pkt, user_pkt, pkt_size);
3635 cb_patched_cur_length += pkt_size;
3636 rc = goya_validate_wreg32(hdev, parser,
3637 (struct packet_wreg32 *) kernel_pkt);
3640 case PACKET_WREG_BULK:
3642 "User not allowed to use WREG_BULK\n");
3646 case PACKET_MSG_PROT:
3648 "User not allowed to use MSG_PROT\n");
3653 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3658 dev_err(hdev->dev, "User not allowed to use STOP\n");
3662 case PACKET_MSG_LONG:
3663 case PACKET_MSG_SHORT:
3666 memcpy(kernel_pkt, user_pkt, pkt_size);
3667 cb_patched_cur_length += pkt_size;
3671 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3684 static int goya_parse_cb_mmu(struct hl_device *hdev,
3685 struct hl_cs_parser *parser)
3687 u64 patched_cb_handle;
3688 u32 patched_cb_size;
3689 struct hl_cb *user_cb;
3693 * The new CB should have space at the end for two MSG_PROT pkt:
3694 * 1. A packet that will act as a completion packet
3695 * 2. A packet that will generate MSI-X interrupt
3697 parser->patched_cb_size = parser->user_cb_size +
3698 sizeof(struct packet_msg_prot) * 2;
3700 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
3701 parser->patched_cb_size,
3702 &patched_cb_handle, HL_KERNEL_ASID_ID);
3706 "Failed to allocate patched CB for DMA CS %d\n",
3711 patched_cb_handle >>= PAGE_SHIFT;
3712 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
3713 (u32) patched_cb_handle);
3714 /* hl_cb_get should never fail here so use kernel WARN */
3715 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
3716 (u32) patched_cb_handle);
3717 if (!parser->patched_cb) {
3723 * The check that parser->user_cb_size <= parser->user_cb->size was done
3724 * in validate_queue_index().
3726 memcpy((void *) (uintptr_t) parser->patched_cb->kernel_address,
3727 (void *) (uintptr_t) parser->user_cb->kernel_address,
3728 parser->user_cb_size);
3730 patched_cb_size = parser->patched_cb_size;
3732 /* validate patched CB instead of user CB */
3733 user_cb = parser->user_cb;
3734 parser->user_cb = parser->patched_cb;
3735 rc = goya_validate_cb(hdev, parser, true);
3736 parser->user_cb = user_cb;
3739 hl_cb_put(parser->patched_cb);
3743 if (patched_cb_size != parser->patched_cb_size) {
3744 dev_err(hdev->dev, "user CB size mismatch\n");
3745 hl_cb_put(parser->patched_cb);
3752 * Always call cb destroy here because we still have 1 reference
3753	 * to it by calling cb_get earlier. After the job is completed,
3754	 * cb_put will release it, but here we want to remove it from the idr
3757 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
3758 patched_cb_handle << PAGE_SHIFT);
3763 static int goya_parse_cb_no_mmu(struct hl_device *hdev,
3764 struct hl_cs_parser *parser)
3766 u64 patched_cb_handle;
3769 rc = goya_validate_cb(hdev, parser, false);
3774 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
3775 parser->patched_cb_size,
3776 &patched_cb_handle, HL_KERNEL_ASID_ID);
3779 "Failed to allocate patched CB for DMA CS %d\n", rc);
3783 patched_cb_handle >>= PAGE_SHIFT;
3784 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
3785 (u32) patched_cb_handle);
3786 /* hl_cb_get should never fail here so use kernel WARN */
3787 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
3788 (u32) patched_cb_handle);
3789 if (!parser->patched_cb) {
3794 rc = goya_patch_cb(hdev, parser);
3797 hl_cb_put(parser->patched_cb);
3801 * Always call cb destroy here because we still have 1 reference
3802	 * to it by calling cb_get earlier. After the job is completed,
3803	 * cb_put will release it, but here we want to remove it from the idr
3806 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
3807 patched_cb_handle << PAGE_SHIFT);
3811 hl_userptr_delete_list(hdev, parser->job_userptr_list);
3815 static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
3816 struct hl_cs_parser *parser)
3818 struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
3819 struct goya_device *goya = hdev->asic_specific;
3821 if (goya->hw_cap_initialized & HW_CAP_MMU)
3824 /* For internal queue jobs, just check if CB address is valid */
3825 if (hl_mem_area_inside_range(
3826 (u64) (uintptr_t) parser->user_cb,
3827 parser->user_cb_size,
3828 asic_prop->sram_user_base_address,
3829 asic_prop->sram_end_address))
3832 if (hl_mem_area_inside_range(
3833 (u64) (uintptr_t) parser->user_cb,
3834 parser->user_cb_size,
3835 asic_prop->dram_user_base_address,
3836 asic_prop->dram_end_address))
3840 "Internal CB address 0x%px + 0x%x is not in SRAM nor in DRAM\n",
3841 parser->user_cb, parser->user_cb_size);
3846 int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
3848 struct goya_device *goya = hdev->asic_specific;
3850 if (parser->queue_type == QUEUE_TYPE_INT)
3851 return goya_parse_cb_no_ext_queue(hdev, parser);
3853 if (goya->hw_cap_initialized & HW_CAP_MMU)
3854 return goya_parse_cb_mmu(hdev, parser);
3856 return goya_parse_cb_no_mmu(hdev, parser);
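/*
 * Fill in the two MSG_PROT packets reserved at the end of every patched CB:
 * the first writes cq_val to the completion queue address (with EB and MB
 * set), the second writes the MSI-X vector number to the PCIe MSI-X doorbell
 * so the host receives an interrupt when the job completes.
 */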
3859 void goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address,
3860 u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec,
3863 struct packet_msg_prot *cq_pkt;
3866 cq_pkt = (struct packet_msg_prot *) (uintptr_t)
3867 (kernel_address + len - (sizeof(struct packet_msg_prot) * 2));
3869 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3870 (1 << GOYA_PKT_CTL_EB_SHIFT) |
3871 (1 << GOYA_PKT_CTL_MB_SHIFT);
3872 cq_pkt->ctl = cpu_to_le32(tmp);
3873 cq_pkt->value = cpu_to_le32(cq_val);
3874 cq_pkt->addr = cpu_to_le64(cq_addr);
3878 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3879 (1 << GOYA_PKT_CTL_MB_SHIFT);
3880 cq_pkt->ctl = cpu_to_le32(tmp);
3881 cq_pkt->value = cpu_to_le32(msix_vec & 0x7FF);
3882 cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF);
3885 void goya_update_eq_ci(struct hl_device *hdev, u32 val)
3887 WREG32(mmCPU_EQ_CI, val);
3890 void goya_restore_phase_topology(struct hl_device *hdev)
3895 static void goya_clear_sm_regs(struct hl_device *hdev)
3897 int i, num_of_sob_in_longs, num_of_mon_in_longs;
3899 num_of_sob_in_longs =
3900 ((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4);
3902 num_of_mon_in_longs =
3903 ((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4);
3905 for (i = 0 ; i < num_of_sob_in_longs ; i += 4)
3906 WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0);
3908 for (i = 0 ; i < num_of_mon_in_longs ; i += 4)
3909 WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0);
3911 /* Flush all WREG to prevent race */
3912 i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
3916 * goya_debugfs_read32 - read a 32bit value from a given device or a host mapped address
3919 * @hdev: pointer to hl_device structure
3920 * @addr: device or host mapped address
3921 * @val: returned value
3923 * In case of DDR address that is not mapped into the default aperture that
3924 * the DDR bar exposes, the function will configure the iATU so that the DDR
3925 * bar will be positioned at a base address that allows reading from the
3926 * required address. Configuring the iATU during normal operation can
3927	 * lead to undefined behavior and therefore should be done with extreme care
3930 static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
3932 struct asic_fixed_properties *prop = &hdev->asic_prop;
3936 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
3937 *val = RREG32(addr - CFG_BASE);
3939 } else if ((addr >= SRAM_BASE_ADDR) &&
3940 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
3942 *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
3943 (addr - SRAM_BASE_ADDR));
3945 } else if ((addr >= DRAM_PHYS_BASE) &&
3946 (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
3948 u64 bar_base_addr = DRAM_PHYS_BASE +
3949 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
3951 ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
3952 if (ddr_bar_addr != U64_MAX) {
3953 *val = readl(hdev->pcie_bar[DDR_BAR_ID] +
3954 (addr - bar_base_addr));
3956 ddr_bar_addr = goya_set_ddr_bar_base(hdev,
3959 if (ddr_bar_addr == U64_MAX)
3962 } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
3963 *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
3973 * goya_debugfs_write32 - write a 32bit value to a given device or a host mapped address
3976 * @hdev: pointer to hl_device structure
3977 * @addr: device or host mapped address
3978	 * @val: value to write
3980 * In case of DDR address that is not mapped into the default aperture that
3981 * the DDR bar exposes, the function will configure the iATU so that the DDR
3982 * bar will be positioned at a base address that allows writing to the
3983 * required address. Configuring the iATU during normal operation can
3984	 * lead to undefined behavior and therefore should be done with extreme care
3987 static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
3989 struct asic_fixed_properties *prop = &hdev->asic_prop;
3993 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
3994 WREG32(addr - CFG_BASE, val);
3996 } else if ((addr >= SRAM_BASE_ADDR) &&
3997 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
3999 writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4000 (addr - SRAM_BASE_ADDR));
4002 } else if ((addr >= DRAM_PHYS_BASE) &&
4003 (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
4005 u64 bar_base_addr = DRAM_PHYS_BASE +
4006 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4008 ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
4009 if (ddr_bar_addr != U64_MAX) {
4010 writel(val, hdev->pcie_bar[DDR_BAR_ID] +
4011 (addr - bar_base_addr));
4013 ddr_bar_addr = goya_set_ddr_bar_base(hdev,
4016 if (ddr_bar_addr == U64_MAX)
4019 } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
4020 *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
4029 static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
4031 struct asic_fixed_properties *prop = &hdev->asic_prop;
4035 if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
4036 u32 val_l = RREG32(addr - CFG_BASE);
4037 u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE);
4039 *val = (((u64) val_h) << 32) | val_l;
4041 } else if ((addr >= SRAM_BASE_ADDR) &&
4042 (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) {
4044 *val = readq(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4045 (addr - SRAM_BASE_ADDR));
4047 } else if ((addr >= DRAM_PHYS_BASE) &&
4049 DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64))) {
4051 u64 bar_base_addr = DRAM_PHYS_BASE +
4052 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4054 ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
4055 if (ddr_bar_addr != U64_MAX) {
4056 *val = readq(hdev->pcie_bar[DDR_BAR_ID] +
4057 (addr - bar_base_addr));
4059 ddr_bar_addr = goya_set_ddr_bar_base(hdev,
4062 if (ddr_bar_addr == U64_MAX)
4065 } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
4066 *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
4075 static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
4077 struct asic_fixed_properties *prop = &hdev->asic_prop;
4081 if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
4082 WREG32(addr - CFG_BASE, lower_32_bits(val));
4083 WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val));
4085 } else if ((addr >= SRAM_BASE_ADDR) &&
4086 (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) {
4088 writeq(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4089 (addr - SRAM_BASE_ADDR));
4091 } else if ((addr >= DRAM_PHYS_BASE) &&
4093 DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64))) {
4095 u64 bar_base_addr = DRAM_PHYS_BASE +
4096 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4098 ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
4099 if (ddr_bar_addr != U64_MAX) {
4100 writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
4101 (addr - bar_base_addr));
4103 ddr_bar_addr = goya_set_ddr_bar_base(hdev,
4106 if (ddr_bar_addr == U64_MAX)
4109 } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
4110 *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
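/*
 * Page-table entries live in DRAM and are accessed through the DDR BAR,
 * relative to the BAR's current base (ddr_bar_cur_addr). The accesses are
 * skipped while a hard reset is pending.
 */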
4119 static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
4121 struct goya_device *goya = hdev->asic_specific;
4123 if (hdev->hard_reset_pending)
4126 return readq(hdev->pcie_bar[DDR_BAR_ID] +
4127 (addr - goya->ddr_bar_cur_addr));
4130 static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
4132 struct goya_device *goya = hdev->asic_specific;
4134 if (hdev->hard_reset_pending)
4137 writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
4138 (addr - goya->ddr_bar_cur_addr));
4141 static const char *_goya_get_event_desc(u16 event_type)
4143 switch (event_type) {
4144 case GOYA_ASYNC_EVENT_ID_PCIE_IF:
4146 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4147 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4148 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4149 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4150 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4151 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4152 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4153 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4155 case GOYA_ASYNC_EVENT_ID_MME_ECC:
4157 case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
4158 return "MME_ecc_ext";
4159 case GOYA_ASYNC_EVENT_ID_MMU_ECC:
4161 case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
4163 case GOYA_ASYNC_EVENT_ID_DMA_ECC:
4165 case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
4166 return "CPU_if_ecc";
4167 case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
4169 case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
4170 return "PSOC_coresight";
4171 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4173 case GOYA_ASYNC_EVENT_ID_GIC500:
4175 case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
4177 case GOYA_ASYNC_EVENT_ID_AXI_ECC:
4179 case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
4180 return "L2_ram_ecc";
4181 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
4182 return "PSOC_gpio_05_sw_reset";
4183 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
4184 return "PSOC_gpio_10_vrhot_icrit";
4185 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4187 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4188 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4189 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4190 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4191 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4192 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4193 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4194 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4196 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4198 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4200 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4201 return "CPU_axi_splitter";
4202 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4203 return "PSOC_axi_dec";
4204 case GOYA_ASYNC_EVENT_ID_PSOC:
4206 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4207 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4208 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4209 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4210 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4211 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4212 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4213 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4214 return "TPC%d_krn_err";
4215 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4217 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4219 case GOYA_ASYNC_EVENT_ID_MME_QM:
4221 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4223 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4225 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4227 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4228 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4229 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4230 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4231 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4232 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4233 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4234 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4235 return "TPC%d_bmon_spmu";
4236 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4237 return "DMA_bm_ch%d";
4238 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
4239 return "POWER_ENV_S";
4240 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
4241 return "POWER_ENV_E";
4242 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
4243 return "THERMAL_ENV_S";
4244 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
4245 return "THERMAL_ENV_E";
4251 static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
4255 switch (event_type) {
4256 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4257 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4258 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4259 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4260 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4261 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4262 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4263 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4264 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_ECC) / 3;
4265 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4267 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4268 index = event_type - GOYA_ASYNC_EVENT_ID_SRAM0;
4269 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4271 case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
4272 index = event_type - GOYA_ASYNC_EVENT_ID_PLL0;
4273 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4275 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4276 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4277 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4278 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4279 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4280 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4281 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4282 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4283 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3;
4284 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4286 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4287 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4288 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4289 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4290 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4291 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4292 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4293 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4294 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10;
4295 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4297 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4298 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ;
4299 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4301 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4302 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM;
4303 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4305 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4306 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM;
4307 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4309 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4310 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH;
4311 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4313 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4314 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4315 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4316 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4317 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4318 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4319 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4320 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4321 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU) / 10;
4322 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4324 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4325 index = event_type - GOYA_ASYNC_EVENT_ID_DMA_BM_CH0;
4326 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4329 snprintf(desc, size, _goya_get_event_desc(event_type));
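/*
 * Check the DMA macro RAZWI capture registers for illegal LBW/HBW reads and
 * writes, report any that were latched and clear the valid bits so new
 * events can be captured.
 */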
4334 static void goya_print_razwi_info(struct hl_device *hdev)
4336 if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
4337 dev_err_ratelimited(hdev->dev, "Illegal write to LBW\n");
4338 WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
4341 if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
4342 dev_err_ratelimited(hdev->dev, "Illegal read from LBW\n");
4343 WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
4346 if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
4347 dev_err_ratelimited(hdev->dev, "Illegal write to HBW\n");
4348 WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
4351 if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
4352 dev_err_ratelimited(hdev->dev, "Illegal read from HBW\n");
4353 WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
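/*
 * If the MMU latched a page fault, report the faulting virtual address
 * (high bits from the capture register, low bits from the VA register) and
 * clear the capture entry.
 */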
4357 static void goya_print_mmu_error_info(struct hl_device *hdev)
4359 struct goya_device *goya = hdev->asic_specific;
4363 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4366 val = RREG32(mmMMU_PAGE_ERROR_CAPTURE);
4367 if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
4368 addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
4370 addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);
4372 dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n",
4375 WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
4379 static void goya_print_irq_info(struct hl_device *hdev, u16 event_type,
4384 goya_get_event_desc(event_type, desc, sizeof(desc));
4385 dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
4389 goya_print_razwi_info(hdev);
4390 goya_print_mmu_error_info(hdev);
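/*
 * Send a single ArmCP packet that unmasks an array of RAZWI IRQs. The IRQ
 * IDs are converted to little-endian and the packet size is rounded up to
 * an 8-byte multiple, as required by ArmCP.
 */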
4394 static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
4395 size_t irq_arr_size)
4397 struct armcp_unmask_irq_arr_packet *pkt;
4398 size_t total_pkt_size;
4401 int irq_num_entries, irq_arr_index;
4402 __le32 *goya_irq_arr;
4404 total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
4407 /* data should be aligned to 8 bytes in order for ArmCP to copy it */
4408 total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
4410 /* total_pkt_size is cast to u16 later on */
4411 if (total_pkt_size > USHRT_MAX) {
4412 dev_err(hdev->dev, "too many elements in IRQ array\n");
4416 pkt = kzalloc(total_pkt_size, GFP_KERNEL);
4420 irq_num_entries = irq_arr_size / sizeof(irq_arr[0]);
4421 pkt->length = cpu_to_le32(irq_num_entries);
4423 /* We must perform any necessary endianness conversion on the irq
4424 * array being passed to the goya hardware
4426 for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs;
4427 irq_arr_index < irq_num_entries ; irq_arr_index++)
4428 goya_irq_arr[irq_arr_index] =
4429 cpu_to_le32(irq_arr[irq_arr_index]);
4431 pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
4432 ARMCP_PKT_CTL_OPCODE_SHIFT);
4434 rc = goya_send_cpu_message(hdev, (u32 *) pkt, total_pkt_size,
4435 HL_DEVICE_TIMEOUT_USEC, &result);
4438 dev_err(hdev->dev, "failed to unmask IRQ array\n");
4445 static int goya_soft_reset_late_init(struct hl_device *hdev)
4448 * Unmask all IRQs since some could have been received
4449 * during the soft reset
4451 return goya_unmask_irq_arr(hdev, goya_all_events,
4452 sizeof(goya_all_events));
4455 static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
4457 struct armcp_packet pkt;
4461 memset(&pkt, 0, sizeof(pkt));
4463 pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ <<
4464 ARMCP_PKT_CTL_OPCODE_SHIFT);
4465 pkt.value = cpu_to_le64(event_type);
4467 rc = goya_send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
4468 HL_DEVICE_TIMEOUT_USEC, &result);
4471 dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d\n", event_type);
4476 static void goya_print_clk_change_info(struct hl_device *hdev, u16 event_type)
4478 switch (event_type) {
4479 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
4480 dev_info_ratelimited(hdev->dev,
4481 "Clock throttling due to power consumption\n");
4483 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
4484 dev_info_ratelimited(hdev->dev,
4485 "Power envelope is safe, back to optimal clock\n");
4487 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
4488 dev_info_ratelimited(hdev->dev,
4489 "Clock throttling due to overheating\n");
4491 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
4492 dev_info_ratelimited(hdev->dev,
4493 "Thermal envelope is safe, back to optimal clock\n");
4497 dev_err(hdev->dev, "Received invalid clock change event %d\n",
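/*
 * Main event-queue handler. Updates the per-event statistics and then acts
 * according to the event type: fatal errors (ECC, AXI, etc.) are reported
 * and trigger a hard reset; recoverable errors are reported and their IRQ
 * is unmasked again via ArmCP; clock throttling events are logged and their
 * IRQ unmasked.
 */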
4503 void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
4505 u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
4506 u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
4507 >> EQ_CTL_EVENT_TYPE_SHIFT);
4508 struct goya_device *goya = hdev->asic_specific;
4510 goya->events_stat[event_type]++;
4511 goya->events_stat_aggregate[event_type]++;
4513 switch (event_type) {
4514 case GOYA_ASYNC_EVENT_ID_PCIE_IF:
4515 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4516 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4517 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4518 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4519 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4520 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4521 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4522 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4523 case GOYA_ASYNC_EVENT_ID_MME_ECC:
4524 case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
4525 case GOYA_ASYNC_EVENT_ID_MMU_ECC:
4526 case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
4527 case GOYA_ASYNC_EVENT_ID_DMA_ECC:
4528 case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
4529 case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
4530 case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
4531 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4532 case GOYA_ASYNC_EVENT_ID_GIC500:
4533 case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
4534 case GOYA_ASYNC_EVENT_ID_AXI_ECC:
4535 case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
4536 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
4537 goya_print_irq_info(hdev, event_type, false);
4538 hl_device_reset(hdev, true, false);
4541 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4542 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4543 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4544 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4545 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4546 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4547 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4548 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4549 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4550 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4551 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4552 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4553 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4554 case GOYA_ASYNC_EVENT_ID_PSOC:
4555 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4556 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4557 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4558 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4559 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4560 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4561 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4562 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4563 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4564 case GOYA_ASYNC_EVENT_ID_MME_QM:
4565 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4566 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4567 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4568 goya_print_irq_info(hdev, event_type, true);
4569 goya_unmask_irq(hdev, event_type);
4572 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
4573 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4574 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4575 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4576 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4577 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4578 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4579 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4580 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4581 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4582 goya_print_irq_info(hdev, event_type, false);
4583 goya_unmask_irq(hdev, event_type);
4586 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
4587 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
4588 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
4589 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
4590 goya_print_clk_change_info(hdev, event_type);
4591 goya_unmask_irq(hdev, event_type);
4595 dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
4601 void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size)
4603 struct goya_device *goya = hdev->asic_specific;
4606 *size = (u32) sizeof(goya->events_stat_aggregate);
4607 return goya->events_stat_aggregate;
4610 *size = (u32) sizeof(goya->events_stat);
4611 return goya->events_stat;
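/*
 * Fill a device memory range (SRAM or DRAM) with a 64-bit value using
 * LIN_DMA memset packets submitted on DMA queue 0 (QMAN0). The range is
 * split into chunks of up to 2GB, one packet per chunk.
 */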
4614 static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
4615 u64 val, bool is_dram)
4617 struct packet_lin_dma *lin_dma_pkt;
4618 struct hl_cs_job *job;
4621 int rc, lin_dma_pkts_cnt;
4623 lin_dma_pkts_cnt = DIV_ROUND_UP_ULL(size, SZ_2G);
4624 cb_size = lin_dma_pkts_cnt * sizeof(struct packet_lin_dma) +
4625 sizeof(struct packet_msg_prot);
4626 cb = hl_cb_kernel_create(hdev, cb_size);
4630 lin_dma_pkt = (struct packet_lin_dma *) (uintptr_t) cb->kernel_address;
4633 memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
4635 ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
4636 (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
4637 (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
4638 (1 << GOYA_PKT_CTL_RB_SHIFT) |
4639 (1 << GOYA_PKT_CTL_MB_SHIFT));
4640 ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) <<
4641 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
4642 lin_dma_pkt->ctl = cpu_to_le32(ctl);
4644 lin_dma_pkt->src_addr = cpu_to_le64(val);
4645 lin_dma_pkt->dst_addr = cpu_to_le64(addr);
4646 if (lin_dma_pkts_cnt > 1)
4647 lin_dma_pkt->tsize = cpu_to_le32(SZ_2G);
4649 lin_dma_pkt->tsize = cpu_to_le32(size);
4654 } while (--lin_dma_pkts_cnt);
4656 job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
4658 dev_err(hdev->dev, "Failed to allocate a new job\n");
4665 job->user_cb->cs_cnt++;
4666 job->user_cb_size = cb_size;
4667 job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
4668 job->patched_cb = job->user_cb;
4669 job->job_cb_size = job->user_cb_size;
4671 hl_debugfs_add_job(hdev, job);
4673 rc = goya_send_job_on_qman0(hdev, job);
4675 hl_debugfs_remove_job(hdev, job);
4681 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
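/*
 * Prepare the device for a new user context: scrub the SRAM with a known
 * pattern, restore registers a previous user may have changed (e.g. the DMA
 * write-completion addresses), configure the MMU for the new ASID and clear
 * the sync manager registers.
 */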
4686 int goya_context_switch(struct hl_device *hdev, u32 asid)
4688 struct asic_fixed_properties *prop = &hdev->asic_prop;
4689 u64 addr = prop->sram_base_address, sob_addr;
4690 u32 size = hdev->pldm ? 0x10000 : prop->sram_size;
4691 u64 val = 0x7777777777777777ull;
4693 u32 channel_off = mmDMA_CH_1_WR_COMP_ADDR_LO -
4694 mmDMA_CH_0_WR_COMP_ADDR_LO;
4696 rc = goya_memset_device_memory(hdev, addr, size, val, false);
4698 dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
4702 /* we need to reset registers that the user is allowed to change */
4703 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
4704 WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO, lower_32_bits(sob_addr));
4706 for (dma_id = 1 ; dma_id < NUMBER_OF_EXT_HW_QUEUES ; dma_id++) {
4707 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
4709 WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + channel_off * dma_id,
4710 lower_32_bits(sob_addr));
4713 WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
4715 goya_mmu_prepare(hdev, asid);
4717 goya_clear_sm_regs(hdev);
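/*
 * Zero the DRAM area reserved for the MMU page tables and the default DRAM
 * page, so the MMU starts from a clean state. Only relevant when the MMU is
 * enabled.
 */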
4722 static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
4724 struct asic_fixed_properties *prop = &hdev->asic_prop;
4725 struct goya_device *goya = hdev->asic_specific;
4726 u64 addr = prop->mmu_pgt_addr;
4727 u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE +
4730 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4733 return goya_memset_device_memory(hdev, addr, size, 0, true);
4736 static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
4738 struct goya_device *goya = hdev->asic_specific;
4739 u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
4740 u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE;
4741 u64 val = 0x9999999999999999ull;
4743 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4746 return goya_memset_device_memory(hdev, addr, size, val, true);
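/*
 * Create the kernel-context MMU mappings used by the device CPU: an identity
 * mapping of the F/W image area in DRAM (2MB pages) and a mapping of the
 * host-resident CPU-accessible memory, either as a single 2MB page or as 4KB
 * pages when the buffer is not 2MB-aligned. On failure, mappings already
 * created are unmapped.
 */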
4749 static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
4751 struct asic_fixed_properties *prop = &hdev->asic_prop;
4752 struct goya_device *goya = hdev->asic_specific;
4756 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4759 for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) {
4760 rc = hl_mmu_map(hdev->kernel_ctx, prop->dram_base_address + off,
4761 prop->dram_base_address + off, PAGE_SIZE_2MB,
4762 (off + PAGE_SIZE_2MB) == CPU_FW_IMAGE_SIZE);
4764 dev_err(hdev->dev, "Map failed for address 0x%llx\n",
4765 prop->dram_base_address + off);
4770 if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
4771 rc = hl_mmu_map(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR,
4772 hdev->cpu_accessible_dma_address, PAGE_SIZE_2MB, true);
4776 "Map failed for CPU accessible memory\n");
4777 off -= PAGE_SIZE_2MB;
4781 for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB) {
4782 rc = hl_mmu_map(hdev->kernel_ctx,
4783 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
4784 hdev->cpu_accessible_dma_address + cpu_off,
4785 PAGE_SIZE_4KB, true);
4788 "Map failed for CPU accessible memory\n");
4789 cpu_off -= PAGE_SIZE_4KB;
4795 goya_mmu_prepare_reg(hdev, mmCPU_IF_ARUSER_OVR, HL_KERNEL_ASID_ID);
4796 goya_mmu_prepare_reg(hdev, mmCPU_IF_AWUSER_OVR, HL_KERNEL_ASID_ID);
4797 WREG32(mmCPU_IF_ARUSER_OVR_EN, 0x7FF);
4798 WREG32(mmCPU_IF_AWUSER_OVR_EN, 0x7FF);
4800 /* Make sure configuration is flushed to device */
4801 RREG32(mmCPU_IF_AWUSER_OVR_EN);
4803 goya->device_cpu_mmu_mappings_done = true;
4808 for (; cpu_off >= 0 ; cpu_off -= PAGE_SIZE_4KB)
4809 if (hl_mmu_unmap(hdev->kernel_ctx,
4810 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
4811 PAGE_SIZE_4KB, true))
4812 dev_warn_ratelimited(hdev->dev,
4813 "failed to unmap address 0x%llx\n",
4814 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
4816 for (; off >= 0 ; off -= PAGE_SIZE_2MB)
4817 if (hl_mmu_unmap(hdev->kernel_ctx,
4818 prop->dram_base_address + off, PAGE_SIZE_2MB,
4820 dev_warn_ratelimited(hdev->dev,
4821 "failed to unmap address 0x%llx\n",
4822 prop->dram_base_address + off);
4827 void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev)
4829 struct asic_fixed_properties *prop = &hdev->asic_prop;
4830 struct goya_device *goya = hdev->asic_specific;
4833 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4836 if (!goya->device_cpu_mmu_mappings_done)
4839 WREG32(mmCPU_IF_ARUSER_OVR_EN, 0);
4840 WREG32(mmCPU_IF_AWUSER_OVR_EN, 0);
4842 if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
4843 if (hl_mmu_unmap(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR,
4844 PAGE_SIZE_2MB, true))
4846 "Failed to unmap CPU accessible memory\n");
4848 for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB)
4849 if (hl_mmu_unmap(hdev->kernel_ctx,
4850 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
4852 (cpu_off + PAGE_SIZE_4KB) >= SZ_2M))
4853 dev_warn_ratelimited(hdev->dev,
4854 "failed to unmap address 0x%llx\n",
4855 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
4858 for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB)
4859 if (hl_mmu_unmap(hdev->kernel_ctx,
4860 prop->dram_base_address + off, PAGE_SIZE_2MB,
4861 (off + PAGE_SIZE_2MB) >= CPU_FW_IMAGE_SIZE))
4862 dev_warn_ratelimited(hdev->dev,
4863 "Failed to unmap address 0x%llx\n",
4864 prop->dram_base_address + off);
4866 goya->device_cpu_mmu_mappings_done = false;
4869 static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
4871 struct goya_device *goya = hdev->asic_specific;
4874 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4877 if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
4878 WARN(1, "asid %u is too big\n", asid);
4882 /* zero the MMBP and ASID bits and then set the ASID */
4883 for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++)
4884 goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
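/*
 * Invalidate the entire MMU cache (L0 & L1) by kicking the STLB invalidation
 * and polling until it completes. A timeout is reported and triggers a hard
 * reset.
 */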
4887 static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
4890 struct goya_device *goya = hdev->asic_specific;
4891 u32 status, timeout_usec;
4894 if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
4895 hdev->hard_reset_pending)
4898 /* no need for L1-only invalidation in Goya */
4903 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
4905 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
4907 mutex_lock(&hdev->mmu_cache_lock);
4909 /* L0 & L1 invalidation */
4910 WREG32(mmSTLB_INV_ALL_START, 1);
4912 rc = hl_poll_timeout(
4914 mmSTLB_INV_ALL_START,
4920 mutex_unlock(&hdev->mmu_cache_lock);
4923 dev_err_ratelimited(hdev->dev,
4924 "MMU cache invalidation timeout\n");
4925 hl_device_reset(hdev, true, false);
4931 static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
4932 bool is_hard, u32 asid, u64 va, u64 size)
4934 struct goya_device *goya = hdev->asic_specific;
4935 u32 status, timeout_usec, inv_data, pi;
4938 if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
4939 hdev->hard_reset_pending)
4942 /* no need for L1-only invalidation in Goya */
4947 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
4949 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
4951 mutex_lock(&hdev->mmu_cache_lock);
4954 * TODO: currently we invalidate the entire L0 & L1, as in a regular hard
4955 * invalidation. Need to apply invalidation of specific cache lines with
4956 * a mask of ASID & VA & size.
4957 * Note that L1 will be flushed entirely in any case.
4960 /* L0 & L1 invalidation */
4961 inv_data = RREG32(mmSTLB_CACHE_INV);
4963 pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
4964 WREG32(mmSTLB_CACHE_INV,
4965 (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);
4967 rc = hl_poll_timeout(
4969 mmSTLB_INV_CONSUMER_INDEX,
4975 mutex_unlock(&hdev->mmu_cache_lock);
4978 dev_err_ratelimited(hdev->dev,
4979 "MMU cache invalidation timeout\n");
4980 hl_device_reset(hdev, true, false);
4986 int goya_send_heartbeat(struct hl_device *hdev)
4988 struct goya_device *goya = hdev->asic_specific;
4990 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
4993 return hl_fw_send_heartbeat(hdev);
4996 int goya_armcp_info_get(struct hl_device *hdev)
4998 struct goya_device *goya = hdev->asic_specific;
4999 struct asic_fixed_properties *prop = &hdev->asic_prop;
5003 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5006 rc = hl_fw_armcp_info_get(hdev);
5010 dram_size = le64_to_cpu(prop->armcp_info.dram_size);
5012 if ((!is_power_of_2(dram_size)) ||
5013 (dram_size < DRAM_PHYS_DEFAULT_SIZE)) {
5015 "F/W reported invalid DRAM size %llu. Trying to use default size\n",
5017 dram_size = DRAM_PHYS_DEFAULT_SIZE;
5020 prop->dram_size = dram_size;
5021 prop->dram_end_address = prop->dram_base_address + dram_size;
5024 if (!strlen(prop->armcp_info.card_name))
5025 strncpy(prop->armcp_info.card_name, GOYA_DEFAULT_CARD_NAME,
5031 static void goya_set_clock_gating(struct hl_device *hdev)
5033 /* clock gating not supported in Goya */
5036 static void goya_disable_clock_gating(struct hl_device *hdev)
5038 /* clock gating not supported in Goya */
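/*
 * Report whether all DMA, TPC and MME engines are idle, based on their QM,
 * CMDQ and core/config status registers. Optionally sets a bit per busy
 * engine in *mask and dumps a status table to the given debugfs seq_file.
 */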
5041 static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
5044 const char *fmt = "%-5d%-9s%#-14x%#-16x%#x\n";
5045 const char *dma_fmt = "%-5d%-9s%#-14x%#x\n";
5046 u32 qm_glbl_sts0, cmdq_glbl_sts0, dma_core_sts0, tpc_cfg_sts,
5048 bool is_idle = true, is_eng_idle;
5053 seq_puts(s, "\nDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0\n"
5054 "--- ------- ------------ -------------\n");
5056 offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;
5058 for (i = 0 ; i < DMA_MAX_NUM ; i++) {
5059 qm_glbl_sts0 = RREG32(mmDMA_QM_0_GLBL_STS0 + i * offset);
5060 dma_core_sts0 = RREG32(mmDMA_CH_0_STS0 + i * offset);
5061 is_eng_idle = IS_DMA_QM_IDLE(qm_glbl_sts0) &&
5062 IS_DMA_IDLE(dma_core_sts0);
5063 is_idle &= is_eng_idle;
5066 *mask |= !is_eng_idle << (GOYA_ENGINE_ID_DMA_0 + i);
5068 seq_printf(s, dma_fmt, i, is_eng_idle ? "Y" : "N",
5069 qm_glbl_sts0, dma_core_sts0);
5074 "\nTPC is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 CFG_STATUS\n"
5075 "--- ------- ------------ -------------- ----------\n");
5077 offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;
5079 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
5080 qm_glbl_sts0 = RREG32(mmTPC0_QM_GLBL_STS0 + i * offset);
5081 cmdq_glbl_sts0 = RREG32(mmTPC0_CMDQ_GLBL_STS0 + i * offset);
5082 tpc_cfg_sts = RREG32(mmTPC0_CFG_STATUS + i * offset);
5083 is_eng_idle = IS_TPC_QM_IDLE(qm_glbl_sts0) &&
5084 IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) &&
5085 IS_TPC_IDLE(tpc_cfg_sts);
5086 is_idle &= is_eng_idle;
5089 *mask |= !is_eng_idle << (GOYA_ENGINE_ID_TPC_0 + i);
5091 seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N",
5092 qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts);
5097 "\nMME is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 ARCH_STATUS\n"
5098 "--- ------- ------------ -------------- -----------\n");
5100 qm_glbl_sts0 = RREG32(mmMME_QM_GLBL_STS0);
5101 cmdq_glbl_sts0 = RREG32(mmMME_CMDQ_GLBL_STS0);
5102 mme_arch_sts = RREG32(mmMME_ARCH_STATUS);
5103 is_eng_idle = IS_MME_QM_IDLE(qm_glbl_sts0) &&
5104 IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) &&
5105 IS_MME_IDLE(mme_arch_sts);
5106 is_idle &= is_eng_idle;
5109 *mask |= !is_eng_idle << GOYA_ENGINE_ID_MME_0;
5111 seq_printf(s, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
5112 cmdq_glbl_sts0, mme_arch_sts);
5119 static void goya_hw_queues_lock(struct hl_device *hdev)
5120 __acquires(&goya->hw_queues_lock)
5122 struct goya_device *goya = hdev->asic_specific;
5124 spin_lock(&goya->hw_queues_lock);
5127 static void goya_hw_queues_unlock(struct hl_device *hdev)
5128 __releases(&goya->hw_queues_lock)
5130 struct goya_device *goya = hdev->asic_specific;
5132 spin_unlock(&goya->hw_queues_lock);
5135 static u32 goya_get_pci_id(struct hl_device *hdev)
5137 return hdev->pdev->device;
5140 static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
5143 struct goya_device *goya = hdev->asic_specific;
5145 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5148 return hl_fw_get_eeprom_data(hdev, data, max_size);
5151 static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
5153 return RREG32(mmHW_STATE);
5156 u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
5161 static void goya_ext_queue_init(struct hl_device *hdev, u32 q_idx)
5166 static void goya_ext_queue_reset(struct hl_device *hdev, u32 q_idx)
5171 static u32 goya_get_signal_cb_size(struct hl_device *hdev)
5176 static u32 goya_get_wait_cb_size(struct hl_device *hdev)
5181 static void goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id)
5186 static void goya_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id,
5187 u16 sob_val, u16 mon_id, u32 q_idx)
5192 static void goya_reset_sob(struct hl_device *hdev, void *data)
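/*
 * Select the DMA addressing mode: if the boot F/W left the POWER9 host magic
 * value in the non-reset scratchpad register, use 64-bit DMA, otherwise fall
 * back to 48-bit DMA.
 */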
5197 static void goya_set_dma_mask_from_fw(struct hl_device *hdev)
5199 if (RREG32(mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0) ==
5200 HL_POWER9_HOST_MAGIC) {
5201 dev_dbg(hdev->dev, "Working in 64-bit DMA mode\n");
5202 hdev->power9_64bit_dma_enable = 1;
5203 hdev->dma_mask = 64;
5205 dev_dbg(hdev->dev, "Working in 48-bit DMA mode\n");
5206 hdev->power9_64bit_dma_enable = 0;
5207 hdev->dma_mask = 48;
5211 u64 goya_get_device_time(struct hl_device *hdev)
5213 u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
5215 return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
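/* ASIC-specific function pointers used by the common habanalabs driver code */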
5218 static const struct hl_asic_funcs goya_funcs = {
5219 .early_init = goya_early_init,
5220 .early_fini = goya_early_fini,
5221 .late_init = goya_late_init,
5222 .late_fini = goya_late_fini,
5223 .sw_init = goya_sw_init,
5224 .sw_fini = goya_sw_fini,
5225 .hw_init = goya_hw_init,
5226 .hw_fini = goya_hw_fini,
5227 .halt_engines = goya_halt_engines,
5228 .suspend = goya_suspend,
5229 .resume = goya_resume,
5230 .cb_mmap = goya_cb_mmap,
5231 .ring_doorbell = goya_ring_doorbell,
5232 .pqe_write = goya_pqe_write,
5233 .asic_dma_alloc_coherent = goya_dma_alloc_coherent,
5234 .asic_dma_free_coherent = goya_dma_free_coherent,
5235 .get_int_queue_base = goya_get_int_queue_base,
5236 .test_queues = goya_test_queues,
5237 .asic_dma_pool_zalloc = goya_dma_pool_zalloc,
5238 .asic_dma_pool_free = goya_dma_pool_free,
5239 .cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
5240 .cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
5241 .hl_dma_unmap_sg = goya_dma_unmap_sg,
5242 .cs_parser = goya_cs_parser,
5243 .asic_dma_map_sg = goya_dma_map_sg,
5244 .get_dma_desc_list_size = goya_get_dma_desc_list_size,
5245 .add_end_of_cb_packets = goya_add_end_of_cb_packets,
5246 .update_eq_ci = goya_update_eq_ci,
5247 .context_switch = goya_context_switch,
5248 .restore_phase_topology = goya_restore_phase_topology,
5249 .debugfs_read32 = goya_debugfs_read32,
5250 .debugfs_write32 = goya_debugfs_write32,
5251 .debugfs_read64 = goya_debugfs_read64,
5252 .debugfs_write64 = goya_debugfs_write64,
5253 .add_device_attr = goya_add_device_attr,
5254 .handle_eqe = goya_handle_eqe,
5255 .set_pll_profile = goya_set_pll_profile,
5256 .get_events_stat = goya_get_events_stat,
5257 .read_pte = goya_read_pte,
5258 .write_pte = goya_write_pte,
5259 .mmu_invalidate_cache = goya_mmu_invalidate_cache,
5260 .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
5261 .send_heartbeat = goya_send_heartbeat,
5262 .set_clock_gating = goya_set_clock_gating,
5263 .disable_clock_gating = goya_disable_clock_gating,
5264 .debug_coresight = goya_debug_coresight,
5265 .is_device_idle = goya_is_device_idle,
5266 .soft_reset_late_init = goya_soft_reset_late_init,
5267 .hw_queues_lock = goya_hw_queues_lock,
5268 .hw_queues_unlock = goya_hw_queues_unlock,
5269 .get_pci_id = goya_get_pci_id,
5270 .get_eeprom_data = goya_get_eeprom_data,
5271 .send_cpu_message = goya_send_cpu_message,
5272 .get_hw_state = goya_get_hw_state,
5273 .pci_bars_map = goya_pci_bars_map,
5274 .set_dram_bar_base = goya_set_ddr_bar_base,
5275 .init_iatu = goya_init_iatu,
5278 .halt_coresight = goya_halt_coresight,
5279 .get_clk_rate = goya_get_clk_rate,
5280 .get_queue_id_for_cq = goya_get_queue_id_for_cq,
5281 .read_device_fw_version = goya_read_device_fw_version,
5282 .load_firmware_to_device = goya_load_firmware_to_device,
5283 .load_boot_fit_to_device = goya_load_boot_fit_to_device,
5284 .ext_queue_init = goya_ext_queue_init,
5285 .ext_queue_reset = goya_ext_queue_reset,
5286 .get_signal_cb_size = goya_get_signal_cb_size,
5287 .get_wait_cb_size = goya_get_wait_cb_size,
5288 .gen_signal_cb = goya_gen_signal_cb,
5289 .gen_wait_cb = goya_gen_wait_cb,
5290 .reset_sob = goya_reset_sob,
5291 .set_dma_mask_from_fw = goya_set_dma_mask_from_fw,
5292 .get_device_time = goya_get_device_time
5296 * goya_set_asic_funcs - set Goya function pointers
5298 * @hdev: pointer to hl_device structure
5301 void goya_set_asic_funcs(struct hl_device *hdev)
5303 hdev->asic_funcs = &goya_funcs;