1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #include <linux/slab.h>
4 #include <linux/ctype.h>
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/pci_ids.h>
8 #include "adf_accel_devices.h"
9 #include "adf_common_drv.h"
10 #include "icp_qat_uclo.h"
11 #include "icp_qat_hal.h"
12 #include "icp_qat_fw_loader_handle.h"
14 #define UWORD_CPYBUF_SIZE 1024
15 #define INVLD_UWORD 0xffffffffffull
16 #define PID_MINOR_REV 0xf
17 #define PID_MAJOR_REV (0xf << 4)
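/*
 * Bind the uimage at @image_num to AE @ae: record the assigned context
 * mask and allocate the region/page bookkeeping for the new slice.
 */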
19 static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
20 unsigned int ae, unsigned int image_num)
22 struct icp_qat_uclo_aedata *ae_data;
23 struct icp_qat_uclo_encapme *encap_image;
24 struct icp_qat_uclo_page *page = NULL;
25 struct icp_qat_uclo_aeslice *ae_slice = NULL;
27 ae_data = &obj_handle->ae_data[ae];
28 encap_image = &obj_handle->ae_uimage[image_num];
29 ae_slice = &ae_data->ae_slices[ae_data->slice_num];
30 ae_slice->encap_image = encap_image;
32 if (encap_image->img_ptr) {
33 ae_slice->ctx_mask_assigned =
34 encap_image->img_ptr->ctx_assigned;
35 ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
37 ae_slice->ctx_mask_assigned = 0;
39 ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
40 if (!ae_slice->region)
42 ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
45 page = ae_slice->page;
46 page->encap_page = encap_image->page;
47 ae_slice->page->region = ae_slice->region;
51 kfree(ae_slice->region);
52 ae_slice->region = NULL;
56 static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
61 pr_err("QAT: bad argument, ae_data is NULL\n ");
65 for (i = 0; i < ae_data->slice_num; i++) {
66 kfree(ae_data->ae_slices[i].region);
67 ae_data->ae_slices[i].region = NULL;
68 kfree(ae_data->ae_slices[i].page);
69 ae_data->ae_slices[i].page = NULL;
74 static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
75 unsigned int str_offset)
77 if (!str_table->table_len || str_offset > str_table->table_len)
79 return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
82 static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
84 int maj = hdr->maj_ver & 0xff;
85 int min = hdr->min_ver & 0xff;
87 if (hdr->file_id != ICP_QAT_UOF_FID) {
88 pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
91 if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
92 pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
99 static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
101 int maj = suof_hdr->maj_ver & 0xff;
102 int min = suof_hdr->min_ver & 0xff;
104 if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
105 pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
108 if (suof_hdr->fw_type != 0) {
109 pr_err("QAT: unsupported firmware type\n");
112 if (suof_hdr->num_chunks <= 0x1) {
113 pr_err("QAT: SUOF chunk amount is incorrect\n");
116 if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
117 pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
124 static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
125 unsigned int addr, unsigned int *val,
126 unsigned int num_in_bytes)
129 unsigned char *ptr = (unsigned char *)val;
131 while (num_in_bytes) {
132 memcpy(&outval, ptr, 4);
133 SRAM_WRITE(handle, addr, outval);
140 static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
141 unsigned char ae, unsigned int addr,
143 unsigned int num_in_bytes)
146 unsigned char *ptr = (unsigned char *)val;
148 addr >>= 0x2; /* convert to uword address */
150 while (num_in_bytes) {
151 memcpy(&outval, ptr, 4);
152 qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
158 static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
160 struct icp_qat_uof_batch_init
163 struct icp_qat_uof_batch_init *umem_init;
165 if (!umem_init_header)
167 umem_init = umem_init_header->next;
169 unsigned int addr, *value, size;
172 addr = umem_init->addr;
173 value = umem_init->value;
174 size = umem_init->size;
175 qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
176 umem_init = umem_init->next;
181 qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
182 struct icp_qat_uof_batch_init **base)
184 struct icp_qat_uof_batch_init *umem_init;
188 struct icp_qat_uof_batch_init *pre;
191 umem_init = umem_init->next;
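/* Parse a decimal number from @str into @num (AE indices in init-mem symbol names). */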
197 static int qat_uclo_parse_num(char *str, unsigned int *num)
200 unsigned long ae = 0;
203 strncpy(buf, str, 15);
204 for (i = 0; i < 16; i++) {
205 if (!isdigit(buf[i])) {
210 if (kstrtoul(buf, 10, &ae))
213 *num = (unsigned int)ae;
217 static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
218 struct icp_qat_uof_initmem *init_mem,
219 unsigned int size_range, unsigned int *ae)
221 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
224 if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
225 pr_err("QAT: initmem is out of range");
228 if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
229 pr_err("QAT: Memory scope for init_mem error\n");
232 str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
234 pr_err("QAT: AE name assigned in UOF init table is NULL\n");
237 if (qat_uclo_parse_num(str, ae)) {
238 pr_err("QAT: Parse num for AE number failed\n");
241 if (*ae >= ICP_QAT_UCLO_MAX_AE) {
242 pr_err("QAT: ae %d out of range\n", *ae);
248 static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
249 *handle, struct icp_qat_uof_initmem
250 *init_mem, unsigned int ae,
251 struct icp_qat_uof_batch_init
254 struct icp_qat_uof_batch_init *init_header, *tail;
255 struct icp_qat_uof_batch_init *mem_init, *tail_old;
256 struct icp_qat_uof_memvar_attr *mem_val_attr;
257 unsigned int i, flag = 0;
260 (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
261 sizeof(struct icp_qat_uof_initmem));
263 init_header = *init_tab_base;
265 init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
268 init_header->size = 1;
269 *init_tab_base = init_header;
272 tail_old = init_header;
273 while (tail_old->next)
274 tail_old = tail_old->next;
276 for (i = 0; i < init_mem->val_attr_num; i++) {
277 mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
281 mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
282 mem_init->value = &mem_val_attr->value;
284 mem_init->next = NULL;
285 tail->next = mem_init;
287 init_header->size += qat_hal_get_ins_num();
292 /* Do not free the list head unless we allocated it. */
293 tail_old = tail_old->next;
295 kfree(*init_tab_base);
296 *init_tab_base = NULL;
300 mem_init = tail_old->next;
307 static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
308 struct icp_qat_uof_initmem *init_mem)
310 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
313 if (qat_uclo_fetch_initmem_ae(handle, init_mem,
314 handle->chip_info->lm_size, &ae))
316 if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
317 &obj_handle->lm_init_tab[ae]))
322 static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
323 struct icp_qat_uof_initmem *init_mem)
325 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
326 unsigned int ae, ustore_size, uaddr, i;
327 struct icp_qat_uclo_aedata *aed;
329 ustore_size = obj_handle->ustore_phy_size;
330 if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
332 if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
333 &obj_handle->umem_init_tab[ae]))
335 /* set the highest ustore address referenced */
336 uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
337 aed = &obj_handle->ae_data[ae];
338 for (i = 0; i < aed->slice_num; i++) {
339 if (aed->ae_slices[i].encap_image->uwords_num < uaddr)
340 aed->ae_slices[i].encap_image->uwords_num = uaddr;
345 static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
346 struct icp_qat_uof_initmem *init_mem)
348 switch (init_mem->region) {
349 case ICP_QAT_UOF_LMEM_REGION:
350 if (qat_uclo_init_lmem_seg(handle, init_mem))
353 case ICP_QAT_UOF_UMEM_REGION:
354 if (qat_uclo_init_umem_seg(handle, init_mem))
358 pr_err("QAT: initmem region error. region type=0x%x\n",
365 static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
366 struct icp_qat_uclo_encapme *image)
369 struct icp_qat_uclo_encap_page *page;
370 struct icp_qat_uof_image *uof_image;
372 unsigned int ustore_size;
373 unsigned int patt_pos;
374 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
375 unsigned long ae_mask = handle->hal_handle->ae_mask;
376 unsigned long cfg_ae_mask = handle->cfg_ae_mask;
379 uof_image = image->img_ptr;
380 fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(u64),
384 for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
385 memcpy(&fill_data[i], &uof_image->fill_pattern,
389 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
390 unsigned long ae_assigned = uof_image->ae_assigned;
392 if (!test_bit(ae, &ae_assigned))
395 if (!test_bit(ae, &cfg_ae_mask))
398 ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
399 patt_pos = page->beg_addr_p + page->micro_words_num;
401 qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
402 page->beg_addr_p, &fill_data[0]);
403 qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
404 ustore_size - patt_pos + 1,
405 &fill_data[page->beg_addr_p]);
411 static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
414 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
415 struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
416 unsigned long ae_mask = handle->hal_handle->ae_mask;
418 for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
419 if (initmem->num_in_bytes) {
420 if (qat_uclo_init_ae_memory(handle, initmem))
423 initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
425 sizeof(struct icp_qat_uof_initmem)) +
426 (sizeof(struct icp_qat_uof_memvar_attr) *
427 initmem->val_attr_num));
430 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
431 if (qat_hal_batch_wr_lm(handle, ae,
432 obj_handle->lm_init_tab[ae])) {
433 pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
436 qat_uclo_cleanup_batch_init_list(handle,
437 &obj_handle->lm_init_tab[ae]);
438 qat_uclo_batch_wr_umem(handle, ae,
439 obj_handle->umem_init_tab[ae]);
440 qat_uclo_cleanup_batch_init_list(handle,
447 static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
448 char *chunk_id, void *cur)
451 struct icp_qat_uof_chunkhdr *chunk_hdr =
452 (struct icp_qat_uof_chunkhdr *)
453 ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
455 for (i = 0; i < obj_hdr->num_chunks; i++) {
456 if ((cur < (void *)&chunk_hdr[i]) &&
457 !strncmp(chunk_hdr[i].chunk_id, chunk_id,
458 ICP_QAT_UOF_OBJID_LEN)) {
459 return &chunk_hdr[i];
465 static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
468 unsigned int topbit = 1 << 0xF;
469 unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
471 reg ^= inbyte << 0x8;
472 for (i = 0; i < 0x8; i++) {
474 reg = (reg << 1) ^ 0x1021;
481 static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
483 unsigned int chksum = 0;
487 chksum = qat_uclo_calc_checksum(chksum, *ptr++);
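/*
 * Locate the file chunk named @chunk_id, verify its checksum and wrap
 * it in a freshly allocated icp_qat_uclo_objhdr.
 */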
491 static struct icp_qat_uclo_objhdr *
492 qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
495 struct icp_qat_uof_filechunkhdr *file_chunk;
496 struct icp_qat_uclo_objhdr *obj_hdr;
500 file_chunk = (struct icp_qat_uof_filechunkhdr *)
501 (buf + sizeof(struct icp_qat_uof_filehdr));
502 for (i = 0; i < file_hdr->num_chunks; i++) {
503 if (!strncmp(file_chunk->chunk_id, chunk_id,
504 ICP_QAT_UOF_OBJID_LEN)) {
505 chunk = buf + file_chunk->offset;
506 if (file_chunk->checksum != qat_uclo_calc_str_checksum(
507 chunk, file_chunk->size))
509 obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
512 obj_hdr->file_buff = chunk;
513 obj_hdr->checksum = file_chunk->checksum;
514 obj_hdr->size = file_chunk->size;
523 qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
524 struct icp_qat_uof_image *image)
526 struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
527 struct icp_qat_uof_objtable *neigh_reg_tab;
528 struct icp_qat_uof_code_page *code_page;
530 code_page = (struct icp_qat_uof_code_page *)
531 ((char *)image + sizeof(struct icp_qat_uof_image));
532 uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
533 code_page->uc_var_tab_offset);
534 imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
535 code_page->imp_var_tab_offset);
536 imp_expr_tab = (struct icp_qat_uof_objtable *)
537 (encap_uof_obj->beg_uof +
538 code_page->imp_expr_tab_offset);
539 if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
540 imp_expr_tab->entry_num) {
541 pr_err("QAT: UOF can't contain imported variable to be parsed\n");
544 neigh_reg_tab = (struct icp_qat_uof_objtable *)
545 (encap_uof_obj->beg_uof +
546 code_page->neigh_reg_tab_offset);
547 if (neigh_reg_tab->entry_num) {
548 pr_err("QAT: UOF can't contain neighbor register table\n");
551 if (image->numpages > 1) {
552 pr_err("QAT: UOF can't contain multiple pages\n");
555 if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
556 pr_err("QAT: UOF can't use shared control store feature\n");
559 if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
560 pr_err("QAT: UOF can't use reloadable feature\n");
566 static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
568 struct icp_qat_uof_image *img,
569 struct icp_qat_uclo_encap_page *page)
571 struct icp_qat_uof_code_page *code_page;
572 struct icp_qat_uof_code_area *code_area;
573 struct icp_qat_uof_objtable *uword_block_tab;
574 struct icp_qat_uof_uword_block *uwblock;
577 code_page = (struct icp_qat_uof_code_page *)
578 ((char *)img + sizeof(struct icp_qat_uof_image));
579 page->def_page = code_page->def_page;
580 page->page_region = code_page->page_region;
581 page->beg_addr_v = code_page->beg_addr_v;
582 page->beg_addr_p = code_page->beg_addr_p;
583 code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
584 code_page->code_area_offset);
585 page->micro_words_num = code_area->micro_words_num;
586 uword_block_tab = (struct icp_qat_uof_objtable *)
587 (encap_uof_obj->beg_uof +
588 code_area->uword_block_tab);
589 page->uwblock_num = uword_block_tab->entry_num;
590 uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
591 sizeof(struct icp_qat_uof_objtable));
592 page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
593 for (i = 0; i < uword_block_tab->entry_num; i++)
594 page->uwblock[i].micro_words =
595 (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
598 static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
599 struct icp_qat_uclo_encapme *ae_uimage,
603 struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
604 struct icp_qat_uof_image *image;
605 struct icp_qat_uof_objtable *ae_regtab;
606 struct icp_qat_uof_objtable *init_reg_sym_tab;
607 struct icp_qat_uof_objtable *sbreak_tab;
608 struct icp_qat_uof_encap_obj *encap_uof_obj =
609 &obj_handle->encap_uof_obj;
611 for (j = 0; j < max_image; j++) {
612 chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
613 ICP_QAT_UOF_IMAG, chunk_hdr);
616 image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
618 ae_regtab = (struct icp_qat_uof_objtable *)
619 (image->reg_tab_offset +
620 obj_handle->obj_hdr->file_buff);
621 ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
622 ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
623 (((char *)ae_regtab) +
624 sizeof(struct icp_qat_uof_objtable));
625 init_reg_sym_tab = (struct icp_qat_uof_objtable *)
626 (image->init_reg_sym_tab +
627 obj_handle->obj_hdr->file_buff);
628 ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
629 ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
630 (((char *)init_reg_sym_tab) +
631 sizeof(struct icp_qat_uof_objtable));
632 sbreak_tab = (struct icp_qat_uof_objtable *)
633 (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
634 ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
635 ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
636 (((char *)sbreak_tab) +
637 sizeof(struct icp_qat_uof_objtable));
638 ae_uimage[j].img_ptr = image;
639 if (qat_uclo_check_image_compat(encap_uof_obj, image))
642 kzalloc(sizeof(struct icp_qat_uclo_encap_page),
644 if (!ae_uimage[j].page)
646 qat_uclo_map_image_page(encap_uof_obj, image,
651 for (i = 0; i < j; i++)
652 kfree(ae_uimage[i].page);
656 static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
660 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
661 unsigned long ae_mask = handle->hal_handle->ae_mask;
662 unsigned long cfg_ae_mask = handle->cfg_ae_mask;
664 for_each_set_bit(ae, &ae_mask, max_ae) {
665 if (!test_bit(ae, &cfg_ae_mask))
668 for (i = 0; i < obj_handle->uimage_num; i++) {
669 unsigned long ae_assigned = obj_handle->ae_uimage[i].img_ptr->ae_assigned;
671 if (!test_bit(ae, &ae_assigned))
674 if (qat_uclo_init_ae_data(obj_handle, ae, i))
679 pr_err("QAT: uimage uses AE not set\n");
685 static struct icp_qat_uof_strtable *
686 qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
687 char *tab_name, struct icp_qat_uof_strtable *str_table)
689 struct icp_qat_uof_chunkhdr *chunk_hdr;
691 chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
692 obj_hdr->file_buff, tab_name, NULL);
696 memcpy(&str_table->table_len, obj_hdr->file_buff +
697 chunk_hdr->offset, sizeof(str_table->table_len));
698 hdr_size = (char *)&str_table->strings - (char *)str_table;
699 str_table->strings = (uintptr_t)obj_hdr->file_buff +
700 chunk_hdr->offset + hdr_size;
707 qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
708 struct icp_qat_uclo_init_mem_table *init_mem_tab)
710 struct icp_qat_uof_chunkhdr *chunk_hdr;
712 chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
713 ICP_QAT_UOF_IMEM, NULL);
715 memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
716 chunk_hdr->offset, sizeof(unsigned int));
717 init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
718 (encap_uof_obj->beg_uof + chunk_hdr->offset +
719 sizeof(unsigned int));
724 qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
726 switch (handle->pci_dev->device) {
727 case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
728 return ICP_QAT_AC_895XCC_DEV_TYPE;
729 case PCI_DEVICE_ID_INTEL_QAT_C62X:
730 return ICP_QAT_AC_C62X_DEV_TYPE;
731 case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
732 return ICP_QAT_AC_C3XXX_DEV_TYPE;
733 case ADF_4XXX_PCI_DEVICE_ID:
734 case ADF_401XX_PCI_DEVICE_ID:
735 return ICP_QAT_AC_4XXX_A_DEV_TYPE;
737 pr_err("QAT: unsupported device 0x%x\n",
738 handle->pci_dev->device);
743 static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
745 unsigned int maj_ver, prod_type = obj_handle->prod_type;
747 if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
748 pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
749 obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
753 maj_ver = obj_handle->prod_rev & 0xff;
754 if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
755 obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
756 pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
762 static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
763 unsigned char ae, unsigned char ctx_mask,
764 enum icp_qat_uof_regtype reg_type,
765 unsigned short reg_addr, unsigned int value)
774 return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
786 return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
794 return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
797 return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
799 pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
805 static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
807 struct icp_qat_uclo_encapme *encap_ae)
810 unsigned char ctx_mask;
811 struct icp_qat_uof_init_regsym *init_regsym;
813 if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
814 ICP_QAT_UCLO_MAX_CTX)
819 for (i = 0; i < encap_ae->init_regsym_num; i++) {
820 unsigned int exp_res;
822 init_regsym = &encap_ae->init_regsym[i];
823 exp_res = init_regsym->value;
824 switch (init_regsym->init_type) {
825 case ICP_QAT_UOF_INIT_REG:
826 qat_uclo_init_reg(handle, ae, ctx_mask,
827 (enum icp_qat_uof_regtype)
828 init_regsym->reg_type,
829 (unsigned short)init_regsym->reg_addr,
832 case ICP_QAT_UOF_INIT_REG_CTX:
833 /* check if ctx is appropriate for the ctxMode */
834 if (!((1 << init_regsym->ctx) & ctx_mask)) {
835 pr_err("QAT: invalid ctx num = 0x%x\n",
839 qat_uclo_init_reg(handle, ae,
841 (1 << init_regsym->ctx),
842 (enum icp_qat_uof_regtype)
843 init_regsym->reg_type,
844 (unsigned short)init_regsym->reg_addr,
847 case ICP_QAT_UOF_INIT_EXPR:
848 pr_err("QAT: INIT_EXPR feature not supported\n");
850 case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
851 pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
860 static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
862 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
863 unsigned long ae_mask = handle->hal_handle->ae_mask;
864 struct icp_qat_uclo_aedata *aed;
867 if (obj_handle->global_inited)
869 if (obj_handle->init_mem_tab.entry_num) {
870 if (qat_uclo_init_memory(handle)) {
871 pr_err("QAT: initialize memory failed\n");
876 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
877 aed = &obj_handle->ae_data[ae];
878 for (s = 0; s < aed->slice_num; s++) {
879 if (!aed->ae_slices[s].encap_image)
881 if (qat_uclo_init_reg_sym(handle, ae, aed->ae_slices[s].encap_image))
885 obj_handle->global_inited = 1;
889 static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
890 struct icp_qat_uclo_objhandle *obj_handle,
892 struct icp_qat_uof_image *uof_image)
897 mode = ICP_QAT_CTX_MODE(uof_image->ae_mode);
898 ret = qat_hal_set_ae_ctx_mode(handle, ae, mode);
900 pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
903 if (handle->chip_info->nn) {
904 mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
905 ret = qat_hal_set_ae_nn_mode(handle, ae, mode);
907 pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
911 mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
912 ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode);
914 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
917 mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
918 ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode);
920 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
923 if (handle->chip_info->lm2lm3) {
924 mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
925 ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode);
927 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
930 mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
931 ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode);
933 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n");
936 mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
937 qat_hal_set_ae_tindex_mode(handle, ae, mode);
942 static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
944 struct icp_qat_uof_image *uof_image;
945 struct icp_qat_uclo_aedata *ae_data;
946 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
947 unsigned long ae_mask = handle->hal_handle->ae_mask;
948 unsigned long cfg_ae_mask = handle->cfg_ae_mask;
952 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
953 if (!test_bit(ae, &cfg_ae_mask))
956 ae_data = &obj_handle->ae_data[ae];
957 for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
958 ICP_QAT_UCLO_MAX_CTX); s++) {
959 if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
961 uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
962 error = qat_hal_set_modes(handle, obj_handle, ae,
971 static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
973 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
974 struct icp_qat_uclo_encapme *image;
977 for (a = 0; a < obj_handle->uimage_num; a++) {
978 image = &obj_handle->ae_uimage[a];
979 image->uwords_num = image->page->beg_addr_p +
980 image->page->micro_words_num;
984 static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
986 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
989 obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
990 obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
991 obj_handle->obj_hdr->file_buff;
992 obj_handle->uword_in_bytes = 6;
993 obj_handle->prod_type = qat_uclo_get_dev_type(handle);
994 obj_handle->prod_rev = PID_MAJOR_REV |
995 (PID_MINOR_REV & handle->hal_handle->revision_id);
996 if (qat_uclo_check_uof_compat(obj_handle)) {
997 pr_err("QAT: UOF incompatible\n");
1000 obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64),
1002 if (!obj_handle->uword_buf)
1004 obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
1005 if (!obj_handle->obj_hdr->file_buff ||
1006 !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
1007 &obj_handle->str_table)) {
1008 pr_err("QAT: UOF doesn't have effective images\n");
1011 obj_handle->uimage_num =
1012 qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
1013 ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
1014 if (!obj_handle->uimage_num)
1016 if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
1017 pr_err("QAT: Bad object\n");
1018 goto out_check_uof_aemask_err;
1020 qat_uclo_init_uword_num(handle);
1021 qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
1022 &obj_handle->init_mem_tab);
1023 if (qat_uclo_set_ae_mode(handle))
1024 goto out_check_uof_aemask_err;
1026 out_check_uof_aemask_err:
1027 for (ae = 0; ae < obj_handle->uimage_num; ae++)
1028 kfree(obj_handle->ae_uimage[ae].page);
1030 kfree(obj_handle->uword_buf);
1034 static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
1035 struct icp_qat_suof_filehdr *suof_ptr,
1038 unsigned int check_sum = 0;
1039 unsigned int min_ver_offset = 0;
1040 struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1042 suof_handle->file_id = ICP_QAT_SUOF_FID;
1043 suof_handle->suof_buf = (char *)suof_ptr;
1044 suof_handle->suof_size = suof_size;
1045 min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
1047 check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
1049 if (check_sum != suof_ptr->check_sum) {
1050 pr_err("QAT: incorrect SUOF checksum\n");
1053 suof_handle->check_sum = suof_ptr->check_sum;
1054 suof_handle->min_ver = suof_ptr->min_ver;
1055 suof_handle->maj_ver = suof_ptr->maj_ver;
1056 suof_handle->fw_type = suof_ptr->fw_type;
1060 static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
1061 struct icp_qat_suof_img_hdr *suof_img_hdr,
1062 struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
1064 struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1065 struct icp_qat_simg_ae_mode *ae_mode;
1066 struct icp_qat_suof_objhdr *suof_objhdr;
1068 suof_img_hdr->simg_buf = (suof_handle->suof_buf +
1069 suof_chunk_hdr->offset +
1070 sizeof(*suof_objhdr));
1071 suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
1072 (suof_handle->suof_buf +
1073 suof_chunk_hdr->offset))->img_length;
1075 suof_img_hdr->css_header = suof_img_hdr->simg_buf;
1076 suof_img_hdr->css_key = (suof_img_hdr->css_header +
1077 sizeof(struct icp_qat_css_hdr));
1078 suof_img_hdr->css_signature = suof_img_hdr->css_key +
1079 ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
1080 ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
1081 suof_img_hdr->css_simg = suof_img_hdr->css_signature +
1082 ICP_QAT_CSS_SIGNATURE_LEN(handle);
1084 ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
1085 suof_img_hdr->ae_mask = ae_mode->ae_mask;
1086 suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
1087 suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
1088 suof_img_hdr->fw_type = ae_mode->fw_type;
1092 qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
1093 struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
1095 char **sym_str = (char **)&suof_handle->sym_str;
1096 unsigned int *sym_size = &suof_handle->sym_size;
1097 struct icp_qat_suof_strtable *str_table_obj;
1099 *sym_size = *(unsigned int *)(uintptr_t)
1100 (suof_chunk_hdr->offset + suof_handle->suof_buf);
1101 *sym_str = (char *)(uintptr_t)
1102 (suof_handle->suof_buf + suof_chunk_hdr->offset +
1103 sizeof(str_table_obj->tab_length));
1106 static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
1107 struct icp_qat_suof_img_hdr *img_hdr)
1109 struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
1110 unsigned int prod_rev, maj_ver, prod_type;
1112 prod_type = qat_uclo_get_dev_type(handle);
1113 img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
1114 prod_rev = PID_MAJOR_REV |
1115 (PID_MINOR_REV & handle->hal_handle->revision_id);
1116 if (img_ae_mode->dev_type != prod_type) {
1117 pr_err("QAT: incompatible product type %x\n",
1118 img_ae_mode->dev_type);
1121 maj_ver = prod_rev & 0xff;
1122 if (maj_ver > img_ae_mode->devmax_ver ||
1123 maj_ver < img_ae_mode->devmin_ver) {
1124 pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
1130 static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
1132 struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
1134 kfree(sobj_handle->img_table.simg_hdr);
1135 sobj_handle->img_table.simg_hdr = NULL;
1136 kfree(handle->sobj_handle);
1137 handle->sobj_handle = NULL;
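/* Swap the SIMG header at @img_id with the last entry of the table. */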
1140 static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
1141 unsigned int img_id, unsigned int num_simgs)
1143 struct icp_qat_suof_img_hdr img_header;
1145 if (img_id != num_simgs - 1) {
1146 memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
1147 sizeof(*suof_img_hdr));
1148 memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
1149 sizeof(*suof_img_hdr));
1150 memcpy(&suof_img_hdr[img_id], &img_header,
1151 sizeof(*suof_img_hdr));
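/*
 * Parse the SUOF header and chunk table, map every SIMG header and, on
 * parts without a shared ustore, move the image that owns AE0 to the
 * end of the table so it is loaded last.
 */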
1155 static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
1156 struct icp_qat_suof_filehdr *suof_ptr,
1159 struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1160 struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
1161 struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
1162 int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
1164 struct icp_qat_suof_img_hdr img_header;
1166 if (!suof_ptr || suof_size == 0) {
1167 pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
1170 if (qat_uclo_check_suof_format(suof_ptr))
1172 ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
1175 suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
1176 ((uintptr_t)suof_ptr + sizeof(*suof_ptr));
1178 qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
1179 suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
1181 if (suof_handle->img_table.num_simgs != 0) {
1182 suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
1187 suof_handle->img_table.simg_hdr = suof_img_hdr;
1189 for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
1190 qat_uclo_map_simg(handle, &suof_img_hdr[i],
1191 &suof_chunk_hdr[1 + i]);
1192 ret = qat_uclo_check_simg_compat(handle,
1196 suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask;
1197 if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
1201 if (!handle->chip_info->tgroup_share_ustore) {
1202 qat_uclo_tail_img(suof_img_hdr, ae0_img,
1203 suof_handle->img_table.num_simgs);
1209 #define ADD_ADDR(high, low) ((((u64)high) << 32) + low)
1210 #define BITS_IN_DWORD 32
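/*
 * Point the FCU at the authentication descriptor and issue the AUTH
 * command, then poll the status CSR until verification completes or
 * FW_AUTH_MAX_RETRY is exceeded.
 */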
1212 static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
1213 struct icp_qat_fw_auth_desc *desc)
1215 u32 fcu_sts, retry = 0;
1216 u32 fcu_ctl_csr, fcu_sts_csr;
1217 u32 fcu_dram_hi_csr, fcu_dram_lo_csr;
1220 bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
1221 - sizeof(struct icp_qat_auth_chunk);
1223 fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
1224 fcu_sts_csr = handle->chip_info->fcu_sts_csr;
1225 fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi;
1226 fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo;
1228 SET_CAP_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
1229 SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr);
1230 SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
1233 msleep(FW_AUTH_WAIT_PERIOD);
1234 fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
1235 if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
1237 if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
1238 if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
1240 } while (retry++ < FW_AUTH_MAX_RETRY);
1242 pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n",
1243 fcu_sts & FCU_AUTH_STS_MASK, retry);
1247 static bool qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle,
1250 struct icp_qat_suof_handle *sobj_handle;
1252 if (!handle->chip_info->tgroup_share_ustore)
1255 sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle;
1256 if (handle->hal_handle->admin_ae_mask &
1257 sobj_handle->img_table.simg_hdr[imgid].ae_mask)
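/*
 * Broadcast-load an authenticated image: build a mask of the idle AEs
 * named in the descriptor, issue a single FCU LOAD, then poll until
 * all of them report as loaded.
 */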
1263 static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
1264 struct icp_qat_fw_auth_desc *desc)
1266 unsigned long ae_mask = handle->hal_handle->ae_mask;
1267 unsigned long desc_ae_mask = desc->ae_mask;
1268 u32 fcu_sts, ae_broadcast_mask = 0;
1269 u32 fcu_loaded_csr, ae_loaded;
1270 u32 fcu_sts_csr, fcu_ctl_csr;
1271 unsigned int ae, retry = 0;
1273 if (handle->chip_info->tgroup_share_ustore) {
1274 fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
1275 fcu_sts_csr = handle->chip_info->fcu_sts_csr;
1276 fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
1278 pr_err("Chip 0x%x doesn't support broadcast load\n",
1279 handle->pci_dev->device);
1283 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
1284 if (qat_hal_check_ae_active(handle, (unsigned char)ae)) {
1285 pr_err("QAT: Broadcast load failed. AE is not enabled or active.\n");
1289 if (test_bit(ae, &desc_ae_mask))
1290 ae_broadcast_mask |= 1 << ae;
1293 if (ae_broadcast_mask) {
1294 SET_CAP_CSR(handle, FCU_ME_BROADCAST_MASK_TYPE,
1297 SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_LOAD);
1300 msleep(FW_AUTH_WAIT_PERIOD);
1301 fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
1302 fcu_sts &= FCU_AUTH_STS_MASK;
1304 if (fcu_sts == FCU_STS_LOAD_FAIL) {
1305 pr_err("Broadcast load failed: 0x%x)\n", fcu_sts);
1307 } else if (fcu_sts == FCU_STS_LOAD_DONE) {
1308 ae_loaded = GET_CAP_CSR(handle, fcu_loaded_csr);
1309 ae_loaded >>= handle->chip_info->fcu_loaded_ae_pos;
1311 if ((ae_loaded & ae_broadcast_mask) == ae_broadcast_mask)
1314 } while (retry++ < FW_AUTH_MAX_RETRY);
1316 if (retry > FW_AUTH_MAX_RETRY) {
1317 pr_err("QAT: broadcast load failed timeout %d\n", retry);
1324 static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
1325 struct icp_firml_dram_desc *dram_desc,
1331 vptr = dma_alloc_coherent(&handle->pci_dev->dev,
1332 size, &ptr, GFP_KERNEL);
1335 dram_desc->dram_base_addr_v = vptr;
1336 dram_desc->dram_bus_addr = ptr;
1337 dram_desc->dram_size = size;
1341 static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
1342 struct icp_firml_dram_desc *dram_desc)
1344 if (handle && dram_desc && dram_desc->dram_base_addr_v) {
1345 dma_free_coherent(&handle->pci_dev->dev,
1346 (size_t)(dram_desc->dram_size),
1347 dram_desc->dram_base_addr_v,
1348 dram_desc->dram_bus_addr);
1352 memset(dram_desc, 0, sizeof(*dram_desc));
1355 static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
1356 struct icp_qat_fw_auth_desc **desc)
1358 struct icp_firml_dram_desc dram_desc;
1361 dram_desc.dram_base_addr_v = *desc;
1362 dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
1363 (*desc))->chunk_bus_addr;
1364 dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
1365 (*desc))->chunk_size;
1366 qat_uclo_simg_free(handle, &dram_desc);
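/*
 * Sanity-check a firmware image before mapping it: with CSS
 * authentication, the header length, total size and firmware type in
 * the CSS header must match; otherwise only the per-type size bounds
 * for AE and MMP images are enforced.
 */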
1370 static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
1371 char *image, unsigned int size,
1372 unsigned int fw_type)
1374 char *fw_type_name = fw_type ? "MMP" : "AE";
1375 unsigned int css_dword_size = sizeof(u32);
1377 if (handle->chip_info->fw_auth) {
1378 struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
1379 unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle);
1381 if ((css_hdr->header_len * css_dword_size) != header_len)
1383 if ((css_hdr->size * css_dword_size) != size)
1385 if (fw_type != css_hdr->fw_type)
1387 if (size <= header_len)
1392 if (fw_type == CSS_AE_FIRMWARE) {
1393 if (size < sizeof(struct icp_qat_simg_ae_mode *) +
1394 ICP_QAT_SIMG_AE_INIT_SEQ_LEN)
1396 if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)
1398 } else if (fw_type == CSS_MMP_FIRMWARE) {
1399 if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
1402 pr_err("QAT: Unsupported firmware type\n");
1408 pr_err("QAT: Invalid %s firmware image\n", fw_type_name);
1412 static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
1413 char *image, unsigned int size,
1414 struct icp_qat_fw_auth_desc **desc)
1416 struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
1417 struct icp_qat_fw_auth_desc *auth_desc;
1418 struct icp_qat_auth_chunk *auth_chunk;
1419 u64 virt_addr, bus_addr, virt_base;
1420 unsigned int length, simg_offset = sizeof(*auth_chunk);
1421 struct icp_qat_simg_ae_mode *simg_ae_mode;
1422 struct icp_firml_dram_desc img_desc;
1424 if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) {
1425 pr_err("QAT: error, input image size overflow %d\n", size);
1428 length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
1429 ICP_QAT_CSS_AE_SIMG_LEN(handle) + simg_offset :
1430 size + ICP_QAT_CSS_FWSK_PAD_LEN(handle) + simg_offset;
1431 if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
1432 pr_err("QAT: error, allocate continuous dram fail\n");
1436 auth_chunk = img_desc.dram_base_addr_v;
1437 auth_chunk->chunk_size = img_desc.dram_size;
1438 auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
1439 virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
1440 bus_addr = img_desc.dram_bus_addr + simg_offset;
1441 auth_desc = img_desc.dram_base_addr_v;
1442 auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1443 auth_desc->css_hdr_low = (unsigned int)bus_addr;
1444 virt_addr = virt_base;
1446 memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
1448 bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
1450 virt_addr = virt_addr + sizeof(*css_hdr);
1452 auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1453 auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
1455 memcpy((void *)(uintptr_t)virt_addr,
1456 (void *)(image + sizeof(*css_hdr)),
1457 ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
1459 memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
1460 0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));
1463 memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
1464 ICP_QAT_CSS_FWSK_PAD_LEN(handle)),
1465 (void *)(image + sizeof(*css_hdr) +
1466 ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
1467 sizeof(unsigned int));
1470 bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
1471 auth_desc->fwsk_pub_low) +
1472 ICP_QAT_CSS_FWSK_PUB_LEN(handle);
1473 virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle);
1474 auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1475 auth_desc->signature_low = (unsigned int)bus_addr;
1477 memcpy((void *)(uintptr_t)virt_addr,
1478 (void *)(image + sizeof(*css_hdr) +
1479 ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
1480 ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)),
1481 ICP_QAT_CSS_SIGNATURE_LEN(handle));
1483 bus_addr = ADD_ADDR(auth_desc->signature_high,
1484 auth_desc->signature_low) +
1485 ICP_QAT_CSS_SIGNATURE_LEN(handle);
1486 virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
1488 auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1489 auth_desc->img_low = (unsigned int)bus_addr;
1490 auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle);
1491 memcpy((void *)(uintptr_t)virt_addr,
1492 (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)),
1493 auth_desc->img_len);
1494 virt_addr = virt_base;
1496 if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
1498 auth_desc->img_ae_mode_data_high = auth_desc->img_high;
1499 auth_desc->img_ae_mode_data_low = auth_desc->img_low;
1500 bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
1501 auth_desc->img_ae_mode_data_low) +
1502 sizeof(struct icp_qat_simg_ae_mode);
1504 auth_desc->img_ae_init_data_high = (unsigned int)
1505 (bus_addr >> BITS_IN_DWORD);
1506 auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
1507 bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
1508 auth_desc->img_ae_insts_high = (unsigned int)
1509 (bus_addr >> BITS_IN_DWORD);
1510 auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
1511 virt_addr += sizeof(struct icp_qat_css_hdr);
1512 virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle);
1513 virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
1514 simg_ae_mode = (struct icp_qat_simg_ae_mode *)(uintptr_t)virt_addr;
1515 auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask;
1517 auth_desc->img_ae_insts_high = auth_desc->img_high;
1518 auth_desc->img_ae_insts_low = auth_desc->img_low;
1524 static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
1525 struct icp_qat_fw_auth_desc *desc)
1527 unsigned long ae_mask = handle->hal_handle->ae_mask;
1528 u32 fcu_sts_csr, fcu_ctl_csr;
1529 u32 loaded_aes, loaded_csr;
1533 fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
1534 fcu_sts_csr = handle->chip_info->fcu_sts_csr;
1535 loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
1537 for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num) {
1540 if (!((desc->ae_mask >> i) & 0x1))
1542 if (qat_hal_check_ae_active(handle, i)) {
1543 pr_err("QAT: AE %d is active\n", i);
1546 SET_CAP_CSR(handle, fcu_ctl_csr,
1547 (FCU_CTRL_CMD_LOAD |
1548 (1 << FCU_CTRL_BROADCAST_POS) |
1549 (i << FCU_CTRL_AE_POS)));
1552 msleep(FW_AUTH_WAIT_PERIOD);
1553 fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
1554 if ((fcu_sts & FCU_AUTH_STS_MASK) ==
1555 FCU_STS_LOAD_DONE) {
1556 loaded_aes = GET_CAP_CSR(handle, loaded_csr);
1557 loaded_aes >>= handle->chip_info->fcu_loaded_ae_pos;
1558 if (loaded_aes & (1 << i))
1561 } while (retry++ < FW_AUTH_MAX_RETRY);
1562 if (retry > FW_AUTH_MAX_RETRY) {
1563 pr_err("QAT: firmware load failed timeout %x\n", retry);
1570 static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
1571 void *addr_ptr, int mem_size)
1573 struct icp_qat_suof_handle *suof_handle;
1575 suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
1578 handle->sobj_handle = suof_handle;
1579 if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
1580 qat_uclo_del_suof(handle);
1581 pr_err("QAT: map SUOF failed\n");
1587 int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
1588 void *addr_ptr, int mem_size)
1590 struct icp_qat_fw_auth_desc *desc = NULL;
1594 ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE);
1598 if (handle->chip_info->fw_auth) {
1599 status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc);
1601 status = qat_uclo_auth_fw(handle, desc);
1602 qat_uclo_ummap_auth_fw(handle, &desc);
1604 if (handle->chip_info->mmp_sram_size < mem_size) {
1605 pr_err("QAT: MMP size is too large: 0x%x\n", mem_size);
1608 qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
1613 static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
1614 void *addr_ptr, int mem_size)
1616 struct icp_qat_uof_filehdr *filehdr;
1617 struct icp_qat_uclo_objhandle *objhdl;
1619 objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
1622 objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
1623 if (!objhdl->obj_buf)
1624 goto out_objbuf_err;
1625 filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
1626 if (qat_uclo_check_uof_format(filehdr))
1627 goto out_objhdr_err;
1628 objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
1630 if (!objhdl->obj_hdr) {
1631 pr_err("QAT: object file chunk is null\n");
1632 goto out_objhdr_err;
1634 handle->obj_handle = objhdl;
1635 if (qat_uclo_parse_uof_obj(handle))
1636 goto out_overlay_obj_err;
1639 out_overlay_obj_err:
1640 handle->obj_handle = NULL;
1641 kfree(objhdl->obj_hdr);
1643 kfree(objhdl->obj_buf);
1649 static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
1650 struct icp_qat_mof_file_hdr *mof_ptr,
1653 struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
1654 unsigned int min_ver_offset;
1655 unsigned int checksum;
1657 mobj_handle->file_id = ICP_QAT_MOF_FID;
1658 mobj_handle->mof_buf = (char *)mof_ptr;
1659 mobj_handle->mof_size = mof_size;
1661 min_ver_offset = mof_size - offsetof(struct icp_qat_mof_file_hdr,
1663 checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver,
1665 if (checksum != mof_ptr->checksum) {
1666 pr_err("QAT: incorrect MOF checksum\n");
1670 mobj_handle->checksum = mof_ptr->checksum;
1671 mobj_handle->min_ver = mof_ptr->min_ver;
1672 mobj_handle->maj_ver = mof_ptr->maj_ver;
1676 static void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle)
1678 struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
1680 kfree(mobj_handle->obj_table.obj_hdr);
1681 mobj_handle->obj_table.obj_hdr = NULL;
1682 kfree(handle->mobj_handle);
1683 handle->mobj_handle = NULL;
1686 static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
1687 char *obj_name, char **obj_ptr,
1688 unsigned int *obj_size)
1690 struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr;
1693 for (i = 0; i < mobj_handle->obj_table.num_objs; i++) {
1694 if (!strncmp(obj_hdr[i].obj_name, obj_name,
1695 ICP_QAT_SUOF_OBJ_NAME_LEN)) {
1696 *obj_ptr = obj_hdr[i].obj_buf;
1697 *obj_size = obj_hdr[i].obj_size;
1702 pr_err("QAT: object %s is not found inside MOF\n", obj_name);
1706 static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
1707 struct icp_qat_mof_objhdr *mobj_hdr,
1708 struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr)
1712 if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_UOF_IMAG,
1713 ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
1714 obj = mobj_handle->uobjs_hdr + obj_chunkhdr->offset;
1715 } else if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_SUOF_IMAG,
1716 ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
1717 obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset;
1719 pr_err("QAT: unsupported chunk id\n");
1722 mobj_hdr->obj_buf = obj;
1723 mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size;
1724 mobj_hdr->obj_name = obj_chunkhdr->name + mobj_handle->sym_str;
1728 static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
1730 struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr;
1731 struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr;
1732 struct icp_qat_mof_obj_hdr *uobj_hdr;
1733 struct icp_qat_mof_obj_hdr *sobj_hdr;
1734 struct icp_qat_mof_objhdr *mobj_hdr;
1735 unsigned int uobj_chunk_num = 0;
1736 unsigned int sobj_chunk_num = 0;
1737 unsigned int *valid_chunk;
1740 uobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr;
1741 sobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr;
1743 uobj_chunk_num = uobj_hdr->num_chunks;
1745 sobj_chunk_num = sobj_hdr->num_chunks;
1747 mobj_hdr = kcalloc(uobj_chunk_num + sobj_chunk_num,
1748 sizeof(*mobj_hdr), GFP_KERNEL);
1752 mobj_handle->obj_table.obj_hdr = mobj_hdr;
1753 valid_chunk = &mobj_handle->obj_table.num_objs;
1754 uobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
1755 ((uintptr_t)uobj_hdr + sizeof(*uobj_hdr));
1756 sobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
1757 ((uintptr_t)sobj_hdr + sizeof(*sobj_hdr));
1759 /* map uof objects */
1760 for (i = 0; i < uobj_chunk_num; i++) {
1761 ret = qat_uclo_map_obj_from_mof(mobj_handle,
1762 &mobj_hdr[*valid_chunk],
1769 /* map suof objects */
1770 for (i = 0; i < sobj_chunk_num; i++) {
1771 ret = qat_uclo_map_obj_from_mof(mobj_handle,
1772 &mobj_hdr[*valid_chunk],
1779 if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) {
1780 pr_err("QAT: inconsistent UOF/SUOF chunk amount\n");
1786 static void qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle,
1787 struct icp_qat_mof_chunkhdr *mof_chunkhdr)
1789 char **sym_str = (char **)&mobj_handle->sym_str;
1790 unsigned int *sym_size = &mobj_handle->sym_size;
1791 struct icp_qat_mof_str_table *str_table_obj;
1793 *sym_size = *(unsigned int *)(uintptr_t)
1794 (mof_chunkhdr->offset + mobj_handle->mof_buf);
1795 *sym_str = (char *)(uintptr_t)
1796 (mobj_handle->mof_buf + mof_chunkhdr->offset +
1797 sizeof(str_table_obj->tab_len));
1800 static void qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle,
1801 struct icp_qat_mof_chunkhdr *mof_chunkhdr)
1803 char *chunk_id = mof_chunkhdr->chunk_id;
1805 if (!strncmp(chunk_id, ICP_QAT_MOF_SYM_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
1806 qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr);
1807 else if (!strncmp(chunk_id, ICP_QAT_UOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
1808 mobj_handle->uobjs_hdr = mobj_handle->mof_buf +
1809 mof_chunkhdr->offset;
1810 else if (!strncmp(chunk_id, ICP_QAT_SUOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
1811 mobj_handle->sobjs_hdr = mobj_handle->mof_buf +
1812 mof_chunkhdr->offset;
1815 static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr)
1817 int maj = mof_hdr->maj_ver & 0xff;
1818 int min = mof_hdr->min_ver & 0xff;
1820 if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
1821 pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id);
1825 if (mof_hdr->num_chunks <= 0x1) {
1826 pr_err("QAT: MOF chunk amount is incorrect\n");
1829 if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
1830 pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n",
1837 static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
1838 struct icp_qat_mof_file_hdr *mof_ptr,
1839 u32 mof_size, char *obj_name, char **obj_ptr,
1840 unsigned int *obj_size)
1842 struct icp_qat_mof_chunkhdr *mof_chunkhdr;
1843 unsigned int file_id = mof_ptr->file_id;
1844 struct icp_qat_mof_handle *mobj_handle;
1845 unsigned short chunks_num;
1849 if (file_id == ICP_QAT_UOF_FID || file_id == ICP_QAT_SUOF_FID) {
1851 *obj_ptr = (char *)mof_ptr;
1853 *obj_size = mof_size;
1856 if (qat_uclo_check_mof_format(mof_ptr))
1859 mobj_handle = kzalloc(sizeof(*mobj_handle), GFP_KERNEL);
1863 handle->mobj_handle = mobj_handle;
1864 ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size);
1868 mof_chunkhdr = (void *)mof_ptr + sizeof(*mof_ptr);
1869 chunks_num = mof_ptr->num_chunks;
1871 /* Parse MOF file chunks */
1872 for (i = 0; i < chunks_num; i++)
1873 qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]);
1875 /* All sym_objs uobjs and sobjs should be available */
1876 if (!mobj_handle->sym_str ||
1877 (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr))
1880 ret = qat_uclo_map_objs_from_mof(mobj_handle);
1884 /* Seek specified uof object in MOF */
1885 return qat_uclo_seek_obj_inside_mof(mobj_handle, obj_name,
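/*
 * Top-level mapping entry point: extract @obj_name from a MOF container
 * if one is supplied, then map the result as a SUOF (authenticated
 * firmware) or a UOF object.
 */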
1889 int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
1890 void *addr_ptr, u32 mem_size, char *obj_name)
1896 BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
1897 (sizeof(handle->hal_handle->ae_mask) * 8));
1899 if (!handle || !addr_ptr || mem_size < 24)
1903 ret = qat_uclo_map_mof_obj(handle, addr_ptr, mem_size, obj_name,
1904 &obj_addr, &obj_size);
1908 obj_addr = addr_ptr;
1909 obj_size = mem_size;
1912 return (handle->chip_info->fw_auth) ?
1913 qat_uclo_map_suof_obj(handle, obj_addr, obj_size) :
1914 qat_uclo_map_uof_obj(handle, obj_addr, obj_size);
1917 void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle)
1919 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1922 if (handle->mobj_handle)
1923 qat_uclo_del_mof(handle);
1924 if (handle->sobj_handle)
1925 qat_uclo_del_suof(handle);
1929 kfree(obj_handle->uword_buf);
1930 for (a = 0; a < obj_handle->uimage_num; a++)
1931 kfree(obj_handle->ae_uimage[a].page);
1933 for (a = 0; a < handle->hal_handle->ae_max_num; a++)
1934 qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
1936 kfree(obj_handle->obj_hdr);
1937 kfree(obj_handle->obj_buf);
1939 handle->obj_handle = NULL;
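/*
 * Fetch the microword for address @addr_p (or @raddr for relocatable
 * pages) from the page's uword blocks, masking it to 44 bits; addresses
 * outside every block get the @fill pattern.
 */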
1942 static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
1943 struct icp_qat_uclo_encap_page *encap_page,
1944 u64 *uword, unsigned int addr_p,
1945 unsigned int raddr, u64 fill)
1947 unsigned int i, addr;
1954 addr = (encap_page->page_region) ? raddr : addr_p;
1955 for (i = 0; i < encap_page->uwblock_num; i++) {
1956 if (addr >= encap_page->uwblock[i].start_addr &&
1957 addr <= encap_page->uwblock[i].start_addr +
1958 encap_page->uwblock[i].words_num - 1) {
1959 addr -= encap_page->uwblock[i].start_addr;
1960 addr *= obj_handle->uword_in_bytes;
1961 memcpy(&uwrd, (void *)(((uintptr_t)
1962 encap_page->uwblock[i].micro_words) + addr),
1963 obj_handle->uword_in_bytes);
1964 uwrd = uwrd & GENMASK_ULL(43, 0);
1968 if (*uword == INVLD_UWORD)
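/*
 * Stream an encapsulated page into an AE's ustore, assembling at most
 * UWORD_CPYBUF_SIZE microwords at a time in obj_handle->uword_buf.
 */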
1972 static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
1973 struct icp_qat_uclo_encap_page
1974 *encap_page, unsigned int ae)
1976 unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
1977 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1980 /* load the page starting at appropriate ustore address */
1981 /* get fill-pattern from an image -- they are all the same */
1982 memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
1984 uw_physical_addr = encap_page->beg_addr_p;
1985 uw_relative_addr = 0;
1986 words_num = encap_page->micro_words_num;
1988 if (words_num < UWORD_CPYBUF_SIZE)
1991 cpylen = UWORD_CPYBUF_SIZE;
1993 /* load the buffer */
1994 for (i = 0; i < cpylen; i++)
1995 qat_uclo_fill_uwords(obj_handle, encap_page,
1996 &obj_handle->uword_buf[i],
1997 uw_physical_addr + i,
1998 uw_relative_addr + i, fill_pat);
2000 /* copy the buffer to ustore */
2001 qat_hal_wr_uwords(handle, (unsigned char)ae,
2002 uw_physical_addr, cpylen,
2003 obj_handle->uword_buf);
2005 uw_physical_addr += cpylen;
2006 uw_relative_addr += cpylen;
2007 words_num -= cpylen;
2011 static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
2012 struct icp_qat_uof_image *image)
2014 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
2015 unsigned long ae_mask = handle->hal_handle->ae_mask;
2016 unsigned long cfg_ae_mask = handle->cfg_ae_mask;
2017 unsigned long ae_assigned = image->ae_assigned;
2018 struct icp_qat_uclo_aedata *aed;
2019 unsigned int ctx_mask, s;
2020 struct icp_qat_uclo_page *page;
2024 if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
2028 /* load the default page and set assigned CTX PC
2029 * to the entrypoint address */
2030 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
2031 if (!test_bit(ae, &cfg_ae_mask))
2034 if (!test_bit(ae, &ae_assigned))
2037 aed = &obj_handle->ae_data[ae];
2038 /* find the slice to which this image is assigned */
2039 for (s = 0; s < aed->slice_num; s++) {
2040 if (image->ctx_assigned &
2041 aed->ae_slices[s].ctx_mask_assigned)
2044 if (s >= aed->slice_num)
2046 page = aed->ae_slices[s].page;
2047 if (!page->encap_page->def_page)
2049 qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
2051 page = aed->ae_slices[s].page;
2052 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
2053 aed->ae_slices[s].cur_page[ctx] =
2054 (ctx_mask & (1 << ctx)) ? page : NULL;
2055 qat_hal_set_live_ctx(handle, (unsigned char)ae,
2056 image->ctx_assigned);
2057 qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
2058 image->entry_address);
2062 static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
2065 struct icp_qat_fw_auth_desc *desc = NULL;
2066 struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
2067 struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
2070 for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
2071 ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf,
2072 simg_hdr[i].simg_len,
2077 if (qat_uclo_map_auth_fw(handle,
2078 (char *)simg_hdr[i].simg_buf,
2080 simg_hdr[i].simg_len,
2083 if (qat_uclo_auth_fw(handle, desc))
2085 if (qat_uclo_is_broadcast(handle, i)) {
2086 if (qat_uclo_broadcast_load_fw(handle, desc))
2089 if (qat_uclo_load_fw(handle, desc))
2092 qat_uclo_ummap_auth_fw(handle, &desc);
2096 qat_uclo_ummap_auth_fw(handle, &desc);
2100 static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
2102 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
2105 if (qat_uclo_init_globals(handle))
2107 for (i = 0; i < obj_handle->uimage_num; i++) {
2108 if (!obj_handle->ae_uimage[i].img_ptr)
2110 if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
2112 qat_uclo_wr_uimage_page(handle,
2113 obj_handle->ae_uimage[i].img_ptr);
2118 int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
2120 return (handle->chip_info->fw_auth) ? qat_uclo_wr_suof_img(handle) :
2121 qat_uclo_wr_uof_img(handle);
2124 int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
2125 unsigned int cfg_ae_mask)
2130 handle->cfg_ae_mask = cfg_ae_mask;