2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
are met:
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 #include <linux/slab.h>
48 #include <linux/ctype.h>
49 #include <linux/kernel.h>
50 #include <linux/delay.h>
51 #include "adf_accel_devices.h"
52 #include "adf_common_drv.h"
53 #include "icp_qat_uclo.h"
54 #include "icp_qat_hal.h"
55 #include "icp_qat_fw_loader_handle.h"
/* Number of 64-bit microwords copied per chunk when staging ustore writes. */
57 #define UWORD_CPYBUF_SIZE 1024
/* Sentinel marking an invalid/unused microword slot (40 bits set). */
58 #define INVLD_UWORD 0xffffffffffull
/* Masks extracting the minor / major product revision from revision_id. */
59 #define PID_MINOR_REV 0xf
60 #define PID_MAJOR_REV (0xf << 4)
62 static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
63 unsigned int ae, unsigned int image_num)
65 struct icp_qat_uclo_aedata *ae_data;
66 struct icp_qat_uclo_encapme *encap_image;
67 struct icp_qat_uclo_page *page = NULL;
68 struct icp_qat_uclo_aeslice *ae_slice = NULL;
70 ae_data = &obj_handle->ae_data[ae];
71 encap_image = &obj_handle->ae_uimage[image_num];
72 ae_slice = &ae_data->ae_slices[ae_data->slice_num];
73 ae_slice->encap_image = encap_image;
75 if (encap_image->img_ptr) {
76 ae_slice->ctx_mask_assigned =
77 encap_image->img_ptr->ctx_assigned;
78 ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
80 ae_slice->ctx_mask_assigned = 0;
82 ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
83 if (!ae_slice->region)
85 ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
88 page = ae_slice->page;
89 page->encap_page = encap_image->page;
90 ae_slice->page->region = ae_slice->region;
94 kfree(ae_slice->region);
95 ae_slice->region = NULL;
99 static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
104 pr_err("QAT: bad argument, ae_data is NULL\n ");
108 for (i = 0; i < ae_data->slice_num; i++) {
109 kfree(ae_data->ae_slices[i].region);
110 ae_data->ae_slices[i].region = NULL;
111 kfree(ae_data->ae_slices[i].page);
112 ae_data->ae_slices[i].page = NULL;
117 static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
118 unsigned int str_offset)
120 if ((!str_table->table_len) || (str_offset > str_table->table_len))
122 return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
125 static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
127 int maj = hdr->maj_ver & 0xff;
128 int min = hdr->min_ver & 0xff;
130 if (hdr->file_id != ICP_QAT_UOF_FID) {
131 pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
134 if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
135 pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
142 static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
144 int maj = suof_hdr->maj_ver & 0xff;
145 int min = suof_hdr->min_ver & 0xff;
147 if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
148 pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
151 if (suof_hdr->fw_type != 0) {
152 pr_err("QAT: unsupported firmware type\n");
155 if (suof_hdr->num_chunks <= 0x1) {
156 pr_err("QAT: SUOF chunk amount is incorrect\n");
159 if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
160 pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
/*
 * Write @num_in_bytes from @val to device SRAM starting at @addr, one
 * 32-bit word at a time. memcpy() is used so @val may be unaligned.
 * NOTE(review): assumes num_in_bytes is a multiple of 4 — callers pass
 * word-sized images; confirm if reused elsewhere.
 */
static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned int addr, unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		SRAM_WRITE(handle, addr, outval);
		num_in_bytes -= 4;
		ptr += 4;
		addr += 4;
	}
}
/*
 * Write @num_in_bytes from @val into AE @ae's micro-store memory.
 * The byte address is converted to a microword address (>> 2) and each
 * 32-bit word is pushed through qat_hal_wr_umem() individually.
 */
static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned int addr,
				      unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	addr >>= 0x2; /* convert to uword address */

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
		num_in_bytes -= 4;
		ptr += 4;
	}
}
201 static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
203 struct icp_qat_uof_batch_init
206 struct icp_qat_uof_batch_init *umem_init;
208 if (!umem_init_header)
210 umem_init = umem_init_header->next;
212 unsigned int addr, *value, size;
215 addr = umem_init->addr;
216 value = umem_init->value;
217 size = umem_init->size;
218 qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
219 umem_init = umem_init->next;
224 qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
225 struct icp_qat_uof_batch_init **base)
227 struct icp_qat_uof_batch_init *umem_init;
231 struct icp_qat_uof_batch_init *pre;
234 umem_init = umem_init->next;
240 static int qat_uclo_parse_num(char *str, unsigned int *num)
243 unsigned long ae = 0;
246 strncpy(buf, str, 15);
247 for (i = 0; i < 16; i++) {
248 if (!isdigit(buf[i])) {
253 if ((kstrtoul(buf, 10, &ae)))
256 *num = (unsigned int)ae;
260 static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
261 struct icp_qat_uof_initmem *init_mem,
262 unsigned int size_range, unsigned int *ae)
264 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
267 if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
268 pr_err("QAT: initmem is out of range");
271 if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
272 pr_err("QAT: Memory scope for init_mem error\n");
275 str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
277 pr_err("QAT: AE name assigned in UOF init table is NULL\n");
280 if (qat_uclo_parse_num(str, ae)) {
281 pr_err("QAT: Parse num for AE number failed\n");
284 if (*ae >= ICP_QAT_UCLO_MAX_AE) {
285 pr_err("QAT: ae %d out of range\n", *ae);
/*
 * Build (or extend) the per-AE batch-init linked list from the memvar
 * attributes trailing @init_mem, so the writes can be replayed later by
 * qat_uclo_batch_wr_umem()/qat_hal_batch_wr_lm().
 *
 * NOTE(review): this chunk is an extraction artifact — several lines
 * (list-head allocation guard, loop tail updates, the error-unwind
 * loop and returns) are missing, so the visible control flow is
 * incomplete. Do not treat the text below as compilable; compare
 * against the upstream driver before modifying.
 */
291 static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
292 *handle, struct icp_qat_uof_initmem
293 *init_mem, unsigned int ae,
294 struct icp_qat_uof_batch_init
297 struct icp_qat_uof_batch_init *init_header, *tail;
298 struct icp_qat_uof_batch_init *mem_init, *tail_old;
299 struct icp_qat_uof_memvar_attr *mem_val_attr;
/* flag records whether the list head was allocated here (owned) */
300 unsigned int i, flag = 0;
/* memvar attributes immediately follow the initmem header in the UOF */
303 (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
304 sizeof(struct icp_qat_uof_initmem));
306 init_header = *init_tab_base;
/* allocate a fresh list head when none exists yet */
308 init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
311 init_header->size = 1;
312 *init_tab_base = init_header;
/* find the current tail so new nodes append to the existing list */
315 tail_old = init_header;
316 while (tail_old->next)
317 tail_old = tail_old->next;
/* one list node per memvar attribute */
319 for (i = 0; i < init_mem->val_attr_num; i++) {
320 mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
324 mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
325 mem_init->value = &mem_val_attr->value;
327 mem_init->next = NULL;
328 tail->next = mem_init;
/* account for the instructions each queued write will cost */
330 init_header->size += qat_hal_get_ins_num();
/* error unwind: free nodes appended in this call only */
335 /* Do not free the list head unless we allocated it. */
336 tail_old = tail_old->next;
338 kfree(*init_tab_base);
339 *init_tab_base = NULL;
343 mem_init = tail_old->next;
350 static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
351 struct icp_qat_uof_initmem *init_mem)
353 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
356 if (qat_uclo_fetch_initmem_ae(handle, init_mem,
357 ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
359 if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
360 &obj_handle->lm_init_tab[ae]))
365 static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
366 struct icp_qat_uof_initmem *init_mem)
368 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
369 unsigned int ae, ustore_size, uaddr, i;
371 ustore_size = obj_handle->ustore_phy_size;
372 if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
374 if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
375 &obj_handle->umem_init_tab[ae]))
377 /* set the highest ustore address referenced */
378 uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
379 for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
380 if (obj_handle->ae_data[ae].ae_slices[i].
381 encap_image->uwords_num < uaddr)
382 obj_handle->ae_data[ae].ae_slices[i].
383 encap_image->uwords_num = uaddr;
388 static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
389 struct icp_qat_uof_initmem *init_mem)
391 switch (init_mem->region) {
392 case ICP_QAT_UOF_LMEM_REGION:
393 if (qat_uclo_init_lmem_seg(handle, init_mem))
396 case ICP_QAT_UOF_UMEM_REGION:
397 if (qat_uclo_init_umem_seg(handle, init_mem))
401 pr_err("QAT: initmem region error. region type=0x%x\n",
408 static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
409 struct icp_qat_uclo_encapme *image)
412 struct icp_qat_uclo_encap_page *page;
413 struct icp_qat_uof_image *uof_image;
415 unsigned int ustore_size;
416 unsigned int patt_pos;
417 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
420 uof_image = image->img_ptr;
421 fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
425 for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
426 memcpy(&fill_data[i], &uof_image->fill_pattern,
430 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
431 if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
433 ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
434 patt_pos = page->beg_addr_p + page->micro_words_num;
436 qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
437 page->beg_addr_p, &fill_data[0]);
438 qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
439 ustore_size - patt_pos + 1,
440 &fill_data[page->beg_addr_p]);
446 static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
449 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
450 struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
452 for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
453 if (initmem->num_in_bytes) {
454 if (qat_uclo_init_ae_memory(handle, initmem))
457 initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
459 sizeof(struct icp_qat_uof_initmem)) +
460 (sizeof(struct icp_qat_uof_memvar_attr) *
461 initmem->val_attr_num));
463 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
464 if (qat_hal_batch_wr_lm(handle, ae,
465 obj_handle->lm_init_tab[ae])) {
466 pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
469 qat_uclo_cleanup_batch_init_list(handle,
470 &obj_handle->lm_init_tab[ae]);
471 qat_uclo_batch_wr_umem(handle, ae,
472 obj_handle->umem_init_tab[ae]);
473 qat_uclo_cleanup_batch_init_list(handle,
480 static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
481 char *chunk_id, void *cur)
484 struct icp_qat_uof_chunkhdr *chunk_hdr =
485 (struct icp_qat_uof_chunkhdr *)
486 ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
488 for (i = 0; i < obj_hdr->num_chunks; i++) {
489 if ((cur < (void *)&chunk_hdr[i]) &&
490 !strncmp(chunk_hdr[i].chunk_id, chunk_id,
491 ICP_QAT_UOF_OBJID_LEN)) {
492 return &chunk_hdr[i];
/*
 * Fold one byte @ch into the running CRC state @reg using the CCITT
 * polynomial 0x1021 (bytewise, MSB-first). Returns the updated 16-bit
 * CRC value.
 */
static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
{
	int i;
	unsigned int topbit = 1 << 0xF;
	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);

	reg ^= inbyte << 0x8;
	for (i = 0; i < 0x8; i++) {
		if (reg & topbit)
			reg = (reg << 1) ^ 0x1021;
		else
			reg <<= 1;
	}
	return reg & 0xffff;
}
/*
 * Compute the CRC-CCITT checksum over @num bytes at @ptr (initial
 * value 0). A NULL @ptr yields 0.
 */
static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
	unsigned int chksum = 0;

	if (ptr)
		while (num--)
			chksum = qat_uclo_calc_checksum(chksum, *ptr++);
	return chksum;
}
524 static struct icp_qat_uclo_objhdr *
525 qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
528 struct icp_qat_uof_filechunkhdr *file_chunk;
529 struct icp_qat_uclo_objhdr *obj_hdr;
533 file_chunk = (struct icp_qat_uof_filechunkhdr *)
534 (buf + sizeof(struct icp_qat_uof_filehdr));
535 for (i = 0; i < file_hdr->num_chunks; i++) {
536 if (!strncmp(file_chunk->chunk_id, chunk_id,
537 ICP_QAT_UOF_OBJID_LEN)) {
538 chunk = buf + file_chunk->offset;
539 if (file_chunk->checksum != qat_uclo_calc_str_checksum(
540 chunk, file_chunk->size))
542 obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
545 obj_hdr->file_buff = chunk;
546 obj_hdr->checksum = file_chunk->checksum;
547 obj_hdr->size = file_chunk->size;
556 qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
557 struct icp_qat_uof_image *image)
559 struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
560 struct icp_qat_uof_objtable *neigh_reg_tab;
561 struct icp_qat_uof_code_page *code_page;
563 code_page = (struct icp_qat_uof_code_page *)
564 ((char *)image + sizeof(struct icp_qat_uof_image));
565 uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
566 code_page->uc_var_tab_offset);
567 imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
568 code_page->imp_var_tab_offset);
569 imp_expr_tab = (struct icp_qat_uof_objtable *)
570 (encap_uof_obj->beg_uof +
571 code_page->imp_expr_tab_offset);
572 if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
573 imp_expr_tab->entry_num) {
574 pr_err("QAT: UOF can't contain imported variable to be parsed\n");
577 neigh_reg_tab = (struct icp_qat_uof_objtable *)
578 (encap_uof_obj->beg_uof +
579 code_page->neigh_reg_tab_offset);
580 if (neigh_reg_tab->entry_num) {
581 pr_err("QAT: UOF can't contain shared control store feature\n");
584 if (image->numpages > 1) {
585 pr_err("QAT: UOF can't contain multiple pages\n");
588 if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
589 pr_err("QAT: UOF can't use shared control store feature\n");
592 if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
593 pr_err("QAT: UOF can't use reloadable feature\n");
599 static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
601 struct icp_qat_uof_image *img,
602 struct icp_qat_uclo_encap_page *page)
604 struct icp_qat_uof_code_page *code_page;
605 struct icp_qat_uof_code_area *code_area;
606 struct icp_qat_uof_objtable *uword_block_tab;
607 struct icp_qat_uof_uword_block *uwblock;
610 code_page = (struct icp_qat_uof_code_page *)
611 ((char *)img + sizeof(struct icp_qat_uof_image));
612 page->def_page = code_page->def_page;
613 page->page_region = code_page->page_region;
614 page->beg_addr_v = code_page->beg_addr_v;
615 page->beg_addr_p = code_page->beg_addr_p;
616 code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
617 code_page->code_area_offset);
618 page->micro_words_num = code_area->micro_words_num;
619 uword_block_tab = (struct icp_qat_uof_objtable *)
620 (encap_uof_obj->beg_uof +
621 code_area->uword_block_tab);
622 page->uwblock_num = uword_block_tab->entry_num;
623 uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
624 sizeof(struct icp_qat_uof_objtable));
625 page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
626 for (i = 0; i < uword_block_tab->entry_num; i++)
627 page->uwblock[i].micro_words =
628 (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
631 static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
632 struct icp_qat_uclo_encapme *ae_uimage,
636 struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
637 struct icp_qat_uof_image *image;
638 struct icp_qat_uof_objtable *ae_regtab;
639 struct icp_qat_uof_objtable *init_reg_sym_tab;
640 struct icp_qat_uof_objtable *sbreak_tab;
641 struct icp_qat_uof_encap_obj *encap_uof_obj =
642 &obj_handle->encap_uof_obj;
644 for (j = 0; j < max_image; j++) {
645 chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
646 ICP_QAT_UOF_IMAG, chunk_hdr);
649 image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
651 ae_regtab = (struct icp_qat_uof_objtable *)
652 (image->reg_tab_offset +
653 obj_handle->obj_hdr->file_buff);
654 ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
655 ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
656 (((char *)ae_regtab) +
657 sizeof(struct icp_qat_uof_objtable));
658 init_reg_sym_tab = (struct icp_qat_uof_objtable *)
659 (image->init_reg_sym_tab +
660 obj_handle->obj_hdr->file_buff);
661 ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
662 ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
663 (((char *)init_reg_sym_tab) +
664 sizeof(struct icp_qat_uof_objtable));
665 sbreak_tab = (struct icp_qat_uof_objtable *)
666 (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
667 ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
668 ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
669 (((char *)sbreak_tab) +
670 sizeof(struct icp_qat_uof_objtable));
671 ae_uimage[j].img_ptr = image;
672 if (qat_uclo_check_image_compat(encap_uof_obj, image))
675 kzalloc(sizeof(struct icp_qat_uclo_encap_page),
677 if (!ae_uimage[j].page)
679 qat_uclo_map_image_page(encap_uof_obj, image,
684 for (i = 0; i < j; i++)
685 kfree(ae_uimage[i].page);
689 static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
693 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
695 for (ae = 0; ae < max_ae; ae++) {
697 (unsigned long *)&handle->hal_handle->ae_mask))
699 for (i = 0; i < obj_handle->uimage_num; i++) {
700 if (!test_bit(ae, (unsigned long *)
701 &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
704 if (qat_uclo_init_ae_data(obj_handle, ae, i))
709 pr_err("QAT: uimage uses AE not set\n");
715 static struct icp_qat_uof_strtable *
716 qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
717 char *tab_name, struct icp_qat_uof_strtable *str_table)
719 struct icp_qat_uof_chunkhdr *chunk_hdr;
721 chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
722 obj_hdr->file_buff, tab_name, NULL);
726 memcpy(&str_table->table_len, obj_hdr->file_buff +
727 chunk_hdr->offset, sizeof(str_table->table_len));
728 hdr_size = (char *)&str_table->strings - (char *)str_table;
729 str_table->strings = (uintptr_t)obj_hdr->file_buff +
730 chunk_hdr->offset + hdr_size;
737 qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
738 struct icp_qat_uclo_init_mem_table *init_mem_tab)
740 struct icp_qat_uof_chunkhdr *chunk_hdr;
742 chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
743 ICP_QAT_UOF_IMEM, NULL);
745 memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
746 chunk_hdr->offset, sizeof(unsigned int));
747 init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
748 (encap_uof_obj->beg_uof + chunk_hdr->offset +
749 sizeof(unsigned int));
754 qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
756 switch (handle->pci_dev->device) {
757 case ADF_DH895XCC_PCI_DEVICE_ID:
758 return ICP_QAT_AC_895XCC_DEV_TYPE;
759 case ADF_C62X_PCI_DEVICE_ID:
760 return ICP_QAT_AC_C62X_DEV_TYPE;
761 case ADF_C3XXX_PCI_DEVICE_ID:
762 return ICP_QAT_AC_C3XXX_DEV_TYPE;
764 pr_err("QAT: unsupported device 0x%x\n",
765 handle->pci_dev->device);
770 static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
772 unsigned int maj_ver, prod_type = obj_handle->prod_type;
774 if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
775 pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
776 obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
780 maj_ver = obj_handle->prod_rev & 0xff;
781 if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
782 (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
783 pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
/*
 * Initialize one AE register of the given type to @value, dispatching
 * to the matching HAL helper: GPRs, read transfer registers, write
 * transfer registers, or next-neighbour registers.
 *
 * NOTE(review): extraction artifact — the switch statement and most of
 * its case labels were dropped from this chunk; only the dispatch calls
 * and the unsupported-type error remain visible. Consult the upstream
 * driver for the full case list before editing.
 */
789 static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
790 unsigned char ae, unsigned char ctx_mask,
791 enum icp_qat_uof_regtype reg_type,
792 unsigned short reg_addr, unsigned int value)
/* general-purpose registers */
801 return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
/* read transfer registers */
813 return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
/* write transfer registers */
821 return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
/* next-neighbour registers */
824 return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
826 pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
/*
 * Apply the init-regsym table of @encap_ae's image: each entry either
 * initializes a register across the image's assigned contexts
 * (INIT_REG), a register in one specific context (INIT_REG_CTX), or
 * names an expression feature this loader rejects.
 *
 * NOTE(review): extraction artifact — the ctx_mask derivation from the
 * image's context mode, several returns/breaks, and the closing of the
 * switch/loop were dropped; the visible text is not the complete
 * function. Compare with the upstream driver before editing.
 */
832 static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
834 struct icp_qat_uclo_encapme *encap_ae)
837 unsigned char ctx_mask;
838 struct icp_qat_uof_init_regsym *init_regsym;
/* ctx_mask depends on whether the image runs 8-ctx or 4-ctx mode */
840 if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
841 ICP_QAT_UCLO_MAX_CTX)
846 for (i = 0; i < encap_ae->init_regsym_num; i++) {
847 unsigned int exp_res;
849 init_regsym = &encap_ae->init_regsym[i];
850 exp_res = init_regsym->value;
851 switch (init_regsym->init_type) {
852 case ICP_QAT_UOF_INIT_REG:
853 qat_uclo_init_reg(handle, ae, ctx_mask,
854 (enum icp_qat_uof_regtype)
855 init_regsym->reg_type,
856 (unsigned short)init_regsym->reg_addr,
859 case ICP_QAT_UOF_INIT_REG_CTX:
860 /* check if ctx is appropriate for the ctxMode */
861 if (!((1 << init_regsym->ctx) & ctx_mask)) {
862 pr_err("QAT: invalid ctx num = 0x%x\n",
866 qat_uclo_init_reg(handle, ae,
868 (1 << init_regsym->ctx),
869 (enum icp_qat_uof_regtype)
870 init_regsym->reg_type,
871 (unsigned short)init_regsym->reg_addr,
874 case ICP_QAT_UOF_INIT_EXPR:
875 pr_err("QAT: INIT_EXPR feature not supported\n");
877 case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
878 pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
887 static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
889 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
892 if (obj_handle->global_inited)
894 if (obj_handle->init_mem_tab.entry_num) {
895 if (qat_uclo_init_memory(handle)) {
896 pr_err("QAT: initialize memory failed\n");
900 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
901 for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
902 if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
904 if (qat_uclo_init_reg_sym(handle, ae,
905 obj_handle->ae_data[ae].
906 ae_slices[s].encap_image))
910 obj_handle->global_inited = 1;
914 static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
916 unsigned char ae, nn_mode, s;
917 struct icp_qat_uof_image *uof_image;
918 struct icp_qat_uclo_aedata *ae_data;
919 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
921 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
923 (unsigned long *)&handle->hal_handle->ae_mask))
925 ae_data = &obj_handle->ae_data[ae];
926 for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
927 ICP_QAT_UCLO_MAX_CTX); s++) {
928 if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
930 uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
931 if (qat_hal_set_ae_ctx_mode(handle, ae,
932 (char)ICP_QAT_CTX_MODE
933 (uof_image->ae_mode))) {
934 pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
937 nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
938 if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
939 pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
942 if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
943 (char)ICP_QAT_LOC_MEM0_MODE
944 (uof_image->ae_mode))) {
945 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
948 if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
949 (char)ICP_QAT_LOC_MEM1_MODE
950 (uof_image->ae_mode))) {
951 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
959 static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
961 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
962 struct icp_qat_uclo_encapme *image;
965 for (a = 0; a < obj_handle->uimage_num; a++) {
966 image = &obj_handle->ae_uimage[a];
967 image->uwords_num = image->page->beg_addr_p +
968 image->page->micro_words_num;
/*
 * Top-level UOF parse: set up the encapsulated-object view, check
 * device compatibility, allocate the microword copy buffer, map the
 * string table, map all images, bind them to AEs, compute uword
 * counts, map the init-mem table and program the AE modes.
 *
 * NOTE(review): extraction artifact — the success return, several
 * error returns and at least one cleanup label were dropped, so the
 * visible flow appears to fall into the error path unconditionally.
 * Compare with the upstream driver before editing.
 */
972 static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
974 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
977 obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
978 obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
979 obj_handle->obj_hdr->file_buff;
/* microwords are 48 bits (6 bytes) wide on this hardware */
980 obj_handle->uword_in_bytes = 6;
981 obj_handle->prod_type = qat_uclo_get_dev_type(handle);
982 obj_handle->prod_rev = PID_MAJOR_REV |
983 (PID_MINOR_REV & handle->hal_handle->revision_id);
984 if (qat_uclo_check_uof_compat(obj_handle)) {
985 pr_err("QAT: UOF incompatible\n");
988 obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
990 if (!obj_handle->uword_buf)
992 obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
993 if (!obj_handle->obj_hdr->file_buff ||
994 !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
995 &obj_handle->str_table)) {
996 pr_err("QAT: UOF doesn't have effective images\n");
999 obj_handle->uimage_num =
1000 qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
1001 ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
1002 if (!obj_handle->uimage_num)
1004 if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
1005 pr_err("QAT: Bad object\n");
1006 goto out_check_uof_aemask_err;
1008 qat_uclo_init_uword_num(handle);
1009 qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
1010 &obj_handle->init_mem_tab);
1011 if (qat_uclo_set_ae_mode(handle))
1012 goto out_check_uof_aemask_err;
/* error unwind: free per-image pages, then the copy buffer */
1014 out_check_uof_aemask_err:
1015 for (ae = 0; ae < obj_handle->uimage_num; ae++)
1016 kfree(obj_handle->ae_uimage[ae].page);
1018 kfree(obj_handle->uword_buf);
1022 static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
1023 struct icp_qat_suof_filehdr *suof_ptr,
1026 unsigned int check_sum = 0;
1027 unsigned int min_ver_offset = 0;
1028 struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1030 suof_handle->file_id = ICP_QAT_SUOF_FID;
1031 suof_handle->suof_buf = (char *)suof_ptr;
1032 suof_handle->suof_size = suof_size;
1033 min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
1035 check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
1037 if (check_sum != suof_ptr->check_sum) {
1038 pr_err("QAT: incorrect SUOF checksum\n");
1041 suof_handle->check_sum = suof_ptr->check_sum;
1042 suof_handle->min_ver = suof_ptr->min_ver;
1043 suof_handle->maj_ver = suof_ptr->maj_ver;
1044 suof_handle->fw_type = suof_ptr->fw_type;
1048 static void qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle,
1049 struct icp_qat_suof_img_hdr *suof_img_hdr,
1050 struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
1052 struct icp_qat_simg_ae_mode *ae_mode;
1053 struct icp_qat_suof_objhdr *suof_objhdr;
1055 suof_img_hdr->simg_buf = (suof_handle->suof_buf +
1056 suof_chunk_hdr->offset +
1057 sizeof(*suof_objhdr));
1058 suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
1059 (suof_handle->suof_buf +
1060 suof_chunk_hdr->offset))->img_length;
1062 suof_img_hdr->css_header = suof_img_hdr->simg_buf;
1063 suof_img_hdr->css_key = (suof_img_hdr->css_header +
1064 sizeof(struct icp_qat_css_hdr));
1065 suof_img_hdr->css_signature = suof_img_hdr->css_key +
1066 ICP_QAT_CSS_FWSK_MODULUS_LEN +
1067 ICP_QAT_CSS_FWSK_EXPONENT_LEN;
1068 suof_img_hdr->css_simg = suof_img_hdr->css_signature +
1069 ICP_QAT_CSS_SIGNATURE_LEN;
1071 ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
1072 suof_img_hdr->ae_mask = ae_mode->ae_mask;
1073 suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
1074 suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
1075 suof_img_hdr->fw_type = ae_mode->fw_type;
1079 qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
1080 struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
1082 char **sym_str = (char **)&suof_handle->sym_str;
1083 unsigned int *sym_size = &suof_handle->sym_size;
1084 struct icp_qat_suof_strtable *str_table_obj;
1086 *sym_size = *(unsigned int *)(uintptr_t)
1087 (suof_chunk_hdr->offset + suof_handle->suof_buf);
1088 *sym_str = (char *)(uintptr_t)
1089 (suof_handle->suof_buf + suof_chunk_hdr->offset +
1090 sizeof(str_table_obj->tab_length));
1093 static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
1094 struct icp_qat_suof_img_hdr *img_hdr)
1096 struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
1097 unsigned int prod_rev, maj_ver, prod_type;
1099 prod_type = qat_uclo_get_dev_type(handle);
1100 img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
1101 prod_rev = PID_MAJOR_REV |
1102 (PID_MINOR_REV & handle->hal_handle->revision_id);
1103 if (img_ae_mode->dev_type != prod_type) {
1104 pr_err("QAT: incompatible product type %x\n",
1105 img_ae_mode->dev_type);
1108 maj_ver = prod_rev & 0xff;
1109 if ((maj_ver > img_ae_mode->devmax_ver) ||
1110 (maj_ver < img_ae_mode->devmin_ver)) {
1111 pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
1117 static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
1119 struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
1121 kfree(sobj_handle->img_table.simg_hdr);
1122 sobj_handle->img_table.simg_hdr = NULL;
1123 kfree(handle->sobj_handle);
1124 handle->sobj_handle = NULL;
1127 static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
1128 unsigned int img_id, unsigned int num_simgs)
1130 struct icp_qat_suof_img_hdr img_header;
1132 if (img_id != num_simgs - 1) {
1133 memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
1134 sizeof(*suof_img_hdr));
1135 memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
1136 sizeof(*suof_img_hdr));
1137 memcpy(&suof_img_hdr[img_id], &img_header,
1138 sizeof(*suof_img_hdr));
/*
 * Map a whole SUOF buffer: validate format and file checksum, record
 * the symbol table, allocate and fill one img_hdr per signed image,
 * check each image's device compatibility, and move the image that
 * targets AE0 to the end of the table so it loads last.
 *
 * NOTE(review): extraction artifact — allocation-failure handling,
 * several returns and the function tail were dropped from this chunk;
 * the visible text is not the complete function. Compare with the
 * upstream driver before editing.
 */
1142 static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
1143 struct icp_qat_suof_filehdr *suof_ptr,
1146 struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1147 struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
1148 struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
/* ae0_img stays at MAX_AE until an image claiming AE0 is found */
1149 int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
1151 struct icp_qat_suof_img_hdr img_header;
1153 if (!suof_ptr || (suof_size == 0)) {
1154 pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
1157 if (qat_uclo_check_suof_format(suof_ptr))
1159 ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
/* chunk headers follow the file header; chunk 0 is the symbol table */
1162 suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
1163 ((uintptr_t)suof_ptr + sizeof(*suof_ptr));
1165 qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
1166 suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
1168 if (suof_handle->img_table.num_simgs != 0) {
1169 suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
1174 suof_handle->img_table.simg_hdr = suof_img_hdr;
1177 for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
1178 qat_uclo_map_simg(handle->sobj_handle, &suof_img_hdr[i],
1179 &suof_chunk_hdr[1 + i]);
1180 ret = qat_uclo_check_simg_compat(handle,
/* remember which image owns AE0 so it can be loaded last */
1184 if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
1187 qat_uclo_tail_img(suof_img_hdr, ae0_img,
1188 suof_handle->img_table.num_simgs);
/* Combine two 32-bit CSR halves into a 64-bit DMA bus address. */
1192 #define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + low)
1193 #define BITS_IN_DWORD 32
/*
 * Ask the FCU (Firmware Control Unit) to authenticate the firmware image
 * described by @desc: program the DRAM address of the authentication
 * chunk, issue the AUTH command, then poll FCU_STATUS until verification
 * completes, hard-fails, or the retry budget runs out.
 */
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
	unsigned int fcu_sts, retry = 0;

	/*
	 * The auth chunk header sits immediately before the CSS header in
	 * the DMA buffer, so step back from the CSS header bus address.
	 */
	bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
		   - sizeof(struct icp_qat_auth_chunk);
	SET_CAP_CSR(handle, FCU_DRAM_ADDR_HI, (bus_addr >> BITS_IN_DWORD));
	SET_CAP_CSR(handle, FCU_DRAM_ADDR_LO, bus_addr);
	SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_AUTH);
	/* Poll for completion, sleeping between status reads. */
	msleep(FW_AUTH_WAIT_PERIOD);
	fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
	/* Hard verification failure - no point in retrying. */
	if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
	/* AUTHFWLD bit set - presumably auth+load already done; confirm. */
	if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
	if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
	} while (retry++ < FW_AUTH_MAX_RETRY);
	/* Timed out or failed: report the raw status bits and retry count. */
	pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n",
	       fcu_sts & FCU_AUTH_STS_MASK, retry);
/*
 * Allocate a DMA-coherent buffer for a signed image and record its CPU
 * address, bus address and size in @dram_desc.
 */
static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
			       struct icp_firml_dram_desc *dram_desc,
	vptr = dma_alloc_coherent(&handle->pci_dev->dev,
				  size, &ptr, GFP_KERNEL);
	dram_desc->dram_base_addr_v = vptr;	/* CPU-visible address */
	dram_desc->dram_bus_addr = ptr;		/* device/bus address */
	dram_desc->dram_size = size;
1239 static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
1240 struct icp_firml_dram_desc *dram_desc)
1242 dma_free_coherent(&handle->pci_dev->dev,
1243 (size_t)(dram_desc->dram_size),
1244 (dram_desc->dram_base_addr_v),
1245 dram_desc->dram_bus_addr);
1246 memset(dram_desc, 0, sizeof(*dram_desc));
1249 static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
1250 struct icp_qat_fw_auth_desc **desc)
1252 struct icp_firml_dram_desc dram_desc;
1254 dram_desc.dram_base_addr_v = *desc;
1255 dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
1256 (*desc))->chunk_bus_addr;
1257 dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
1258 (*desc))->chunk_size;
1259 qat_uclo_simg_free(handle, &dram_desc);
/*
 * Build the DMA-resident authentication layout for a signed image.
 * A single coherent buffer is laid out as:
 *   [auth chunk | CSS header | FWSK modulus + pad + exponent |
 *    signature | AE image payload]
 * and the auth descriptor (which aliases the auth chunk at the buffer
 * start) is filled with the bus address of each section.  On success
 * *desc points into the DMA buffer; it is released with
 * qat_uclo_ummap_auth_fw().
 */
static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
				char *image, unsigned int size,
				struct icp_qat_fw_auth_desc **desc)
	struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
	struct icp_qat_fw_auth_desc *auth_desc;
	struct icp_qat_auth_chunk *auth_chunk;
	u64 virt_addr, bus_addr, virt_base;
	unsigned int length, simg_offset = sizeof(*auth_chunk);
	struct icp_firml_dram_desc img_desc;

	/* Bound the input so the copies below cannot overrun the buffer. */
	if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
		pr_err("QAT: error, input image size overflow %d\n", size);
	/* AE firmware has a fixed layout; other types are sized from @size. */
	length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
		 ICP_QAT_CSS_AE_SIMG_LEN + simg_offset :
		 size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset;
	if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
		pr_err("QAT: error, allocate continuous dram fail\n");
	/* The auth chunk lives at the very start of the DMA buffer. */
	auth_chunk = img_desc.dram_base_addr_v;
	auth_chunk->chunk_size = img_desc.dram_size;
	auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
	/* Everything else follows the auth chunk. */
	virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
	bus_addr = img_desc.dram_bus_addr + simg_offset;
	/* auth_desc aliases the auth chunk memory. */
	auth_desc = img_desc.dram_base_addr_v;
	auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->css_hdr_low = (unsigned int)bus_addr;
	virt_addr = virt_base;
	/* Copy the CSS header in first. */
	memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
	bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
	virt_addr = virt_addr + sizeof(*css_hdr);
	/* FWSK public key section: modulus, zero pad, then the exponent. */
	auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr)),
	       ICP_QAT_CSS_FWSK_MODULUS_LEN);
	memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN),
	       0, ICP_QAT_CSS_FWSK_PAD_LEN);
	/* Exponent is a single 32-bit word following the padded modulus. */
	memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN +
		   ICP_QAT_CSS_FWSK_PAD_LEN),
	       (void *)(image + sizeof(*css_hdr) +
			ICP_QAT_CSS_FWSK_MODULUS_LEN),
	       sizeof(unsigned int));
	/* Signature section follows the public key. */
	bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
			    auth_desc->fwsk_pub_low) +
		   ICP_QAT_CSS_FWSK_PUB_LEN;
	virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN;
	auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->signature_low = (unsigned int)bus_addr;
	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr) +
			ICP_QAT_CSS_FWSK_MODULUS_LEN +
			ICP_QAT_CSS_FWSK_EXPONENT_LEN),
	       ICP_QAT_CSS_SIGNATURE_LEN);
	/* Finally the AE image payload itself. */
	bus_addr = ADD_ADDR(auth_desc->signature_high,
			    auth_desc->signature_low) +
		   ICP_QAT_CSS_SIGNATURE_LEN;
	virt_addr += ICP_QAT_CSS_SIGNATURE_LEN;
	auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->img_low = (unsigned int)bus_addr;
	auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET;
	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + ICP_QAT_AE_IMG_OFFSET),
	       auth_desc->img_len);
	virt_addr = virt_base;
	/* For AE firmware the payload starts with the AE mode data block. */
	if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
		auth_desc->img_ae_mode_data_high = auth_desc->img_high;
		auth_desc->img_ae_mode_data_low = auth_desc->img_low;
		bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
				    auth_desc->img_ae_mode_data_low) +
			   sizeof(struct icp_qat_simg_ae_mode);
		/* AE init sequence follows the mode data ... */
		auth_desc->img_ae_init_data_high = (unsigned int)
			(bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
		bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
		/* ... and the AE instructions follow the init sequence. */
		auth_desc->img_ae_insts_high = (unsigned int)
			(bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
		/* Non-AE firmware: instructions are simply the image itself. */
		auth_desc->img_ae_insts_high = auth_desc->img_high;
		auth_desc->img_ae_insts_low = auth_desc->img_low;
/*
 * Load an authenticated firmware image into every AE named in its AE
 * mask: for each such AE, issue an FCU LOAD command and poll FCU_STATUS
 * until the FCU reports the load done for that AE or retries run out.
 */
static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
	unsigned int fcu_sts;
	struct icp_qat_simg_ae_mode *virt_addr;
	unsigned int fcu_loaded_ae_pos = FCU_LOADED_AE_POS;

	/*
	 * The AE mode data sits in the DMA buffer after the auth chunk,
	 * CSS header, FWSK public key and signature sections (same layout
	 * as built by qat_uclo_map_auth_fw()).
	 */
	virt_addr = (void *)((uintptr_t)desc +
			     sizeof(struct icp_qat_auth_chunk) +
			     sizeof(struct icp_qat_css_hdr) +
			     ICP_QAT_CSS_FWSK_PUB_LEN +
			     ICP_QAT_CSS_SIGNATURE_LEN);
	for (i = 0; i < handle->hal_handle->ae_max_num; i++) {
		/* Skip AEs this image does not target. */
		if (!((virt_addr->ae_mask >> i) & 0x1))
		/* Refuse to load over a running AE. */
		if (qat_hal_check_ae_active(handle, i)) {
			pr_err("QAT: AE %d is active\n", i);
		SET_CAP_CSR(handle, FCU_CONTROL,
			    (FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS)));
		/* Poll until LOAD_DONE and the per-AE loaded bit are set. */
		msleep(FW_AUTH_WAIT_PERIOD);
		fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
		if (((fcu_sts & FCU_AUTH_STS_MASK) ==
		     FCU_STS_LOAD_DONE) &&
		    ((fcu_sts >> fcu_loaded_ae_pos) & (1 << i)))
	} while (retry++ < FW_AUTH_MAX_RETRY);
	if (retry > FW_AUTH_MAX_RETRY) {
		pr_err("QAT: firmware load failed timeout %x\n", retry);
/*
 * Allocate the SUOF handle, attach it to the loader handle and map the
 * SUOF object; on mapping failure the partially built state is torn
 * down with qat_uclo_del_suof().
 */
static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
				 void *addr_ptr, int mem_size)
	struct icp_qat_suof_handle *suof_handle;

	suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
	/* Attach before mapping: qat_uclo_map_suof() reads sobj_handle. */
	handle->sobj_handle = suof_handle;
	if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
		qat_uclo_del_suof(handle);
		pr_err("QAT: map SUOF failed\n");
/*
 * Write the MMP image: on devices requiring signed firmware, map + run
 * FCU authentication; otherwise write the image straight into SRAM
 * (rejected on C3XXX, which only accepts signed MMP).
 *
 * NOTE(review): qat_uclo_ummap_auth_fw() is reached even when
 * qat_uclo_map_auth_fw() failed, i.e. with desc still NULL - verify the
 * unmap path tolerates that.
 */
int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
		       void *addr_ptr, int mem_size)
	struct icp_qat_fw_auth_desc *desc = NULL;

	if (handle->fw_auth) {
		/* Signed path: build the auth layout, then authenticate. */
		if (!qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc))
			status = qat_uclo_auth_fw(handle, desc);
		qat_uclo_ummap_auth_fw(handle, &desc);
		/* Unsigned path is not available on C3XXX. */
		if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) {
			pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
		qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
/*
 * Map a plain (unsigned) UOF object: duplicate the caller's buffer,
 * validate the UOF file header, locate the object chunk and parse it
 * into the handle's object state.  Failures unwind via goto labels.
 */
static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
				void *addr_ptr, int mem_size)
	struct icp_qat_uof_filehdr *filehdr;
	struct icp_qat_uclo_objhandle *objhdl;

	objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
	/* Private copy: the caller's buffer may be freed after this call. */
	objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
	if (!objhdl->obj_buf)
		goto out_objbuf_err;
	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
	/* Check the UOF magic/version before mapping chunks. */
	if (qat_uclo_check_uof_format(filehdr))
		goto out_objhdr_err;
	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
	if (!objhdl->obj_hdr) {
		pr_err("QAT: object file chunk is null\n");
		goto out_objhdr_err;
	handle->obj_handle = objhdl;
	if (qat_uclo_parse_uof_obj(handle))
		goto out_overlay_obj_err;
out_overlay_obj_err:
	handle->obj_handle = NULL;
	kfree(objhdl->obj_hdr);
	kfree(objhdl->obj_buf);
/*
 * Public entry point: map a firmware object, dispatching to the SUOF
 * (signed) or UOF (unsigned) mapper based on the device's fw_auth flag.
 */
int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
		     void *addr_ptr, int mem_size)
	/* ae_mask must be wide enough to hold a bit per supported AE. */
	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
		     (sizeof(handle->hal_handle->ae_mask) * 8));

	/* 24 is presumably the minimum valid object header size - confirm. */
	if (!handle || !addr_ptr || mem_size < 24)
	return (handle->fw_auth) ?
	       qat_uclo_map_suof_obj(handle, addr_ptr, mem_size) :
	       qat_uclo_map_uof_obj(handle, addr_ptr, mem_size);
/*
 * Release everything attached to the loader handle: the SUOF state (if
 * any), the uword buffer, the per-image pages, the per-AE data and the
 * object buffer/header copies, then clear the handle's object pointer.
 */
void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

	if (handle->sobj_handle)
		qat_uclo_del_suof(handle);
	kfree(obj_handle->uword_buf);
	/* Free the page array of every mapped image. */
	for (a = 0; a < obj_handle->uimage_num; a++)
		kfree(obj_handle->ae_uimage[a].page);
	/* Free the per-AE bookkeeping. */
	for (a = 0; a < handle->hal_handle->ae_max_num; a++)
		qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
	kfree(obj_handle->obj_hdr);
	kfree(obj_handle->obj_buf);
	handle->obj_handle = NULL;
/*
 * Resolve one microword for ustore address @addr_p: search the page's
 * uwblocks for the one containing relative address @raddr, copy that
 * word out, and mask it; addresses not covered by any block get the
 * image's fill pattern (@fill) instead.
 */
static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
				 struct icp_qat_uclo_encap_page *encap_page,
				 uint64_t *uword, unsigned int addr_p,
				 unsigned int raddr, uint64_t fill)
	/* Linear search for the uwblock whose range covers raddr. */
	for (i = 0; i < encap_page->uwblock_num; i++) {
		if (raddr >= encap_page->uwblock[i].start_addr &&
		    raddr <= encap_page->uwblock[i].start_addr +
		    encap_page->uwblock[i].words_num - 1) {
			/* Convert to a byte offset within the block. */
			raddr -= encap_page->uwblock[i].start_addr;
			raddr *= obj_handle->uword_in_bytes;
			memcpy(&uwrd, (void *)(((uintptr_t)
			       encap_page->uwblock[i].micro_words) + raddr),
			       obj_handle->uword_in_bytes);
			/* Keep 44 bits with bit 42 cleared - presumably a
			 * reserved/parity bit; confirm against the AE spec. */
			uwrd = uwrd & 0xbffffffffffull;
	if (*uword == INVLD_UWORD)
/*
 * Stream one encapsulated page into an AE's ustore in chunks of at most
 * UWORD_CPYBUF_SIZE words: fill the staging buffer word by word (using
 * the image fill pattern for gaps), then write it to the AE.
 */
static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
					struct icp_qat_uclo_encap_page
					*encap_page, unsigned int ae)
	unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

	/* load the page starting at appropriate ustore address */
	/* get fill-pattern from an image -- they are all the same */
	memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
	uw_physical_addr = encap_page->beg_addr_p;
	uw_relative_addr = 0;
	words_num = encap_page->micro_words_num;
	/* Clamp the final (partial) chunk to the words that remain. */
	if (words_num < UWORD_CPYBUF_SIZE)
	cpylen = UWORD_CPYBUF_SIZE;
	/* load the buffer */
	for (i = 0; i < cpylen; i++)
		qat_uclo_fill_uwords(obj_handle, encap_page,
				     &obj_handle->uword_buf[i],
				     uw_physical_addr + i,
				     uw_relative_addr + i, fill_pat);
	/* copy the buffer to ustore */
	qat_hal_wr_uwords(handle, (unsigned char)ae,
			  uw_physical_addr, cpylen,
			  obj_handle->uword_buf);
	/* Advance both address cursors past the chunk just written. */
	uw_physical_addr += cpylen;
	uw_relative_addr += cpylen;
	words_num -= cpylen;
/*
 * Write an image's default page to every AE the image is assigned to,
 * record the current page per context, and point the assigned contexts'
 * PCs at the image entry address.
 */
static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
				    struct icp_qat_uof_image *image)
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ctx_mask, s;
	struct icp_qat_uclo_page *page;

	/* Context mask depends on whether the image uses all contexts. */
	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
	/* load the default page and set assigned CTX PC
	 * to the entrypoint address */
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
		/* find the slice to which this image is assigned */
		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
			if (image->ctx_assigned & obj_handle->ae_data[ae].
			    ae_slices[s].ctx_mask_assigned)
		/* No slice matched - skip this AE. */
		if (s >= obj_handle->ae_data[ae].slice_num)
		page = obj_handle->ae_data[ae].ae_slices[s].page;
		if (!page->encap_page->def_page)
		qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
		page = obj_handle->ae_data[ae].ae_slices[s].page;
		/* Track the current page for each assigned context. */
		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
			obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
				(ctx_mask & (1 << ctx)) ? page : NULL;
		qat_hal_set_live_ctx(handle, (unsigned char)ae,
				     image->ctx_assigned);
		/* Start the assigned contexts at the image entry point. */
		qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
			       image->entry_address);
/*
 * Write all signed images: for each image in the SUOF table, build the
 * DMA auth layout, authenticate it via the FCU, load it into its AEs,
 * then release the DMA buffer.
 */
static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
	struct icp_qat_fw_auth_desc *desc = NULL;
	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
	struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;

	for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
		if (qat_uclo_map_auth_fw(handle,
					 (char *)simg_hdr[i].simg_buf,
					 (simg_hdr[i].simg_len),
		if (qat_uclo_auth_fw(handle, desc))
		if (qat_uclo_load_fw(handle, desc))
		/* Success path: release this image's DMA buffer. */
		qat_uclo_ummap_auth_fw(handle, &desc);
	/* Error path: release whatever descriptor is outstanding. */
	qat_uclo_ummap_auth_fw(handle, &desc);
/*
 * Write all unsigned UOF images: initialize globals once, then for each
 * mapped image initialize its ustore and write its pages to the AEs.
 */
static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

	if (qat_uclo_init_globals(handle))
	for (i = 0; i < obj_handle->uimage_num; i++) {
		/* Skip table slots with no image mapped. */
		if (!obj_handle->ae_uimage[i].img_ptr)
		if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
		qat_uclo_wr_uimage_page(handle,
					obj_handle->ae_uimage[i].img_ptr);
1673 int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
1675 return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) :
1676 qat_uclo_wr_uof_img(handle);