GNU Linux-libre 4.19.286-gnu1
[releases.git] / drivers / crypto / qat / qat_common / qat_uclo.c
1 /*
2   This file is provided under a dual BSD/GPLv2 license.  When using or
3   redistributing this file, you may do so under either license.
4
5   GPL LICENSE SUMMARY
6   Copyright(c) 2014 Intel Corporation.
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of version 2 of the GNU General Public License as
9   published by the Free Software Foundation.
10
11   This program is distributed in the hope that it will be useful, but
12   WITHOUT ANY WARRANTY; without even the implied warranty of
13   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14   General Public License for more details.
15
16   Contact Information:
17   qat-linux@intel.com
18
19   BSD LICENSE
20   Copyright(c) 2014 Intel Corporation.
21   Redistribution and use in source and binary forms, with or without
22   modification, are permitted provided that the following conditions
23   are met:
24
25     * Redistributions of source code must retain the above copyright
26       notice, this list of conditions and the following disclaimer.
27     * Redistributions in binary form must reproduce the above copyright
28       notice, this list of conditions and the following disclaimer in
29       the documentation and/or other materials provided with the
30       distribution.
31     * Neither the name of Intel Corporation nor the names of its
32       contributors may be used to endorse or promote products derived
33       from this software without specific prior written permission.
34
35   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 */
47 #include <linux/slab.h>
48 #include <linux/ctype.h>
49 #include <linux/kernel.h>
50 #include <linux/delay.h>
51 #include "adf_accel_devices.h"
52 #include "adf_common_drv.h"
53 #include "icp_qat_uclo.h"
54 #include "icp_qat_hal.h"
55 #include "icp_qat_fw_loader_handle.h"
56
57 #define UWORD_CPYBUF_SIZE 1024
58 #define INVLD_UWORD 0xffffffffffull
59 #define PID_MINOR_REV 0xf
60 #define PID_MAJOR_REV (0xf << 4)
61
62 static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
63                                  unsigned int ae, unsigned int image_num)
64 {
65         struct icp_qat_uclo_aedata *ae_data;
66         struct icp_qat_uclo_encapme *encap_image;
67         struct icp_qat_uclo_page *page = NULL;
68         struct icp_qat_uclo_aeslice *ae_slice = NULL;
69
70         ae_data = &obj_handle->ae_data[ae];
71         encap_image = &obj_handle->ae_uimage[image_num];
72         ae_slice = &ae_data->ae_slices[ae_data->slice_num];
73         ae_slice->encap_image = encap_image;
74
75         if (encap_image->img_ptr) {
76                 ae_slice->ctx_mask_assigned =
77                                         encap_image->img_ptr->ctx_assigned;
78                 ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
79         } else {
80                 ae_slice->ctx_mask_assigned = 0;
81         }
82         ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
83         if (!ae_slice->region)
84                 return -ENOMEM;
85         ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
86         if (!ae_slice->page)
87                 goto out_err;
88         page = ae_slice->page;
89         page->encap_page = encap_image->page;
90         ae_slice->page->region = ae_slice->region;
91         ae_data->slice_num++;
92         return 0;
93 out_err:
94         kfree(ae_slice->region);
95         ae_slice->region = NULL;
96         return -ENOMEM;
97 }
98
99 static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
100 {
101         unsigned int i;
102
103         if (!ae_data) {
104                 pr_err("QAT: bad argument, ae_data is NULL\n ");
105                 return -EINVAL;
106         }
107
108         for (i = 0; i < ae_data->slice_num; i++) {
109                 kfree(ae_data->ae_slices[i].region);
110                 ae_data->ae_slices[i].region = NULL;
111                 kfree(ae_data->ae_slices[i].page);
112                 ae_data->ae_slices[i].page = NULL;
113         }
114         return 0;
115 }
116
117 static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
118                                  unsigned int str_offset)
119 {
120         if ((!str_table->table_len) || (str_offset > str_table->table_len))
121                 return NULL;
122         return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
123 }
124
125 static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
126 {
127         int maj = hdr->maj_ver & 0xff;
128         int min = hdr->min_ver & 0xff;
129
130         if (hdr->file_id != ICP_QAT_UOF_FID) {
131                 pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
132                 return -EINVAL;
133         }
134         if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
135                 pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
136                        maj, min);
137                 return -EINVAL;
138         }
139         return 0;
140 }
141
142 static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
143 {
144         int maj = suof_hdr->maj_ver & 0xff;
145         int min = suof_hdr->min_ver & 0xff;
146
147         if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
148                 pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
149                 return -EINVAL;
150         }
151         if (suof_hdr->fw_type != 0) {
152                 pr_err("QAT: unsupported firmware type\n");
153                 return -EINVAL;
154         }
155         if (suof_hdr->num_chunks <= 0x1) {
156                 pr_err("QAT: SUOF chunk amount is incorrect\n");
157                 return -EINVAL;
158         }
159         if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
160                 pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
161                        maj, min);
162                 return -EINVAL;
163         }
164         return 0;
165 }
166
/*
 * Copy @num_in_bytes from @val into device SRAM starting at @addr,
 * one 32-bit word per SRAM_WRITE.
 * NOTE(review): assumes @num_in_bytes is a multiple of 4 — an
 * unaligned length would wrap the countdown; confirm with callers.
 */
static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
                                      unsigned int addr, unsigned int *val,
                                      unsigned int num_in_bytes)
{
	unsigned char *src = (unsigned char *)val;
	unsigned int word;

	for (; num_in_bytes; num_in_bytes -= 4, src += 4, addr += 4) {
		/* memcpy avoids unaligned-access issues on the source */
		memcpy(&word, src, 4);
		SRAM_WRITE(handle, addr, word);
	}
}
182
/*
 * Copy @num_in_bytes from @val into the umem of AE @ae, one 32-bit
 * word per qat_hal_wr_umem() call.  @addr is a byte address and is
 * converted to a uword address first.
 * NOTE(review): assumes @num_in_bytes is a multiple of 4.
 */
static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
                                      unsigned char ae, unsigned int addr,
                                      unsigned int *val,
                                      unsigned int num_in_bytes)
{
	unsigned char *src = (unsigned char *)val;
	unsigned int word;

	addr >>= 0x2; /* byte address -> uword address */

	for (; num_in_bytes; num_in_bytes -= 4, src += 4) {
		memcpy(&word, src, 4);
		qat_hal_wr_umem(handle, ae, addr++, 1, &word);
	}
}
200
201 static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
202                                    unsigned char ae,
203                                    struct icp_qat_uof_batch_init
204                                    *umem_init_header)
205 {
206         struct icp_qat_uof_batch_init *umem_init;
207
208         if (!umem_init_header)
209                 return;
210         umem_init = umem_init_header->next;
211         while (umem_init) {
212                 unsigned int addr, *value, size;
213
214                 ae = umem_init->ae;
215                 addr = umem_init->addr;
216                 value = umem_init->value;
217                 size = umem_init->size;
218                 qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
219                 umem_init = umem_init->next;
220         }
221 }
222
223 static void
224 qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
225                                  struct icp_qat_uof_batch_init **base)
226 {
227         struct icp_qat_uof_batch_init *umem_init;
228
229         umem_init = *base;
230         while (umem_init) {
231                 struct icp_qat_uof_batch_init *pre;
232
233                 pre = umem_init;
234                 umem_init = umem_init->next;
235                 kfree(pre);
236         }
237         *base = NULL;
238 }
239
240 static int qat_uclo_parse_num(char *str, unsigned int *num)
241 {
242         char buf[16] = {0};
243         unsigned long ae = 0;
244         int i;
245
246         strncpy(buf, str, 15);
247         for (i = 0; i < 16; i++) {
248                 if (!isdigit(buf[i])) {
249                         buf[i] = '\0';
250                         break;
251                 }
252         }
253         if ((kstrtoul(buf, 10, &ae)))
254                 return -EFAULT;
255
256         *num = (unsigned int)ae;
257         return 0;
258 }
259
260 static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
261                                      struct icp_qat_uof_initmem *init_mem,
262                                      unsigned int size_range, unsigned int *ae)
263 {
264         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
265         char *str;
266
267         if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
268                 pr_err("QAT: initmem is out of range");
269                 return -EINVAL;
270         }
271         if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
272                 pr_err("QAT: Memory scope for init_mem error\n");
273                 return -EINVAL;
274         }
275         str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
276         if (!str) {
277                 pr_err("QAT: AE name assigned in UOF init table is NULL\n");
278                 return -EINVAL;
279         }
280         if (qat_uclo_parse_num(str, ae)) {
281                 pr_err("QAT: Parse num for AE number failed\n");
282                 return -EINVAL;
283         }
284         if (*ae >= ICP_QAT_UCLO_MAX_AE) {
285                 pr_err("QAT: ae %d out of range\n", *ae);
286                 return -EINVAL;
287         }
288         return 0;
289 }
290
/*
 * Append one batch-init node per value attribute of @init_mem to the
 * list rooted at *@init_tab_base, allocating the dataless head node
 * first if the caller has not created one yet.  The nodes point into
 * the UOF buffer (mem_init->value aliases the memvar attribute), so
 * the UOF must outlive the list.
 *
 * Return: 0 on success; -ENOMEM on allocation failure, in which case
 * only the nodes added by this call (and the head, if this call
 * allocated it) are freed.
 */
static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
                                           *handle, struct icp_qat_uof_initmem
                                           *init_mem, unsigned int ae,
                                           struct icp_qat_uof_batch_init
                                           **init_tab_base)
{
	struct icp_qat_uof_batch_init *init_header, *tail;
	struct icp_qat_uof_batch_init *mem_init, *tail_old;
	struct icp_qat_uof_memvar_attr *mem_val_attr;
	unsigned int i, flag = 0;

	/* the value attributes immediately follow the initmem record */
	mem_val_attr =
		(struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
		sizeof(struct icp_qat_uof_initmem));

	init_header = *init_tab_base;
	if (!init_header) {
		init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
		if (!init_header)
			return -ENOMEM;
		init_header->size = 1;
		*init_tab_base = init_header;
		flag = 1;	/* this call owns the head on error */
	}
	/* remember the old tail: error cleanup frees only nodes after it */
	tail_old = init_header;
	while (tail_old->next)
		tail_old = tail_old->next;
	tail = tail_old;
	for (i = 0; i < init_mem->val_attr_num; i++) {
		mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
		if (!mem_init)
			goto out_err;
		mem_init->ae = ae;
		mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
		mem_init->value = &mem_val_attr->value;
		mem_init->size = 4;
		mem_init->next = NULL;
		tail->next = mem_init;
		tail = mem_init;
		/* head->size accumulates the total instruction count */
		init_header->size += qat_hal_get_ins_num();
		mem_val_attr++;
	}
	return 0;
out_err:
	/* Do not free the list head unless we allocated it. */
	tail_old = tail_old->next;
	if (flag) {
		kfree(*init_tab_base);
		*init_tab_base = NULL;
	}

	while (tail_old) {
		mem_init = tail_old->next;
		kfree(tail_old);
		tail_old = mem_init;
	}
	return -ENOMEM;
}
349
350 static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
351                                   struct icp_qat_uof_initmem *init_mem)
352 {
353         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
354         unsigned int ae;
355
356         if (qat_uclo_fetch_initmem_ae(handle, init_mem,
357                                       ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
358                 return -EINVAL;
359         if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
360                                             &obj_handle->lm_init_tab[ae]))
361                 return -EINVAL;
362         return 0;
363 }
364
365 static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
366                                   struct icp_qat_uof_initmem *init_mem)
367 {
368         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
369         unsigned int ae, ustore_size, uaddr, i;
370
371         ustore_size = obj_handle->ustore_phy_size;
372         if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
373                 return -EINVAL;
374         if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
375                                             &obj_handle->umem_init_tab[ae]))
376                 return -EINVAL;
377         /* set the highest ustore address referenced */
378         uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
379         for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
380                 if (obj_handle->ae_data[ae].ae_slices[i].
381                     encap_image->uwords_num < uaddr)
382                         obj_handle->ae_data[ae].ae_slices[i].
383                         encap_image->uwords_num = uaddr;
384         }
385         return 0;
386 }
387
388 static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
389                                    struct icp_qat_uof_initmem *init_mem)
390 {
391         switch (init_mem->region) {
392         case ICP_QAT_UOF_LMEM_REGION:
393                 if (qat_uclo_init_lmem_seg(handle, init_mem))
394                         return -EINVAL;
395                 break;
396         case ICP_QAT_UOF_UMEM_REGION:
397                 if (qat_uclo_init_umem_seg(handle, init_mem))
398                         return -EINVAL;
399                 break;
400         default:
401                 pr_err("QAT: initmem region error. region type=0x%x\n",
402                        init_mem->region);
403                 return -EINVAL;
404         }
405         return 0;
406 }
407
/*
 * Pre-fill the unused parts of each assigned AE's micro-store with the
 * image's 64-bit fill pattern: the uwords below the page start, and
 * the uwords from the end of the page through the AE's effective
 * ustore size.
 *
 * Return: 0 on success, -ENOMEM if the pattern buffer cannot be
 * allocated.
 */
static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
                                struct icp_qat_uclo_encapme *image)
{
	unsigned int i;
	struct icp_qat_uclo_encap_page *page;
	struct icp_qat_uof_image *uof_image;
	unsigned char ae;
	unsigned int ustore_size;
	unsigned int patt_pos;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	uint64_t *fill_data;

	uof_image = image->img_ptr;
	/* one full ustore worth of the repeated fill pattern */
	fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
			    GFP_KERNEL);
	if (!fill_data)
		return -ENOMEM;
	for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
		memcpy(&fill_data[i], &uof_image->fill_pattern,
		       sizeof(uint64_t));
	page = image->page;

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		/* only AEs this image is assigned to */
		if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
			continue;
		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
		/* first uword past the image page */
		patt_pos = page->beg_addr_p + page->micro_words_num;

		/* fill [0, beg_addr_p) below the page ... */
		qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
				  page->beg_addr_p, &fill_data[0]);
		/* ... and [patt_pos, ustore_size] above it */
		qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
				  ustore_size - patt_pos + 1,
				  &fill_data[page->beg_addr_p]);
	}
	kfree(fill_data);
	return 0;
}
445
/*
 * Perform all UOF-described memory initialisation: walk the initmem
 * table building per-AE batch-init lists, then write the local-memory
 * and umem lists to every AE and free them.
 *
 * Return: 0 on success, -EINVAL on any failure.
 */
static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
{
	int i, ae;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;

	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
		if (initmem->num_in_bytes) {
			if (qat_uclo_init_ae_memory(handle, initmem))
				return -EINVAL;
		}
		/*
		 * Records are variable length: skip past the record and
		 * its trailing val_attr_num value attributes.
		 */
		initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
			(uintptr_t)initmem +
			sizeof(struct icp_qat_uof_initmem)) +
			(sizeof(struct icp_qat_uof_memvar_attr) *
			initmem->val_attr_num));
	}
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		/* flush and release the local-memory init list */
		if (qat_hal_batch_wr_lm(handle, ae,
					obj_handle->lm_init_tab[ae])) {
			pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
			return -EINVAL;
		}
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->lm_init_tab[ae]);
		/* flush and release the umem init list */
		qat_uclo_batch_wr_umem(handle, ae,
				       obj_handle->umem_init_tab[ae]);
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->
						 umem_init_tab[ae]);
	}
	return 0;
}
479
480 static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
481                                  char *chunk_id, void *cur)
482 {
483         int i;
484         struct icp_qat_uof_chunkhdr *chunk_hdr =
485             (struct icp_qat_uof_chunkhdr *)
486             ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
487
488         for (i = 0; i < obj_hdr->num_chunks; i++) {
489                 if ((cur < (void *)&chunk_hdr[i]) &&
490                     !strncmp(chunk_hdr[i].chunk_id, chunk_id,
491                              ICP_QAT_UOF_OBJID_LEN)) {
492                         return &chunk_hdr[i];
493                 }
494         }
495         return NULL;
496 }
497
/*
 * Fold one byte @ch into the running CRC @reg using the CCITT
 * polynomial 0x1021, returning the updated 16-bit CRC.
 */
static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
{
	unsigned int msb = 1 << 0xF;
	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
	int bit;

	reg ^= inbyte << 0x8;
	for (bit = 0; bit < 0x8; bit++)
		reg = (reg & msb) ? (reg << 1) ^ 0x1021 : reg << 1;
	return reg & 0xFFFF;
}
513
/*
 * CRC over the first @num bytes at @ptr; a NULL @ptr yields 0.
 */
static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
	unsigned int crc = 0;
	int i;

	if (!ptr)
		return 0;
	for (i = 0; i < num; i++)
		crc = qat_uclo_calc_checksum(crc, ptr[i]);
	return crc;
}
523
524 static struct icp_qat_uclo_objhdr *
525 qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
526                    char *chunk_id)
527 {
528         struct icp_qat_uof_filechunkhdr *file_chunk;
529         struct icp_qat_uclo_objhdr *obj_hdr;
530         char *chunk;
531         int i;
532
533         file_chunk = (struct icp_qat_uof_filechunkhdr *)
534                 (buf + sizeof(struct icp_qat_uof_filehdr));
535         for (i = 0; i < file_hdr->num_chunks; i++) {
536                 if (!strncmp(file_chunk->chunk_id, chunk_id,
537                              ICP_QAT_UOF_OBJID_LEN)) {
538                         chunk = buf + file_chunk->offset;
539                         if (file_chunk->checksum != qat_uclo_calc_str_checksum(
540                                 chunk, file_chunk->size))
541                                 break;
542                         obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
543                         if (!obj_hdr)
544                                 break;
545                         obj_hdr->file_buff = chunk;
546                         obj_hdr->checksum = file_chunk->checksum;
547                         obj_hdr->size = file_chunk->size;
548                         return obj_hdr;
549                 }
550                 file_chunk++;
551         }
552         return NULL;
553 }
554
555 static unsigned int
556 qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
557                             struct icp_qat_uof_image *image)
558 {
559         struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
560         struct icp_qat_uof_objtable *neigh_reg_tab;
561         struct icp_qat_uof_code_page *code_page;
562
563         code_page = (struct icp_qat_uof_code_page *)
564                         ((char *)image + sizeof(struct icp_qat_uof_image));
565         uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
566                      code_page->uc_var_tab_offset);
567         imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
568                       code_page->imp_var_tab_offset);
569         imp_expr_tab = (struct icp_qat_uof_objtable *)
570                        (encap_uof_obj->beg_uof +
571                        code_page->imp_expr_tab_offset);
572         if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
573             imp_expr_tab->entry_num) {
574                 pr_err("QAT: UOF can't contain imported variable to be parsed\n");
575                 return -EINVAL;
576         }
577         neigh_reg_tab = (struct icp_qat_uof_objtable *)
578                         (encap_uof_obj->beg_uof +
579                         code_page->neigh_reg_tab_offset);
580         if (neigh_reg_tab->entry_num) {
581                 pr_err("QAT: UOF can't contain shared control store feature\n");
582                 return -EINVAL;
583         }
584         if (image->numpages > 1) {
585                 pr_err("QAT: UOF can't contain multiple pages\n");
586                 return -EINVAL;
587         }
588         if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
589                 pr_err("QAT: UOF can't use shared control store feature\n");
590                 return -EFAULT;
591         }
592         if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
593                 pr_err("QAT: UOF can't use reloadable feature\n");
594                 return -EFAULT;
595         }
596         return 0;
597 }
598
599 static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
600                                      *encap_uof_obj,
601                                      struct icp_qat_uof_image *img,
602                                      struct icp_qat_uclo_encap_page *page)
603 {
604         struct icp_qat_uof_code_page *code_page;
605         struct icp_qat_uof_code_area *code_area;
606         struct icp_qat_uof_objtable *uword_block_tab;
607         struct icp_qat_uof_uword_block *uwblock;
608         int i;
609
610         code_page = (struct icp_qat_uof_code_page *)
611                         ((char *)img + sizeof(struct icp_qat_uof_image));
612         page->def_page = code_page->def_page;
613         page->page_region = code_page->page_region;
614         page->beg_addr_v = code_page->beg_addr_v;
615         page->beg_addr_p = code_page->beg_addr_p;
616         code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
617                                                 code_page->code_area_offset);
618         page->micro_words_num = code_area->micro_words_num;
619         uword_block_tab = (struct icp_qat_uof_objtable *)
620                           (encap_uof_obj->beg_uof +
621                           code_area->uword_block_tab);
622         page->uwblock_num = uword_block_tab->entry_num;
623         uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
624                         sizeof(struct icp_qat_uof_objtable));
625         page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
626         for (i = 0; i < uword_block_tab->entry_num; i++)
627                 page->uwblock[i].micro_words =
628                 (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
629 }
630
/*
 * Parse up to @max_image IMAG chunks out of the UOF object, filling
 * @ae_uimage[] with pointers to each image's register table, init
 * register-symbol table, sbreak table, and a newly allocated encap
 * page.  All table pointers alias the UOF buffer.
 *
 * Return: the number of images mapped; 0 on failure, after freeing any
 * pages allocated so far.
 */
static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
                               struct icp_qat_uclo_encapme *ae_uimage,
                               int max_image)
{
	int i, j;
	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
	struct icp_qat_uof_image *image;
	struct icp_qat_uof_objtable *ae_regtab;
	struct icp_qat_uof_objtable *init_reg_sym_tab;
	struct icp_qat_uof_objtable *sbreak_tab;
	struct icp_qat_uof_encap_obj *encap_uof_obj =
					&obj_handle->encap_uof_obj;

	for (j = 0; j < max_image; j++) {
		/* each call resumes the search after the previous chunk */
		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
						ICP_QAT_UOF_IMAG, chunk_hdr);
		if (!chunk_hdr)
			break;
		image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
						     chunk_hdr->offset);
		ae_regtab = (struct icp_qat_uof_objtable *)
			   (image->reg_tab_offset +
			   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
		/* entries follow the objtable header in memory */
		ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
			(((char *)ae_regtab) +
			sizeof(struct icp_qat_uof_objtable));
		init_reg_sym_tab = (struct icp_qat_uof_objtable *)
				   (image->init_reg_sym_tab +
				   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
		ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
			(((char *)init_reg_sym_tab) +
			sizeof(struct icp_qat_uof_objtable));
		sbreak_tab = (struct icp_qat_uof_objtable *)
			(image->sbreak_tab + obj_handle->obj_hdr->file_buff);
		ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
		ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
				      (((char *)sbreak_tab) +
				      sizeof(struct icp_qat_uof_objtable));
		ae_uimage[j].img_ptr = image;
		/* reject images using unsupported features */
		if (qat_uclo_check_image_compat(encap_uof_obj, image))
			goto out_err;
		ae_uimage[j].page =
			kzalloc(sizeof(struct icp_qat_uclo_encap_page),
				GFP_KERNEL);
		if (!ae_uimage[j].page)
			goto out_err;
		qat_uclo_map_image_page(encap_uof_obj, image,
					ae_uimage[j].page);
	}
	return j;
out_err:
	/* free only the pages of images mapped before the failure */
	for (i = 0; i < j; i++)
		kfree(ae_uimage[i].page);
	return 0;
}
688
689 static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
690 {
691         int i, ae;
692         int mflag = 0;
693         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
694
695         for (ae = 0; ae < max_ae; ae++) {
696                 if (!test_bit(ae,
697                               (unsigned long *)&handle->hal_handle->ae_mask))
698                         continue;
699                 for (i = 0; i < obj_handle->uimage_num; i++) {
700                         if (!test_bit(ae, (unsigned long *)
701                         &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
702                                 continue;
703                         mflag = 1;
704                         if (qat_uclo_init_ae_data(obj_handle, ae, i))
705                                 return -EINVAL;
706                 }
707         }
708         if (!mflag) {
709                 pr_err("QAT: uimage uses AE not set\n");
710                 return -EINVAL;
711         }
712         return 0;
713 }
714
715 static struct icp_qat_uof_strtable *
716 qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
717                        char *tab_name, struct icp_qat_uof_strtable *str_table)
718 {
719         struct icp_qat_uof_chunkhdr *chunk_hdr;
720
721         chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
722                                         obj_hdr->file_buff, tab_name, NULL);
723         if (chunk_hdr) {
724                 int hdr_size;
725
726                 memcpy(&str_table->table_len, obj_hdr->file_buff +
727                        chunk_hdr->offset, sizeof(str_table->table_len));
728                 hdr_size = (char *)&str_table->strings - (char *)str_table;
729                 str_table->strings = (uintptr_t)obj_hdr->file_buff +
730                                         chunk_hdr->offset + hdr_size;
731                 return str_table;
732         }
733         return NULL;
734 }
735
/*
 * Map the init-memory (IMEM) chunk of the UOF object, if present, into
 * @init_mem_tab. Leaves the table untouched when no IMEM chunk exists.
 */
static void
qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
			   struct icp_qat_uclo_init_mem_table *init_mem_tab)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
					ICP_QAT_UOF_IMEM, NULL);
	if (chunk_hdr) {
		/* the chunk begins with the entry count... */
		memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
			chunk_hdr->offset, sizeof(unsigned int));
		/* ...followed immediately by the entries themselves */
		init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
		(encap_uof_obj->beg_uof + chunk_hdr->offset +
		sizeof(unsigned int));
	}
}
752
753 static unsigned int
754 qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
755 {
756         switch (handle->pci_dev->device) {
757         case ADF_DH895XCC_PCI_DEVICE_ID:
758                 return ICP_QAT_AC_895XCC_DEV_TYPE;
759         case ADF_C62X_PCI_DEVICE_ID:
760                 return ICP_QAT_AC_C62X_DEV_TYPE;
761         case ADF_C3XXX_PCI_DEVICE_ID:
762                 return ICP_QAT_AC_C3XXX_DEV_TYPE;
763         default:
764                 pr_err("QAT: unsupported device 0x%x\n",
765                        handle->pci_dev->device);
766                 return 0;
767         }
768 }
769
770 static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
771 {
772         unsigned int maj_ver, prod_type = obj_handle->prod_type;
773
774         if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
775                 pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
776                        obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
777                        prod_type);
778                 return -EINVAL;
779         }
780         maj_ver = obj_handle->prod_rev & 0xff;
781         if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
782             (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
783                 pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
784                 return -EINVAL;
785         }
786         return 0;
787 }
788
789 static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
790                              unsigned char ae, unsigned char ctx_mask,
791                              enum icp_qat_uof_regtype reg_type,
792                              unsigned short reg_addr, unsigned int value)
793 {
794         switch (reg_type) {
795         case ICP_GPA_ABS:
796         case ICP_GPB_ABS:
797                 ctx_mask = 0;
798                 /* fall through */
799         case ICP_GPA_REL:
800         case ICP_GPB_REL:
801                 return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
802                                         reg_addr, value);
803         case ICP_SR_ABS:
804         case ICP_DR_ABS:
805         case ICP_SR_RD_ABS:
806         case ICP_DR_RD_ABS:
807                 ctx_mask = 0;
808                 /* fall through */
809         case ICP_SR_REL:
810         case ICP_DR_REL:
811         case ICP_SR_RD_REL:
812         case ICP_DR_RD_REL:
813                 return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
814                                             reg_addr, value);
815         case ICP_SR_WR_ABS:
816         case ICP_DR_WR_ABS:
817                 ctx_mask = 0;
818                 /* fall through */
819         case ICP_SR_WR_REL:
820         case ICP_DR_WR_REL:
821                 return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
822                                             reg_addr, value);
823         case ICP_NEIGH_REL:
824                 return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
825         default:
826                 pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
827                 return -EFAULT;
828         }
829         return 0;
830 }
831
/*
 * Apply every register-initialization record (regsym) attached to an
 * encapsulated image to the given AE. Returns 0 on success, -EINVAL
 * for an invalid context number or an unsupported INIT_EXPR record.
 */
static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
				 unsigned int ae,
				 struct icp_qat_uclo_encapme *encap_ae)
{
	unsigned int i;
	unsigned char ctx_mask;
	struct icp_qat_uof_init_regsym *init_regsym;

	/* 8-context mode uses all contexts, otherwise every other one */
	if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
	    ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;

	for (i = 0; i < encap_ae->init_regsym_num; i++) {
		unsigned int exp_res;

		init_regsym = &encap_ae->init_regsym[i];
		exp_res = init_regsym->value;
		switch (init_regsym->init_type) {
		case ICP_QAT_UOF_INIT_REG:
			/* init for all contexts in the mask */
			qat_uclo_init_reg(handle, ae, ctx_mask,
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_REG_CTX:
			/* check if ctx is appropriate for the ctxMode */
			if (!((1 << init_regsym->ctx) & ctx_mask)) {
				pr_err("QAT: invalid ctx num = 0x%x\n",
				       init_regsym->ctx);
				return -EINVAL;
			}
			/* init for a single named context only */
			qat_uclo_init_reg(handle, ae,
					  (unsigned char)
					  (1 << init_regsym->ctx),
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_EXPR:
			pr_err("QAT: INIT_EXPR feature not supported\n");
			return -EINVAL;
		case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
			pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
			return -EINVAL;
		default:
			/* unknown record types are silently ignored */
			break;
		}
	}
	return 0;
}
886
887 static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
888 {
889         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
890         unsigned int s, ae;
891
892         if (obj_handle->global_inited)
893                 return 0;
894         if (obj_handle->init_mem_tab.entry_num) {
895                 if (qat_uclo_init_memory(handle)) {
896                         pr_err("QAT: initialize memory failed\n");
897                         return -EINVAL;
898                 }
899         }
900         for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
901                 for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
902                         if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
903                                 continue;
904                         if (qat_uclo_init_reg_sym(handle, ae,
905                                                   obj_handle->ae_data[ae].
906                                                   ae_slices[s].encap_image))
907                                 return -EINVAL;
908                 }
909         }
910         obj_handle->global_inited = 1;
911         return 0;
912 }
913
/*
 * Program per-AE execution modes (context mode, next-neighbour mode,
 * and both local-memory modes) from each assigned image's ae_mode
 * word. Returns 0 on success, -EFAULT if any HAL call fails.
 */
static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
{
	unsigned char ae, nn_mode, s;
	struct icp_qat_uof_image *uof_image;
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		/* skip AEs not present in the hardware mask */
		if (!test_bit(ae,
			      (unsigned long *)&handle->hal_handle->ae_mask))
			continue;
		ae_data = &obj_handle->ae_data[ae];
		/* at most ICP_QAT_UCLO_MAX_CTX slices are programmed */
		for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
				      ICP_QAT_UCLO_MAX_CTX); s++) {
			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
				continue;
			uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
			if (qat_hal_set_ae_ctx_mode(handle, ae,
						    (char)ICP_QAT_CTX_MODE
						    (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
				return -EFAULT;
			}
			nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
			if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
				pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
				return -EFAULT;
			}
			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
						   (char)ICP_QAT_LOC_MEM0_MODE
						   (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
				return -EFAULT;
			}
			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
						   (char)ICP_QAT_LOC_MEM1_MODE
						   (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
				return -EFAULT;
			}
		}
	}
	return 0;
}
958
959 static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
960 {
961         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
962         struct icp_qat_uclo_encapme *image;
963         int a;
964
965         for (a = 0; a < obj_handle->uimage_num; a++) {
966                 image = &obj_handle->ae_uimage[a];
967                 image->uwords_num = image->page->beg_addr_p +
968                                         image->page->micro_words_num;
969         }
970 }
971
/*
 * Parse a mapped UOF object: check device compatibility, map the
 * string table, the microcode images and the init-memory table, then
 * program the AE modes. On failure, frees the per-image page structs
 * and the uword copy buffer that were allocated along the way.
 * Returns 0 on success, -ENOMEM/-EINVAL/-EFAULT on failure.
 */
static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;

	obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
	obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
					     obj_handle->obj_hdr->file_buff;
	/* a microword occupies 6 bytes in the UOF encoding */
	obj_handle->uword_in_bytes = 6;
	obj_handle->prod_type = qat_uclo_get_dev_type(handle);
	obj_handle->prod_rev = PID_MAJOR_REV |
			(PID_MINOR_REV & handle->hal_handle->revision_id);
	if (qat_uclo_check_uof_compat(obj_handle)) {
		pr_err("QAT: UOF incompatible\n");
		return -EINVAL;
	}
	/* staging buffer used when copying microwords to ustore */
	obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
					GFP_KERNEL);
	if (!obj_handle->uword_buf)
		return -ENOMEM;
	obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
	if (!obj_handle->obj_hdr->file_buff ||
	    !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
				    &obj_handle->str_table)) {
		pr_err("QAT: UOF doesn't have effective images\n");
		goto out_err;
	}
	obj_handle->uimage_num =
		qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
				    ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
	if (!obj_handle->uimage_num)
		goto out_err;
	if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
		pr_err("QAT: Bad object\n");
		goto out_check_uof_aemask_err;
	}
	qat_uclo_init_uword_num(handle);
	qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
				   &obj_handle->init_mem_tab);
	if (qat_uclo_set_ae_mode(handle))
		goto out_check_uof_aemask_err;
	return 0;
out_check_uof_aemask_err:
	/* undo qat_uclo_map_uimage's per-image page allocations */
	for (ae = 0; ae < obj_handle->uimage_num; ae++)
		kfree(obj_handle->ae_uimage[ae].page);
out_err:
	kfree(obj_handle->uword_buf);
	return -EFAULT;
}
1021
/*
 * Validate the SUOF file header checksum and copy its fields into the
 * handle's SUOF bookkeeping structure. The checksum covers everything
 * from the min_ver field to the end of the file. Returns 0 on success,
 * -EINVAL on checksum mismatch.
 */
static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
				      struct icp_qat_suof_filehdr *suof_ptr,
				      int suof_size)
{
	unsigned int check_sum = 0;
	unsigned int min_ver_offset = 0;
	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;

	suof_handle->file_id = ICP_QAT_SUOF_FID;
	suof_handle->suof_buf = (char *)suof_ptr;
	suof_handle->suof_size = suof_size;
	/* number of bytes covered by the checksum (min_ver onward) */
	min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
					      min_ver);
	check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
					       min_ver_offset);
	if (check_sum != suof_ptr->check_sum) {
		pr_err("QAT: incorrect SUOF checksum\n");
		return -EINVAL;
	}
	suof_handle->check_sum = suof_ptr->check_sum;
	suof_handle->min_ver = suof_ptr->min_ver;
	suof_handle->maj_ver = suof_ptr->maj_ver;
	suof_handle->fw_type = suof_ptr->fw_type;
	return 0;
}
1047
/*
 * Resolve one signed image (simg) chunk into its component parts:
 * CSS header, FW signing key, signature and the image proper, and
 * extract the AE mode metadata that follows the signature.
 */
static void qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle,
			      struct icp_qat_suof_img_hdr *suof_img_hdr,
			      struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
	struct icp_qat_simg_ae_mode *ae_mode;
	struct icp_qat_suof_objhdr *suof_objhdr;

	/* image payload starts after the per-chunk object header */
	suof_img_hdr->simg_buf  = (suof_handle->suof_buf +
				   suof_chunk_hdr->offset +
				   sizeof(*suof_objhdr));
	suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
				  (suof_handle->suof_buf +
				   suof_chunk_hdr->offset))->img_length;

	/* fixed CSS layout: header, modulus+exponent key, signature, image */
	suof_img_hdr->css_header = suof_img_hdr->simg_buf;
	suof_img_hdr->css_key = (suof_img_hdr->css_header +
				 sizeof(struct icp_qat_css_hdr));
	suof_img_hdr->css_signature = suof_img_hdr->css_key +
				      ICP_QAT_CSS_FWSK_MODULUS_LEN +
				      ICP_QAT_CSS_FWSK_EXPONENT_LEN;
	suof_img_hdr->css_simg = suof_img_hdr->css_signature +
				 ICP_QAT_CSS_SIGNATURE_LEN;

	/* AE mode metadata is the first thing inside the signed image */
	ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
	suof_img_hdr->ae_mask = ae_mode->ae_mask;
	suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
	suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
	suof_img_hdr->fw_type = ae_mode->fw_type;
}
1077
/*
 * Map the SUOF symbol-string chunk: record its size (first word of the
 * chunk) and a pointer to the string data that follows the length
 * field.
 */
static void
qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
			  struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
	char **sym_str = (char **)&suof_handle->sym_str;
	unsigned int *sym_size = &suof_handle->sym_size;
	struct icp_qat_suof_strtable *str_table_obj;

	/* first word of the chunk is the symbol table size */
	*sym_size = *(unsigned int *)(uintptr_t)
		   (suof_chunk_hdr->offset + suof_handle->suof_buf);
	/* strings start right after the length field */
	*sym_str = (char *)(uintptr_t)
		   (suof_handle->suof_buf + suof_chunk_hdr->offset +
		   sizeof(str_table_obj->tab_length));
}
1092
1093 static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
1094                                       struct icp_qat_suof_img_hdr *img_hdr)
1095 {
1096         struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
1097         unsigned int prod_rev, maj_ver, prod_type;
1098
1099         prod_type = qat_uclo_get_dev_type(handle);
1100         img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
1101         prod_rev = PID_MAJOR_REV |
1102                          (PID_MINOR_REV & handle->hal_handle->revision_id);
1103         if (img_ae_mode->dev_type != prod_type) {
1104                 pr_err("QAT: incompatible product type %x\n",
1105                        img_ae_mode->dev_type);
1106                 return -EINVAL;
1107         }
1108         maj_ver = prod_rev & 0xff;
1109         if ((maj_ver > img_ae_mode->devmax_ver) ||
1110             (maj_ver < img_ae_mode->devmin_ver)) {
1111                 pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
1112                 return -EINVAL;
1113         }
1114         return 0;
1115 }
1116
1117 static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
1118 {
1119         struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
1120
1121         kfree(sobj_handle->img_table.simg_hdr);
1122         sobj_handle->img_table.simg_hdr = NULL;
1123         kfree(handle->sobj_handle);
1124         handle->sobj_handle = NULL;
1125 }
1126
1127 static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
1128                               unsigned int img_id, unsigned int num_simgs)
1129 {
1130         struct icp_qat_suof_img_hdr img_header;
1131
1132         if (img_id != num_simgs - 1) {
1133                 memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
1134                        sizeof(*suof_img_hdr));
1135                 memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
1136                        sizeof(*suof_img_hdr));
1137                 memcpy(&suof_img_hdr[img_id], &img_header,
1138                        sizeof(*suof_img_hdr));
1139         }
1140 }
1141
1142 static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
1143                              struct icp_qat_suof_filehdr *suof_ptr,
1144                              int suof_size)
1145 {
1146         struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1147         struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
1148         struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
1149         int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
1150         unsigned int i = 0;
1151         struct icp_qat_suof_img_hdr img_header;
1152
1153         if (!suof_ptr || (suof_size == 0)) {
1154                 pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
1155                 return -EINVAL;
1156         }
1157         if (qat_uclo_check_suof_format(suof_ptr))
1158                 return -EINVAL;
1159         ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
1160         if (ret)
1161                 return ret;
1162         suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
1163                          ((uintptr_t)suof_ptr + sizeof(*suof_ptr));
1164
1165         qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
1166         suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
1167
1168         if (suof_handle->img_table.num_simgs != 0) {
1169                 suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
1170                                        sizeof(img_header),
1171                                        GFP_KERNEL);
1172                 if (!suof_img_hdr)
1173                         return -ENOMEM;
1174                 suof_handle->img_table.simg_hdr = suof_img_hdr;
1175         }
1176
1177         for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
1178                 qat_uclo_map_simg(handle->sobj_handle, &suof_img_hdr[i],
1179                                   &suof_chunk_hdr[1 + i]);
1180                 ret = qat_uclo_check_simg_compat(handle,
1181                                                  &suof_img_hdr[i]);
1182                 if (ret)
1183                         return ret;
1184                 if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
1185                         ae0_img = i;
1186         }
1187         qat_uclo_tail_img(suof_img_hdr, ae0_img,
1188                           suof_handle->img_table.num_simgs);
1189         return 0;
1190 }
1191
1192 #define ADD_ADDR(high, low)  ((((uint64_t)high) << 32) + low)
1193 #define BITS_IN_DWORD 32
1194
/*
 * Ask the FCU to authenticate a previously staged firmware image and
 * poll for the result. Returns 0 when verification completes, -EINVAL
 * on verification failure or timeout.
 */
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
{
	unsigned int fcu_sts, retry = 0;
	u64 bus_addr;

	/* the auth chunk immediately precedes the CSS header in DRAM */
	bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
			   - sizeof(struct icp_qat_auth_chunk);
	SET_CAP_CSR(handle, FCU_DRAM_ADDR_HI, (bus_addr >> BITS_IN_DWORD));
	SET_CAP_CSR(handle, FCU_DRAM_ADDR_LO, bus_addr);
	SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_AUTH);

	/* poll the FCU status until done, failed, or retries exhausted */
	do {
		msleep(FW_AUTH_WAIT_PERIOD);
		fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
		if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
			goto auth_fail;
		if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
			if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
				return 0;
	} while (retry++ < FW_AUTH_MAX_RETRY);
auth_fail:
	pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n",
	       fcu_sts & FCU_AUTH_STS_MASK, retry);
	return -EINVAL;
}
1221
1222 static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
1223                                struct icp_firml_dram_desc *dram_desc,
1224                                unsigned int size)
1225 {
1226         void *vptr;
1227         dma_addr_t ptr;
1228
1229         vptr = dma_alloc_coherent(&handle->pci_dev->dev,
1230                                   size, &ptr, GFP_KERNEL);
1231         if (!vptr)
1232                 return -ENOMEM;
1233         dram_desc->dram_base_addr_v = vptr;
1234         dram_desc->dram_bus_addr = ptr;
1235         dram_desc->dram_size = size;
1236         return 0;
1237 }
1238
1239 static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
1240                                struct icp_firml_dram_desc *dram_desc)
1241 {
1242         dma_free_coherent(&handle->pci_dev->dev,
1243                           (size_t)(dram_desc->dram_size),
1244                           (dram_desc->dram_base_addr_v),
1245                           dram_desc->dram_bus_addr);
1246         memset(dram_desc, 0, sizeof(*dram_desc));
1247 }
1248
1249 static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
1250                                    struct icp_qat_fw_auth_desc **desc)
1251 {
1252         struct icp_firml_dram_desc dram_desc;
1253
1254         dram_desc.dram_base_addr_v = *desc;
1255         dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
1256                                    (*desc))->chunk_bus_addr;
1257         dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
1258                                (*desc))->chunk_size;
1259         qat_uclo_simg_free(handle, &dram_desc);
1260 }
1261
/*
 * Stage a signed firmware image into a coherent DMA region laid out
 * for FCU authentication: auth chunk, CSS header, public key
 * (modulus + pad + exponent), signature, then the image body, filling
 * in the bus addresses of each part in the auth descriptor. Returns 0
 * on success, -EINVAL for an oversized image, -ENOMEM on allocation
 * failure. On success *desc points into the DMA region and must be
 * released with qat_uclo_ummap_auth_fw().
 */
static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
				char *image, unsigned int size,
				struct icp_qat_fw_auth_desc **desc)
{
	struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
	struct icp_qat_fw_auth_desc *auth_desc;
	struct icp_qat_auth_chunk *auth_chunk;
	u64 virt_addr,  bus_addr, virt_base;
	unsigned int length, simg_offset = sizeof(*auth_chunk);
	struct icp_firml_dram_desc img_desc;

	if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
		pr_err("QAT: error, input image size overflow %d\n", size);
		return -EINVAL;
	}
	/* AE firmware uses a fixed staging size; others pad the input */
	length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
		 ICP_QAT_CSS_AE_SIMG_LEN + simg_offset :
		 size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset;
	if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
		pr_err("QAT: error, allocate continuous dram fail\n");
		return -ENOMEM;
	}

	/* auth chunk and descriptor share the start of the region */
	auth_chunk = img_desc.dram_base_addr_v;
	auth_chunk->chunk_size = img_desc.dram_size;
	auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
	virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
	bus_addr  = img_desc.dram_bus_addr + simg_offset;
	auth_desc = img_desc.dram_base_addr_v;
	auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->css_hdr_low = (unsigned int)bus_addr;
	virt_addr = virt_base;

	/* CSS header */
	memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
	/* pub key */
	bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
			   sizeof(*css_hdr);
	virt_addr = virt_addr + sizeof(*css_hdr);

	auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->fwsk_pub_low = (unsigned int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr)),
	       ICP_QAT_CSS_FWSK_MODULUS_LEN);
	/* padding */
	memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN),
	       0, ICP_QAT_CSS_FWSK_PAD_LEN);

	/* exponent */
	memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN +
	       ICP_QAT_CSS_FWSK_PAD_LEN),
	       (void *)(image + sizeof(*css_hdr) +
			ICP_QAT_CSS_FWSK_MODULUS_LEN),
	       sizeof(unsigned int));

	/* signature */
	bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
			    auth_desc->fwsk_pub_low) +
		   ICP_QAT_CSS_FWSK_PUB_LEN;
	virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN;
	auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->signature_low = (unsigned int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr) +
	       ICP_QAT_CSS_FWSK_MODULUS_LEN +
	       ICP_QAT_CSS_FWSK_EXPONENT_LEN),
	       ICP_QAT_CSS_SIGNATURE_LEN);

	/* image body follows the signature */
	bus_addr = ADD_ADDR(auth_desc->signature_high,
			    auth_desc->signature_low) +
		   ICP_QAT_CSS_SIGNATURE_LEN;
	virt_addr += ICP_QAT_CSS_SIGNATURE_LEN;

	auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->img_low = (unsigned int)bus_addr;
	auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET;
	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + ICP_QAT_AE_IMG_OFFSET),
	       auth_desc->img_len);
	virt_addr = virt_base;
	/* AE firmware */
	if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
	    CSS_AE_FIRMWARE) {
		/* AE images carry mode data and init sequence before code */
		auth_desc->img_ae_mode_data_high = auth_desc->img_high;
		auth_desc->img_ae_mode_data_low = auth_desc->img_low;
		bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
				    auth_desc->img_ae_mode_data_low) +
			   sizeof(struct icp_qat_simg_ae_mode);

		auth_desc->img_ae_init_data_high = (unsigned int)
						 (bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
		bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
		auth_desc->img_ae_insts_high = (unsigned int)
					     (bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
	} else {
		/* non-AE firmware: instructions start at the image itself */
		auth_desc->img_ae_insts_high = auth_desc->img_high;
		auth_desc->img_ae_insts_low = auth_desc->img_low;
	}
	*desc = auth_desc;
	return 0;
}
1367
/*
 * Load an authenticated firmware image into every AE named in the
 * image's ae_mask via the FCU, polling for LOAD_DONE per AE. Returns
 * 0 on success, -EINVAL if a target AE is busy or a load times out.
 */
static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
{
	unsigned int i;
	unsigned int fcu_sts;
	struct icp_qat_simg_ae_mode *virt_addr;
	unsigned int fcu_loaded_ae_pos = FCU_LOADED_AE_POS;

	/*
	 * The AE-mode block sits after the auth chunk, CSS header,
	 * public key and signature in the staged image.
	 */
	virt_addr = (void *)((uintptr_t)desc +
		     sizeof(struct icp_qat_auth_chunk) +
		     sizeof(struct icp_qat_css_hdr) +
		     ICP_QAT_CSS_FWSK_PUB_LEN +
		     ICP_QAT_CSS_SIGNATURE_LEN);
	for (i = 0; i < handle->hal_handle->ae_max_num; i++) {
		int retry = 0;

		/* skip AEs the image does not target */
		if (!((virt_addr->ae_mask >> i) & 0x1))
			continue;
		if (qat_hal_check_ae_active(handle, i)) {
			pr_err("QAT: AE %d is active\n", i);
			return -EINVAL;
		}
		/* kick off the FCU load command for this AE */
		SET_CAP_CSR(handle, FCU_CONTROL,
			    (FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS)));

		/* poll until the FCU reports this AE loaded or timeout */
		do {
			msleep(FW_AUTH_WAIT_PERIOD);
			fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
			if (((fcu_sts & FCU_AUTH_STS_MASK) ==
			    FCU_STS_LOAD_DONE) &&
			    ((fcu_sts >> fcu_loaded_ae_pos) & (1 << i)))
				break;
		} while (retry++ < FW_AUTH_MAX_RETRY);
		if (retry > FW_AUTH_MAX_RETRY) {
			pr_err("QAT: firmware load failed timeout %x\n", retry);
			return -EINVAL;
		}
	}
	return 0;
}
1408
1409 static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
1410                                  void *addr_ptr, int mem_size)
1411 {
1412         struct icp_qat_suof_handle *suof_handle;
1413
1414         suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
1415         if (!suof_handle)
1416                 return -ENOMEM;
1417         handle->sobj_handle = suof_handle;
1418         if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
1419                 qat_uclo_del_suof(handle);
1420                 pr_err("QAT: map SUOF failed\n");
1421                 return -EINVAL;
1422         }
1423         return 0;
1424 }
1425
1426 int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
1427                        void *addr_ptr, int mem_size)
1428 {
1429         struct icp_qat_fw_auth_desc *desc = NULL;
1430         int status = 0;
1431
1432         if (handle->fw_auth) {
1433                 if (!qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc))
1434                         status = qat_uclo_auth_fw(handle, desc);
1435                 qat_uclo_ummap_auth_fw(handle, &desc);
1436         } else {
1437                 if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) {
1438                         pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
1439                         return -EINVAL;
1440                 }
1441                 qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
1442         }
1443         return status;
1444 }
1445
1446 static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
1447                                 void *addr_ptr, int mem_size)
1448 {
1449         struct icp_qat_uof_filehdr *filehdr;
1450         struct icp_qat_uclo_objhandle *objhdl;
1451
1452         objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
1453         if (!objhdl)
1454                 return -ENOMEM;
1455         objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
1456         if (!objhdl->obj_buf)
1457                 goto out_objbuf_err;
1458         filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
1459         if (qat_uclo_check_uof_format(filehdr))
1460                 goto out_objhdr_err;
1461         objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
1462                                              ICP_QAT_UOF_OBJS);
1463         if (!objhdl->obj_hdr) {
1464                 pr_err("QAT: object file chunk is null\n");
1465                 goto out_objhdr_err;
1466         }
1467         handle->obj_handle = objhdl;
1468         if (qat_uclo_parse_uof_obj(handle))
1469                 goto out_overlay_obj_err;
1470         return 0;
1471
1472 out_overlay_obj_err:
1473         handle->obj_handle = NULL;
1474         kfree(objhdl->obj_hdr);
1475 out_objhdr_err:
1476         kfree(objhdl->obj_buf);
1477 out_objbuf_err:
1478         kfree(objhdl);
1479         return -ENOMEM;
1480 }
1481
1482 int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
1483                      void *addr_ptr, int mem_size)
1484 {
1485         BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
1486                      (sizeof(handle->hal_handle->ae_mask) * 8));
1487
1488         if (!handle || !addr_ptr || mem_size < 24)
1489                 return -EINVAL;
1490
1491         return (handle->fw_auth) ?
1492                         qat_uclo_map_suof_obj(handle, addr_ptr, mem_size) :
1493                         qat_uclo_map_uof_obj(handle, addr_ptr, mem_size);
1494 }
1495
1496 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
1497 {
1498         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1499         unsigned int a;
1500
1501         if (handle->sobj_handle)
1502                 qat_uclo_del_suof(handle);
1503         if (!obj_handle)
1504                 return;
1505
1506         kfree(obj_handle->uword_buf);
1507         for (a = 0; a < obj_handle->uimage_num; a++)
1508                 kfree(obj_handle->ae_uimage[a].page);
1509
1510         for (a = 0; a < handle->hal_handle->ae_max_num; a++)
1511                 qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
1512
1513         kfree(obj_handle->obj_hdr);
1514         kfree(obj_handle->obj_buf);
1515         kfree(obj_handle);
1516         handle->obj_handle = NULL;
1517 }
1518
/*
 * qat_uclo_fill_uwords() - fetch one microword for a ustore location
 * @obj_handle: object handle (supplies uword_in_bytes)
 * @encap_page: encapsulated page to read from; NULL means "no page"
 * @uword: output microword
 * @addr_p: physical ustore address (currently unused here)
 * @raddr: page-relative word address to look up
 * @fill: fill pattern used when no valid word exists for @raddr
 *
 * Scans the page's uword blocks for one covering @raddr and copies the
 * word out; if no block matches, or the stored word is INVLD_UWORD, the
 * fill pattern is emitted instead.
 */
static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
				 struct icp_qat_uclo_encap_page *encap_page,
				 uint64_t *uword, unsigned int addr_p,
				 unsigned int raddr, uint64_t fill)
{
	uint64_t uwrd = 0;
	unsigned int i;

	/* no page at all: the whole location gets the fill pattern */
	if (!encap_page) {
		*uword = fill;
		return;
	}
	for (i = 0; i < encap_page->uwblock_num; i++) {
		if (raddr >= encap_page->uwblock[i].start_addr &&
		    raddr <= encap_page->uwblock[i].start_addr +
		    encap_page->uwblock[i].words_num - 1) {
			/* NOTE: raddr is reused as a byte offset from here
			 * on; the loop deliberately keeps iterating with
			 * the mutated value (blocks are presumably
			 * non-overlapping — confirm before restructuring)
			 */
			raddr -= encap_page->uwblock[i].start_addr;
			raddr *= obj_handle->uword_in_bytes;
			memcpy(&uwrd, (void *)(((uintptr_t)
			       encap_page->uwblock[i].micro_words) + raddr),
			       obj_handle->uword_in_bytes);
			/* mask to the valid microword bits — presumably the
			 * hardware word width; TODO confirm 0xbffffffffff
			 */
			uwrd = uwrd & 0xbffffffffffull;
		}
	}
	*uword = uwrd;
	if (*uword == INVLD_UWORD)
		*uword = fill;
}
1547
1548 static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
1549                                         struct icp_qat_uclo_encap_page
1550                                         *encap_page, unsigned int ae)
1551 {
1552         unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
1553         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1554         uint64_t fill_pat;
1555
1556         /* load the page starting at appropriate ustore address */
1557         /* get fill-pattern from an image -- they are all the same */
1558         memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
1559                sizeof(uint64_t));
1560         uw_physical_addr = encap_page->beg_addr_p;
1561         uw_relative_addr = 0;
1562         words_num = encap_page->micro_words_num;
1563         while (words_num) {
1564                 if (words_num < UWORD_CPYBUF_SIZE)
1565                         cpylen = words_num;
1566                 else
1567                         cpylen = UWORD_CPYBUF_SIZE;
1568
1569                 /* load the buffer */
1570                 for (i = 0; i < cpylen; i++)
1571                         qat_uclo_fill_uwords(obj_handle, encap_page,
1572                                              &obj_handle->uword_buf[i],
1573                                              uw_physical_addr + i,
1574                                              uw_relative_addr + i, fill_pat);
1575
1576                 /* copy the buffer to ustore */
1577                 qat_hal_wr_uwords(handle, (unsigned char)ae,
1578                                   uw_physical_addr, cpylen,
1579                                   obj_handle->uword_buf);
1580
1581                 uw_physical_addr += cpylen;
1582                 uw_relative_addr += cpylen;
1583                 words_num -= cpylen;
1584         }
1585 }
1586
1587 static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
1588                                     struct icp_qat_uof_image *image)
1589 {
1590         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1591         unsigned int ctx_mask, s;
1592         struct icp_qat_uclo_page *page;
1593         unsigned char ae;
1594         int ctx;
1595
1596         if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
1597                 ctx_mask = 0xff;
1598         else
1599                 ctx_mask = 0x55;
1600         /* load the default page and set assigned CTX PC
1601          * to the entrypoint address */
1602         for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
1603                 if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
1604                         continue;
1605                 /* find the slice to which this image is assigned */
1606                 for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
1607                         if (image->ctx_assigned & obj_handle->ae_data[ae].
1608                             ae_slices[s].ctx_mask_assigned)
1609                                 break;
1610                 }
1611                 if (s >= obj_handle->ae_data[ae].slice_num)
1612                         continue;
1613                 page = obj_handle->ae_data[ae].ae_slices[s].page;
1614                 if (!page->encap_page->def_page)
1615                         continue;
1616                 qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
1617
1618                 page = obj_handle->ae_data[ae].ae_slices[s].page;
1619                 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
1620                         obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
1621                                         (ctx_mask & (1 << ctx)) ? page : NULL;
1622                 qat_hal_set_live_ctx(handle, (unsigned char)ae,
1623                                      image->ctx_assigned);
1624                 qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
1625                                image->entry_address);
1626         }
1627 }
1628
1629 static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
1630 {
1631         unsigned int i;
1632         struct icp_qat_fw_auth_desc *desc = NULL;
1633         struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
1634         struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
1635
1636         for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
1637                 if (qat_uclo_map_auth_fw(handle,
1638                                          (char *)simg_hdr[i].simg_buf,
1639                                          (unsigned int)
1640                                          (simg_hdr[i].simg_len),
1641                                          &desc))
1642                         goto wr_err;
1643                 if (qat_uclo_auth_fw(handle, desc))
1644                         goto wr_err;
1645                 if (qat_uclo_load_fw(handle, desc))
1646                         goto wr_err;
1647                 qat_uclo_ummap_auth_fw(handle, &desc);
1648         }
1649         return 0;
1650 wr_err:
1651         qat_uclo_ummap_auth_fw(handle, &desc);
1652         return -EINVAL;
1653 }
1654
1655 static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
1656 {
1657         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1658         unsigned int i;
1659
1660         if (qat_uclo_init_globals(handle))
1661                 return -EINVAL;
1662         for (i = 0; i < obj_handle->uimage_num; i++) {
1663                 if (!obj_handle->ae_uimage[i].img_ptr)
1664                         return -EINVAL;
1665                 if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
1666                         return -EINVAL;
1667                 qat_uclo_wr_uimage_page(handle,
1668                                         obj_handle->ae_uimage[i].img_ptr);
1669         }
1670         return 0;
1671 }
1672
1673 int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
1674 {
1675         return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) :
1676                                    qat_uclo_wr_uof_img(handle);
1677 }