blob: a698d07ef97b049d53961fc8d35335a716a73c7e [file] [log] [blame]
Tadeusz Strukb4b7e672014-06-05 13:43:47 -07001/*
2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
4
5 GPL LICENSE SUMMARY
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
10
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 Contact Information:
17 qat-linux@intel.com
18
19 BSD LICENSE
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
23 are met:
24
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
30 distribution.
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
34
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46*/
47#include <linux/slab.h>
48#include <linux/ctype.h>
49#include <linux/kernel.h>
50
51#include "adf_accel_devices.h"
52#include "adf_common_drv.h"
53#include "icp_qat_uclo.h"
54#include "icp_qat_hal.h"
55#include "icp_qat_fw_loader_handle.h"
56
57#define UWORD_CPYBUF_SIZE 1024
58#define INVLD_UWORD 0xffffffffffull
59#define PID_MINOR_REV 0xf
60#define PID_MAJOR_REV (0xf << 4)
61
/*
 * Bind image image_num to the next free slice of accel engine ae and
 * allocate the region/page bookkeeping for that slice.
 * Returns 0 on success, -ENOMEM if allocation fails.
 */
static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
				 unsigned int ae, unsigned int image_num)
{
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_encapme *encap_image;
	struct icp_qat_uclo_page *page = NULL;
	struct icp_qat_uclo_aeslice *ae_slice = NULL;

	ae_data = &obj_handle->ae_data[ae];
	encap_image = &obj_handle->ae_uimage[image_num];
	ae_slice = &ae_data->ae_slices[ae_data->slice_num];
	ae_slice->encap_image = encap_image;

	if (encap_image->img_ptr) {
		ae_slice->ctx_mask_assigned =
			encap_image->img_ptr->ctx_assigned;
		ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
	} else {
		ae_slice->ctx_mask_assigned = 0;
	}
	ae_slice->regions = kzalloc(sizeof(*(ae_slice->regions)), GFP_KERNEL);
	if (!(ae_slice->regions))
		return -ENOMEM;
	ae_slice->page = kzalloc(sizeof(*(ae_slice->page)), GFP_KERNEL);
	if (!(ae_slice->page))
		goto out_err;
	page = ae_slice->page;
	page->encap_page = encap_image->page;
	ae_slice->page->region = ae_slice->regions;
	/* Slice is fully built only now, so account for it last. */
	ae_data->slice_num++;
	return 0;
out_err:
	kfree(ae_slice->regions);
	ae_slice->regions = NULL;
	return -ENOMEM;
}
98
99static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
100{
101 unsigned int ss = 0;
102
103 if (!ae_data) {
104 pr_err("QAT: bad argument, ae_data is NULL\n ");
105 return -EINVAL;
106 }
107
108 for (ss = 0; ss < ae_data->slice_num; ss++) {
109 kfree(ae_data->ae_slices[ss].regions);
110 ae_data->ae_slices[ss].regions = NULL;
111 kfree(ae_data->ae_slices[ss].page);
112 ae_data->ae_slices[ss].page = NULL;
113 }
114 return 0;
115}
116
117static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
118 unsigned int str_offset)
119{
120 if ((!str_table->table_len) || (str_offset > str_table->table_len))
121 return NULL;
122 return (char *)(((unsigned long)(str_table->strings)) + str_offset);
123}
124
125static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr)
126{
127 int maj = hdr->maj_ver & 0xff;
128 int min = hdr->min_ver & 0xff;
129
130 if (hdr->file_id != ICP_QAT_UOF_FID) {
131 pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
132 return -EINVAL;
133 }
134 if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
135 pr_err("QAT: bad uof version, major 0x%x, minor 0x%x\n",
136 maj, min);
137 return -EINVAL;
138 }
139 return 0;
140}
141
/*
 * Copy num_in_bytes from val into device SRAM starting at byte address
 * addr, one 32-bit word per SRAM_WRITE.
 * NOTE(review): assumes num_in_bytes is a multiple of 4 — a non-multiple
 * would wrap the unsigned counter and never terminate; confirm callers.
 */
static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned int addr, unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	while (num_in_bytes) {
		/* memcpy avoids unaligned 32-bit loads from the byte stream */
		memcpy(&outval, ptr, 4);
		SRAM_WRITE(handle, addr, outval);
		num_in_bytes -= 4;
		ptr += 4;
		addr += 4;
	}
}
157
/*
 * Copy num_in_bytes from val into AE ae's ustore data memory starting at
 * byte address addr, one 32-bit word per qat_hal_wr_umem() call.
 * NOTE(review): like the SRAM variant above, assumes num_in_bytes is a
 * multiple of 4 — the unsigned counter would wrap otherwise.
 *
 * Fix: dropped the redundant trailing "return;" of this void function,
 * making it consistent with qat_uclo_wr_sram_by_words().
 */
static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned int addr,
				      unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	addr >>= 0x2; /* convert to uword address */

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
		num_in_bytes -= 4;
		ptr += 4;
	}
}
176
177static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
178 unsigned char ae,
179 struct icp_qat_uof_batch_init
180 *umem_init_header)
181{
182 struct icp_qat_uof_batch_init *umem_init;
183
184 if (!umem_init_header)
185 return;
186 umem_init = umem_init_header->next;
187 while (umem_init) {
188 unsigned int addr, *value, size;
189 ae = umem_init->ae;
190 addr = umem_init->addr;
191 value = umem_init->value;
192 size = umem_init->size;
193 qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
194 umem_init = umem_init->next;
195 }
196}
197
198static void
199qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
200 struct icp_qat_uof_batch_init **base)
201{
202 struct icp_qat_uof_batch_init *umem_init;
203
204 umem_init = *base;
205 while (umem_init) {
206 struct icp_qat_uof_batch_init *pre;
207 pre = umem_init;
208 umem_init = umem_init->next;
209 kfree(pre);
210 }
211 *base = NULL;
212}
213
214static int qat_uclo_parse_num(char *str, unsigned int *num)
215{
216 char buf[16];
217 unsigned long ae = 0;
218 int i;
219
220 memset(buf, '\0', 16);
221 strncpy(buf, str, 15);
222 for (i = 0; i < 16; i++) {
223 if (!isdigit(buf[i])) {
224 buf[i] = '\0';
225 break;
226 }
227 }
228 if ((kstrtoul(buf, 10, &ae)))
229 return -EFAULT;
230
231 *num = (unsigned int)ae;
232 return 0;
233}
234
235static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
236 struct icp_qat_uof_initmem *init_mem,
237 unsigned int size_range, unsigned int *ae)
238{
239 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
240 char *str;
241
242 if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
243 pr_err("QAT: initmem is out of range");
244 return -EINVAL;
245 }
246 if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
247 pr_err("QAT: Memory scope for init_mem error\n");
248 return -EINVAL;
249 }
250 str = qat_uclo_get_string(&(obj_handle->str_table), init_mem->sym_name);
251 if (!str) {
252 pr_err("QAT: AE name assigned in uof init table is NULL\n");
253 return -EINVAL;
254 }
255 if (qat_uclo_parse_num(str, ae)) {
256 pr_err("QAT: Parse num for AE number failed\n");
257 return -EINVAL;
258 }
259 if (!test_bit(*ae, (unsigned long *)&(handle->hal_handle->ae_mask))) {
260 pr_err("QAT: ae %d to be init is fused off\n", *ae);
261 return -EINVAL;
262 }
263 if (*ae >= ICP_QAT_UCLO_MAX_AE) {
264 pr_err("QAT: ae %d out of range\n", *ae);
265 return -EINVAL;
266 }
267 return 0;
268}
269
270static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
271 *handle, struct icp_qat_uof_initmem
272 *init_mem, unsigned int ae,
273 struct icp_qat_uof_batch_init
274 **init_tab_base)
275{
276 struct icp_qat_uof_batch_init *init_header, *tail;
277 struct icp_qat_uof_batch_init *mem_init, *tail_old;
278 struct icp_qat_uof_memvar_attr *mem_val_attr;
279 unsigned int i, flag = 0;
280
281 mem_val_attr =
282 (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
283 sizeof(struct icp_qat_uof_initmem));
284
285 init_header = *init_tab_base;
286 if (!init_header) {
287 init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
288 if (!init_header)
289 return -ENOMEM;
290 init_header->size = 1;
291 *init_tab_base = init_header;
292 flag = 1;
293 }
294 tail_old = init_header;
295 while (tail_old->next)
296 tail_old = tail_old->next;
297 tail = tail_old;
298 for (i = 0; i < init_mem->val_attr_num; i++) {
299 mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
300 if (!mem_init)
301 goto out_err;
302 mem_init->ae = ae;
303 mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
304 mem_init->value = &mem_val_attr->value;
305 mem_init->size = 4;
306 mem_init->next = NULL;
307 tail->next = mem_init;
308 tail = mem_init;
309 init_header->size += qat_hal_get_ins_num();
310 mem_val_attr++;
311 }
312 return 0;
313out_err:
314 while (tail_old) {
315 mem_init = tail_old->next;
316 kfree(tail_old);
317 tail_old = mem_init;
318 }
319 if (flag)
320 kfree(*init_tab_base);
321 return -ENOMEM;
322}
323
324static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
325 struct icp_qat_uof_initmem *init_mem)
326{
327 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
328 unsigned int ae;
329
330 if (qat_uclo_fetch_initmem_ae(handle, init_mem,
331 ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
332 return -EINVAL;
333 if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
334 &(obj_handle->lm_init_tab[ae])))
335 return -EINVAL;
336 return 0;
337}
338
/*
 * Queue ustore-memory init entries for the AE named in init_mem, then
 * raise each affected slice's uwords_num so it covers the highest
 * initialized uword address.
 */
static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
				  struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae, ustore_size, uaddr, i;

	ustore_size = obj_handle->ustore_phy_size;
	if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
		return -EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
					    &(obj_handle->umem_init_tab[ae])))
		return -EINVAL;
	/* set the highest ustore address referenced */
	uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
	for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
		if (obj_handle->ae_data[ae].ae_slices[i].
		    encap_image->uwords_num < uaddr)
			obj_handle->ae_data[ae].ae_slices[i].
			encap_image->uwords_num = uaddr;
	}
	return 0;
}
361
#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
/*
 * Dispatch one initmem record by region type: SRAM entries are written
 * to the device immediately, while LMEM and UMEM entries are queued into
 * per-AE batch lists applied later by qat_uclo_init_memory().
 */
static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
				   struct icp_qat_uof_initmem *init_mem)
{
	unsigned int i;
	struct icp_qat_uof_memvar_attr *mem_val_attr;

	/* memvar attributes immediately follow the initmem record */
	mem_val_attr =
		(struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
		sizeof(struct icp_qat_uof_initmem));

	switch (init_mem->region) {
	case ICP_QAT_UOF_SRAM_REGION:
		if ((init_mem->addr + init_mem->num_in_bytes) >
		    ICP_DH895XCC_PESRAM_BAR_SIZE) {
			pr_err("QAT: initmem on SRAM is out of range");
			return -EINVAL;
		}
		for (i = 0; i < init_mem->val_attr_num; i++) {
			qat_uclo_wr_sram_by_words(handle,
						  init_mem->addr +
						  mem_val_attr->offset_in_byte,
						  &mem_val_attr->value, 4);
			mem_val_attr++;
		}
		break;
	case ICP_QAT_UOF_LMEM_REGION:
		if (qat_uclo_init_lmem_seg(handle, init_mem))
			return -EINVAL;
		break;
	case ICP_QAT_UOF_UMEM_REGION:
		if (qat_uclo_init_umem_seg(handle, init_mem))
			return -EINVAL;
		break;
	default:
		pr_err("QAT: initmem region error. region type=0x%x\n",
		       init_mem->region);
		return -EINVAL;
	}
	return 0;
}
403
404static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
405 struct icp_qat_uclo_encapme *image)
406{
407 unsigned int i;
408 struct icp_qat_uclo_encap_page *page;
409 struct icp_qat_uof_image *uof_image;
410 unsigned char ae;
411 unsigned int ustore_size;
412 unsigned int patt_pos;
413 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
414 uint64_t *fill_data;
415
416 uof_image = image->img_ptr;
417 fill_data = kzalloc(ICP_QAT_UCLO_MAX_USTORE * sizeof(uint64_t),
418 GFP_KERNEL);
419 if (!fill_data)
420 return -EFAULT;
421 for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
422 memcpy(&fill_data[i], &uof_image->fill_pattern,
423 sizeof(uint64_t));
424 page = image->page;
425
426 for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
427 if (!test_bit(ae, (unsigned long *)&(uof_image->ae_assigned)))
428 continue;
429 ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
430 patt_pos = page->beg_addr_p + page->micro_words_num;
431
432 qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
433 page->beg_addr_p, &fill_data[0]);
434 qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
435 ustore_size - patt_pos + 1,
436 &fill_data[page->beg_addr_p]);
437 }
438 kfree(fill_data);
439 return 0;
440}
441
/*
 * Walk the initmem table once, dispatching each record, then apply the
 * queued per-AE LMEM and UMEM batch lists and release them.
 * NOTE(review): initmem records are variable length — each is followed
 * by val_attr_num memvar attributes — hence the pointer arithmetic when
 * stepping to the next record.
 */
static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int i;
	int status = 0;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
	int ae;

	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
		if (initmem->num_in_bytes) {
			if (qat_uclo_init_ae_memory(handle, initmem))
				return -EINVAL;
		}
		/* advance past this record and its trailing attributes */
		initmem = (struct icp_qat_uof_initmem *)((unsigned long)(
			(unsigned long)initmem +
			sizeof(struct icp_qat_uof_initmem)) +
			(sizeof(struct icp_qat_uof_memvar_attr) *
			initmem->val_attr_num));
	}
	for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) {
		if (qat_hal_batch_wr_lm(handle, ae,
					obj_handle->lm_init_tab[ae])) {
			pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
			return -EINVAL;
		}
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->lm_init_tab[ae]);
		qat_uclo_batch_wr_umem(handle, ae,
				       obj_handle->umem_init_tab[ae]);
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->
						 umem_init_tab[ae]);
	}
	return status;
}
477
478static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
479 char *chunk_id, void *cur)
480{
481 int i;
482 struct icp_qat_uof_chunkhdr *chunk_hdr =
483 (struct icp_qat_uof_chunkhdr *)
484 ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
485
486 for (i = 0; i < obj_hdr->num_chunks; i++) {
487 if ((cur < (void *)&chunk_hdr[i]) &&
488 !(strncmp(chunk_hdr[i].chunk_id, chunk_id,
489 ICP_QAT_UOF_OBJID_LEN))) {
490 return &chunk_hdr[i];
491 }
492 }
493 return NULL;
494}
495
/*
 * Fold one byte (ch) into a 16-bit CRC accumulator using the CCITT
 * polynomial 0x1021, MSB first. Returns the updated 16-bit CRC.
 */
static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
{
	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
	int bit;

	reg ^= inbyte << 0x8;
	for (bit = 0; bit < 8; bit++) {
		unsigned int msb_set = reg & 0x8000;

		reg <<= 1;
		if (msb_set)
			reg ^= 0x1021;
	}
	return reg & 0xFFFF;
}
511
/*
 * CRC of num bytes starting at ptr, using qat_uclo_calc_checksum().
 * A NULL ptr yields 0.
 */
static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
	unsigned int chksum = 0;

	if (!ptr)
		return 0;
	while (num--)
		chksum = qat_uclo_calc_checksum(chksum, *ptr++);
	return chksum;
}
521
/*
 * Find the file chunk named chunk_id in buf, verify its checksum, and
 * wrap it in a freshly allocated objhdr pointing into buf.
 * Returns NULL on missing chunk, checksum mismatch or allocation
 * failure; on success the caller owns (and later frees) the objhdr.
 */
static struct icp_qat_uclo_objhdr *
qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
		   char *chunk_id)
{
	struct icp_qat_uof_filechunkhdr *file_chunk;
	struct icp_qat_uclo_objhdr *obj_hdr;
	void *chunk;
	int i;

	/* file chunk headers immediately follow the file header */
	file_chunk = (struct icp_qat_uof_filechunkhdr *)
		(buf + sizeof(struct icp_qat_uof_filehdr));
	for (i = 0; i < file_hdr->num_chunks; i++) {
		if (!(strncmp(file_chunk->chunk_id, chunk_id,
			      ICP_QAT_UOF_OBJID_LEN))) {
			chunk = buf + file_chunk->offset;
			/* a bad checksum aborts the search entirely */
			if (file_chunk->checksum != qat_uclo_calc_str_checksum(
				(char *)chunk, file_chunk->size))
				break;
			obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
			if (!obj_hdr)
				break;
			obj_hdr->file_buff = chunk;
			obj_hdr->checksum = file_chunk->checksum;
			obj_hdr->size = file_chunk->size;
			return obj_hdr;
		}
		file_chunk++;
	}
	return NULL;
}
552
553static unsigned int
554qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
555 struct icp_qat_uof_image *image)
556{
557 struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
558 struct icp_qat_uof_objtable *neigh_reg_tab;
559 struct icp_qat_uof_code_page *code_page;
560
561 code_page = (struct icp_qat_uof_code_page *)
562 ((char *)image + sizeof(struct icp_qat_uof_image));
563 uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
564 code_page->uc_var_tab_offset);
565 imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
566 code_page->imp_var_tab_offset);
567 imp_expr_tab = (struct icp_qat_uof_objtable *)
568 (encap_uof_obj->beg_uof +
569 code_page->imp_expr_tab_offset);
570 if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
571 imp_expr_tab->entry_num) {
572 pr_err("QAT: UOF can't contain imported variable to be parsed");
573 return -EINVAL;
574 }
575 neigh_reg_tab = (struct icp_qat_uof_objtable *)
576 (encap_uof_obj->beg_uof +
577 code_page->neigh_reg_tab_offset);
578 if (neigh_reg_tab->entry_num) {
579 pr_err("QAT: UOF can't contain shared control store feature");
580 return -EINVAL;
581 }
582 if (image->numpages > 1) {
583 pr_err("QAT: UOF can't contain multiple pages");
584 return -EINVAL;
585 }
586 if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
587 pr_err("QAT: UOF can't use shared control store feature");
588 return -EFAULT;
589 }
590 if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
591 pr_err("QAT: UOF can't use reloadable feature");
592 return -EFAULT;
593 }
594 return 0;
595}
596
/*
 * Populate an encap_page from the (single) code page of a UOF image:
 * page placement fields, micro-word count, and the table of uword
 * blocks with their resolved source addresses inside the mapped UOF.
 */
static void qat_uclo_map_image_pages(struct icp_qat_uof_encap_obj
				     *encap_uof_obj,
				     struct icp_qat_uof_image *img,
				     struct icp_qat_uclo_encap_page *page)
{
	struct icp_qat_uof_code_page *code_page;
	struct icp_qat_uof_code_area *code_area;
	struct icp_qat_uof_objtable *uword_block_tab;
	struct icp_qat_uof_uword_block *uwblock;
	int i;

	/* the code page record immediately follows the image header */
	code_page = (struct icp_qat_uof_code_page *)
			((char *)img + sizeof(struct icp_qat_uof_image));
	page->def_page = code_page->def_page;
	page->page_region = code_page->page_region;
	page->beg_addr_v = code_page->beg_addr_v;
	page->beg_addr_p = code_page->beg_addr_p;
	code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
						code_page->code_area_offset);
	page->micro_words_num = code_area->micro_words_num;
	uword_block_tab = (struct icp_qat_uof_objtable *)
			  (encap_uof_obj->beg_uof +
			  code_area->uword_block_tab);
	page->uwblock_num = uword_block_tab->entry_num;
	/* block entries immediately follow the objtable header */
	uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
			sizeof(struct icp_qat_uof_objtable));
	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
	for (i = 0; i < uword_block_tab->entry_num; i++)
		page->uwblock[i].micro_words =
		(unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
}
628
/*
 * Enumerate up to max_image IMAG chunks, filling one encapme entry per
 * image with its register, init-regsym and sbreak tables, then map its
 * code page. Returns the number of images mapped; 0 on failure (all
 * pages allocated so far are freed).
 */
static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
			       struct icp_qat_uclo_encapme *ae_uimage,
			       int max_image)
{
	int a = 0, i;
	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
	struct icp_qat_uof_image *image;
	struct icp_qat_uof_objtable *ae_regtab;
	struct icp_qat_uof_objtable *init_reg_sym_tab;
	struct icp_qat_uof_objtable *sbreak_tab;
	struct icp_qat_uof_encap_obj *encap_uof_obj =
		&obj_handle->encap_uof_obj;

	for (a = 0; a < max_image; a++) {
		/* resume the chunk search after the previous hit */
		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
						ICP_QAT_UOF_IMAG, chunk_hdr);
		if (!chunk_hdr)
			break;
		image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
						     chunk_hdr->offset);
		ae_regtab = (struct icp_qat_uof_objtable *)
			   (image->reg_tab_offset +
			   obj_handle->obj_hdr->file_buff);
		ae_uimage[a].ae_reg_num = ae_regtab->entry_num;
		/* table entries immediately follow each objtable header */
		ae_uimage[a].ae_reg = (struct icp_qat_uof_ae_reg *)
			(((char *)ae_regtab) +
			sizeof(struct icp_qat_uof_objtable));
		init_reg_sym_tab = (struct icp_qat_uof_objtable *)
				   (image->init_reg_sym_tab +
				   obj_handle->obj_hdr->file_buff);
		ae_uimage[a].init_regsym_num = init_reg_sym_tab->entry_num;
		ae_uimage[a].init_regsym = (struct icp_qat_uof_init_regsym *)
			(((char *)init_reg_sym_tab) +
			sizeof(struct icp_qat_uof_objtable));
		sbreak_tab = (struct icp_qat_uof_objtable *)
			(image->sbreak_tab + obj_handle->obj_hdr->file_buff);
		ae_uimage[a].sbreak_num = sbreak_tab->entry_num;
		ae_uimage[a].sbreak = (struct icp_qat_uof_sbreak *)
				      (((char *)sbreak_tab) +
				      sizeof(struct icp_qat_uof_objtable));
		ae_uimage[a].img_ptr = image;
		if (qat_uclo_check_image_compat(encap_uof_obj, image))
			goto out_err;
		ae_uimage[a].page =
			kzalloc(sizeof(struct icp_qat_uclo_encap_page),
				GFP_KERNEL);
		if (!ae_uimage[a].page)
			goto out_err;
		qat_uclo_map_image_pages(encap_uof_obj, image,
					 ae_uimage[a].page);
	}
	return a;
out_err:
	for (i = 0; i < a; i++)
		kfree(ae_uimage[i].page);
	return 0;
}
686
687static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
688{
689 int i, ae;
690 int mflag = 0;
691 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
692
693 for (ae = 0; ae <= max_ae; ae++) {
694 if (!test_bit(ae, (unsigned long *)
695 &(handle->hal_handle->ae_mask)))
696 continue;
697 for (i = 0; i < obj_handle->uimage_num; i++) {
698 if (!test_bit(ae, (unsigned long *)
699 &(obj_handle->ae_uimage[i].img_ptr->ae_assigned)))
700 continue;
701 mflag = 1;
702 if (qat_uclo_init_ae_data(obj_handle, ae, i))
703 return -EINVAL;
704 }
705 }
706 if (!mflag) {
707 pr_err("QAT: uimage uses AE not set");
708 return -EINVAL;
709 }
710 return 0;
711}
712
/*
 * Locate the string-table chunk named tab_name and fill str_table with
 * its length and a pointer to its character data.
 * Returns str_table on success, NULL when the chunk is absent.
 */
static struct icp_qat_uof_strtable *
qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
		       char *tab_name, struct icp_qat_uof_strtable *str_table)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
					obj_hdr->file_buff, tab_name, NULL);
	if (chunk_hdr) {
		int hdr_size;

		/* the chunk begins with the table length field */
		memcpy(&str_table->table_len, obj_hdr->file_buff +
		       chunk_hdr->offset, sizeof(str_table->table_len));
		/* character data follows the in-struct header fields */
		hdr_size = (char *)&str_table->strings - (char *)str_table;
		str_table->strings = (unsigned long)obj_hdr->file_buff +
					chunk_hdr->offset + hdr_size;
		return str_table;
	}
	return NULL;
}
732
733static void
734qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
735 struct icp_qat_uclo_init_mem_table *init_mem_tab)
736{
737 struct icp_qat_uof_chunkhdr *chunk_hdr;
738
739 chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
740 ICP_QAT_UOF_IMEM, NULL);
741 if (chunk_hdr) {
742 memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
743 chunk_hdr->offset, sizeof(unsigned int));
744 init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
745 (encap_uof_obj->beg_uof + chunk_hdr->offset +
746 sizeof(unsigned int));
747 }
748}
749
750static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
751{
752 unsigned int maj_ver, prod_type = obj_handle->prod_type;
753
754 if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) {
755 pr_err("QAT: uof type 0x%x not match with cur platform 0x%x\n",
756 obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type);
757 return -EINVAL;
758 }
759 maj_ver = obj_handle->prod_rev & 0xff;
760 if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
761 (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
762 pr_err("QAT: uof majVer 0x%x out of range\n", maj_ver);
763 return -EINVAL;
764 }
765 return 0;
766}
767
/*
 * Write an initial value into one AE register, dispatching on register
 * type. For ABS register types the context mask is cleared before
 * deliberately falling through to the matching REL handler.
 * Returns the HAL result, or -EFAULT for an unsupported register type.
 */
static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned char ctx_mask,
			     enum icp_qat_uof_regtype reg_type,
			     unsigned short reg_addr, unsigned int value)
{
	switch (reg_type) {
	case ICP_GPA_ABS:
	case ICP_GPB_ABS:
		ctx_mask = 0;
		/* fall through - ABS shares the GPR path with REL */
	case ICP_GPA_REL:
	case ICP_GPB_REL:
		return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
					reg_addr, value);
	case ICP_SR_ABS:
	case ICP_DR_ABS:
	case ICP_SR_RD_ABS:
	case ICP_DR_RD_ABS:
		ctx_mask = 0;
		/* fall through - ABS shares the read-xfer path with REL */
	case ICP_SR_REL:
	case ICP_DR_REL:
	case ICP_SR_RD_REL:
	case ICP_DR_RD_REL:
		return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_SR_WR_ABS:
	case ICP_DR_WR_ABS:
		ctx_mask = 0;
		/* fall through - ABS shares the write-xfer path with REL */
	case ICP_SR_WR_REL:
	case ICP_DR_WR_REL:
		return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_NEIGH_REL:
		return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
	default:
		pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
		return -EFAULT;
	}
	/* not reached: every case above returns */
	return 0;
}
807
/*
 * Apply an image's init-regsym records to AE ae: plain register writes,
 * per-context register writes (validated against the context mode), and
 * rejection of the unsupported expression-based init types.
 * Returns 0 on success, -EINVAL on invalid context or unsupported type.
 */
static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
				 unsigned int ae,
				 struct icp_qat_uclo_encapme *encap_ae)
{
	unsigned int i;
	unsigned char ctx_mask;
	struct icp_qat_uof_init_regsym *init_regsym;

	/* 8-context mode uses all contexts; 4-context mode the even ones */
	if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
	    ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;

	for (i = 0; i < encap_ae->init_regsym_num; i++) {
		unsigned int exp_res;

		init_regsym = &encap_ae->init_regsym[i];
		exp_res = init_regsym->value;
		switch (init_regsym->init_type) {
		case ICP_QAT_UOF_INIT_REG:
			qat_uclo_init_reg(handle, ae, ctx_mask,
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_REG_CTX:
			/* check if ctx is appropriate for the ctxMode */
			if (!((1 << init_regsym->ctx) & ctx_mask)) {
				pr_err("QAT: invalid ctx num = 0x%x\n",
				       init_regsym->ctx);
				return -EINVAL;
			}
			qat_uclo_init_reg(handle, ae,
					  (unsigned char)
					  (1 << init_regsym->ctx),
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_EXPR:
			pr_err("QAT: INIT_EXPR feature not supported\n");
			return -EINVAL;
		case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
			pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
			return -EINVAL;
		default:
			break;
		}
	}
	return 0;
}
861
862static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
863{
864 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
865 unsigned int s, ae;
866
867 if (obj_handle->global_inited)
868 return 0;
869 if (obj_handle->init_mem_tab.entry_num) {
870 if (qat_uclo_init_memory(handle)) {
871 pr_err("QAT: initalize memory failed\n");
872 return -EINVAL;
873 }
874 }
875 for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
876 for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
877 if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
878 continue;
879 if (qat_uclo_init_reg_sym(handle, ae,
880 obj_handle->ae_data[ae].
881 ae_slices[s].encap_image))
882 return -EINVAL;
883 }
884 }
885 obj_handle->global_inited = 1;
886 return 0;
887}
888
/*
 * Program each enabled AE with the mode bits its images require:
 * context mode, neighbour-ring mode and both local-memory modes.
 * Returns 0 on success, -EFAULT on any HAL failure.
 */
static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
{
	unsigned char ae, nn_mode, s;
	struct icp_qat_uof_image *uof_image;
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

	for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae,
			      (unsigned long *)&(handle->hal_handle->ae_mask)))
			continue;
		ae_data = &(obj_handle->ae_data[ae]);
		for (s = 0; s < ae_data->slice_num && s < ICP_QAT_UCLO_MAX_CTX;
		     s++) {
			if (!(obj_handle->ae_data[ae].ae_slices[s].encap_image))
				continue;
			/* modes are encoded in the image's ae_mode word */
			uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
			if (qat_hal_set_ae_ctx_mode(handle, ae,
						    (char)ICP_QAT_CTX_MODE
						    (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
				return -EFAULT;
			}
			nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
			if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
				pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
				return -EFAULT;
			}
			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
						   (char)ICP_QAT_LOC_MEM0_MODE
						   (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_lm_mode error\n ");
				return -EFAULT;
			}
			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
						   (char)ICP_QAT_LOC_MEM1_MODE
						   (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_lm_mode error\n ");
				return -EFAULT;
			}
		}
	}
	return 0;
}
933
934static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
935{
936 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
937 struct icp_qat_uclo_encapme *image;
938 int a;
939
940 for (a = 0; a < obj_handle->uimage_num; a++) {
941 image = &obj_handle->ae_uimage[a];
942 image->uwords_num = image->page->beg_addr_p +
943 image->page->micro_words_num;
944 }
945}
946
947static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
948{
949 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
950 unsigned int ae;
951
952 obj_handle->uword_buf = kzalloc(UWORD_CPYBUF_SIZE * sizeof(uint64_t),
953 GFP_KERNEL);
954 if (!obj_handle->uword_buf)
955 return -ENOMEM;
956 obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
957 obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
958 obj_handle->obj_hdr->file_buff;
959 obj_handle->encap_uof_obj.chunk_hdr = (struct icp_qat_uof_chunkhdr *)
960 obj_handle->obj_hdr->file_buff + sizeof(struct icp_qat_uof_objhdr);
961 obj_handle->uword_in_bytes = 6;
962 obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE;
963 obj_handle->prod_rev = PID_MAJOR_REV |
964 (PID_MINOR_REV & handle->hal_handle->revision_id);
965 if (qat_uclo_check_uof_compat(obj_handle)) {
966 pr_err("QAT: uof incompatible\n ");
967 return -EINVAL;
968 }
969 obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
970 if (!(obj_handle->obj_hdr->file_buff) ||
971 !(qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
972 &(obj_handle->str_table)))) {
973 pr_err("QAT: uof doesn't have effective images");
974 goto out_err;
975 }
976 obj_handle->uimage_num =
977 qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
978 ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
979 if (!obj_handle->uimage_num)
980 goto out_err;
981 if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
982 pr_err("QAT: Bad object\n ");
983 goto out_check_uof_aemask_err;
984 }
985 qat_uclo_init_uword_num(handle);
986 qat_uclo_map_initmem_table(&(obj_handle->encap_uof_obj),
987 &(obj_handle->init_mem_tab));
988 if (qat_uclo_set_ae_mode(handle))
989 goto out_check_uof_aemask_err;
990 return 0;
991out_check_uof_aemask_err:
992 for (ae = 0; ae < obj_handle->uimage_num; ae++)
993 kfree(obj_handle->ae_uimage[ae].page);
994out_err:
995 kfree(obj_handle->uword_buf);
996 return -EFAULT;
997}
998
999int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
1000 void *addr_ptr, int mem_size)
1001{
1002 struct icp_qat_uof_filehdr *filehdr;
1003 struct icp_qat_uclo_objhandle *objhdl;
1004
1005 BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
1006 (sizeof(handle->hal_handle->ae_mask) * 8));
1007
1008 if (!handle || !addr_ptr || mem_size < 24)
1009 return -EINVAL;
1010 objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
1011 if (!objhdl)
1012 return -ENOMEM;
1013 objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
1014 if (!objhdl->obj_buf)
1015 goto out_objbuf_err;
1016 filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
1017 if (qat_uclo_check_format(filehdr))
1018 goto out_objhdr_err;
1019 objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
1020 ICP_QAT_UOF_OBJS);
1021 if (!objhdl->obj_hdr) {
1022 pr_err("QAT: object file chunk is null\n");
1023 goto out_objhdr_err;
1024 }
1025 handle->obj_handle = objhdl;
1026 if (qat_uclo_parse_uof_obj(handle))
1027 goto out_overlay_obj_err;
1028 return 0;
1029
1030out_overlay_obj_err:
1031 handle->obj_handle = NULL;
1032 kfree(objhdl->obj_hdr);
1033out_objhdr_err:
1034 kfree(objhdl->obj_buf);
1035out_objbuf_err:
1036 kfree(objhdl);
1037 return -ENOMEM;
1038}
1039
1040int qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
1041{
1042 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1043 int a;
1044
1045 kfree(obj_handle->uword_buf);
1046 for (a = 0; a < obj_handle->uimage_num; a++)
1047 kfree(obj_handle->ae_uimage[a].page);
1048
1049 for (a = 0; a <= (int)handle->hal_handle->ae_max_num; a++)
1050 qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
1051 kfree(obj_handle->obj_hdr);
1052
1053 kfree(obj_handle->obj_buf);
1054 kfree(obj_handle);
1055 handle->obj_handle = NULL;
1056 return 0;
1057}
1058
1059static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
1060 struct icp_qat_uclo_encap_page *encap_page,
1061 uint64_t *uword, unsigned int addr_p,
1062 unsigned int raddr, uint64_t fill)
1063{
1064 uint64_t uwrd = 0;
1065 unsigned int i;
1066
1067 if (!encap_page) {
1068 *uword = fill;
1069 return;
1070 }
1071 for (i = 0; i < encap_page->uwblock_num; i++) {
1072 if (raddr >= encap_page->uwblock[i].start_addr &&
1073 raddr <= encap_page->uwblock[i].start_addr +
1074 encap_page->uwblock[i].words_num - 1) {
1075 raddr -= encap_page->uwblock[i].start_addr;
1076 raddr *= obj_handle->uword_in_bytes;
1077 memcpy(&uwrd, (void *)(((unsigned long)
1078 encap_page->uwblock[i].micro_words) + raddr),
1079 obj_handle->uword_in_bytes);
1080 uwrd = uwrd & 0xbffffffffffull;
1081 }
1082 }
1083 *uword = uwrd;
1084 if (*uword == INVLD_UWORD)
1085 *uword = fill;
1086}
1087
1088static int qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
1089 struct icp_qat_uclo_encap_page
1090 *encap_page, unsigned int ae)
1091{
1092 unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
1093 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1094 uint64_t fill_pat;
1095 int status = 0;
1096
1097 /* load the page starting at appropriate ustore address */
1098 /* get fill-pattern from an image -- they are all the same */
1099 memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
1100 sizeof(uint64_t));
1101 uw_physical_addr = encap_page->beg_addr_p;
1102 uw_relative_addr = 0;
1103 words_num = encap_page->micro_words_num;
1104 while (words_num) {
1105 if (words_num < UWORD_CPYBUF_SIZE)
1106 cpylen = words_num;
1107 else
1108 cpylen = UWORD_CPYBUF_SIZE;
1109
1110 /* load the buffer */
1111 for (i = 0; i < cpylen; i++)
1112 qat_uclo_fill_uwords(obj_handle, encap_page,
1113 &obj_handle->uword_buf[i],
1114 uw_physical_addr + i,
1115 uw_relative_addr + i, fill_pat);
1116
1117 /* copy the buffer to ustore */
1118 qat_hal_wr_uwords(handle, (unsigned char)ae,
1119 uw_physical_addr, cpylen,
1120 obj_handle->uword_buf);
1121
1122 uw_physical_addr += cpylen;
1123 uw_relative_addr += cpylen;
1124 words_num -= cpylen;
1125 }
1126 return status;
1127}
1128
/*
 * qat_uclo_wr_uimage_pages() - write the default page of @image to every
 * accel engine the image is assigned to, then point the assigned contexts'
 * current-page bookkeeping and program counters at it.
 *
 * Return: 0 on success, -EINVAL if writing a raw page fails.
 */
static int
qat_uclo_wr_uimage_pages(struct icp_qat_fw_loader_handle *handle,
			 struct icp_qat_uof_image *image)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ctx_mask, s;
	struct icp_qat_uclo_page *page;
	unsigned char ae;
	int retval = 0;
	int ctx;

	/* 8-ctx mode touches all contexts; otherwise only the even ones */
	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;
	/* load the default page and set assigned CTX PC
	 * to the entrypoint address */
	for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae, (unsigned long *)&(image->ae_assigned)))
			continue;
		/* find the slice to which this image is assigned */
		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
			if (image->ctx_assigned & obj_handle->ae_data[ae].
			    ae_slices[s].ctx_mask_assigned)
				break;
		}
		/* image's contexts overlap no slice on this AE: skip it */
		if (s >= obj_handle->ae_data[ae].slice_num)
			continue;
		page = obj_handle->ae_data[ae].ae_slices[s].page;
		/* only the default page is written here */
		if (!page->encap_page->def_page)
			continue;
		if (qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae))
			return -EINVAL;

		page = obj_handle->ae_data[ae].ae_slices[s].page;
		/* record the current page for each context the mask covers */
		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
			obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
			    (ctx_mask & (1 << ctx)) ? page : NULL;
		/* activate the assigned contexts and aim them at the entry */
		qat_hal_set_live_ctx(handle, (unsigned char)ae,
				     image->ctx_assigned);
		qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
			       image->entry_address);
	}
	return retval;
}
1174
1175int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
1176{
1177 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1178 unsigned int i;
1179
1180 if (qat_uclo_init_globals(handle))
1181 return -EINVAL;
1182 for (i = 0; i < obj_handle->uimage_num; i++) {
1183 if (!(obj_handle->ae_uimage[i].img_ptr))
1184 return -EINVAL;
1185 if (qat_uclo_init_ustore(handle, &(obj_handle->ae_uimage[i])))
1186 return -EINVAL;
1187 if (qat_uclo_wr_uimage_pages(handle,
1188 obj_handle->ae_uimage[i].img_ptr))
1189 return -EINVAL;
1190 }
1191 return 0;
1192}