/*
  This file is provided under a dual BSD/GPLv2 license. When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>

#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
#include "icp_qat_hal.h"
#include "icp_qat_fw_loader_handle.h"

#define UWORD_CPYBUF_SIZE 1024
#define INVLD_UWORD 0xffffffffffull
#define PID_MINOR_REV 0xf
#define PID_MAJOR_REV (0xf << 4)

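/*
 * Bind the image at ae_uimage[image_num] to the next free slice of the
 * given AE and allocate the region/page bookkeeping for that slice.
 */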
static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
				 unsigned int ae, unsigned int image_num)
{
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_encapme *encap_image;
	struct icp_qat_uclo_page *page = NULL;
	struct icp_qat_uclo_aeslice *ae_slice = NULL;

	ae_data = &obj_handle->ae_data[ae];
	encap_image = &obj_handle->ae_uimage[image_num];
	ae_slice = &ae_data->ae_slices[ae_data->slice_num];
	ae_slice->encap_image = encap_image;

	if (encap_image->img_ptr) {
		ae_slice->ctx_mask_assigned =
					encap_image->img_ptr->ctx_assigned;
		ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
	} else {
		ae_slice->ctx_mask_assigned = 0;
	}
	ae_slice->regions = kzalloc(sizeof(*ae_slice->regions), GFP_KERNEL);
	if (!ae_slice->regions)
		return -ENOMEM;
	ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
	if (!ae_slice->page)
		goto out_err;
	page = ae_slice->page;
	page->encap_page = encap_image->page;
	ae_slice->page->region = ae_slice->regions;
	ae_data->slice_num++;
	return 0;
out_err:
	kfree(ae_slice->regions);
	ae_slice->regions = NULL;
	return -ENOMEM;
}

static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
{
	unsigned int ss = 0;

	if (!ae_data) {
		pr_err("QAT: bad argument, ae_data is NULL\n");
		return -EINVAL;
	}

	for (ss = 0; ss < ae_data->slice_num; ss++) {
		kfree(ae_data->ae_slices[ss].regions);
		ae_data->ae_slices[ss].regions = NULL;
		kfree(ae_data->ae_slices[ss].page);
		ae_data->ae_slices[ss].page = NULL;
	}
	return 0;
}

static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
				 unsigned int str_offset)
{
	if ((!str_table->table_len) || (str_offset > str_table->table_len))
		return NULL;
	return (char *)(((unsigned long)(str_table->strings)) + str_offset);
}

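/* Validate the UOF file header: magic id plus major/minor format version. */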
static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr)
{
	int maj = hdr->maj_ver & 0xff;
	int min = hdr->min_ver & 0xff;

	if (hdr->file_id != ICP_QAT_UOF_FID) {
		pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
		return -EINVAL;
	}
	if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
		pr_err("QAT: bad uof version, major 0x%x, minor 0x%x\n",
		       maj, min);
		return -EINVAL;
	}
	return 0;
}

static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned int addr, unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		SRAM_WRITE(handle, addr, outval);
		num_in_bytes -= 4;
		ptr += 4;
		addr += 4;
	}
}

static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned int addr,
				      unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	addr >>= 0x2; /* convert to uword address */

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
		num_in_bytes -= 4;
		ptr += 4;
	}
}

static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae,
				   struct icp_qat_uof_batch_init
				   *umem_init_header)
{
	struct icp_qat_uof_batch_init *umem_init;

	if (!umem_init_header)
		return;
	umem_init = umem_init_header->next;
	while (umem_init) {
		unsigned int addr, *value, size;

		ae = umem_init->ae;
		addr = umem_init->addr;
		value = umem_init->value;
		size = umem_init->size;
		qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
		umem_init = umem_init->next;
	}
}

static void
qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
				 struct icp_qat_uof_batch_init **base)
{
	struct icp_qat_uof_batch_init *umem_init;

	umem_init = *base;
	while (umem_init) {
		struct icp_qat_uof_batch_init *pre;

		pre = umem_init;
		umem_init = umem_init->next;
		kfree(pre);
	}
	*base = NULL;
}

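/* Convert the leading decimal digits of @str into @num. */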
static int qat_uclo_parse_num(char *str, unsigned int *num)
{
	char buf[16];
	unsigned long ae = 0;
	int i;

	memset(buf, '\0', 16);
	strncpy(buf, str, 15);
	for (i = 0; i < 16; i++) {
		if (!isdigit(buf[i])) {
			buf[i] = '\0';
			break;
		}
	}
	if ((kstrtoul(buf, 10, &ae)))
		return -EFAULT;

	*num = (unsigned int)ae;
	return 0;
}

static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
				     struct icp_qat_uof_initmem *init_mem,
				     unsigned int size_range, unsigned int *ae)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	char *str;

	if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
		pr_err("QAT: initmem is out of range");
		return -EINVAL;
	}
	if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
		pr_err("QAT: Memory scope for init_mem error\n");
		return -EINVAL;
	}
	str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
	if (!str) {
		pr_err("QAT: AE name assigned in uof init table is NULL\n");
		return -EINVAL;
	}
	if (qat_uclo_parse_num(str, ae)) {
		pr_err("QAT: Parse num for AE number failed\n");
		return -EINVAL;
	}
	if (*ae >= ICP_QAT_UCLO_MAX_AE) {
		pr_err("QAT: ae %d out of range\n", *ae);
		return -EINVAL;
	}
	if (!test_bit(*ae, (unsigned long *)&handle->hal_handle->ae_mask)) {
		pr_err("QAT: ae %d to be init is fused off\n", *ae);
		return -EINVAL;
	}
	return 0;
}

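/*
 * Append one batch-init node per memory value attribute of @init_mem to the
 * per-AE list at @init_tab_base, allocating the list head on first use.
 */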
static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
					   *handle, struct icp_qat_uof_initmem
					   *init_mem, unsigned int ae,
					   struct icp_qat_uof_batch_init
					   **init_tab_base)
{
	struct icp_qat_uof_batch_init *init_header, *tail;
	struct icp_qat_uof_batch_init *mem_init, *tail_old;
	struct icp_qat_uof_memvar_attr *mem_val_attr;
	unsigned int i, flag = 0;

	mem_val_attr =
		(struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
		sizeof(struct icp_qat_uof_initmem));

	init_header = *init_tab_base;
	if (!init_header) {
		init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
		if (!init_header)
			return -ENOMEM;
		init_header->size = 1;
		*init_tab_base = init_header;
		flag = 1;
	}
	tail_old = init_header;
	while (tail_old->next)
		tail_old = tail_old->next;
	tail = tail_old;
	for (i = 0; i < init_mem->val_attr_num; i++) {
		mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
		if (!mem_init)
			goto out_err;
		mem_init->ae = ae;
		mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
		mem_init->value = &mem_val_attr->value;
		mem_init->size = 4;
		mem_init->next = NULL;
		tail->next = mem_init;
		tail = mem_init;
		init_header->size += qat_hal_get_ins_num();
		mem_val_attr++;
	}
	return 0;
out_err:
	/* only free the list head if this call allocated it */
	tail_old = tail_old->next;
	if (flag) {
		kfree(*init_tab_base);
		*init_tab_base = NULL;
	}
	/* free only the nodes appended by this call */
	while (tail_old) {
		mem_init = tail_old->next;
		kfree(tail_old);
		tail_old = mem_init;
	}
	return -ENOMEM;
}

static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
				  struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;

	if (qat_uclo_fetch_initmem_ae(handle, init_mem,
				      ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
		return -EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
					    &obj_handle->lm_init_tab[ae]))
		return -EINVAL;
	return 0;
}

static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
				  struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae, ustore_size, uaddr, i;

	ustore_size = obj_handle->ustore_phy_size;
	if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
		return -EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
					    &obj_handle->umem_init_tab[ae]))
		return -EINVAL;
	/* set the highest ustore address referenced */
	uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
	for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
		if (obj_handle->ae_data[ae].ae_slices[i].
		    encap_image->uwords_num < uaddr)
			obj_handle->ae_data[ae].ae_slices[i].
			encap_image->uwords_num = uaddr;
	}
	return 0;
}

#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
				   struct icp_qat_uof_initmem *init_mem)
{
	unsigned int i;
	struct icp_qat_uof_memvar_attr *mem_val_attr;

	mem_val_attr =
		(struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
		sizeof(struct icp_qat_uof_initmem));

	switch (init_mem->region) {
	case ICP_QAT_UOF_SRAM_REGION:
		if ((init_mem->addr + init_mem->num_in_bytes) >
		    ICP_DH895XCC_PESRAM_BAR_SIZE) {
			pr_err("QAT: initmem on SRAM is out of range");
			return -EINVAL;
		}
		for (i = 0; i < init_mem->val_attr_num; i++) {
			qat_uclo_wr_sram_by_words(handle,
						  init_mem->addr +
						  mem_val_attr->offset_in_byte,
						  &mem_val_attr->value, 4);
			mem_val_attr++;
		}
		break;
	case ICP_QAT_UOF_LMEM_REGION:
		if (qat_uclo_init_lmem_seg(handle, init_mem))
			return -EINVAL;
		break;
	case ICP_QAT_UOF_UMEM_REGION:
		if (qat_uclo_init_umem_seg(handle, init_mem))
			return -EINVAL;
		break;
	default:
		pr_err("QAT: initmem region error. region type=0x%x\n",
		       init_mem->region);
		return -EINVAL;
	}
	return 0;
}

static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
				struct icp_qat_uclo_encapme *image)
{
	unsigned int i;
	struct icp_qat_uclo_encap_page *page;
	struct icp_qat_uof_image *uof_image;
	unsigned char ae;
	unsigned int ustore_size;
	unsigned int patt_pos;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	uint64_t *fill_data;

	uof_image = image->img_ptr;
	fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
			    GFP_KERNEL);
	if (!fill_data)
		return -EFAULT;
	for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
		memcpy(&fill_data[i], &uof_image->fill_pattern,
		       sizeof(uint64_t));
	page = image->page;

	for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
			continue;
		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
		patt_pos = page->beg_addr_p + page->micro_words_num;

		qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
				  page->beg_addr_p, &fill_data[0]);
		qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
				  ustore_size - patt_pos + 1,
				  &fill_data[page->beg_addr_p]);
	}
	kfree(fill_data);
	return 0;
}

static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int i;
	int status = 0;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
	int ae;

	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
		if (initmem->num_in_bytes) {
			if (qat_uclo_init_ae_memory(handle, initmem))
				return -EINVAL;
		}
		initmem = (struct icp_qat_uof_initmem *)((unsigned long)(
			(unsigned long)initmem +
			sizeof(struct icp_qat_uof_initmem)) +
			(sizeof(struct icp_qat_uof_memvar_attr) *
			initmem->val_attr_num));
	}
	for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) {
		if (qat_hal_batch_wr_lm(handle, ae,
					obj_handle->lm_init_tab[ae])) {
			pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
			return -EINVAL;
		}
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->lm_init_tab[ae]);
		qat_uclo_batch_wr_umem(handle, ae,
				       obj_handle->umem_init_tab[ae]);
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->
						 umem_init_tab[ae]);
	}
	return status;
}

static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
				 char *chunk_id, void *cur)
{
	int i;
	struct icp_qat_uof_chunkhdr *chunk_hdr =
	    (struct icp_qat_uof_chunkhdr *)
	    ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr));

	for (i = 0; i < obj_hdr->num_chunks; i++) {
		if ((cur < (void *)&chunk_hdr[i]) &&
		    !strncmp(chunk_hdr[i].chunk_id, chunk_id,
			     ICP_QAT_UOF_OBJID_LEN)) {
			return &chunk_hdr[i];
		}
	}
	return NULL;
}

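/*
 * One step of the chunk checksum: a CRC-16 update over a single byte using
 * the CCITT polynomial 0x1021.
 */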
static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
{
	int i;
	unsigned int topbit = 1 << 0xF;
	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);

	reg ^= inbyte << 0x8;
	for (i = 0; i < 0x8; i++) {
		if (reg & topbit)
			reg = (reg << 1) ^ 0x1021;
		else
			reg <<= 1;
	}
	return reg & 0xFFFF;
}

static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
	unsigned int chksum = 0;

	if (ptr)
		while (num--)
			chksum = qat_uclo_calc_checksum(chksum, *ptr++);
	return chksum;
}

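/*
 * Locate the file chunk named @chunk_id, verify its checksum and wrap it in
 * a newly allocated icp_qat_uclo_objhdr; returns NULL on any failure.
 */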
static struct icp_qat_uclo_objhdr *
qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
		   char *chunk_id)
{
	struct icp_qat_uof_filechunkhdr *file_chunk;
	struct icp_qat_uclo_objhdr *obj_hdr;
	void *chunk;
	int i;

	file_chunk = (struct icp_qat_uof_filechunkhdr *)
		(buf + sizeof(struct icp_qat_uof_filehdr));
	for (i = 0; i < file_hdr->num_chunks; i++) {
		if (!strncmp(file_chunk->chunk_id, chunk_id,
			     ICP_QAT_UOF_OBJID_LEN)) {
			chunk = buf + file_chunk->offset;
			if (file_chunk->checksum != qat_uclo_calc_str_checksum(
				(char *)chunk, file_chunk->size))
				break;
			obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
			if (!obj_hdr)
				break;
			obj_hdr->file_buff = chunk;
			obj_hdr->checksum = file_chunk->checksum;
			obj_hdr->size = file_chunk->size;
			return obj_hdr;
		}
		file_chunk++;
	}
	return NULL;
}

static unsigned int
qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
			    struct icp_qat_uof_image *image)
{
	struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
	struct icp_qat_uof_objtable *neigh_reg_tab;
	struct icp_qat_uof_code_page *code_page;

	code_page = (struct icp_qat_uof_code_page *)
			((char *)image + sizeof(struct icp_qat_uof_image));
	uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
		     code_page->uc_var_tab_offset);
	imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
		      code_page->imp_var_tab_offset);
	imp_expr_tab = (struct icp_qat_uof_objtable *)
		       (encap_uof_obj->beg_uof +
		       code_page->imp_expr_tab_offset);
	if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
	    imp_expr_tab->entry_num) {
		pr_err("QAT: UOF can't contain imported variable to be parsed");
		return -EINVAL;
	}
	neigh_reg_tab = (struct icp_qat_uof_objtable *)
			(encap_uof_obj->beg_uof +
			code_page->neigh_reg_tab_offset);
	if (neigh_reg_tab->entry_num) {
		pr_err("QAT: UOF can't contain shared control store feature");
		return -EINVAL;
	}
	if (image->numpages > 1) {
		pr_err("QAT: UOF can't contain multiple pages");
		return -EINVAL;
	}
	if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
		pr_err("QAT: UOF can't use shared control store feature");
		return -EFAULT;
	}
	if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
		pr_err("QAT: UOF can't use reloadable feature");
		return -EFAULT;
	}
	return 0;
}

static void qat_uclo_map_image_pages(struct icp_qat_uof_encap_obj
				     *encap_uof_obj,
				     struct icp_qat_uof_image *img,
				     struct icp_qat_uclo_encap_page *page)
{
	struct icp_qat_uof_code_page *code_page;
	struct icp_qat_uof_code_area *code_area;
	struct icp_qat_uof_objtable *uword_block_tab;
	struct icp_qat_uof_uword_block *uwblock;
	int i;

	code_page = (struct icp_qat_uof_code_page *)
			((char *)img + sizeof(struct icp_qat_uof_image));
	page->def_page = code_page->def_page;
	page->page_region = code_page->page_region;
	page->beg_addr_v = code_page->beg_addr_v;
	page->beg_addr_p = code_page->beg_addr_p;
	code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
						code_page->code_area_offset);
	page->micro_words_num = code_area->micro_words_num;
	uword_block_tab = (struct icp_qat_uof_objtable *)
			  (encap_uof_obj->beg_uof +
			  code_area->uword_block_tab);
	page->uwblock_num = uword_block_tab->entry_num;
	uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
			sizeof(struct icp_qat_uof_objtable));
	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
	for (i = 0; i < uword_block_tab->entry_num; i++)
		page->uwblock[i].micro_words =
		(unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
}

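/*
 * Walk the UOF image chunks and fill @ae_uimage with register, symbol,
 * breakpoint and page information for each image; returns the number of
 * images mapped, or 0 on error.
 */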
static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
			       struct icp_qat_uclo_encapme *ae_uimage,
			       int max_image)
{
	int a = 0, i;
	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
	struct icp_qat_uof_image *image;
	struct icp_qat_uof_objtable *ae_regtab;
	struct icp_qat_uof_objtable *init_reg_sym_tab;
	struct icp_qat_uof_objtable *sbreak_tab;
	struct icp_qat_uof_encap_obj *encap_uof_obj =
					&obj_handle->encap_uof_obj;

	for (a = 0; a < max_image; a++) {
		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
						ICP_QAT_UOF_IMAG, chunk_hdr);
		if (!chunk_hdr)
			break;
		image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
						     chunk_hdr->offset);
		ae_regtab = (struct icp_qat_uof_objtable *)
			   (image->reg_tab_offset +
			   obj_handle->obj_hdr->file_buff);
		ae_uimage[a].ae_reg_num = ae_regtab->entry_num;
		ae_uimage[a].ae_reg = (struct icp_qat_uof_ae_reg *)
			(((char *)ae_regtab) +
			sizeof(struct icp_qat_uof_objtable));
		init_reg_sym_tab = (struct icp_qat_uof_objtable *)
				   (image->init_reg_sym_tab +
				   obj_handle->obj_hdr->file_buff);
		ae_uimage[a].init_regsym_num = init_reg_sym_tab->entry_num;
		ae_uimage[a].init_regsym = (struct icp_qat_uof_init_regsym *)
			(((char *)init_reg_sym_tab) +
			sizeof(struct icp_qat_uof_objtable));
		sbreak_tab = (struct icp_qat_uof_objtable *)
			(image->sbreak_tab + obj_handle->obj_hdr->file_buff);
		ae_uimage[a].sbreak_num = sbreak_tab->entry_num;
		ae_uimage[a].sbreak = (struct icp_qat_uof_sbreak *)
				      (((char *)sbreak_tab) +
				      sizeof(struct icp_qat_uof_objtable));
		ae_uimage[a].img_ptr = image;
		if (qat_uclo_check_image_compat(encap_uof_obj, image))
			goto out_err;
		ae_uimage[a].page =
			kzalloc(sizeof(struct icp_qat_uclo_encap_page),
				GFP_KERNEL);
		if (!ae_uimage[a].page)
			goto out_err;
		qat_uclo_map_image_pages(encap_uof_obj, image,
					 ae_uimage[a].page);
	}
	return a;
out_err:
	for (i = 0; i < a; i++)
		kfree(ae_uimage[i].page);
	return 0;
}

static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
{
	int i, ae;
	int mflag = 0;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

	for (ae = 0; ae <= max_ae; ae++) {
		if (!test_bit(ae,
			      (unsigned long *)&handle->hal_handle->ae_mask))
			continue;
		for (i = 0; i < obj_handle->uimage_num; i++) {
			if (!test_bit(ae, (unsigned long *)
			    &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
				continue;
			mflag = 1;
			if (qat_uclo_init_ae_data(obj_handle, ae, i))
				return -EINVAL;
		}
	}
	if (!mflag) {
		pr_err("QAT: uimage uses AE not set");
		return -EINVAL;
	}
	return 0;
}

static struct icp_qat_uof_strtable *
qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
		       char *tab_name, struct icp_qat_uof_strtable *str_table)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
					obj_hdr->file_buff, tab_name, NULL);
	if (chunk_hdr) {
		int hdr_size;

		memcpy(&str_table->table_len, obj_hdr->file_buff +
		       chunk_hdr->offset, sizeof(str_table->table_len));
		hdr_size = (char *)&str_table->strings - (char *)str_table;
		str_table->strings = (unsigned long)obj_hdr->file_buff +
					chunk_hdr->offset + hdr_size;
		return str_table;
	}
	return NULL;
}

static void
qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
			   struct icp_qat_uclo_init_mem_table *init_mem_tab)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
					ICP_QAT_UOF_IMEM, NULL);
	if (chunk_hdr) {
		memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
			chunk_hdr->offset, sizeof(unsigned int));
		init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
		(encap_uof_obj->beg_uof + chunk_hdr->offset +
		sizeof(unsigned int));
	}
}

static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
{
	unsigned int maj_ver, prod_type = obj_handle->prod_type;

	if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) {
		pr_err("QAT: uof type 0x%x doesn't match with platform 0x%x\n",
		       obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type);
		return -EINVAL;
	}
	maj_ver = obj_handle->prod_rev & 0xff;
	if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
	    (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
		pr_err("QAT: uof majVer 0x%x out of range\n", maj_ver);
		return -EINVAL;
	}
	return 0;
}

static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned char ctx_mask,
			     enum icp_qat_uof_regtype reg_type,
			     unsigned short reg_addr, unsigned int value)
{
	switch (reg_type) {
	case ICP_GPA_ABS:
	case ICP_GPB_ABS:
		ctx_mask = 0;
		/* fall through */
	case ICP_GPA_REL:
	case ICP_GPB_REL:
		return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
					reg_addr, value);
	case ICP_SR_ABS:
	case ICP_DR_ABS:
	case ICP_SR_RD_ABS:
	case ICP_DR_RD_ABS:
		ctx_mask = 0;
		/* fall through */
	case ICP_SR_REL:
	case ICP_DR_REL:
	case ICP_SR_RD_REL:
	case ICP_DR_RD_REL:
		return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_SR_WR_ABS:
	case ICP_DR_WR_ABS:
		ctx_mask = 0;
		/* fall through */
	case ICP_SR_WR_REL:
	case ICP_DR_WR_REL:
		return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_NEIGH_REL:
		return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
	default:
		pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type);
		return -EFAULT;
	}
	return 0;
}

static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
				 unsigned int ae,
				 struct icp_qat_uclo_encapme *encap_ae)
{
	unsigned int i;
	unsigned char ctx_mask;
	struct icp_qat_uof_init_regsym *init_regsym;

	if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
	    ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;

	for (i = 0; i < encap_ae->init_regsym_num; i++) {
		unsigned int exp_res;

		init_regsym = &encap_ae->init_regsym[i];
		exp_res = init_regsym->value;
		switch (init_regsym->init_type) {
		case ICP_QAT_UOF_INIT_REG:
			qat_uclo_init_reg(handle, ae, ctx_mask,
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_REG_CTX:
			/* check if ctx is appropriate for the ctxMode */
			if (!((1 << init_regsym->ctx) & ctx_mask)) {
				pr_err("QAT: invalid ctx num = 0x%x\n",
				       init_regsym->ctx);
				return -EINVAL;
			}
			qat_uclo_init_reg(handle, ae,
					  (unsigned char)
					  (1 << init_regsym->ctx),
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_EXPR:
			pr_err("QAT: INIT_EXPR feature not supported\n");
			return -EINVAL;
		case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
			pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
			return -EINVAL;
		default:
			break;
		}
	}
	return 0;
}

static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int s, ae;

	if (obj_handle->global_inited)
		return 0;
	if (obj_handle->init_mem_tab.entry_num) {
		if (qat_uclo_init_memory(handle)) {
			pr_err("QAT: initialize memory failed\n");
			return -EINVAL;
		}
	}
	for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
				continue;
			if (qat_uclo_init_reg_sym(handle, ae,
						  obj_handle->ae_data[ae].
						  ae_slices[s].encap_image))
				return -EINVAL;
		}
	}
	obj_handle->global_inited = 1;
	return 0;
}

static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
{
	unsigned char ae, nn_mode, s;
	struct icp_qat_uof_image *uof_image;
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

	for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae,
			      (unsigned long *)&handle->hal_handle->ae_mask))
			continue;
		ae_data = &obj_handle->ae_data[ae];
		for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
				      ICP_QAT_UCLO_MAX_CTX); s++) {
			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
				continue;
			uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
			if (qat_hal_set_ae_ctx_mode(handle, ae,
						    (char)ICP_QAT_CTX_MODE
						    (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
				return -EFAULT;
			}
			nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
			if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
				pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
				return -EFAULT;
			}
			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
						   (char)ICP_QAT_LOC_MEM0_MODE
						   (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_lm_mode error\n");
				return -EFAULT;
			}
			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
						   (char)ICP_QAT_LOC_MEM1_MODE
						   (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_lm_mode error\n");
				return -EFAULT;
			}
		}
	}
	return 0;
}

static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uclo_encapme *image;
	int a;

	for (a = 0; a < obj_handle->uimage_num; a++) {
		image = &obj_handle->ae_uimage[a];
		image->uwords_num = image->page->beg_addr_p +
				    image->page->micro_words_num;
	}
}

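/*
 * Top-level UOF parse: check compatibility with this device, map the string
 * table, the images and the init-memory table, then program the AE modes.
 */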
static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;

	obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
					GFP_KERNEL);
	if (!obj_handle->uword_buf)
		return -ENOMEM;
	obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
	obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
					     obj_handle->obj_hdr->file_buff;
	obj_handle->uword_in_bytes = 6;
	obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE;
	obj_handle->prod_rev = PID_MAJOR_REV |
			(PID_MINOR_REV & handle->hal_handle->revision_id);
	if (qat_uclo_check_uof_compat(obj_handle)) {
		pr_err("QAT: uof incompatible\n");
		return -EINVAL;
	}
	obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
	if (!obj_handle->obj_hdr->file_buff ||
	    !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
				    &obj_handle->str_table)) {
		pr_err("QAT: uof doesn't have effective images\n");
		goto out_err;
	}
	obj_handle->uimage_num =
		qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
				    ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
	if (!obj_handle->uimage_num)
		goto out_err;
	if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
		pr_err("QAT: Bad object\n");
		goto out_check_uof_aemask_err;
	}
	qat_uclo_init_uword_num(handle);
	qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
				   &obj_handle->init_mem_tab);
	if (qat_uclo_set_ae_mode(handle))
		goto out_check_uof_aemask_err;
	return 0;
out_check_uof_aemask_err:
	for (ae = 0; ae < obj_handle->uimage_num; ae++)
		kfree(obj_handle->ae_uimage[ae].page);
out_err:
	kfree(obj_handle->uword_buf);
	return -EFAULT;
}

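/*
 * Copy the UOF object pointed to by @addr_ptr, validate its format and build
 * the object handle used by the rest of the loader.
 */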
int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
			 void *addr_ptr, int mem_size)
{
	struct icp_qat_uof_filehdr *filehdr;
	struct icp_qat_uclo_objhandle *objhdl;

	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
		     (sizeof(handle->hal_handle->ae_mask) * 8));

	if (!handle || !addr_ptr || mem_size < 24)
		return -EINVAL;
	objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
	if (!objhdl)
		return -ENOMEM;
	objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
	if (!objhdl->obj_buf)
		goto out_objbuf_err;
	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
	if (qat_uclo_check_format(filehdr))
		goto out_objhdr_err;
	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
					     ICP_QAT_UOF_OBJS);
	if (!objhdl->obj_hdr) {
		pr_err("QAT: object file chunk is null\n");
		goto out_objhdr_err;
	}
	handle->obj_handle = objhdl;
	if (qat_uclo_parse_uof_obj(handle))
		goto out_overlay_obj_err;
	return 0;

out_overlay_obj_err:
	handle->obj_handle = NULL;
	kfree(objhdl->obj_hdr);
out_objhdr_err:
	kfree(objhdl->obj_buf);
out_objbuf_err:
	kfree(objhdl);
	return -ENOMEM;
}

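/* Free everything allocated by qat_uclo_map_uof_obj() for this handle. */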
void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	int a;

	if (!obj_handle)
		return;

	kfree(obj_handle->uword_buf);
	for (a = 0; a < obj_handle->uimage_num; a++)
		kfree(obj_handle->ae_uimage[a].page);

	for (a = 0; a <= (int)handle->hal_handle->ae_max_num; a++)
		qat_uclo_free_ae_data(&obj_handle->ae_data[a]);

	kfree(obj_handle->obj_hdr);
	kfree(obj_handle->obj_buf);
	kfree(obj_handle);
	handle->obj_handle = NULL;
}

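/*
 * Fetch the microword at relative address @raddr from the page's uword
 * blocks; unused or invalid locations are replaced with the fill pattern.
 */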
static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
				 struct icp_qat_uclo_encap_page *encap_page,
				 uint64_t *uword, unsigned int addr_p,
				 unsigned int raddr, uint64_t fill)
{
	uint64_t uwrd = 0;
	unsigned int i;

	if (!encap_page) {
		*uword = fill;
		return;
	}
	for (i = 0; i < encap_page->uwblock_num; i++) {
		if (raddr >= encap_page->uwblock[i].start_addr &&
		    raddr <= encap_page->uwblock[i].start_addr +
		    encap_page->uwblock[i].words_num - 1) {
			raddr -= encap_page->uwblock[i].start_addr;
			raddr *= obj_handle->uword_in_bytes;
			memcpy(&uwrd, (void *)(((unsigned long)
			       encap_page->uwblock[i].micro_words) + raddr),
			       obj_handle->uword_in_bytes);
			uwrd = uwrd & 0xbffffffffffull;
		}
	}
	*uword = uwrd;
	if (*uword == INVLD_UWORD)
		*uword = fill;
}

static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
					struct icp_qat_uclo_encap_page
					*encap_page, unsigned int ae)
{
	unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	uint64_t fill_pat;

	/* load the page starting at appropriate ustore address */
	/* get fill-pattern from an image -- they are all the same */
	memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
	       sizeof(uint64_t));
	uw_physical_addr = encap_page->beg_addr_p;
	uw_relative_addr = 0;
	words_num = encap_page->micro_words_num;
	while (words_num) {
		if (words_num < UWORD_CPYBUF_SIZE)
			cpylen = words_num;
		else
			cpylen = UWORD_CPYBUF_SIZE;

		/* load the buffer */
		for (i = 0; i < cpylen; i++)
			qat_uclo_fill_uwords(obj_handle, encap_page,
					     &obj_handle->uword_buf[i],
					     uw_physical_addr + i,
					     uw_relative_addr + i, fill_pat);

		/* copy the buffer to ustore */
		qat_hal_wr_uwords(handle, (unsigned char)ae,
				  uw_physical_addr, cpylen,
				  obj_handle->uword_buf);

		uw_physical_addr += cpylen;
		uw_relative_addr += cpylen;
		words_num -= cpylen;
	}
}

static void qat_uclo_wr_uimage_pages(struct icp_qat_fw_loader_handle *handle,
				     struct icp_qat_uof_image *image)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ctx_mask, s;
	struct icp_qat_uclo_page *page;
	unsigned char ae;
	int ctx;

	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;
	/* load the default page and set assigned CTX PC
	 * to the entrypoint address */
	for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
			continue;
		/* find the slice to which this image is assigned */
		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
			if (image->ctx_assigned & obj_handle->ae_data[ae].
			    ae_slices[s].ctx_mask_assigned)
				break;
		}
		if (s >= obj_handle->ae_data[ae].slice_num)
			continue;
		page = obj_handle->ae_data[ae].ae_slices[s].page;
		if (!page->encap_page->def_page)
			continue;
		qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);

		page = obj_handle->ae_data[ae].ae_slices[s].page;
		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
			obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
					(ctx_mask & (1 << ctx)) ? page : NULL;
		qat_hal_set_live_ctx(handle, (unsigned char)ae,
				     image->ctx_assigned);
		qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
			       image->entry_address);
	}
}

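/*
 * Write every mapped image to its assigned AEs: run the global/memory init,
 * fill each ustore with the fill pattern and then load the image pages.
 */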
int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int i;

	if (qat_uclo_init_globals(handle))
		return -EINVAL;
	for (i = 0; i < obj_handle->uimage_num; i++) {
		if (!obj_handle->ae_uimage[i].img_ptr)
			return -EINVAL;
		if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
			return -EINVAL;
		qat_uclo_wr_uimage_pages(handle,
					 obj_handle->ae_uimage[i].img_ptr);
	}
	return 0;
}