blob: 0c74bf1d2021829c16d67bda2b5b47ae15668c58 [file] [log] [blame]
Mark Browna4b12992014-03-12 23:04:35 +00001/*
2 * Intel SST Firmware Loader
3 *
4 * Copyright (C) 2013, Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/kernel.h>
18#include <linux/slab.h>
19#include <linux/sched.h>
20#include <linux/firmware.h>
21#include <linux/export.h>
22#include <linux/platform_device.h>
23#include <linux/dma-mapping.h>
24#include <linux/dmaengine.h>
25#include <linux/pci.h>
26
27#include <asm/page.h>
28#include <asm/pgtable.h>
29
30#include "sst-dsp.h"
31#include "sst-dsp-priv.h"
32
Liam Girdwood555f8a802014-05-05 17:31:37 +010033static void block_module_remove(struct sst_module *module);
34
Mark Browna4b12992014-03-12 23:04:35 +000035static void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
36{
37 u32 i;
38
39 /* copy one 32 bit word at a time as 64 bit access is not supported */
40 for (i = 0; i < bytes; i += 4)
41 memcpy_toio(dest + i, src + i, 4);
42}
43
44/* create new generic firmware object */
45struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
46 const struct firmware *fw, void *private)
47{
48 struct sst_fw *sst_fw;
49 int err;
50
51 if (!dsp->ops->parse_fw)
52 return NULL;
53
54 sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
55 if (sst_fw == NULL)
56 return NULL;
57
58 sst_fw->dsp = dsp;
59 sst_fw->private = private;
60 sst_fw->size = fw->size;
61
62 err = dma_coerce_mask_and_coherent(dsp->dev, DMA_BIT_MASK(32));
63 if (err < 0) {
64 kfree(sst_fw);
65 return NULL;
66 }
67
68 /* allocate DMA buffer to store FW data */
69 sst_fw->dma_buf = dma_alloc_coherent(dsp->dev, sst_fw->size,
70 &sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
71 if (!sst_fw->dma_buf) {
72 dev_err(dsp->dev, "error: DMA alloc failed\n");
73 kfree(sst_fw);
74 return NULL;
75 }
76
77 /* copy FW data to DMA-able memory */
78 memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);
79
80 /* call core specific FW paser to load FW data into DSP */
81 err = dsp->ops->parse_fw(sst_fw);
82 if (err < 0) {
83 dev_err(dsp->dev, "error: parse fw failed %d\n", err);
84 goto parse_err;
85 }
86
87 mutex_lock(&dsp->mutex);
88 list_add(&sst_fw->list, &dsp->fw_list);
89 mutex_unlock(&dsp->mutex);
90
91 return sst_fw;
92
93parse_err:
94 dma_free_coherent(dsp->dev, sst_fw->size,
95 sst_fw->dma_buf,
96 sst_fw->dmable_fw_paddr);
97 kfree(sst_fw);
98 return NULL;
99}
100EXPORT_SYMBOL_GPL(sst_fw_new);
101
Liam Girdwood555f8a802014-05-05 17:31:37 +0100102int sst_fw_reload(struct sst_fw *sst_fw)
103{
104 struct sst_dsp *dsp = sst_fw->dsp;
105 int ret;
106
107 dev_dbg(dsp->dev, "reloading firmware\n");
108
109 /* call core specific FW paser to load FW data into DSP */
110 ret = dsp->ops->parse_fw(sst_fw);
111 if (ret < 0)
112 dev_err(dsp->dev, "error: parse fw failed %d\n", ret);
113
114 return ret;
115}
116EXPORT_SYMBOL_GPL(sst_fw_reload);
117
118void sst_fw_unload(struct sst_fw *sst_fw)
119{
120 struct sst_dsp *dsp = sst_fw->dsp;
121 struct sst_module *module, *tmp;
122
123 dev_dbg(dsp->dev, "unloading firmware\n");
124
125 mutex_lock(&dsp->mutex);
126 list_for_each_entry_safe(module, tmp, &dsp->module_list, list) {
127 if (module->sst_fw == sst_fw) {
128 block_module_remove(module);
129 list_del(&module->list);
130 kfree(module);
131 }
132 }
133
134 mutex_unlock(&dsp->mutex);
135}
136EXPORT_SYMBOL_GPL(sst_fw_unload);
137
Mark Browna4b12992014-03-12 23:04:35 +0000138/* free single firmware object */
139void sst_fw_free(struct sst_fw *sst_fw)
140{
141 struct sst_dsp *dsp = sst_fw->dsp;
142
143 mutex_lock(&dsp->mutex);
144 list_del(&sst_fw->list);
145 mutex_unlock(&dsp->mutex);
146
147 dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf,
148 sst_fw->dmable_fw_paddr);
149 kfree(sst_fw);
150}
151EXPORT_SYMBOL_GPL(sst_fw_free);
152
153/* free all firmware objects */
154void sst_fw_free_all(struct sst_dsp *dsp)
155{
156 struct sst_fw *sst_fw, *t;
157
158 mutex_lock(&dsp->mutex);
159 list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {
160
161 list_del(&sst_fw->list);
162 dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf,
163 sst_fw->dmable_fw_paddr);
164 kfree(sst_fw);
165 }
166 mutex_unlock(&dsp->mutex);
167}
168EXPORT_SYMBOL_GPL(sst_fw_free_all);
169
170/* create a new SST generic module from FW template */
171struct sst_module *sst_module_new(struct sst_fw *sst_fw,
172 struct sst_module_template *template, void *private)
173{
174 struct sst_dsp *dsp = sst_fw->dsp;
175 struct sst_module *sst_module;
176
177 sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
178 if (sst_module == NULL)
179 return NULL;
180
181 sst_module->id = template->id;
182 sst_module->dsp = dsp;
183 sst_module->sst_fw = sst_fw;
184
185 memcpy(&sst_module->s, &template->s, sizeof(struct sst_module_data));
186 memcpy(&sst_module->p, &template->p, sizeof(struct sst_module_data));
187
188 INIT_LIST_HEAD(&sst_module->block_list);
189
190 mutex_lock(&dsp->mutex);
191 list_add(&sst_module->list, &dsp->module_list);
192 mutex_unlock(&dsp->mutex);
193
194 return sst_module;
195}
196EXPORT_SYMBOL_GPL(sst_module_new);
197
198/* free firmware module and remove from available list */
199void sst_module_free(struct sst_module *sst_module)
200{
201 struct sst_dsp *dsp = sst_module->dsp;
202
203 mutex_lock(&dsp->mutex);
204 list_del(&sst_module->list);
205 mutex_unlock(&dsp->mutex);
206
207 kfree(sst_module);
208}
209EXPORT_SYMBOL_GPL(sst_module_free);
210
211static struct sst_mem_block *find_block(struct sst_dsp *dsp, int type,
212 u32 offset)
213{
214 struct sst_mem_block *block;
215
216 list_for_each_entry(block, &dsp->free_block_list, list) {
217 if (block->type == type && block->offset == offset)
218 return block;
219 }
220
221 return NULL;
222}
223
224static int block_alloc_contiguous(struct sst_module *module,
225 struct sst_module_data *data, u32 offset, int size)
226{
227 struct list_head tmp = LIST_HEAD_INIT(tmp);
228 struct sst_dsp *dsp = module->dsp;
229 struct sst_mem_block *block;
230
231 while (size > 0) {
232 block = find_block(dsp, data->type, offset);
233 if (!block) {
234 list_splice(&tmp, &dsp->free_block_list);
235 return -ENOMEM;
236 }
237
238 list_move_tail(&block->list, &tmp);
239 offset += block->size;
240 size -= block->size;
241 }
242
243 list_splice(&tmp, &dsp->used_block_list);
244 return 0;
245}
246
247/* allocate free DSP blocks for module data - callers hold locks */
248static int block_alloc(struct sst_module *module,
249 struct sst_module_data *data)
250{
251 struct sst_dsp *dsp = module->dsp;
252 struct sst_mem_block *block, *tmp;
253 int ret = 0;
254
255 if (data->size == 0)
256 return 0;
257
258 /* find first free whole blocks that can hold module */
259 list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
260
261 /* ignore blocks with wrong type */
262 if (block->type != data->type)
263 continue;
264
265 if (data->size > block->size)
266 continue;
267
268 data->offset = block->offset;
269 block->data_type = data->data_type;
270 block->bytes_used = data->size % block->size;
271 list_add(&block->module_list, &module->block_list);
272 list_move(&block->list, &dsp->used_block_list);
273 dev_dbg(dsp->dev, " *module %d added block %d:%d\n",
274 module->id, block->type, block->index);
275 return 0;
276 }
277
278 /* then find free multiple blocks that can hold module */
279 list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
280
281 /* ignore blocks with wrong type */
282 if (block->type != data->type)
283 continue;
284
285 /* do we span > 1 blocks */
286 if (data->size > block->size) {
287 ret = block_alloc_contiguous(module, data,
288 block->offset + block->size,
289 data->size - block->size);
290 if (ret == 0)
291 return ret;
292 }
293 }
294
295 /* not enough free block space */
296 return -ENOMEM;
297}
298
299/* remove module from memory - callers hold locks */
300static void block_module_remove(struct sst_module *module)
301{
302 struct sst_mem_block *block, *tmp;
303 struct sst_dsp *dsp = module->dsp;
304 int err;
305
306 /* disable each block */
307 list_for_each_entry(block, &module->block_list, module_list) {
308
309 if (block->ops && block->ops->disable) {
310 err = block->ops->disable(block);
311 if (err < 0)
312 dev_err(dsp->dev,
313 "error: cant disable block %d:%d\n",
314 block->type, block->index);
315 }
316 }
317
318 /* mark each block as free */
319 list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {
320 list_del(&block->module_list);
321 list_move(&block->list, &dsp->free_block_list);
322 }
323}
324
325/* prepare the memory block to receive data from host - callers hold locks */
326static int block_module_prepare(struct sst_module *module)
327{
328 struct sst_mem_block *block;
329 int ret = 0;
330
331 /* enable each block so that's it'e ready for module P/S data */
332 list_for_each_entry(block, &module->block_list, module_list) {
333
334 if (block->ops && block->ops->enable) {
335 ret = block->ops->enable(block);
336 if (ret < 0) {
337 dev_err(module->dsp->dev,
338 "error: cant disable block %d:%d\n",
339 block->type, block->index);
340 goto err;
341 }
342 }
343 }
344 return ret;
345
346err:
347 list_for_each_entry(block, &module->block_list, module_list) {
348 if (block->ops && block->ops->disable)
349 block->ops->disable(block);
350 }
351 return ret;
352}
353
/*
 * allocate memory blocks for static module addresses - callers hold locks
 *
 * 'data' describes a section with a fixed DSP address (data->offset) and
 * size. First try blocks already attached to this module, then the DSP
 * free block list. Returns 0 on success, -ENOMEM if the section cannot
 * be covered.
 */
static int block_alloc_fixed(struct sst_module *module,
	struct sst_module_data *data)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_mem_block *block, *tmp;
	u32 end = data->offset + data->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (data->type != SST_MEM_IRAM && data->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module */
	list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {

		/* force compacting mem blocks of the same data_type */
		if (block->data_type != data->data_type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section */
		/* NOTE(review): strict '<' rejects a section ending exactly
		 * at block_end - confirm that off-by-one is intended */
		if (data->offset >= block->offset && end < block_end)
			return 0;

		/* does block span more than 1 section */
		if (data->offset >= block->offset && data->offset < block_end) {

			/* extend past this block; size is reduced by the part
			 * of the section this block already covers */
			err = block_alloc_contiguous(module, data,
				block->offset + block->size,
				data->size - block->size + data->offset - block->offset);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* find block that holds section */
		if (data->offset >= block->offset && end < block_end) {

			/* add block */
			block->data_type = data->data_type;
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, &module->block_list);
			return 0;
		}

		/* does block span more than 1 section */
		if (data->offset >= block->offset && data->offset < block_end) {

			/* NOTE(review): unlike the attached-blocks path above,
			 * this size lacks the "+ data->offset - block->offset"
			 * correction - confirm sections starting mid-block are
			 * sized correctly here */
			err = block_alloc_contiguous(module, data,
				block->offset + block->size,
				data->size - block->size);
			if (err < 0)
				return -ENOMEM;

			/* add block */
			block->data_type = data->data_type;
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, &module->block_list);
			return 0;
		}

	}

	return -ENOMEM;
}
428
429/* Load fixed module data into DSP memory blocks */
430int sst_module_insert_fixed_block(struct sst_module *module,
431 struct sst_module_data *data)
432{
433 struct sst_dsp *dsp = module->dsp;
434 int ret;
435
436 mutex_lock(&dsp->mutex);
437
438 /* alloc blocks that includes this section */
439 ret = block_alloc_fixed(module, data);
440 if (ret < 0) {
441 dev_err(dsp->dev,
442 "error: no free blocks for section at offset 0x%x size 0x%x\n",
443 data->offset, data->size);
444 mutex_unlock(&dsp->mutex);
445 return -ENOMEM;
446 }
447
448 /* prepare DSP blocks for module copy */
449 ret = block_module_prepare(module);
450 if (ret < 0) {
451 dev_err(dsp->dev, "error: fw module prepare failed\n");
452 goto err;
453 }
454
455 /* copy partial module data to blocks */
456 sst_memcpy32(dsp->addr.lpe + data->offset, data->data, data->size);
457
458 mutex_unlock(&dsp->mutex);
459 return ret;
460
461err:
462 block_module_remove(module);
463 mutex_unlock(&dsp->mutex);
464 return ret;
465}
466EXPORT_SYMBOL_GPL(sst_module_insert_fixed_block);
467
468/* Unload entire module from DSP memory */
469int sst_block_module_remove(struct sst_module *module)
470{
471 struct sst_dsp *dsp = module->dsp;
472
473 mutex_lock(&dsp->mutex);
474 block_module_remove(module);
475 mutex_unlock(&dsp->mutex);
476 return 0;
477}
478EXPORT_SYMBOL_GPL(sst_block_module_remove);
479
480/* register a DSP memory block for use with FW based modules */
481struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
482 u32 size, enum sst_mem_type type, struct sst_block_ops *ops, u32 index,
483 void *private)
484{
485 struct sst_mem_block *block;
486
487 block = kzalloc(sizeof(*block), GFP_KERNEL);
488 if (block == NULL)
489 return NULL;
490
491 block->offset = offset;
492 block->size = size;
493 block->index = index;
494 block->type = type;
495 block->dsp = dsp;
496 block->private = private;
497 block->ops = ops;
498
499 mutex_lock(&dsp->mutex);
500 list_add(&block->list, &dsp->free_block_list);
501 mutex_unlock(&dsp->mutex);
502
503 return block;
504}
505EXPORT_SYMBOL_GPL(sst_mem_block_register);
506
507/* unregister all DSP memory blocks */
508void sst_mem_block_unregister_all(struct sst_dsp *dsp)
509{
510 struct sst_mem_block *block, *tmp;
511
512 mutex_lock(&dsp->mutex);
513
514 /* unregister used blocks */
515 list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
516 list_del(&block->list);
517 kfree(block);
518 }
519
520 /* unregister free blocks */
521 list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
522 list_del(&block->list);
523 kfree(block);
524 }
525
526 mutex_unlock(&dsp->mutex);
527}
528EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);
529
530/* allocate scratch buffer blocks */
531struct sst_module *sst_mem_block_alloc_scratch(struct sst_dsp *dsp)
532{
533 struct sst_module *sst_module, *scratch;
534 struct sst_mem_block *block, *tmp;
535 u32 block_size;
536 int ret = 0;
537
538 scratch = kzalloc(sizeof(struct sst_module), GFP_KERNEL);
539 if (scratch == NULL)
540 return NULL;
541
542 mutex_lock(&dsp->mutex);
543
544 /* calculate required scratch size */
545 list_for_each_entry(sst_module, &dsp->module_list, list) {
Christian Engelmayerdd1b94b2014-04-13 22:46:31 +0200546 if (scratch->s.size < sst_module->s.size)
Mark Browna4b12992014-03-12 23:04:35 +0000547 scratch->s.size = sst_module->s.size;
548 }
549
550 dev_dbg(dsp->dev, "scratch buffer required is %d bytes\n",
551 scratch->s.size);
552
553 /* init scratch module */
554 scratch->dsp = dsp;
555 scratch->s.type = SST_MEM_DRAM;
556 scratch->s.data_type = SST_DATA_S;
557 INIT_LIST_HEAD(&scratch->block_list);
558
559 /* check free blocks before looking at used blocks for space */
560 if (!list_empty(&dsp->free_block_list))
561 block = list_first_entry(&dsp->free_block_list,
562 struct sst_mem_block, list);
563 else
564 block = list_first_entry(&dsp->used_block_list,
565 struct sst_mem_block, list);
566 block_size = block->size;
567
568 /* allocate blocks for module scratch buffers */
569 dev_dbg(dsp->dev, "allocating scratch blocks\n");
570 ret = block_alloc(scratch, &scratch->s);
571 if (ret < 0) {
572 dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
573 goto err;
574 }
575
576 /* assign the same offset of scratch to each module */
577 list_for_each_entry(sst_module, &dsp->module_list, list)
578 sst_module->s.offset = scratch->s.offset;
579
580 mutex_unlock(&dsp->mutex);
581 return scratch;
582
583err:
584 list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
585 list_del(&block->module_list);
586 mutex_unlock(&dsp->mutex);
587 return NULL;
588}
589EXPORT_SYMBOL_GPL(sst_mem_block_alloc_scratch);
590
591/* free all scratch blocks */
592void sst_mem_block_free_scratch(struct sst_dsp *dsp,
593 struct sst_module *scratch)
594{
595 struct sst_mem_block *block, *tmp;
596
597 mutex_lock(&dsp->mutex);
598
599 list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
600 list_del(&block->module_list);
601
602 mutex_unlock(&dsp->mutex);
603}
604EXPORT_SYMBOL_GPL(sst_mem_block_free_scratch);
605
606/* get a module from it's unique ID */
607struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
608{
609 struct sst_module *module;
610
611 mutex_lock(&dsp->mutex);
612
613 list_for_each_entry(module, &dsp->module_list, list) {
614 if (module->id == id) {
615 mutex_unlock(&dsp->mutex);
616 return module;
617 }
618 }
619
620 mutex_unlock(&dsp->mutex);
621 return NULL;
622}
623EXPORT_SYMBOL_GPL(sst_module_get_from_id);