/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>

/* supported DMA engine drivers */
#include <linux/dma/dw.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

#define SST_DMA_RESOURCES	2
#define SST_DSP_DMA_MAX_BURST	0x3
#define SST_HSW_BLOCK_ANY	0xffffffff

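/*
 * ORed into the DSP-side address by sst_dsp_dma_copyto()/sst_dsp_dma_copyfrom()
 * below so the host DMA engine targets the ADSP memory window.
 */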
#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000

struct sst_dma {
	struct sst_dsp *sst;

	struct dw_dma_chip *chip;

	struct dma_async_tx_descriptor *desc;
	struct dma_chan *ch;
};

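/*
 * Copy to DSP MMIO space in 32-bit words; any trailing 1-3 bytes are packed
 * into a final 32-bit write.
 */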
static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
	u32 tmp = 0;
	int i, m, n;
	const u8 *src_byte = src;

	m = bytes / 4;
	n = bytes % 4;

	/* __iowrite32_copy uses 32-bit size values so divide by 4 */
	__iowrite32_copy((void *)dest, src, m);

	if (n) {
		for (i = 0; i < n; i++)
			tmp |= (u32)*(src_byte + m * 4 + i) << (i * 8);
		__iowrite32_copy((void *)(dest + m * 4), &tmp, 1);
	}

}

static void sst_dma_transfer_complete(void *arg)
{
	struct sst_dsp *sst = (struct sst_dsp *)arg;

	dev_dbg(sst->dev, "DMA: callback\n");
}

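/*
 * Perform a single synchronous memcpy transfer on the DSP DMA channel;
 * the call blocks until the dmaengine reports completion.
 */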
static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	struct dma_async_tx_descriptor *desc;
	struct sst_dma *dma = sst->dma;

	if (dma->ch == NULL) {
		dev_err(sst->dev, "error: no DMA channel\n");
		return -ENODEV;
	}

	dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
		(unsigned long)src_addr, (unsigned long)dest_addr, size);

	desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
		src_addr, size, DMA_CTRL_ACK);
	if (!desc) {
		dev_err(sst->dev, "error: dma prep memcpy failed\n");
		return -EINVAL;
	}

	desc->callback = sst_dma_transfer_complete;
	desc->callback_param = sst;

	desc->tx_submit(desc);
	dma_wait_for_async_tx(desc);

	return 0;
}

/* copy to DSP */
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
			src_addr, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);

/* copy from DSP */
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr,
			src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);

/* remove module from memory - callers hold locks */
static void block_list_remove(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int err;

	/* disable each block */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->disable) {
			err = block->ops->disable(block);
			if (err < 0)
				dev_err(dsp->dev,
					"error: can't disable block %d:%d\n",
					block->type, block->index);
		}
	}

	/* mark each block as free */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {
		list_del(&block->module_list);
		list_move(&block->list, &dsp->free_block_list);
		dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}
}

/* prepare the memory block to receive data from host - callers hold locks */
static int block_list_prepare(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block;
	int ret = 0;

	/* enable each block so that it's ready for data */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->enable && !block->users) {
			ret = block->ops->enable(block);
			if (ret < 0) {
				dev_err(dsp->dev,
					"error: can't enable block %d:%d\n",
					block->type, block->index);
				goto err;
			}
		}
	}
	return ret;

err:
	list_for_each_entry(block, block_list, module_list) {
		if (block->ops && block->ops->disable)
			block->ops->disable(block);
	}
	return ret;
}

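/*
 * Register a Synopsys DesignWare DMA controller instance on the ADSP MMIO
 * resource so the generic dmaengine API can be used for firmware transfers.
 */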
static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
	int irq)
{
	struct dw_dma_chip *chip;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return ERR_PTR(-ENOMEM);

	chip->irq = irq;
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return ERR_CAST(chip->regs);

	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
	if (err)
		return ERR_PTR(err);

	chip->dev = dev;

	err = dw_dma_probe(chip);
	if (err)
		return ERR_PTR(err);

	return chip;
}

static void dw_remove(struct dw_dma_chip *chip)
{
	dw_dma_remove(chip);
}

static bool dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct sst_dsp *dsp = (struct sst_dsp *)param;

	return chan->device->dev == dsp->dma_dev;
}

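/*
 * Request a memcpy-capable channel from the DMA engine attached to the ADSP
 * and apply the slave configuration (bus width, burst size) used for
 * firmware block transfers.
 */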
int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
	struct sst_dma *dma = dsp->dma;
	struct dma_slave_config slave;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
	if (dma->ch == NULL) {
		dev_err(dsp->dev, "error: DMA request channel failed\n");
		return -EIO;
	}

	memset(&slave, 0, sizeof(slave));
	slave.direction = DMA_MEM_TO_DEV;
	slave.src_addr_width =
		slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;

	ret = dmaengine_slave_config(dma->ch, &slave);
	if (ret) {
		dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
			ret);
		dma_release_channel(dma->ch);
		dma->ch = NULL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);

void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
{
	struct sst_dma *dma = dsp->dma;

	if (!dma->ch)
		return;

	dma_release_channel(dma->ch);
	dma->ch = NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);

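/*
 * Create the DSP DMA context: map the DMA engine MMIO inside the LPE region,
 * register the controller and mark the DSP as able to use DMA for firmware
 * loading. A missing DMA resource index is not an error; the memcpy path is
 * used instead.
 */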
int sst_dma_new(struct sst_dsp *sst)
{
	struct sst_pdata *sst_pdata = sst->pdata;
	struct sst_dma *dma;
	struct resource mem;
	const char *dma_dev_name;
	int ret = 0;

	if (sst->pdata->resindex_dma_base == -1)
		/* DMA is not used, return and squelch error messages */
		return 0;

	/* configure the correct platform data for whatever DMA engine
	 * is attached to the ADSP IP. */
	switch (sst->pdata->dma_engine) {
	case SST_DMA_TYPE_DW:
		dma_dev_name = "dw_dmac";
		break;
	default:
		dev_err(sst->dev, "error: invalid DMA engine %d\n",
			sst->pdata->dma_engine);
		return -EINVAL;
	}

	dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->sst = sst;

	memset(&mem, 0, sizeof(mem));

	mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
	mem.end = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
	mem.flags = IORESOURCE_MEM;

	/* now register DMA engine device */
	dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
	if (IS_ERR(dma->chip)) {
		dev_err(sst->dev, "error: DMA device register failed\n");
		ret = PTR_ERR(dma->chip);
		goto err_dma_dev;
	}

	sst->dma = dma;
	sst->fw_use_dma = true;
	return 0;

err_dma_dev:
	devm_kfree(sst->dev, dma);
	return ret;
}
EXPORT_SYMBOL(sst_dma_new);

void sst_dma_free(struct sst_dma *dma)
{

	if (dma == NULL)
		return;

	if (dma->ch)
		dma_release_channel(dma->ch);

	if (dma->chip)
		dw_remove(dma->chip);

}
EXPORT_SYMBOL(sst_dma_free);

/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
	const struct firmware *fw, void *private)
{
	struct sst_fw *sst_fw;
	int err;

	if (!dsp->ops->parse_fw)
		return NULL;

	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
	if (sst_fw == NULL)
		return NULL;

	sst_fw->dsp = dsp;
	sst_fw->private = private;
	sst_fw->size = fw->size;

	/* allocate DMA buffer to store FW data */
	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
	if (!sst_fw->dma_buf) {
		dev_err(dsp->dev, "error: DMA alloc failed\n");
		kfree(sst_fw);
		return NULL;
	}

	/* copy FW data to DMA-able memory */
	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

	if (dsp->fw_use_dma) {
		err = sst_dsp_dma_get_channel(dsp, 0);
		if (err < 0)
			goto chan_err;
	}

	/* call core specific FW parser to load FW data into DSP */
	err = dsp->ops->parse_fw(sst_fw);
	if (err < 0) {
		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
		goto parse_err;
	}

	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);

	mutex_lock(&dsp->mutex);
	list_add(&sst_fw->list, &dsp->fw_list);
	mutex_unlock(&dsp->mutex);

	return sst_fw;

parse_err:
	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);
chan_err:
	dma_free_coherent(dsp->dma_dev, sst_fw->size,
			sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	sst_fw->dma_buf = NULL;
	kfree(sst_fw);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);
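/*
 * Typical call sequence (illustrative sketch only; the firmware file name
 * and the NULL private pointer are placeholders, not taken from this file):
 *
 *	const struct firmware *fw;
 *	struct sst_fw *sst_fw;
 *	int ret;
 *
 *	ret = request_firmware(&fw, "dsp_fw.bin", dsp->dev);
 *	if (ret == 0) {
 *		sst_fw = sst_fw_new(dsp, fw, NULL);
 *		release_firmware(fw);
 *	}
 */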

int sst_fw_reload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	int ret;

	dev_dbg(dsp->dev, "reloading firmware\n");

	/* call core specific FW parser to load FW data into DSP */
	ret = dsp->ops->parse_fw(sst_fw);
	if (ret < 0)
		dev_err(dsp->dev, "error: parse fw failed %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(sst_fw_reload);

void sst_fw_unload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *module, *mtmp;
	struct sst_module_runtime *runtime, *rtmp;

	dev_dbg(dsp->dev, "unloading firmware\n");

	mutex_lock(&dsp->mutex);

	/* check module by module */
	list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
		if (module->sst_fw == sst_fw) {

			/* remove runtime modules */
			list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {

				block_list_remove(dsp, &runtime->block_list);
				list_del(&runtime->list);
				kfree(runtime);
			}

			/* now remove the module */
			block_list_remove(dsp, &module->block_list);
			list_del(&module->list);
			kfree(module);
		}
	}

	/* remove all scratch blocks */
	block_list_remove(dsp, &dsp->scratch_block_list);

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);

/* free single firmware object */
void sst_fw_free(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_fw->list);
	mutex_unlock(&dsp->mutex);

	if (sst_fw->dma_buf)
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);

/* free all firmware objects */
void sst_fw_free_all(struct sst_dsp *dsp)
{
	struct sst_fw *sst_fw, *t;

	mutex_lock(&dsp->mutex);
	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {

		list_del(&sst_fw->list);
		dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
		kfree(sst_fw);
	}
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);

/* create a new SST generic module from FW template */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
	struct sst_module_template *template, void *private)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *sst_module;

	sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
	if (sst_module == NULL)
		return NULL;

	sst_module->id = template->id;
	sst_module->dsp = dsp;
	sst_module->sst_fw = sst_fw;
	sst_module->scratch_size = template->scratch_size;
	sst_module->persistent_size = template->persistent_size;
	sst_module->entry = template->entry;
	sst_module->state = SST_MODULE_STATE_UNLOADED;

	INIT_LIST_HEAD(&sst_module->block_list);
	INIT_LIST_HEAD(&sst_module->runtime_list);

	mutex_lock(&dsp->mutex);
	list_add(&sst_module->list, &dsp->module_list);
	mutex_unlock(&dsp->mutex);

	return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);

/* free firmware module and remove from available list */
void sst_module_free(struct sst_module *sst_module)
{
	struct sst_dsp *dsp = sst_module->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_module->list);
	mutex_unlock(&dsp->mutex);

	kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);

struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
	int id, void *private)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_module_runtime *runtime;

	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (runtime == NULL)
		return NULL;

	runtime->id = id;
	runtime->dsp = dsp;
	runtime->module = module;
	INIT_LIST_HEAD(&runtime->block_list);

	mutex_lock(&dsp->mutex);
	list_add(&runtime->list, &module->runtime_list);
	mutex_unlock(&dsp->mutex);

	return runtime;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_new);

void sst_module_runtime_free(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&runtime->list);
	mutex_unlock(&dsp->mutex);

	kfree(runtime);
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free);

static struct sst_mem_block *find_block(struct sst_dsp *dsp,
	struct sst_block_allocator *ba)
{
	struct sst_mem_block *block;

	list_for_each_entry(block, &dsp->free_block_list, list) {
		if (block->type == ba->type && block->offset == ba->offset)
			return block;
	}

	return NULL;
}

/* Block allocator must be on block boundary */
static int block_alloc_contiguous(struct sst_dsp *dsp,
	struct sst_block_allocator *ba, struct list_head *block_list)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_mem_block *block;
	u32 block_start = SST_HSW_BLOCK_ANY;
	int size = ba->size, offset = ba->offset;

	while (ba->size > 0) {

		block = find_block(dsp, ba);
		if (!block) {
			list_splice(&tmp, &dsp->free_block_list);

			ba->size = size;
			ba->offset = offset;
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		ba->offset += block->size;
		ba->size -= block->size;
	}
	ba->size = size;
	ba->offset = offset;

	list_for_each_entry(block, &tmp, list) {

		if (block->offset < block_start)
			block_start = block->offset;

		list_add(&block->module_list, block_list);

		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}

	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}

/* allocate first free DSP blocks for data - callers hold locks */
static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int ret = 0;

	if (ba->size == 0)
		return 0;

	/* find first free whole blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		if (ba->size > block->size)
			continue;

		ba->offset = block->offset;
		block->bytes_used = ba->size % block->size;
		list_add(&block->module_list, block_list);
		list_move(&block->list, &dsp->used_block_list);
		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
		return 0;
	}

	/* then find free multiple blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* do we span > 1 blocks */
		if (ba->size > block->size) {

			/* align ba to block boundary */
			ba->offset = block->offset;

			ret = block_alloc_contiguous(dsp, ba, block_list);
			if (ret == 0)
				return ret;

		}
	}

	/* not enough free block space */
	return -ENOMEM;
}

int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	int ret;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba->size, ba->offset, ba->type);

	mutex_lock(&dsp->mutex);

	ret = block_alloc(dsp, ba, block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
		goto out;
	}

	/* prepare DSP blocks for module usage */
	ret = block_list_prepare(dsp, block_list);
	if (ret < 0)
		dev_err(dsp->dev, "error: prepare failed\n");

out:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_alloc_blocks);

int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_free_blocks);

/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	struct sst_block_allocator ba_tmp = *ba;
	u32 end = ba->offset + ba->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end)
			return 0;

		/* does block span more than 1 section */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;
			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
				block->type, block->index, block->offset);
			return 0;
		}

		/* does block span more than 1 section */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;

			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			return 0;
		}
	}

	return -ENOMEM;
}

/* Load fixed module data into DSP memory blocks */
int sst_module_alloc_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_fw *sst_fw = module->sst_fw;
	struct sst_block_allocator ba;
	int ret;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->size;
	ba.type = module->type;
	ba.offset = module->offset;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba.size, ba.offset, ba.type);

	mutex_lock(&dsp->mutex);

	/* alloc blocks that include this section */
	ret = block_alloc_fixed(dsp, &ba, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for section at offset 0x%x size 0x%x\n",
			module->offset, module->size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: fw module prepare failed\n");
		goto err;
	}

	/* copy partial module data to blocks */
	if (dsp->fw_use_dma) {
		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + module->offset,
			sst_fw->dmable_fw_paddr + module->data_offset,
			module->size);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
			module->size);

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);

/* Unload entire module from DSP memory */
int sst_module_free_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_free_blocks);

int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
	int offset)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	struct sst_block_allocator ba;
	int ret;

	if (module->persistent_size == 0)
		return 0;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->persistent_size;
	ba.type = SST_MEM_DRAM;

	mutex_lock(&dsp->mutex);

	/* do we need to allocate at a fixed address ? */
	if (offset != 0) {

		ba.offset = offset;

		dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);

	} else {
		dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		/* alloc blocks that include this section */
		ret = block_alloc(dsp, &ba, &runtime->block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for runtime module size 0x%x\n",
			module->persistent_size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}
	runtime->persistent_offset = ba.offset;

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &runtime->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: runtime block prepare failed\n");
		goto err;
	}

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);

int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);

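/*
 * Save the runtime's persistent DSP memory into a newly allocated DMA-able
 * context buffer so it can be restored later by sst_module_runtime_restore().
 */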
int sst_module_runtime_save(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	context->buffer = dma_alloc_coherent(dsp->dma_dev,
		module->persistent_size,
		&context->dma_buffer, GFP_DMA | GFP_KERNEL);
	if (!context->buffer) {
		dev_err(dsp->dev, "error: DMA context alloc failed\n");
		return -ENOMEM;
	}

	mutex_lock(&dsp->mutex);

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
			dsp->addr.lpe_base + runtime->persistent_offset,
			module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: context copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(context->buffer, dsp->addr.lpe +
			runtime->persistent_offset,
			module->persistent_size);

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_save);

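/*
 * Copy a previously saved context buffer back into the runtime's persistent
 * DSP memory and free the buffer.
 */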
int sst_module_runtime_restore(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	mutex_lock(&dsp->mutex);

	if (!context->buffer) {
		dev_info(dsp->dev, "no context buffer to restore\n");
		goto err;
	}

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + runtime->persistent_offset,
			context->dma_buffer, module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
			context->buffer, module->persistent_size);

	dma_free_coherent(dsp->dma_dev, module->persistent_size,
		context->buffer, context->dma_buffer);
	context->buffer = NULL;

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_restore);

/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
	u32 size, enum sst_mem_type type, const struct sst_block_ops *ops,
	u32 index, void *private)
{
	struct sst_mem_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		return NULL;

	block->offset = offset;
	block->size = size;
	block->index = index;
	block->type = type;
	block->dsp = dsp;
	block->private = private;
	block->ops = ops;

	mutex_lock(&dsp->mutex);
	list_add(&block->list, &dsp->free_block_list);
	mutex_unlock(&dsp->mutex);

	return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);

/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	/* unregister used blocks */
	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	/* unregister free blocks */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);

/* allocate scratch buffer blocks */
int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
	struct sst_module *module;
	struct sst_block_allocator ba;
	int ret;

	mutex_lock(&dsp->mutex);

	/* calculate required scratch size */
	dsp->scratch_size = 0;
	list_for_each_entry(module, &dsp->module_list, list) {
		dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
			module->id, module->scratch_size);
		if (dsp->scratch_size < module->scratch_size)
			dsp->scratch_size = module->scratch_size;
	}

	dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
		dsp->scratch_size);

	if (dsp->scratch_size == 0) {
		dev_info(dsp->dev, "no modules need scratch buffer\n");
		mutex_unlock(&dsp->mutex);
		return 0;
	}

	/* allocate blocks for module scratch buffers */
	dev_dbg(dsp->dev, "allocating scratch blocks\n");

	ba.size = dsp->scratch_size;
	ba.type = SST_MEM_DRAM;

	/* do we need to allocate at fixed offset */
	if (dsp->scratch_offset != 0) {

		ba.offset = dsp->scratch_offset;

		dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);

	} else {
		dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		ba.offset = 0;
		ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	ret = block_list_prepare(dsp, &dsp->scratch_block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: scratch block prepare failed\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	/* assign the same offset of scratch to each module */
	dsp->scratch_offset = ba.offset;
	mutex_unlock(&dsp->mutex);
	return dsp->scratch_size;
}
EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);

/* free all scratch blocks */
void sst_block_free_scratch(struct sst_dsp *dsp)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &dsp->scratch_block_list);
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_block_free_scratch);

/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
	struct sst_module *module;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(module, &dsp->module_list, list) {
		if (module->id == id) {
			mutex_unlock(&dsp->mutex);
			return module;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);

struct sst_module_runtime *sst_module_runtime_get_from_id(
	struct sst_module *module, u32 id)
{
	struct sst_module_runtime *runtime;
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(runtime, &module->runtime_list, list) {
		if (runtime->id == id) {
			mutex_unlock(&dsp->mutex);
			return runtime;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);

/* returns block address in DSP address space */
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
	enum sst_mem_type type)
{
	switch (type) {
	case SST_MEM_IRAM:
		return offset - dsp->addr.iram_offset +
			dsp->addr.dsp_iram_offset;
	case SST_MEM_DRAM:
		return offset - dsp->addr.dram_offset +
			dsp->addr.dsp_dram_offset;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);