/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>

/* supported DMA engine drivers */
#include <linux/dma/dw.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

#define SST_DMA_RESOURCES	2
#define SST_DSP_DMA_MAX_BURST	0x3
#define SST_HSW_BLOCK_ANY	0xffffffff

#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000

struct sst_dma {
	struct sst_dsp *sst;

	struct dw_dma_chip *chip;

	struct dma_async_tx_descriptor *desc;
	struct dma_chan *ch;
};

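/* copy to DSP I/O memory in 32-bit words, packing any trailing bytes into a final word */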
static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
	u32 tmp = 0;
	int i, m, n;
	const u8 *src_byte = src;

	m = bytes / 4;
	n = bytes % 4;

	/* __iowrite32_copy() takes a count of 32-bit words, so pass bytes / 4 */
	__iowrite32_copy((void *)dest, src, m);

	if (n) {
		for (i = 0; i < n; i++)
			tmp |= (u32)*(src_byte + m * 4 + i) << (i * 8);
		__iowrite32_copy((void *)(dest + m * 4), &tmp, 1);
	}
}

static void sst_dma_transfer_complete(void *arg)
{
	struct sst_dsp *sst = (struct sst_dsp *)arg;

	dev_dbg(sst->dev, "DMA: callback\n");
}

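/* perform a single DMA memcpy and wait for the transfer to complete */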
static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	struct dma_async_tx_descriptor *desc;
	struct sst_dma *dma = sst->dma;

	if (dma->ch == NULL) {
		dev_err(sst->dev, "error: no DMA channel\n");
		return -ENODEV;
	}

	dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
		(unsigned long)src_addr, (unsigned long)dest_addr, size);

	desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
		src_addr, size, DMA_CTRL_ACK);
	if (!desc) {
		dev_err(sst->dev, "error: dma prep memcpy failed\n");
		return -EINVAL;
	}

	desc->callback = sst_dma_transfer_complete;
	desc->callback_param = sst;

	desc->tx_submit(desc);
	dma_wait_for_async_tx(desc);

	return 0;
}

/* copy to DSP */
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
			src_addr, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);

/* copy from DSP */
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr,
			src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);

/* remove module from memory - callers hold locks */
static void block_list_remove(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int err;

	/* disable each block */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->disable) {
			err = block->ops->disable(block);
			if (err < 0)
				dev_err(dsp->dev,
					"error: can't disable block %d:%d\n",
					block->type, block->index);
		}
	}

	/* mark each block as free */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {
		list_del(&block->module_list);
		list_move(&block->list, &dsp->free_block_list);
		dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}
}

/* prepare the memory block to receive data from host - callers hold locks */
static int block_list_prepare(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block;
	int ret = 0;

	/* enable each block so that it's ready for data */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->enable && !block->users) {
			ret = block->ops->enable(block);
			if (ret < 0) {
				dev_err(dsp->dev,
					"error: can't enable block %d:%d\n",
					block->type, block->index);
				goto err;
			}
		}
	}
	return ret;

err:
	list_for_each_entry(block, block_list, module_list) {
		if (block->ops && block->ops->disable)
			block->ops->disable(block);
	}
	return ret;
}

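/* ioremap the DMA engine registers and register a DesignWare DMA controller */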
static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
	int irq)
{
	struct dw_dma_chip *chip;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return ERR_PTR(-ENOMEM);

	chip->irq = irq;
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return ERR_CAST(chip->regs);

	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
	if (err)
		return ERR_PTR(err);

	chip->dev = dev;

	err = dw_dma_probe(chip);
	if (err)
		return ERR_PTR(err);

	return chip;
}

static void dw_remove(struct dw_dma_chip *chip)
{
	dw_dma_remove(chip);
}

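/* only accept DMA channels provided by the DSP's own DMA controller */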
static bool dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct sst_dsp *dsp = (struct sst_dsp *)param;

	return chan->device->dev == dsp->dma_dev;
}

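/* request a memcpy-capable DMA channel and apply the slave configuration */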
int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
	struct sst_dma *dma = dsp->dma;
	struct dma_slave_config slave;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
	if (dma->ch == NULL) {
		dev_err(dsp->dev, "error: DMA request channel failed\n");
		return -EIO;
	}

	memset(&slave, 0, sizeof(slave));
	slave.direction = DMA_MEM_TO_DEV;
	slave.src_addr_width =
		slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;

	ret = dmaengine_slave_config(dma->ch, &slave);
	if (ret) {
		dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
			ret);
		dma_release_channel(dma->ch);
		dma->ch = NULL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);

void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
{
	struct sst_dma *dma = dsp->dma;

	if (!dma->ch)
		return;

	dma_release_channel(dma->ch);
	dma->ch = NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);

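/* register the DMA engine attached to the ADSP and enable DMA firmware download */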
int sst_dma_new(struct sst_dsp *sst)
{
	struct sst_pdata *sst_pdata = sst->pdata;
	struct sst_dma *dma;
	struct resource mem;
	const char *dma_dev_name;
	int ret = 0;

	if (sst->pdata->resindex_dma_base == -1)
		/* DMA is not used, return and squelch error messages */
		return 0;

	/* configure the correct platform data for whatever DMA engine
	 * is attached to the ADSP IP. */
	switch (sst->pdata->dma_engine) {
	case SST_DMA_TYPE_DW:
		dma_dev_name = "dw_dmac";
		break;
	default:
		dev_err(sst->dev, "error: invalid DMA engine %d\n",
			sst->pdata->dma_engine);
		return -EINVAL;
	}

	dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->sst = sst;

	memset(&mem, 0, sizeof(mem));

	mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
	mem.end = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
	mem.flags = IORESOURCE_MEM;

	/* now register DMA engine device */
	dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
	if (IS_ERR(dma->chip)) {
		dev_err(sst->dev, "error: DMA device register failed\n");
		ret = PTR_ERR(dma->chip);
		goto err_dma_dev;
	}

	sst->dma = dma;
	sst->fw_use_dma = true;
	return 0;

err_dma_dev:
	devm_kfree(sst->dev, dma);
	return ret;
}
EXPORT_SYMBOL(sst_dma_new);

void sst_dma_free(struct sst_dma *dma)
{
	if (dma == NULL)
		return;

	if (dma->ch)
		dma_release_channel(dma->ch);

	if (dma->chip)
		dw_remove(dma->chip);
}
EXPORT_SYMBOL(sst_dma_free);

/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
	const struct firmware *fw, void *private)
{
	struct sst_fw *sst_fw;
	int err;

	if (!dsp->ops->parse_fw)
		return NULL;

	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
	if (sst_fw == NULL)
		return NULL;

	sst_fw->dsp = dsp;
	sst_fw->private = private;
	sst_fw->size = fw->size;

	/* allocate DMA buffer to store FW data */
	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
	if (!sst_fw->dma_buf) {
		dev_err(dsp->dev, "error: DMA alloc failed\n");
		kfree(sst_fw);
		return NULL;
	}

	/* copy FW data to DMA-able memory */
	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

	if (dsp->fw_use_dma) {
		err = sst_dsp_dma_get_channel(dsp, 0);
		if (err < 0)
			goto chan_err;
	}

	/* call core specific FW parser to load FW data into DSP */
	err = dsp->ops->parse_fw(sst_fw);
	if (err < 0) {
		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
		goto parse_err;
	}

	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);

	mutex_lock(&dsp->mutex);
	list_add(&sst_fw->list, &dsp->fw_list);
	mutex_unlock(&dsp->mutex);

	return sst_fw;

parse_err:
	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);
chan_err:
	dma_free_coherent(dsp->dma_dev, sst_fw->size,
			sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	sst_fw->dma_buf = NULL;
	kfree(sst_fw);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);

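/* reload an existing firmware image into the DSP */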
int sst_fw_reload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	int ret;

	dev_dbg(dsp->dev, "reloading firmware\n");

	/* call core specific FW parser to load FW data into DSP */
	ret = dsp->ops->parse_fw(sst_fw);
	if (ret < 0)
		dev_err(dsp->dev, "error: parse fw failed %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(sst_fw_reload);

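/* unload firmware - free its modules and runtimes plus all scratch blocks */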
void sst_fw_unload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *module, *mtmp;
	struct sst_module_runtime *runtime, *rtmp;

	dev_dbg(dsp->dev, "unloading firmware\n");

	mutex_lock(&dsp->mutex);

	/* check module by module */
	list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
		if (module->sst_fw == sst_fw) {

			/* remove runtime modules */
			list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {

				block_list_remove(dsp, &runtime->block_list);
				list_del(&runtime->list);
				kfree(runtime);
			}

			/* now remove the module */
			block_list_remove(dsp, &module->block_list);
			list_del(&module->list);
			kfree(module);
		}
	}

	/* remove all scratch blocks */
	block_list_remove(dsp, &dsp->scratch_block_list);

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);

/* free single firmware object */
void sst_fw_free(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_fw->list);
	mutex_unlock(&dsp->mutex);

	if (sst_fw->dma_buf)
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);

/* free all firmware objects */
void sst_fw_free_all(struct sst_dsp *dsp)
{
	struct sst_fw *sst_fw, *t;

	mutex_lock(&dsp->mutex);
	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {

		list_del(&sst_fw->list);
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
		kfree(sst_fw);
	}
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);

/* create a new SST generic module from FW template */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
	struct sst_module_template *template, void *private)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *sst_module;

	sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
	if (sst_module == NULL)
		return NULL;

	sst_module->id = template->id;
	sst_module->dsp = dsp;
	sst_module->sst_fw = sst_fw;
	sst_module->scratch_size = template->scratch_size;
	sst_module->persistent_size = template->persistent_size;
	sst_module->entry = template->entry;
	sst_module->state = SST_MODULE_STATE_UNLOADED;

	INIT_LIST_HEAD(&sst_module->block_list);
	INIT_LIST_HEAD(&sst_module->runtime_list);

	mutex_lock(&dsp->mutex);
	list_add(&sst_module->list, &dsp->module_list);
	mutex_unlock(&dsp->mutex);

	return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);

/* free firmware module and remove from available list */
void sst_module_free(struct sst_module *sst_module)
{
	struct sst_dsp *dsp = sst_module->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_module->list);
	mutex_unlock(&dsp->mutex);

	kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);

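/* create a new runtime instance of a module */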
struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
	int id, void *private)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_module_runtime *runtime;

	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (runtime == NULL)
		return NULL;

	runtime->id = id;
	runtime->dsp = dsp;
	runtime->module = module;
	INIT_LIST_HEAD(&runtime->block_list);

	mutex_lock(&dsp->mutex);
	list_add(&runtime->list, &module->runtime_list);
	mutex_unlock(&dsp->mutex);

	return runtime;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_new);

void sst_module_runtime_free(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&runtime->list);
	mutex_unlock(&dsp->mutex);

	kfree(runtime);
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free);

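/* find a free block matching the requested type and offset */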
static struct sst_mem_block *find_block(struct sst_dsp *dsp,
	struct sst_block_allocator *ba)
{
	struct sst_mem_block *block;

	list_for_each_entry(block, &dsp->free_block_list, list) {
		if (block->type == ba->type && block->offset == ba->offset)
			return block;
	}

	return NULL;
}

/* Block allocator must be on block boundary */
static int block_alloc_contiguous(struct sst_dsp *dsp,
	struct sst_block_allocator *ba, struct list_head *block_list)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_mem_block *block;
	u32 block_start = SST_HSW_BLOCK_ANY;
	int size = ba->size, offset = ba->offset;

	while (ba->size > 0) {

		block = find_block(dsp, ba);
		if (!block) {
			list_splice(&tmp, &dsp->free_block_list);

			ba->size = size;
			ba->offset = offset;
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		ba->offset += block->size;
		ba->size -= block->size;
	}
	ba->size = size;
	ba->offset = offset;

	list_for_each_entry(block, &tmp, list) {

		if (block->offset < block_start)
			block_start = block->offset;

		list_add(&block->module_list, block_list);

		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}

	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}

/* allocate first free DSP blocks for data - callers hold locks */
static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int ret = 0;

	if (ba->size == 0)
		return 0;

	/* find first free whole blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		if (ba->size > block->size)
			continue;

		ba->offset = block->offset;
		block->bytes_used = ba->size % block->size;
		list_add(&block->module_list, block_list);
		list_move(&block->list, &dsp->used_block_list);
		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
		return 0;
	}

	/* then find free multiple blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* do we span > 1 blocks */
		if (ba->size > block->size) {

			/* align ba to block boundary */
			ba->offset = block->offset;

			ret = block_alloc_contiguous(dsp, ba, block_list);
			if (ret == 0)
				return ret;
		}
	}

	/* not enough free block space */
	return -ENOMEM;
}

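/* allocate and prepare DSP blocks for generic data - takes the DSP mutex */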
int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	int ret;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba->size, ba->offset, ba->type);

	mutex_lock(&dsp->mutex);

	ret = block_alloc(dsp, ba, block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
		goto out;
	}

	/* prepare DSP blocks for module usage */
	ret = block_list_prepare(dsp, block_list);
	if (ret < 0)
		dev_err(dsp->dev, "error: prepare failed\n");

out:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_alloc_blocks);

int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_free_blocks);

/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	struct sst_block_allocator ba_tmp = *ba;
	u32 end = ba->offset + ba->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end)
			return 0;

		/* does block span more than 1 section */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;
			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
				block->type, block->index, block->offset);
			return 0;
		}

		/* does block span more than 1 section */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;

			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			return 0;
		}
	}

	return -ENOMEM;
}

/* Load fixed module data into DSP memory blocks */
int sst_module_alloc_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_fw *sst_fw = module->sst_fw;
	struct sst_block_allocator ba;
	int ret;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->size;
	ba.type = module->type;
	ba.offset = module->offset;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba.size, ba.offset, ba.type);

	mutex_lock(&dsp->mutex);

	/* alloc blocks that includes this section */
	ret = block_alloc_fixed(dsp, &ba, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for section at offset 0x%x size 0x%x\n",
			module->offset, module->size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: fw module prepare failed\n");
		goto err;
	}

	/* copy partial module data to blocks */
	if (dsp->fw_use_dma) {
		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + module->offset,
			sst_fw->dmable_fw_paddr + module->data_offset,
			module->size);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
			module->size);

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);

/* Unload entire module from DSP memory */
int sst_module_free_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_free_blocks);

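/* allocate persistent memory blocks for a module runtime instance */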
int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
	int offset)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	struct sst_block_allocator ba;
	int ret;

	if (module->persistent_size == 0)
		return 0;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->persistent_size;
	ba.type = SST_MEM_DRAM;

	mutex_lock(&dsp->mutex);

	/* do we need to allocate at a fixed address ? */
	if (offset != 0) {

		ba.offset = offset;

		dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that includes this section */
		ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);

	} else {
		dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		/* alloc blocks that includes this section */
		ret = block_alloc(dsp, &ba, &runtime->block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for runtime module size 0x%x\n",
			module->persistent_size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}
	runtime->persistent_offset = ba.offset;

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &runtime->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: runtime block prepare failed\n");
		goto err;
	}

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);

int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);

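/* copy runtime persistent memory into a newly allocated DMA-able context buffer */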
int sst_module_runtime_save(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	context->buffer = dma_alloc_coherent(dsp->dma_dev,
		module->persistent_size,
		&context->dma_buffer, GFP_DMA | GFP_KERNEL);
	if (!context->buffer) {
		dev_err(dsp->dev, "error: DMA context alloc failed\n");
		return -ENOMEM;
	}

	mutex_lock(&dsp->mutex);

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
			dsp->addr.lpe_base + runtime->persistent_offset,
			module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: context copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(context->buffer, dsp->addr.lpe +
			runtime->persistent_offset,
			module->persistent_size);

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_save);

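/* copy the saved context back into DSP persistent memory and free the buffer */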
int sst_module_runtime_restore(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	mutex_lock(&dsp->mutex);

	if (!context->buffer) {
		dev_info(dsp->dev, "no context buffer to restore\n");
		goto err;
	}

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + runtime->persistent_offset,
			context->dma_buffer, module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
			context->buffer, module->persistent_size);

	dma_free_coherent(dsp->dma_dev, module->persistent_size,
				context->buffer, context->dma_buffer);
	context->buffer = NULL;

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_restore);

/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
	u32 size, enum sst_mem_type type, const struct sst_block_ops *ops,
	u32 index, void *private)
{
	struct sst_mem_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		return NULL;

	block->offset = offset;
	block->size = size;
	block->index = index;
	block->type = type;
	block->dsp = dsp;
	block->private = private;
	block->ops = ops;

	mutex_lock(&dsp->mutex);
	list_add(&block->list, &dsp->free_block_list);
	mutex_unlock(&dsp->mutex);

	return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);

/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	/* unregister used blocks */
	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	/* unregister free blocks */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);

/* allocate scratch buffer blocks */
int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
	struct sst_module *module;
	struct sst_block_allocator ba;
	int ret;

	mutex_lock(&dsp->mutex);

	/* calculate required scratch size */
	dsp->scratch_size = 0;
	list_for_each_entry(module, &dsp->module_list, list) {
		dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
			module->id, module->scratch_size);
		if (dsp->scratch_size < module->scratch_size)
			dsp->scratch_size = module->scratch_size;
	}

	dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
		dsp->scratch_size);

	if (dsp->scratch_size == 0) {
		dev_info(dsp->dev, "no modules need scratch buffer\n");
		mutex_unlock(&dsp->mutex);
		return 0;
	}

	/* allocate blocks for module scratch buffers */
	dev_dbg(dsp->dev, "allocating scratch blocks\n");

	ba.size = dsp->scratch_size;
	ba.type = SST_MEM_DRAM;

	/* do we need to allocate at fixed offset */
	if (dsp->scratch_offset != 0) {

		ba.offset = dsp->scratch_offset;

		dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that includes this section */
		ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);

	} else {
		dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		ba.offset = 0;
		ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	ret = block_list_prepare(dsp, &dsp->scratch_block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: scratch block prepare failed\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	/* assign the same offset of scratch to each module */
	dsp->scratch_offset = ba.offset;
	mutex_unlock(&dsp->mutex);
	return dsp->scratch_size;
}
EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);

/* free all scratch blocks */
void sst_block_free_scratch(struct sst_dsp *dsp)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &dsp->scratch_block_list);
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_block_free_scratch);

/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
	struct sst_module *module;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(module, &dsp->module_list, list) {
		if (module->id == id) {
			mutex_unlock(&dsp->mutex);
			return module;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);

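/* get a module runtime instance from its unique ID */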
struct sst_module_runtime *sst_module_runtime_get_from_id(
	struct sst_module *module, u32 id)
{
	struct sst_module_runtime *runtime;
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(runtime, &module->runtime_list, list) {
		if (runtime->id == id) {
			mutex_unlock(&dsp->mutex);
			return runtime;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);

/* returns block address in DSP address space */
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
	enum sst_mem_type type)
{
	switch (type) {
	case SST_MEM_IRAM:
		return offset - dsp->addr.iram_offset +
			dsp->addr.dsp_iram_offset;
	case SST_MEM_DRAM:
		return offset - dsp->addr.dram_offset +
			dsp->addr.dsp_dram_offset;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);