/*
 * Intel SST Haswell/Broadwell IPC Support
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/kthread.h>
#include <linux/firmware.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>

#include "sst-haswell-ipc.h"
#include "sst-dsp.h"
#include "sst-dsp-priv.h"

/* Global Message - Generic */
#define IPC_GLB_TYPE_SHIFT	24
#define IPC_GLB_TYPE_MASK	(0x1f << IPC_GLB_TYPE_SHIFT)
#define IPC_GLB_TYPE(x)		(x << IPC_GLB_TYPE_SHIFT)

/* Global Message - Reply */
#define IPC_GLB_REPLY_SHIFT	0
#define IPC_GLB_REPLY_MASK	(0x1f << IPC_GLB_REPLY_SHIFT)
#define IPC_GLB_REPLY_TYPE(x)	(x << IPC_GLB_REPLY_SHIFT)

49/* Stream Message - Generic */
50#define IPC_STR_TYPE_SHIFT 20
51#define IPC_STR_TYPE_MASK (0xf << IPC_STR_TYPE_SHIFT)
52#define IPC_STR_TYPE(x) (x << IPC_STR_TYPE_SHIFT)
53#define IPC_STR_ID_SHIFT 16
54#define IPC_STR_ID_MASK (0xf << IPC_STR_ID_SHIFT)
55#define IPC_STR_ID(x) (x << IPC_STR_ID_SHIFT)
56
57/* Stream Message - Reply */
58#define IPC_STR_REPLY_SHIFT 0
59#define IPC_STR_REPLY_MASK (0x1f << IPC_STR_REPLY_SHIFT)
60
61/* Stream Stage Message - Generic */
62#define IPC_STG_TYPE_SHIFT 12
63#define IPC_STG_TYPE_MASK (0xf << IPC_STG_TYPE_SHIFT)
64#define IPC_STG_TYPE(x) (x << IPC_STG_TYPE_SHIFT)
65#define IPC_STG_ID_SHIFT 10
66#define IPC_STG_ID_MASK (0x3 << IPC_STG_ID_SHIFT)
67#define IPC_STG_ID(x) (x << IPC_STG_ID_SHIFT)
68
69/* Stream Stage Message - Reply */
70#define IPC_STG_REPLY_SHIFT 0
71#define IPC_STG_REPLY_MASK (0x1f << IPC_STG_REPLY_SHIFT)
72
73/* Debug Log Message - Generic */
74#define IPC_LOG_OP_SHIFT 20
75#define IPC_LOG_OP_MASK (0xf << IPC_LOG_OP_SHIFT)
76#define IPC_LOG_OP_TYPE(x) (x << IPC_LOG_OP_SHIFT)
77#define IPC_LOG_ID_SHIFT 16
78#define IPC_LOG_ID_MASK (0xf << IPC_LOG_ID_SHIFT)
79#define IPC_LOG_ID(x) (x << IPC_LOG_ID_SHIFT)
80
81/* IPC message timeout (msecs) */
82#define IPC_TIMEOUT_MSECS 300
83#define IPC_BOOT_MSECS 200
84#define IPC_MSG_WAIT 0
85#define IPC_MSG_NOWAIT 1
86
87/* Firmware Ready Message */
88#define IPC_FW_READY (0x1 << 29)
89#define IPC_STATUS_MASK (0x3 << 30)
90
91#define IPC_EMPTY_LIST_SIZE 8
92#define IPC_MAX_STREAMS 4
93
94/* Mailbox */
95#define IPC_MAX_MAILBOX_BYTES 256
96
Jie Yanga0a7c482015-01-12 17:17:34 +080097#define INVALID_STREAM_HW_ID 0xffffffff
98
Mark Browna4b12992014-03-12 23:04:35 +000099/* Global Message - Types and Replies */
100enum ipc_glb_type {
101 IPC_GLB_GET_FW_VERSION = 0, /* Retrieves firmware version */
102 IPC_GLB_PERFORMANCE_MONITOR = 1, /* Performance monitoring actions */
103 IPC_GLB_ALLOCATE_STREAM = 3, /* Request to allocate new stream */
104 IPC_GLB_FREE_STREAM = 4, /* Request to free stream */
105 IPC_GLB_GET_FW_CAPABILITIES = 5, /* Retrieves firmware capabilities */
106 IPC_GLB_STREAM_MESSAGE = 6, /* Message directed to stream or its stages */
107 /* Request to store firmware context during D0->D3 transition */
108 IPC_GLB_REQUEST_DUMP = 7,
109 /* Request to restore firmware context during D3->D0 transition */
110 IPC_GLB_RESTORE_CONTEXT = 8,
	IPC_GLB_GET_DEVICE_FORMATS = 9,		/* Get device format */
	IPC_GLB_SET_DEVICE_FORMATS = 10,	/* Set device format */
	IPC_GLB_SHORT_REPLY = 11,
	IPC_GLB_ENTER_DX_STATE = 12,
	IPC_GLB_GET_MIXER_STREAM_INFO = 13,	/* Request mixer stream params */
	IPC_GLB_DEBUG_LOG_MESSAGE = 14,		/* Message to or from the debug logger. */
	IPC_GLB_REQUEST_TRANSFER = 16,		/* Request transfer for host */
	IPC_GLB_MAX_IPC_MESSAGE_TYPE = 17,	/* Maximum message number */
119};
120
121enum ipc_glb_reply {
122 IPC_GLB_REPLY_SUCCESS = 0, /* The operation was successful. */
123 IPC_GLB_REPLY_ERROR_INVALID_PARAM = 1, /* Invalid parameter was passed. */
	IPC_GLB_REPLY_UNKNOWN_MESSAGE_TYPE = 2,	/* Unknown message type was received. */
125 IPC_GLB_REPLY_OUT_OF_RESOURCES = 3, /* No resources to satisfy the request. */
126 IPC_GLB_REPLY_BUSY = 4, /* The system or resource is busy. */
127 IPC_GLB_REPLY_PENDING = 5, /* The action was scheduled for processing. */
128 IPC_GLB_REPLY_FAILURE = 6, /* Critical error happened. */
129 IPC_GLB_REPLY_INVALID_REQUEST = 7, /* Request can not be completed. */
130 IPC_GLB_REPLY_STAGE_UNINITIALIZED = 8, /* Processing stage was uninitialized. */
131 IPC_GLB_REPLY_NOT_FOUND = 9, /* Required resource can not be found. */
132 IPC_GLB_REPLY_SOURCE_NOT_STARTED = 10, /* Source was not started. */
133};
134
135/* Stream Message - Types */
136enum ipc_str_operation {
137 IPC_STR_RESET = 0,
138 IPC_STR_PAUSE = 1,
139 IPC_STR_RESUME = 2,
140 IPC_STR_STAGE_MESSAGE = 3,
141 IPC_STR_NOTIFICATION = 4,
142 IPC_STR_MAX_MESSAGE
143};
144
145/* Stream Stage Message Types */
146enum ipc_stg_operation {
147 IPC_STG_GET_VOLUME = 0,
148 IPC_STG_SET_VOLUME,
149 IPC_STG_SET_WRITE_POSITION,
150 IPC_STG_SET_FX_ENABLE,
151 IPC_STG_SET_FX_DISABLE,
152 IPC_STG_SET_FX_GET_PARAM,
153 IPC_STG_SET_FX_SET_PARAM,
154 IPC_STG_SET_FX_GET_INFO,
155 IPC_STG_MUTE_LOOPBACK,
156 IPC_STG_MAX_MESSAGE
157};
158
/* Stream Stage Message Types For Notification */
160enum ipc_stg_operation_notify {
161 IPC_POSITION_CHANGED = 0,
162 IPC_STG_GLITCH,
163 IPC_STG_MAX_NOTIFY
164};
165
166enum ipc_glitch_type {
167 IPC_GLITCH_UNDERRUN = 1,
168 IPC_GLITCH_DECODER_ERROR,
169 IPC_GLITCH_DOUBLED_WRITE_POS,
170 IPC_GLITCH_MAX
171};
172
173/* Debug Control */
174enum ipc_debug_operation {
175 IPC_DEBUG_ENABLE_LOG = 0,
176 IPC_DEBUG_DISABLE_LOG = 1,
177 IPC_DEBUG_REQUEST_LOG_DUMP = 2,
178 IPC_DEBUG_NOTIFY_LOG_DUMP = 3,
179 IPC_DEBUG_MAX_DEBUG_LOG
180};
181
182/* Firmware Ready */
183struct sst_hsw_ipc_fw_ready {
184 u32 inbox_offset;
185 u32 outbox_offset;
186 u32 inbox_size;
187 u32 outbox_size;
188 u32 fw_info_size;
Jie Yang249addd2014-07-15 08:51:12 +0800189 u8 fw_info[IPC_MAX_MAILBOX_BYTES - 5 * sizeof(u32)];
Mark Browna4b12992014-03-12 23:04:35 +0000190} __attribute__((packed));
191
192struct ipc_message {
193 struct list_head list;
194 u32 header;
195
196 /* direction wrt host CPU */
197 char tx_data[IPC_MAX_MAILBOX_BYTES];
198 size_t tx_size;
199 char rx_data[IPC_MAX_MAILBOX_BYTES];
200 size_t rx_size;
201
202 wait_queue_head_t waitq;
203 bool pending;
204 bool complete;
205 bool wait;
206 int errno;
207};
208
209struct sst_hsw_stream;
210struct sst_hsw;
211
/* Stream information */
213struct sst_hsw_stream {
214 /* configuration */
215 struct sst_hsw_ipc_stream_alloc_req request;
216 struct sst_hsw_ipc_stream_alloc_reply reply;
217 struct sst_hsw_ipc_stream_free_req free_req;
218
219 /* Mixer info */
220 u32 mute_volume[SST_HSW_NO_CHANNELS];
221 u32 mute[SST_HSW_NO_CHANNELS];
222
223 /* runtime info */
224 struct sst_hsw *hsw;
225 int host_id;
226 bool commited;
227 bool running;
228
229 /* Notification work */
230 struct work_struct notify_work;
231 u32 header;
232
233 /* Position info from DSP */
234 struct sst_hsw_ipc_stream_set_position wpos;
235 struct sst_hsw_ipc_stream_get_position rpos;
236 struct sst_hsw_ipc_stream_glitch_position glitch;
237
238 /* Volume info */
239 struct sst_hsw_ipc_volume_req vol_req;
240
241 /* driver callback */
242 u32 (*notify_position)(struct sst_hsw_stream *stream, void *data);
243 void *pdata;
244
245 struct list_head node;
246};
247
248/* FW log ring information */
249struct sst_hsw_log_stream {
250 dma_addr_t dma_addr;
251 unsigned char *dma_area;
252 unsigned char *ring_descr;
253 int pages;
254 int size;
255
256 /* Notification work */
257 struct work_struct notify_work;
258 wait_queue_head_t readers_wait_q;
259 struct mutex rw_mutex;
260
261 u32 last_pos;
262 u32 curr_pos;
263 u32 reader_pos;
264
265 /* fw log config */
266 u32 config[SST_HSW_FW_LOG_CONFIG_DWORDS];
267
268 struct sst_hsw *hsw;
269};
270
271/* SST Haswell IPC data */
272struct sst_hsw {
273 struct device *dev;
274 struct sst_dsp *dsp;
275 struct platform_device *pdev_pcm;
276
277 /* FW config */
278 struct sst_hsw_ipc_fw_ready fw_ready;
279 struct sst_hsw_ipc_fw_version version;
Mark Browna4b12992014-03-12 23:04:35 +0000280 bool fw_done;
Liam Girdwoodaed3c7b2014-10-29 17:40:42 +0000281 struct sst_fw *sst_fw;
Mark Browna4b12992014-03-12 23:04:35 +0000282
283 /* stream */
284 struct list_head stream_list;
285
286 /* global mixer */
287 struct sst_hsw_ipc_stream_info_reply mixer_info;
288 enum sst_hsw_volume_curve curve_type;
289 u32 curve_duration;
290 u32 mute[SST_HSW_NO_CHANNELS];
291 u32 mute_volume[SST_HSW_NO_CHANNELS];
292
293 /* DX */
294 struct sst_hsw_ipc_dx_reply dx;
Liam Girdwoodaed3c7b2014-10-29 17:40:42 +0000295 void *dx_context;
296 dma_addr_t dx_context_paddr;
Mark Browna4b12992014-03-12 23:04:35 +0000297
298 /* boot */
299 wait_queue_head_t boot_wait;
300 bool boot_complete;
301 bool shutdown;
302
303 /* IPC messaging */
304 struct list_head tx_list;
305 struct list_head rx_list;
306 struct list_head empty_list;
307 wait_queue_head_t wait_txq;
308 struct task_struct *tx_thread;
309 struct kthread_worker kworker;
310 struct kthread_work kwork;
311 bool pending;
312 struct ipc_message *msg;
313
314 /* FW log stream */
315 struct sst_hsw_log_stream log_stream;
316};
317
318#define CREATE_TRACE_POINTS
319#include <trace/events/hswadsp.h>
320
321static inline u32 msg_get_global_type(u32 msg)
322{
323 return (msg & IPC_GLB_TYPE_MASK) >> IPC_GLB_TYPE_SHIFT;
324}
325
326static inline u32 msg_get_global_reply(u32 msg)
327{
328 return (msg & IPC_GLB_REPLY_MASK) >> IPC_GLB_REPLY_SHIFT;
329}
330
331static inline u32 msg_get_stream_type(u32 msg)
332{
333 return (msg & IPC_STR_TYPE_MASK) >> IPC_STR_TYPE_SHIFT;
334}
335
336static inline u32 msg_get_stage_type(u32 msg)
337{
338 return (msg & IPC_STG_TYPE_MASK) >> IPC_STG_TYPE_SHIFT;
339}
340
Mark Browna4b12992014-03-12 23:04:35 +0000341static inline u32 msg_get_stream_id(u32 msg)
342{
343 return (msg & IPC_STR_ID_MASK) >> IPC_STR_ID_SHIFT;
344}
345
346static inline u32 msg_get_notify_reason(u32 msg)
347{
348 return (msg & IPC_STG_TYPE_MASK) >> IPC_STG_TYPE_SHIFT;
349}
350
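/*
 * Illustrative sketch (not built): how a stream-stage IPC header is
 * composed from the shift/type macros above and decoded again with the
 * msg_get_*() helpers. The stream and stage IDs are arbitrary example
 * parameters, not values taken from real hardware.
 */
#if 0
static u32 example_build_stage_header(u32 stream_hw_id, u32 stage_id)
{
	u32 header;

	/* global type and stream type occupy the upper bit-fields */
	header = IPC_GLB_TYPE(IPC_GLB_STREAM_MESSAGE) |
		IPC_STR_TYPE(IPC_STR_STAGE_MESSAGE);
	/* stream ID, stage operation and stage ID slot into their own fields */
	header |= (stream_hw_id << IPC_STR_ID_SHIFT);
	header |= (IPC_STG_SET_VOLUME << IPC_STG_TYPE_SHIFT);
	header |= (stage_id << IPC_STG_ID_SHIFT);

	/* decoding with the helpers above returns the same fields */
	WARN_ON(msg_get_global_type(header) != IPC_GLB_STREAM_MESSAGE);
	WARN_ON(msg_get_stream_type(header) != IPC_STR_STAGE_MESSAGE);
	WARN_ON(msg_get_stream_id(header) != stream_hw_id);

	return header;
}
#endif
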
351u32 create_channel_map(enum sst_hsw_channel_config config)
352{
353 switch (config) {
354 case SST_HSW_CHANNEL_CONFIG_MONO:
355 return (0xFFFFFFF0 | SST_HSW_CHANNEL_CENTER);
356 case SST_HSW_CHANNEL_CONFIG_STEREO:
357 return (0xFFFFFF00 | SST_HSW_CHANNEL_LEFT
358 | (SST_HSW_CHANNEL_RIGHT << 4));
359 case SST_HSW_CHANNEL_CONFIG_2_POINT_1:
360 return (0xFFFFF000 | SST_HSW_CHANNEL_LEFT
361 | (SST_HSW_CHANNEL_RIGHT << 4)
362 | (SST_HSW_CHANNEL_LFE << 8 ));
363 case SST_HSW_CHANNEL_CONFIG_3_POINT_0:
364 return (0xFFFFF000 | SST_HSW_CHANNEL_LEFT
365 | (SST_HSW_CHANNEL_CENTER << 4)
366 | (SST_HSW_CHANNEL_RIGHT << 8));
367 case SST_HSW_CHANNEL_CONFIG_3_POINT_1:
368 return (0xFFFF0000 | SST_HSW_CHANNEL_LEFT
369 | (SST_HSW_CHANNEL_CENTER << 4)
370 | (SST_HSW_CHANNEL_RIGHT << 8)
371 | (SST_HSW_CHANNEL_LFE << 12));
372 case SST_HSW_CHANNEL_CONFIG_QUATRO:
373 return (0xFFFF0000 | SST_HSW_CHANNEL_LEFT
374 | (SST_HSW_CHANNEL_RIGHT << 4)
375 | (SST_HSW_CHANNEL_LEFT_SURROUND << 8)
376 | (SST_HSW_CHANNEL_RIGHT_SURROUND << 12));
377 case SST_HSW_CHANNEL_CONFIG_4_POINT_0:
378 return (0xFFFF0000 | SST_HSW_CHANNEL_LEFT
379 | (SST_HSW_CHANNEL_CENTER << 4)
380 | (SST_HSW_CHANNEL_RIGHT << 8)
381 | (SST_HSW_CHANNEL_CENTER_SURROUND << 12));
382 case SST_HSW_CHANNEL_CONFIG_5_POINT_0:
383 return (0xFFF00000 | SST_HSW_CHANNEL_LEFT
384 | (SST_HSW_CHANNEL_CENTER << 4)
385 | (SST_HSW_CHANNEL_RIGHT << 8)
386 | (SST_HSW_CHANNEL_LEFT_SURROUND << 12)
387 | (SST_HSW_CHANNEL_RIGHT_SURROUND << 16));
388 case SST_HSW_CHANNEL_CONFIG_5_POINT_1:
389 return (0xFF000000 | SST_HSW_CHANNEL_CENTER
390 | (SST_HSW_CHANNEL_LEFT << 4)
391 | (SST_HSW_CHANNEL_RIGHT << 8)
392 | (SST_HSW_CHANNEL_LEFT_SURROUND << 12)
393 | (SST_HSW_CHANNEL_RIGHT_SURROUND << 16)
394 | (SST_HSW_CHANNEL_LFE << 20));
395 case SST_HSW_CHANNEL_CONFIG_DUAL_MONO:
396 return (0xFFFFFF00 | SST_HSW_CHANNEL_LEFT
397 | (SST_HSW_CHANNEL_LEFT << 4));
398 default:
399 return 0xFFFFFFFF;
400 }
401}
402
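/*
 * Illustrative sketch (fragment, assumes function scope, not built): each
 * nibble of the map returned by create_channel_map() holds one channel
 * slot and 0xF marks an unused slot. Purely for illustration, assuming
 * SST_HSW_CHANNEL_LEFT encodes as 0x0 and SST_HSW_CHANNEL_RIGHT as 0x1
 * (the real values live in sst-haswell-ipc.h), the stereo config below
 * would yield 0xFFFFFF10: left in nibble 0, right in nibble 1.
 */
#if 0
	u32 map = create_channel_map(SST_HSW_CHANNEL_CONFIG_STEREO);
#endif
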
403static struct sst_hsw_stream *get_stream_by_id(struct sst_hsw *hsw,
404 int stream_id)
405{
406 struct sst_hsw_stream *stream;
407
408 list_for_each_entry(stream, &hsw->stream_list, node) {
409 if (stream->reply.stream_hw_id == stream_id)
410 return stream;
411 }
412
413 return NULL;
414}
415
416static void ipc_shim_dbg(struct sst_hsw *hsw, const char *text)
417{
418 struct sst_dsp *sst = hsw->dsp;
419 u32 isr, ipcd, imrx, ipcx;
420
421 ipcx = sst_dsp_shim_read_unlocked(sst, SST_IPCX);
422 isr = sst_dsp_shim_read_unlocked(sst, SST_ISRX);
423 ipcd = sst_dsp_shim_read_unlocked(sst, SST_IPCD);
424 imrx = sst_dsp_shim_read_unlocked(sst, SST_IMRX);
425
426 dev_err(hsw->dev, "ipc: --%s-- ipcx 0x%8.8x isr 0x%8.8x ipcd 0x%8.8x imrx 0x%8.8x\n",
427 text, ipcx, isr, ipcd, imrx);
428}
429
430/* locks held by caller */
431static struct ipc_message *msg_get_empty(struct sst_hsw *hsw)
432{
433 struct ipc_message *msg = NULL;
434
435 if (!list_empty(&hsw->empty_list)) {
436 msg = list_first_entry(&hsw->empty_list, struct ipc_message,
437 list);
438 list_del(&msg->list);
439 }
440
441 return msg;
442}
443
444static void ipc_tx_msgs(struct kthread_work *work)
445{
446 struct sst_hsw *hsw =
447 container_of(work, struct sst_hsw, kwork);
448 struct ipc_message *msg;
449 unsigned long flags;
450 u32 ipcx;
451
452 spin_lock_irqsave(&hsw->dsp->spinlock, flags);
453
454 if (list_empty(&hsw->tx_list) || hsw->pending) {
455 spin_unlock_irqrestore(&hsw->dsp->spinlock, flags);
456 return;
457 }
458
	/*
	 * If the DSP is busy, we will TX messages after IRQ.
	 * Also postpone if we are in the middle of processing a completion IRQ.
	 */
Mark Browna4b12992014-03-12 23:04:35 +0000461 ipcx = sst_dsp_shim_read_unlocked(hsw->dsp, SST_IPCX);
Paweł Piskorski94ce3342014-08-01 23:09:44 +0800462 if (ipcx & (SST_IPCX_BUSY | SST_IPCX_DONE)) {
Mark Browna4b12992014-03-12 23:04:35 +0000463 spin_unlock_irqrestore(&hsw->dsp->spinlock, flags);
464 return;
465 }
466
467 msg = list_first_entry(&hsw->tx_list, struct ipc_message, list);
468
469 list_move(&msg->list, &hsw->rx_list);
470
471 /* send the message */
472 sst_dsp_outbox_write(hsw->dsp, msg->tx_data, msg->tx_size);
473 sst_dsp_ipc_msg_tx(hsw->dsp, msg->header | SST_IPCX_BUSY);
474
475 spin_unlock_irqrestore(&hsw->dsp->spinlock, flags);
476}
477
478/* locks held by caller */
479static void tx_msg_reply_complete(struct sst_hsw *hsw, struct ipc_message *msg)
480{
481 msg->complete = true;
482 trace_ipc_reply("completed", msg->header);
483
484 if (!msg->wait)
485 list_add_tail(&msg->list, &hsw->empty_list);
486 else
487 wake_up(&msg->waitq);
488}
489
490static int tx_wait_done(struct sst_hsw *hsw, struct ipc_message *msg,
491 void *rx_data)
492{
493 unsigned long flags;
494 int ret;
495
	/* wait for DSP completion (in all cases, including pending replies) */
497 ret = wait_event_timeout(msg->waitq, msg->complete,
498 msecs_to_jiffies(IPC_TIMEOUT_MSECS));
499
500 spin_lock_irqsave(&hsw->dsp->spinlock, flags);
501 if (ret == 0) {
502 ipc_shim_dbg(hsw, "message timeout");
503
504 trace_ipc_error("error message timeout for", msg->header);
Liam Girdwood97cfc752014-08-01 23:08:38 +0800505 list_del(&msg->list);
Mark Browna4b12992014-03-12 23:04:35 +0000506 ret = -ETIMEDOUT;
507 } else {
508
509 /* copy the data returned from DSP */
510 if (msg->rx_size)
511 memcpy(rx_data, msg->rx_data, msg->rx_size);
512 ret = msg->errno;
513 }
514
515 list_add_tail(&msg->list, &hsw->empty_list);
516 spin_unlock_irqrestore(&hsw->dsp->spinlock, flags);
517 return ret;
518}
519
520static int ipc_tx_message(struct sst_hsw *hsw, u32 header, void *tx_data,
521 size_t tx_bytes, void *rx_data, size_t rx_bytes, int wait)
522{
523 struct ipc_message *msg;
524 unsigned long flags;
525
526 spin_lock_irqsave(&hsw->dsp->spinlock, flags);
527
528 msg = msg_get_empty(hsw);
529 if (msg == NULL) {
530 spin_unlock_irqrestore(&hsw->dsp->spinlock, flags);
531 return -EBUSY;
532 }
533
534 if (tx_bytes)
535 memcpy(msg->tx_data, tx_data, tx_bytes);
536
537 msg->header = header;
538 msg->tx_size = tx_bytes;
539 msg->rx_size = rx_bytes;
540 msg->wait = wait;
541 msg->errno = 0;
542 msg->pending = false;
543 msg->complete = false;
544
545 list_add_tail(&msg->list, &hsw->tx_list);
546 spin_unlock_irqrestore(&hsw->dsp->spinlock, flags);
547
548 queue_kthread_work(&hsw->kworker, &hsw->kwork);
549
550 if (wait)
551 return tx_wait_done(hsw, msg, rx_data);
552 else
553 return 0;
554}
555
556static inline int ipc_tx_message_wait(struct sst_hsw *hsw, u32 header,
557 void *tx_data, size_t tx_bytes, void *rx_data, size_t rx_bytes)
558{
559 return ipc_tx_message(hsw, header, tx_data, tx_bytes, rx_data,
560 rx_bytes, 1);
561}
562
563static inline int ipc_tx_message_nowait(struct sst_hsw *hsw, u32 header,
564 void *tx_data, size_t tx_bytes)
565{
566 return ipc_tx_message(hsw, header, tx_data, tx_bytes, NULL, 0, 0);
567}
568
569static void hsw_fw_ready(struct sst_hsw *hsw, u32 header)
570{
571 struct sst_hsw_ipc_fw_ready fw_ready;
572 u32 offset;
Jie Yang249addd2014-07-15 08:51:12 +0800573 u8 fw_info[IPC_MAX_MAILBOX_BYTES - 5 * sizeof(u32)];
574 char *tmp[5], *pinfo;
575 int i = 0;
Mark Browna4b12992014-03-12 23:04:35 +0000576
577 offset = (header & 0x1FFFFFFF) << 3;
578
579 dev_dbg(hsw->dev, "ipc: DSP is ready 0x%8.8x offset %d\n",
580 header, offset);
581
582 /* copy data from the DSP FW ready offset */
583 sst_dsp_read(hsw->dsp, &fw_ready, offset, sizeof(fw_ready));
584
585 sst_dsp_mailbox_init(hsw->dsp, fw_ready.inbox_offset,
586 fw_ready.inbox_size, fw_ready.outbox_offset,
587 fw_ready.outbox_size);
588
589 hsw->boot_complete = true;
590 wake_up(&hsw->boot_wait);
591
592 dev_dbg(hsw->dev, " mailbox upstream 0x%x - size 0x%x\n",
593 fw_ready.inbox_offset, fw_ready.inbox_size);
594 dev_dbg(hsw->dev, " mailbox downstream 0x%x - size 0x%x\n",
595 fw_ready.outbox_offset, fw_ready.outbox_size);
Jie Yang249addd2014-07-15 08:51:12 +0800596 if (fw_ready.fw_info_size < sizeof(fw_ready.fw_info)) {
597 fw_ready.fw_info[fw_ready.fw_info_size] = 0;
598 dev_dbg(hsw->dev, " Firmware info: %s \n", fw_ready.fw_info);
599
600 /* log the FW version info got from the mailbox here. */
601 memcpy(fw_info, fw_ready.fw_info, fw_ready.fw_info_size);
602 pinfo = &fw_info[0];
		for (i = 0; i < ARRAY_SIZE(tmp); i++)
604 tmp[i] = strsep(&pinfo, " ");
605 dev_info(hsw->dev, "FW loaded, mailbox readback FW info: type %s, - "
606 "version: %s.%s, build %s, source commit id: %s\n",
607 tmp[0], tmp[1], tmp[2], tmp[3], tmp[4]);
608 }
Mark Browna4b12992014-03-12 23:04:35 +0000609}
610
611static void hsw_notification_work(struct work_struct *work)
612{
613 struct sst_hsw_stream *stream = container_of(work,
614 struct sst_hsw_stream, notify_work);
615 struct sst_hsw_ipc_stream_glitch_position *glitch = &stream->glitch;
616 struct sst_hsw_ipc_stream_get_position *pos = &stream->rpos;
617 struct sst_hsw *hsw = stream->hsw;
618 u32 reason;
619
620 reason = msg_get_notify_reason(stream->header);
621
622 switch (reason) {
623 case IPC_STG_GLITCH:
624 trace_ipc_notification("DSP stream under/overrun",
625 stream->reply.stream_hw_id);
626 sst_dsp_inbox_read(hsw->dsp, glitch, sizeof(*glitch));
627
628 dev_err(hsw->dev, "glitch %d pos 0x%x write pos 0x%x\n",
629 glitch->glitch_type, glitch->present_pos,
630 glitch->write_pos);
631 break;
632
633 case IPC_POSITION_CHANGED:
634 trace_ipc_notification("DSP stream position changed for",
635 stream->reply.stream_hw_id);
Dan Carpenter7897ab72014-04-16 18:38:11 +0300636 sst_dsp_inbox_read(hsw->dsp, pos, sizeof(*pos));
Mark Browna4b12992014-03-12 23:04:35 +0000637
638 if (stream->notify_position)
639 stream->notify_position(stream, stream->pdata);
640
641 break;
642 default:
643 dev_err(hsw->dev, "error: unknown notification 0x%x\n",
644 stream->header);
645 break;
646 }
647
648 /* tell DSP that notification has been handled */
Jie Yang09a34aa2015-01-21 07:20:23 +0800649 sst_dsp_shim_update_bits(hsw->dsp, SST_IPCD,
Mark Browna4b12992014-03-12 23:04:35 +0000650 SST_IPCD_BUSY | SST_IPCD_DONE, SST_IPCD_DONE);
651
652 /* unmask busy interrupt */
Jie Yang09a34aa2015-01-21 07:20:23 +0800653 sst_dsp_shim_update_bits(hsw->dsp, SST_IMRX, SST_IMRX_BUSY, 0);
Mark Browna4b12992014-03-12 23:04:35 +0000654}
655
656static struct ipc_message *reply_find_msg(struct sst_hsw *hsw, u32 header)
657{
658 struct ipc_message *msg;
659
660 /* clear reply bits & status bits */
661 header &= ~(IPC_STATUS_MASK | IPC_GLB_REPLY_MASK);
662
663 if (list_empty(&hsw->rx_list)) {
664 dev_err(hsw->dev, "error: rx list empty but received 0x%x\n",
665 header);
666 return NULL;
667 }
668
669 list_for_each_entry(msg, &hsw->rx_list, list) {
670 if (msg->header == header)
671 return msg;
672 }
673
674 return NULL;
675}
676
677static void hsw_stream_update(struct sst_hsw *hsw, struct ipc_message *msg)
678{
679 struct sst_hsw_stream *stream;
680 u32 header = msg->header & ~(IPC_STATUS_MASK | IPC_GLB_REPLY_MASK);
681 u32 stream_id = msg_get_stream_id(header);
682 u32 stream_msg = msg_get_stream_type(header);
683
684 stream = get_stream_by_id(hsw, stream_id);
685 if (stream == NULL)
686 return;
687
688 switch (stream_msg) {
689 case IPC_STR_STAGE_MESSAGE:
690 case IPC_STR_NOTIFICATION:
Liam Girdwood81552612014-07-30 20:09:47 +0800691 break;
Mark Browna4b12992014-03-12 23:04:35 +0000692 case IPC_STR_RESET:
Liam Girdwood81552612014-07-30 20:09:47 +0800693 trace_ipc_notification("stream reset", stream->reply.stream_hw_id);
Mark Browna4b12992014-03-12 23:04:35 +0000694 break;
695 case IPC_STR_PAUSE:
696 stream->running = false;
697 trace_ipc_notification("stream paused",
698 stream->reply.stream_hw_id);
699 break;
700 case IPC_STR_RESUME:
701 stream->running = true;
702 trace_ipc_notification("stream running",
703 stream->reply.stream_hw_id);
704 break;
705 }
706}
707
708static int hsw_process_reply(struct sst_hsw *hsw, u32 header)
709{
710 struct ipc_message *msg;
711 u32 reply = msg_get_global_reply(header);
712
713 trace_ipc_reply("processing -->", header);
714
715 msg = reply_find_msg(hsw, header);
716 if (msg == NULL) {
717 trace_ipc_error("error: can't find message header", header);
718 return -EIO;
719 }
720
721 /* first process the header */
722 switch (reply) {
723 case IPC_GLB_REPLY_PENDING:
724 trace_ipc_pending_reply("received", header);
725 msg->pending = true;
726 hsw->pending = true;
727 return 1;
728 case IPC_GLB_REPLY_SUCCESS:
729 if (msg->pending) {
730 trace_ipc_pending_reply("completed", header);
731 sst_dsp_inbox_read(hsw->dsp, msg->rx_data,
732 msg->rx_size);
733 hsw->pending = false;
734 } else {
735 /* copy data from the DSP */
736 sst_dsp_outbox_read(hsw->dsp, msg->rx_data,
737 msg->rx_size);
738 }
739 break;
740 /* these will be rare - but useful for debug */
741 case IPC_GLB_REPLY_UNKNOWN_MESSAGE_TYPE:
742 trace_ipc_error("error: unknown message type", header);
743 msg->errno = -EBADMSG;
744 break;
745 case IPC_GLB_REPLY_OUT_OF_RESOURCES:
746 trace_ipc_error("error: out of resources", header);
747 msg->errno = -ENOMEM;
748 break;
749 case IPC_GLB_REPLY_BUSY:
750 trace_ipc_error("error: reply busy", header);
751 msg->errno = -EBUSY;
752 break;
753 case IPC_GLB_REPLY_FAILURE:
754 trace_ipc_error("error: reply failure", header);
755 msg->errno = -EINVAL;
756 break;
757 case IPC_GLB_REPLY_STAGE_UNINITIALIZED:
758 trace_ipc_error("error: stage uninitialized", header);
759 msg->errno = -EINVAL;
760 break;
761 case IPC_GLB_REPLY_NOT_FOUND:
762 trace_ipc_error("error: reply not found", header);
763 msg->errno = -EINVAL;
764 break;
765 case IPC_GLB_REPLY_SOURCE_NOT_STARTED:
766 trace_ipc_error("error: source not started", header);
767 msg->errno = -EINVAL;
768 break;
769 case IPC_GLB_REPLY_INVALID_REQUEST:
770 trace_ipc_error("error: invalid request", header);
771 msg->errno = -EINVAL;
772 break;
773 case IPC_GLB_REPLY_ERROR_INVALID_PARAM:
774 trace_ipc_error("error: invalid parameter", header);
775 msg->errno = -EINVAL;
776 break;
777 default:
778 trace_ipc_error("error: unknown reply", header);
779 msg->errno = -EINVAL;
780 break;
781 }
782
783 /* update any stream states */
Paweł Piskorskid6e08612014-08-01 23:10:43 +0800784 if (msg_get_global_type(header) == IPC_GLB_STREAM_MESSAGE)
785 hsw_stream_update(hsw, msg);
Mark Browna4b12992014-03-12 23:04:35 +0000786
	/* wake up any waiters on this message and return the error */
788 list_del(&msg->list);
789 tx_msg_reply_complete(hsw, msg);
790
791 return 1;
792}
793
794static int hsw_stream_message(struct sst_hsw *hsw, u32 header)
795{
796 u32 stream_msg, stream_id, stage_type;
797 struct sst_hsw_stream *stream;
798 int handled = 0;
799
800 stream_msg = msg_get_stream_type(header);
801 stream_id = msg_get_stream_id(header);
802 stage_type = msg_get_stage_type(header);
803
804 stream = get_stream_by_id(hsw, stream_id);
805 if (stream == NULL)
806 return handled;
807
808 stream->header = header;
809
810 switch (stream_msg) {
811 case IPC_STR_STAGE_MESSAGE:
812 dev_err(hsw->dev, "error: stage msg not implemented 0x%8.8x\n",
813 header);
814 break;
815 case IPC_STR_NOTIFICATION:
816 schedule_work(&stream->notify_work);
817 break;
818 default:
819 /* handle pending message complete request */
820 handled = hsw_process_reply(hsw, header);
821 break;
822 }
823
824 return handled;
825}
826
827static int hsw_log_message(struct sst_hsw *hsw, u32 header)
828{
829 u32 operation = (header & IPC_LOG_OP_MASK) >> IPC_LOG_OP_SHIFT;
830 struct sst_hsw_log_stream *stream = &hsw->log_stream;
831 int ret = 1;
832
833 if (operation != IPC_DEBUG_REQUEST_LOG_DUMP) {
834 dev_err(hsw->dev,
835 "error: log msg not implemented 0x%8.8x\n", header);
836 return 0;
837 }
838
839 mutex_lock(&stream->rw_mutex);
840 stream->last_pos = stream->curr_pos;
841 sst_dsp_inbox_read(
842 hsw->dsp, &stream->curr_pos, sizeof(stream->curr_pos));
843 mutex_unlock(&stream->rw_mutex);
844
845 schedule_work(&stream->notify_work);
846
847 return ret;
848}
849
850static int hsw_process_notification(struct sst_hsw *hsw)
851{
852 struct sst_dsp *sst = hsw->dsp;
853 u32 type, header;
854 int handled = 1;
855
856 header = sst_dsp_shim_read_unlocked(sst, SST_IPCD);
857 type = msg_get_global_type(header);
858
859 trace_ipc_request("processing -->", header);
860
861 /* FW Ready is a special case */
862 if (!hsw->boot_complete && header & IPC_FW_READY) {
863 hsw_fw_ready(hsw, header);
864 return handled;
865 }
866
867 switch (type) {
868 case IPC_GLB_GET_FW_VERSION:
869 case IPC_GLB_ALLOCATE_STREAM:
870 case IPC_GLB_FREE_STREAM:
871 case IPC_GLB_GET_FW_CAPABILITIES:
872 case IPC_GLB_REQUEST_DUMP:
873 case IPC_GLB_GET_DEVICE_FORMATS:
874 case IPC_GLB_SET_DEVICE_FORMATS:
875 case IPC_GLB_ENTER_DX_STATE:
876 case IPC_GLB_GET_MIXER_STREAM_INFO:
877 case IPC_GLB_MAX_IPC_MESSAGE_TYPE:
878 case IPC_GLB_RESTORE_CONTEXT:
879 case IPC_GLB_SHORT_REPLY:
880 dev_err(hsw->dev, "error: message type %d header 0x%x\n",
881 type, header);
882 break;
883 case IPC_GLB_STREAM_MESSAGE:
884 handled = hsw_stream_message(hsw, header);
885 break;
886 case IPC_GLB_DEBUG_LOG_MESSAGE:
887 handled = hsw_log_message(hsw, header);
888 break;
889 default:
890 dev_err(hsw->dev, "error: unexpected type %d hdr 0x%8.8x\n",
891 type, header);
892 break;
893 }
894
895 return handled;
896}
897
898static irqreturn_t hsw_irq_thread(int irq, void *context)
899{
900 struct sst_dsp *sst = (struct sst_dsp *) context;
901 struct sst_hsw *hsw = sst_dsp_get_thread_context(sst);
902 u32 ipcx, ipcd;
903 int handled;
904 unsigned long flags;
905
906 spin_lock_irqsave(&sst->spinlock, flags);
907
908 ipcx = sst_dsp_ipc_msg_rx(hsw->dsp);
909 ipcd = sst_dsp_shim_read_unlocked(sst, SST_IPCD);
910
911 /* reply message from DSP */
912 if (ipcx & SST_IPCX_DONE) {
913
914 /* Handle Immediate reply from DSP Core */
915 handled = hsw_process_reply(hsw, ipcx);
916
917 if (handled > 0) {
918 /* clear DONE bit - tell DSP we have completed */
919 sst_dsp_shim_update_bits_unlocked(sst, SST_IPCX,
920 SST_IPCX_DONE, 0);
921
922 /* unmask Done interrupt */
923 sst_dsp_shim_update_bits_unlocked(sst, SST_IMRX,
924 SST_IMRX_DONE, 0);
925 }
926 }
927
928 /* new message from DSP */
929 if (ipcd & SST_IPCD_BUSY) {
930
931 /* Handle Notification and Delayed reply from DSP Core */
932 handled = hsw_process_notification(hsw);
933
934 /* clear BUSY bit and set DONE bit - accept new messages */
935 if (handled > 0) {
936 sst_dsp_shim_update_bits_unlocked(sst, SST_IPCD,
937 SST_IPCD_BUSY | SST_IPCD_DONE, SST_IPCD_DONE);
938
939 /* unmask busy interrupt */
940 sst_dsp_shim_update_bits_unlocked(sst, SST_IMRX,
941 SST_IMRX_BUSY, 0);
942 }
943 }
944
945 spin_unlock_irqrestore(&sst->spinlock, flags);
946
947 /* continue to send any remaining messages... */
948 queue_kthread_work(&hsw->kworker, &hsw->kwork);
949
950 return IRQ_HANDLED;
951}
952
953int sst_hsw_fw_get_version(struct sst_hsw *hsw,
954 struct sst_hsw_ipc_fw_version *version)
955{
956 int ret;
957
958 ret = ipc_tx_message_wait(hsw, IPC_GLB_TYPE(IPC_GLB_GET_FW_VERSION),
959 NULL, 0, version, sizeof(*version));
960 if (ret < 0)
961 dev_err(hsw->dev, "error: get version failed\n");
962
963 return ret;
964}
965
966/* Mixer Controls */
Mark Browna4b12992014-03-12 23:04:35 +0000967int sst_hsw_stream_get_volume(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
968 u32 stage_id, u32 channel, u32 *volume)
969{
970 if (channel > 1)
971 return -EINVAL;
972
973 sst_dsp_read(hsw->dsp, volume,
Christian Engelmayerbf657d22014-04-13 19:56:36 +0200974 stream->reply.volume_register_address[channel],
975 sizeof(*volume));
Mark Browna4b12992014-03-12 23:04:35 +0000976
977 return 0;
978}
979
Mark Browna4b12992014-03-12 23:04:35 +0000980/* stream volume */
981int sst_hsw_stream_set_volume(struct sst_hsw *hsw,
982 struct sst_hsw_stream *stream, u32 stage_id, u32 channel, u32 volume)
983{
984 struct sst_hsw_ipc_volume_req *req;
985 u32 header;
986 int ret;
987
988 trace_ipc_request("set stream volume", stream->reply.stream_hw_id);
989
Jie Yangf1e59822014-11-25 21:00:53 +0800990 if (channel >= 2 && channel != SST_HSW_CHANNELS_ALL)
Mark Browna4b12992014-03-12 23:04:35 +0000991 return -EINVAL;
992
Mark Browna4b12992014-03-12 23:04:35 +0000993 header = IPC_GLB_TYPE(IPC_GLB_STREAM_MESSAGE) |
994 IPC_STR_TYPE(IPC_STR_STAGE_MESSAGE);
995 header |= (stream->reply.stream_hw_id << IPC_STR_ID_SHIFT);
996 header |= (IPC_STG_SET_VOLUME << IPC_STG_TYPE_SHIFT);
997 header |= (stage_id << IPC_STG_ID_SHIFT);
998
999 req = &stream->vol_req;
Mark Browna4b12992014-03-12 23:04:35 +00001000 req->target_volume = volume;
1001
Jie Yangf1e59822014-11-25 21:00:53 +08001002 /* set both at same time ? */
1003 if (channel == SST_HSW_CHANNELS_ALL) {
1004 if (hsw->mute[0] && hsw->mute[1]) {
1005 hsw->mute_volume[0] = hsw->mute_volume[1] = volume;
1006 return 0;
1007 } else if (hsw->mute[0])
1008 req->channel = 1;
1009 else if (hsw->mute[1])
1010 req->channel = 0;
1011 else
1012 req->channel = SST_HSW_CHANNELS_ALL;
1013 } else {
1014 /* set only 1 channel */
1015 if (hsw->mute[channel]) {
1016 hsw->mute_volume[channel] = volume;
1017 return 0;
1018 }
1019 req->channel = channel;
1020 }
1021
Mark Browna4b12992014-03-12 23:04:35 +00001022 ret = ipc_tx_message_wait(hsw, header, req, sizeof(*req), NULL, 0);
1023 if (ret < 0) {
1024 dev_err(hsw->dev, "error: set stream volume failed\n");
1025 return ret;
1026 }
1027
1028 return 0;
1029}
1030
Mark Browna4b12992014-03-12 23:04:35 +00001031int sst_hsw_mixer_get_volume(struct sst_hsw *hsw, u32 stage_id, u32 channel,
1032 u32 *volume)
1033{
1034 if (channel > 1)
1035 return -EINVAL;
1036
1037 sst_dsp_read(hsw->dsp, volume,
1038 hsw->mixer_info.volume_register_address[channel],
1039 sizeof(*volume));
1040
1041 return 0;
1042}
1043
Mark Browna4b12992014-03-12 23:04:35 +00001044/* global mixer volume */
1045int sst_hsw_mixer_set_volume(struct sst_hsw *hsw, u32 stage_id, u32 channel,
1046 u32 volume)
1047{
1048 struct sst_hsw_ipc_volume_req req;
1049 u32 header;
1050 int ret;
1051
1052 trace_ipc_request("set mixer volume", volume);
1053
Jie Yangf1e59822014-11-25 21:00:53 +08001054 if (channel >= 2 && channel != SST_HSW_CHANNELS_ALL)
1055 return -EINVAL;
1056
Mark Browna4b12992014-03-12 23:04:35 +00001057 /* set both at same time ? */
Jie Yangf1e59822014-11-25 21:00:53 +08001058 if (channel == SST_HSW_CHANNELS_ALL) {
Mark Browna4b12992014-03-12 23:04:35 +00001059 if (hsw->mute[0] && hsw->mute[1]) {
1060 hsw->mute_volume[0] = hsw->mute_volume[1] = volume;
1061 return 0;
1062 } else if (hsw->mute[0])
1063 req.channel = 1;
1064 else if (hsw->mute[1])
1065 req.channel = 0;
1066 else
Jie Yangf1e59822014-11-25 21:00:53 +08001067 req.channel = SST_HSW_CHANNELS_ALL;
Mark Browna4b12992014-03-12 23:04:35 +00001068 } else {
1069 /* set only 1 channel */
1070 if (hsw->mute[channel]) {
1071 hsw->mute_volume[channel] = volume;
1072 return 0;
1073 }
1074 req.channel = channel;
1075 }
1076
1077 header = IPC_GLB_TYPE(IPC_GLB_STREAM_MESSAGE) |
1078 IPC_STR_TYPE(IPC_STR_STAGE_MESSAGE);
1079 header |= (hsw->mixer_info.mixer_hw_id << IPC_STR_ID_SHIFT);
1080 header |= (IPC_STG_SET_VOLUME << IPC_STG_TYPE_SHIFT);
1081 header |= (stage_id << IPC_STG_ID_SHIFT);
1082
1083 req.curve_duration = hsw->curve_duration;
1084 req.curve_type = hsw->curve_type;
1085 req.target_volume = volume;
1086
1087 ret = ipc_tx_message_wait(hsw, header, &req, sizeof(req), NULL, 0);
1088 if (ret < 0) {
1089 dev_err(hsw->dev, "error: set mixer volume failed\n");
1090 return ret;
1091 }
1092
1093 return 0;
1094}
1095
1096/* Stream API */
1097struct sst_hsw_stream *sst_hsw_stream_new(struct sst_hsw *hsw, int id,
1098 u32 (*notify_position)(struct sst_hsw_stream *stream, void *data),
1099 void *data)
1100{
1101 struct sst_hsw_stream *stream;
Wenkai Dud132cb02014-04-23 13:29:30 +03001102 struct sst_dsp *sst = hsw->dsp;
1103 unsigned long flags;
Mark Browna4b12992014-03-12 23:04:35 +00001104
1105 stream = kzalloc(sizeof(*stream), GFP_KERNEL);
1106 if (stream == NULL)
1107 return NULL;
1108
Wenkai Dud132cb02014-04-23 13:29:30 +03001109 spin_lock_irqsave(&sst->spinlock, flags);
Jie Yanga0a7c482015-01-12 17:17:34 +08001110 stream->reply.stream_hw_id = INVALID_STREAM_HW_ID;
Mark Browna4b12992014-03-12 23:04:35 +00001111 list_add(&stream->node, &hsw->stream_list);
1112 stream->notify_position = notify_position;
1113 stream->pdata = data;
1114 stream->hsw = hsw;
1115 stream->host_id = id;
1116
1117 /* work to process notification messages */
1118 INIT_WORK(&stream->notify_work, hsw_notification_work);
Wenkai Dud132cb02014-04-23 13:29:30 +03001119 spin_unlock_irqrestore(&sst->spinlock, flags);
Mark Browna4b12992014-03-12 23:04:35 +00001120
1121 return stream;
1122}
1123
1124int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
1125{
1126 u32 header;
1127 int ret = 0;
Wenkai Dud132cb02014-04-23 13:29:30 +03001128 struct sst_dsp *sst = hsw->dsp;
1129 unsigned long flags;
Mark Browna4b12992014-03-12 23:04:35 +00001130
Jie Yangf81677b2015-01-07 22:07:05 +08001131 if (!stream) {
1132 dev_warn(hsw->dev, "warning: stream is NULL, no stream to free, ignore it.\n");
1133 return 0;
1134 }
1135
	/* don't free DSP streams that are not committed */
1137 if (!stream->commited)
1138 goto out;
1139
1140 trace_ipc_request("stream free", stream->host_id);
1141
1142 stream->free_req.stream_id = stream->reply.stream_hw_id;
1143 header = IPC_GLB_TYPE(IPC_GLB_FREE_STREAM);
1144
1145 ret = ipc_tx_message_wait(hsw, header, &stream->free_req,
1146 sizeof(stream->free_req), NULL, 0);
1147 if (ret < 0) {
1148 dev_err(hsw->dev, "error: free stream %d failed\n",
1149 stream->free_req.stream_id);
1150 return -EAGAIN;
1151 }
1152
1153 trace_hsw_stream_free_req(stream, &stream->free_req);
1154
1155out:
Jarkko Nikulade30a2c2014-04-24 10:34:36 +03001156 cancel_work_sync(&stream->notify_work);
Wenkai Dud132cb02014-04-23 13:29:30 +03001157 spin_lock_irqsave(&sst->spinlock, flags);
Mark Browna4b12992014-03-12 23:04:35 +00001158 list_del(&stream->node);
1159 kfree(stream);
Wenkai Dud132cb02014-04-23 13:29:30 +03001160 spin_unlock_irqrestore(&sst->spinlock, flags);
Mark Browna4b12992014-03-12 23:04:35 +00001161
1162 return ret;
1163}
1164
1165int sst_hsw_stream_set_bits(struct sst_hsw *hsw,
1166 struct sst_hsw_stream *stream, enum sst_hsw_bitdepth bits)
1167{
1168 if (stream->commited) {
1169 dev_err(hsw->dev, "error: stream committed for set bits\n");
1170 return -EINVAL;
1171 }
1172
1173 stream->request.format.bitdepth = bits;
1174 return 0;
1175}
1176
1177int sst_hsw_stream_set_channels(struct sst_hsw *hsw,
1178 struct sst_hsw_stream *stream, int channels)
1179{
1180 if (stream->commited) {
1181 dev_err(hsw->dev, "error: stream committed for set channels\n");
1182 return -EINVAL;
1183 }
1184
Mark Browna4b12992014-03-12 23:04:35 +00001185 stream->request.format.ch_num = channels;
1186 return 0;
1187}
1188
1189int sst_hsw_stream_set_rate(struct sst_hsw *hsw,
1190 struct sst_hsw_stream *stream, int rate)
1191{
1192 if (stream->commited) {
1193 dev_err(hsw->dev, "error: stream committed for set rate\n");
1194 return -EINVAL;
1195 }
1196
1197 stream->request.format.frequency = rate;
1198 return 0;
1199}
1200
1201int sst_hsw_stream_set_map_config(struct sst_hsw *hsw,
1202 struct sst_hsw_stream *stream, u32 map,
1203 enum sst_hsw_channel_config config)
1204{
1205 if (stream->commited) {
1206 dev_err(hsw->dev, "error: stream committed for set map\n");
1207 return -EINVAL;
1208 }
1209
1210 stream->request.format.map = map;
1211 stream->request.format.config = config;
1212 return 0;
1213}
1214
1215int sst_hsw_stream_set_style(struct sst_hsw *hsw,
1216 struct sst_hsw_stream *stream, enum sst_hsw_interleaving style)
1217{
1218 if (stream->commited) {
1219 dev_err(hsw->dev, "error: stream committed for set style\n");
1220 return -EINVAL;
1221 }
1222
1223 stream->request.format.style = style;
1224 return 0;
1225}
1226
1227int sst_hsw_stream_set_valid(struct sst_hsw *hsw,
1228 struct sst_hsw_stream *stream, u32 bits)
1229{
1230 if (stream->commited) {
1231 dev_err(hsw->dev, "error: stream committed for set valid bits\n");
1232 return -EINVAL;
1233 }
1234
1235 stream->request.format.valid_bit = bits;
1236 return 0;
1237}
1238
1239/* Stream Configuration */
1240int sst_hsw_stream_format(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
1241 enum sst_hsw_stream_path_id path_id,
1242 enum sst_hsw_stream_type stream_type,
1243 enum sst_hsw_stream_format format_id)
1244{
1245 if (stream->commited) {
1246 dev_err(hsw->dev, "error: stream committed for set format\n");
1247 return -EINVAL;
1248 }
1249
1250 stream->request.path_id = path_id;
1251 stream->request.stream_type = stream_type;
1252 stream->request.format_id = format_id;
1253
1254 trace_hsw_stream_alloc_request(stream, &stream->request);
1255
1256 return 0;
1257}
1258
1259int sst_hsw_stream_buffer(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
1260 u32 ring_pt_address, u32 num_pages,
1261 u32 ring_size, u32 ring_offset, u32 ring_first_pfn)
1262{
1263 if (stream->commited) {
1264 dev_err(hsw->dev, "error: stream committed for buffer\n");
1265 return -EINVAL;
1266 }
1267
1268 stream->request.ringinfo.ring_pt_address = ring_pt_address;
1269 stream->request.ringinfo.num_pages = num_pages;
1270 stream->request.ringinfo.ring_size = ring_size;
1271 stream->request.ringinfo.ring_offset = ring_offset;
1272 stream->request.ringinfo.ring_first_pfn = ring_first_pfn;
1273
1274 trace_hsw_stream_buffer(stream);
1275
1276 return 0;
1277}
1278
1279int sst_hsw_stream_set_module_info(struct sst_hsw *hsw,
Liam Girdwoode9600bc2014-10-28 17:37:12 +00001280 struct sst_hsw_stream *stream, struct sst_module_runtime *runtime)
Mark Browna4b12992014-03-12 23:04:35 +00001281{
1282 struct sst_hsw_module_map *map = &stream->request.map;
Liam Girdwoode9600bc2014-10-28 17:37:12 +00001283 struct sst_dsp *dsp = sst_hsw_get_dsp(hsw);
1284 struct sst_module *module = runtime->module;
Mark Browna4b12992014-03-12 23:04:35 +00001285
1286 if (stream->commited) {
1287 dev_err(hsw->dev, "error: stream committed for set module\n");
1288 return -EINVAL;
1289 }
1290
1291 /* only support initial module atm */
1292 map->module_entries_count = 1;
Liam Girdwoode9600bc2014-10-28 17:37:12 +00001293 map->module_entries[0].module_id = module->id;
1294 map->module_entries[0].entry_point = module->entry;
Mark Browna4b12992014-03-12 23:04:35 +00001295
Liam Girdwoode9600bc2014-10-28 17:37:12 +00001296 stream->request.persistent_mem.offset =
1297 sst_dsp_get_offset(dsp, runtime->persistent_offset, SST_MEM_DRAM);
1298 stream->request.persistent_mem.size = module->persistent_size;
Mark Browna4b12992014-03-12 23:04:35 +00001299
Liam Girdwoode9600bc2014-10-28 17:37:12 +00001300 stream->request.scratch_mem.offset =
1301 sst_dsp_get_offset(dsp, dsp->scratch_offset, SST_MEM_DRAM);
1302 stream->request.scratch_mem.size = dsp->scratch_size;
Mark Browna4b12992014-03-12 23:04:35 +00001303
Liam Girdwoode9600bc2014-10-28 17:37:12 +00001304 dev_dbg(hsw->dev, "module %d runtime %d using:\n", module->id,
1305 runtime->id);
1306 dev_dbg(hsw->dev, " persistent offset 0x%x bytes 0x%x\n",
1307 stream->request.persistent_mem.offset,
1308 stream->request.persistent_mem.size);
1309 dev_dbg(hsw->dev, " scratch offset 0x%x bytes 0x%x\n",
1310 stream->request.scratch_mem.offset,
1311 stream->request.scratch_mem.size);
Mark Browna4b12992014-03-12 23:04:35 +00001312
1313 return 0;
1314}
1315
1316int sst_hsw_stream_commit(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
1317{
1318 struct sst_hsw_ipc_stream_alloc_req *str_req = &stream->request;
1319 struct sst_hsw_ipc_stream_alloc_reply *reply = &stream->reply;
1320 u32 header;
1321 int ret;
1322
Jie Yangf81677b2015-01-07 22:07:05 +08001323 if (!stream) {
1324 dev_warn(hsw->dev, "warning: stream is NULL, no stream to commit, ignore it.\n");
1325 return 0;
1326 }
1327
1328 if (stream->commited) {
1329 dev_warn(hsw->dev, "warning: stream is already committed, ignore it.\n");
1330 return 0;
1331 }
1332
Mark Browna4b12992014-03-12 23:04:35 +00001333 trace_ipc_request("stream alloc", stream->host_id);
1334
1335 header = IPC_GLB_TYPE(IPC_GLB_ALLOCATE_STREAM);
1336
1337 ret = ipc_tx_message_wait(hsw, header, str_req, sizeof(*str_req),
1338 reply, sizeof(*reply));
1339 if (ret < 0) {
1340 dev_err(hsw->dev, "error: stream commit failed\n");
1341 return ret;
1342 }
1343
1344 stream->commited = 1;
1345 trace_hsw_stream_alloc_reply(stream);
1346
1347 return 0;
1348}
1349
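/*
 * Illustrative sketch (not built): the order in which a PCM driver is
 * expected to chain the stream calls above before starting playback.
 * All parameter values, the ring buffer fields and the "runtime" module
 * handle are placeholders; the enum value names are assumed to come from
 * sst-haswell-ipc.h. Error handling is omitted for brevity.
 */
#if 0
	stream = sst_hsw_stream_new(hsw, id, notify_position, pcm_data);
	sst_hsw_stream_format(hsw, stream, SST_HSW_STREAM_PATH_SSP0_OUT,
		SST_HSW_STREAM_TYPE_SYSTEM, SST_HSW_STREAM_FORMAT_PCM_FORMAT);
	sst_hsw_stream_set_rate(hsw, stream, 48000);
	sst_hsw_stream_set_bits(hsw, stream, SST_HSW_DEPTH_16BIT);
	sst_hsw_stream_set_channels(hsw, stream, 2);
	sst_hsw_stream_buffer(hsw, stream, ring_pt_address, num_pages,
		ring_size, 0, ring_first_pfn);
	sst_hsw_stream_set_module_info(hsw, stream, runtime);
	ret = sst_hsw_stream_commit(hsw, stream);
#endif
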
/*
 * Stream Information - these calls could be inline but we want the IPC
 * ABI to be opaque to client PCM drivers to cope with any future ABI changes.
 */
Mark Browna4b12992014-03-12 23:04:35 +00001352int sst_hsw_mixer_get_info(struct sst_hsw *hsw)
1353{
1354 struct sst_hsw_ipc_stream_info_reply *reply;
1355 u32 header;
1356 int ret;
1357
1358 reply = &hsw->mixer_info;
1359 header = IPC_GLB_TYPE(IPC_GLB_GET_MIXER_STREAM_INFO);
1360
1361 trace_ipc_request("get global mixer info", 0);
1362
1363 ret = ipc_tx_message_wait(hsw, header, NULL, 0, reply, sizeof(*reply));
1364 if (ret < 0) {
1365 dev_err(hsw->dev, "error: get stream info failed\n");
1366 return ret;
1367 }
1368
1369 trace_hsw_mixer_info_reply(reply);
1370
1371 return 0;
1372}
1373
1374/* Send stream command */
1375static int sst_hsw_stream_operations(struct sst_hsw *hsw, int type,
1376 int stream_id, int wait)
1377{
1378 u32 header;
1379
1380 header = IPC_GLB_TYPE(IPC_GLB_STREAM_MESSAGE) | IPC_STR_TYPE(type);
1381 header |= (stream_id << IPC_STR_ID_SHIFT);
1382
1383 if (wait)
1384 return ipc_tx_message_wait(hsw, header, NULL, 0, NULL, 0);
1385 else
1386 return ipc_tx_message_nowait(hsw, header, NULL, 0);
1387}
1388
1389/* Stream ALSA trigger operations */
1390int sst_hsw_stream_pause(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
1391 int wait)
1392{
1393 int ret;
1394
Jie Yangf81677b2015-01-07 22:07:05 +08001395 if (!stream) {
1396 dev_warn(hsw->dev, "warning: stream is NULL, no stream to pause, ignore it.\n");
1397 return 0;
1398 }
1399
Mark Browna4b12992014-03-12 23:04:35 +00001400 trace_ipc_request("stream pause", stream->reply.stream_hw_id);
1401
1402 ret = sst_hsw_stream_operations(hsw, IPC_STR_PAUSE,
1403 stream->reply.stream_hw_id, wait);
1404 if (ret < 0)
1405 dev_err(hsw->dev, "error: failed to pause stream %d\n",
1406 stream->reply.stream_hw_id);
1407
1408 return ret;
1409}
1410
1411int sst_hsw_stream_resume(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
1412 int wait)
1413{
1414 int ret;
1415
Jie Yangf81677b2015-01-07 22:07:05 +08001416 if (!stream) {
1417 dev_warn(hsw->dev, "warning: stream is NULL, no stream to resume, ignore it.\n");
1418 return 0;
1419 }
1420
Mark Browna4b12992014-03-12 23:04:35 +00001421 trace_ipc_request("stream resume", stream->reply.stream_hw_id);
1422
1423 ret = sst_hsw_stream_operations(hsw, IPC_STR_RESUME,
1424 stream->reply.stream_hw_id, wait);
1425 if (ret < 0)
1426 dev_err(hsw->dev, "error: failed to resume stream %d\n",
1427 stream->reply.stream_hw_id);
1428
1429 return ret;
1430}
1431
1432int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
1433{
1434 int ret, tries = 10;
1435
Jie Yangf81677b2015-01-07 22:07:05 +08001436 if (!stream) {
1437 dev_warn(hsw->dev, "warning: stream is NULL, no stream to reset, ignore it.\n");
1438 return 0;
1439 }
1440
	/* don't reset streams that are not committed */
1442 if (!stream->commited)
1443 return 0;
1444
1445 /* wait for pause to complete before we reset the stream */
	while (stream->running && tries--)
		msleep(1);
	if (stream->running) {
1449 dev_err(hsw->dev, "error: reset stream %d still running\n",
1450 stream->reply.stream_hw_id);
1451 return -EINVAL;
1452 }
1453
1454 trace_ipc_request("stream reset", stream->reply.stream_hw_id);
1455
1456 ret = sst_hsw_stream_operations(hsw, IPC_STR_RESET,
1457 stream->reply.stream_hw_id, 1);
1458 if (ret < 0)
1459 dev_err(hsw->dev, "error: failed to reset stream %d\n",
1460 stream->reply.stream_hw_id);
1461 return ret;
1462}
1463
1464/* Stream pointer positions */
Liam Girdwood51b4e242014-05-02 16:56:33 +01001465u32 sst_hsw_get_dsp_position(struct sst_hsw *hsw,
Mark Browna4b12992014-03-12 23:04:35 +00001466 struct sst_hsw_stream *stream)
1467{
Liam Girdwood51b4e242014-05-02 16:56:33 +01001468 u32 rpos;
1469
1470 sst_dsp_read(hsw->dsp, &rpos,
1471 stream->reply.read_position_register_address, sizeof(rpos));
1472
1473 return rpos;
1474}
1475
1476/* Stream presentation (monotonic) positions */
1477u64 sst_hsw_get_dsp_presentation_position(struct sst_hsw *hsw,
1478 struct sst_hsw_stream *stream)
1479{
1480 u64 ppos;
1481
1482 sst_dsp_read(hsw->dsp, &ppos,
1483 stream->reply.presentation_position_register_address,
1484 sizeof(ppos));
1485
1486 return ppos;
Mark Browna4b12992014-03-12 23:04:35 +00001487}
1488
Mark Browna4b12992014-03-12 23:04:35 +00001489/* physical BE config */
1490int sst_hsw_device_set_config(struct sst_hsw *hsw,
1491 enum sst_hsw_device_id dev, enum sst_hsw_device_mclk mclk,
1492 enum sst_hsw_device_mode mode, u32 clock_divider)
1493{
1494 struct sst_hsw_ipc_device_config_req config;
1495 u32 header;
1496 int ret;
1497
1498 trace_ipc_request("set device config", dev);
1499
1500 config.ssp_interface = dev;
1501 config.clock_frequency = mclk;
1502 config.mode = mode;
1503 config.clock_divider = clock_divider;
Liam Girdwoodf07e51c2014-10-16 15:29:15 +01001504 if (mode == SST_HSW_DEVICE_TDM_CLOCK_MASTER)
1505 config.channels = 4;
1506 else
1507 config.channels = 2;
Mark Browna4b12992014-03-12 23:04:35 +00001508
1509 trace_hsw_device_config_req(&config);
1510
1511 header = IPC_GLB_TYPE(IPC_GLB_SET_DEVICE_FORMATS);
1512
1513 ret = ipc_tx_message_wait(hsw, header, &config, sizeof(config),
1514 NULL, 0);
1515 if (ret < 0)
1516 dev_err(hsw->dev, "error: set device formats failed\n");
1517
1518 return ret;
1519}
1520EXPORT_SYMBOL_GPL(sst_hsw_device_set_config);
1521
1522/* DX Config */
1523int sst_hsw_dx_set_state(struct sst_hsw *hsw,
1524 enum sst_hsw_dx_state state, struct sst_hsw_ipc_dx_reply *dx)
1525{
1526 u32 header, state_;
Liam Girdwood543ec632014-07-30 20:11:26 +08001527 int ret, item;
Mark Browna4b12992014-03-12 23:04:35 +00001528
1529 header = IPC_GLB_TYPE(IPC_GLB_ENTER_DX_STATE);
1530 state_ = state;
1531
1532 trace_ipc_request("PM enter Dx state", state);
1533
1534 ret = ipc_tx_message_wait(hsw, header, &state_, sizeof(state_),
Dan Carpenter7897ab72014-04-16 18:38:11 +03001535 dx, sizeof(*dx));
Mark Browna4b12992014-03-12 23:04:35 +00001536 if (ret < 0) {
1537 dev_err(hsw->dev, "ipc: error set dx state %d failed\n", state);
1538 return ret;
1539 }
1540
Liam Girdwood543ec632014-07-30 20:11:26 +08001541 for (item = 0; item < dx->entries_no; item++) {
1542 dev_dbg(hsw->dev,
1543 "Item[%d] offset[%x] - size[%x] - source[%x]\n",
1544 item, dx->mem_info[item].offset,
1545 dx->mem_info[item].size,
1546 dx->mem_info[item].source);
1547 }
Mark Browna4b12992014-03-12 23:04:35 +00001548 dev_dbg(hsw->dev, "ipc: got %d entry numbers for state %d\n",
1549 dx->entries_no, state);
1550
Liam Girdwoode9600bc2014-10-28 17:37:12 +00001551 return ret;
Mark Browna4b12992014-03-12 23:04:35 +00001552}
1553
Liam Girdwoode9600bc2014-10-28 17:37:12 +00001554struct sst_module_runtime *sst_hsw_runtime_module_create(struct sst_hsw *hsw,
1555 int mod_id, int offset)
Mark Browna4b12992014-03-12 23:04:35 +00001556{
Liam Girdwoode9600bc2014-10-28 17:37:12 +00001557 struct sst_dsp *dsp = hsw->dsp;
1558 struct sst_module *module;
1559 struct sst_module_runtime *runtime;
1560 int err;
Mark Browna4b12992014-03-12 23:04:35 +00001561
Liam Girdwoode9600bc2014-10-28 17:37:12 +00001562 module = sst_module_get_from_id(dsp, mod_id);
1563 if (module == NULL) {
1564 dev_err(dsp->dev, "error: failed to get module %d for pcm\n",
1565 mod_id);
1566 return NULL;
1567 }
Mark Browna4b12992014-03-12 23:04:35 +00001568
Liam Girdwoode9600bc2014-10-28 17:37:12 +00001569 runtime = sst_module_runtime_new(module, mod_id, NULL);
1570 if (runtime == NULL) {
1571 dev_err(dsp->dev, "error: failed to create module %d runtime\n",
1572 mod_id);
1573 return NULL;
1574 }
Mark Browna4b12992014-03-12 23:04:35 +00001575
Liam Girdwoode9600bc2014-10-28 17:37:12 +00001576 err = sst_module_runtime_alloc_blocks(runtime, offset);
1577 if (err < 0) {
1578 dev_err(dsp->dev, "error: failed to alloc blocks for module %d runtime\n",
1579 mod_id);
1580 sst_module_runtime_free(runtime);
1581 return NULL;
1582 }
Mark Browna4b12992014-03-12 23:04:35 +00001583
Liam Girdwoode9600bc2014-10-28 17:37:12 +00001584 dev_dbg(dsp->dev, "runtime id %d created for module %d\n", runtime->id,
1585 mod_id);
1586 return runtime;
1587}
Mark Browna4b12992014-03-12 23:04:35 +00001588
Liam Girdwoode9600bc2014-10-28 17:37:12 +00001589void sst_hsw_runtime_module_free(struct sst_module_runtime *runtime)
1590{
1591 sst_module_runtime_free_blocks(runtime);
1592 sst_module_runtime_free(runtime);
Mark Browna4b12992014-03-12 23:04:35 +00001593}
1594
Liam Girdwood35e03a82014-10-30 14:58:19 +00001595#ifdef CONFIG_PM
Liam Girdwoodaed3c7b2014-10-29 17:40:42 +00001596static int sst_hsw_dx_state_dump(struct sst_hsw *hsw)
1597{
1598 struct sst_dsp *sst = hsw->dsp;
1599 u32 item, offset, size;
1600 int ret = 0;
1601
1602 trace_ipc_request("PM state dump. Items #", SST_HSW_MAX_DX_REGIONS);
1603
1604 if (hsw->dx.entries_no > SST_HSW_MAX_DX_REGIONS) {
1605 dev_err(hsw->dev,
1606 "error: number of FW context regions greater than %d\n",
1607 SST_HSW_MAX_DX_REGIONS);
1608 memset(&hsw->dx, 0, sizeof(hsw->dx));
1609 return -EINVAL;
1610 }
1611
1612 ret = sst_dsp_dma_get_channel(sst, 0);
1613 if (ret < 0) {
		dev_err(hsw->dev, "error: can't allocate DMA channel %d\n", ret);
1615 return ret;
1616 }
1617
	/* set on-demand mode on engine 0 channel 3 */
1619 sst_dsp_shim_update_bits(sst, SST_HMDC,
1620 SST_HMDC_HDDA_E0_ALLCH | SST_HMDC_HDDA_E1_ALLCH,
1621 SST_HMDC_HDDA_E0_ALLCH | SST_HMDC_HDDA_E1_ALLCH);
1622
1623 for (item = 0; item < hsw->dx.entries_no; item++) {
1624 if (hsw->dx.mem_info[item].source == SST_HSW_DX_TYPE_MEMORY_DUMP
1625 && hsw->dx.mem_info[item].offset > DSP_DRAM_ADDR_OFFSET
1626 && hsw->dx.mem_info[item].offset <
1627 DSP_DRAM_ADDR_OFFSET + SST_HSW_DX_CONTEXT_SIZE) {
1628
1629 offset = hsw->dx.mem_info[item].offset
1630 - DSP_DRAM_ADDR_OFFSET;
1631 size = (hsw->dx.mem_info[item].size + 3) & (~3);
1632
1633 ret = sst_dsp_dma_copyfrom(sst, hsw->dx_context_paddr + offset,
1634 sst->addr.lpe_base + offset, size);
1635 if (ret < 0) {
1636 dev_err(hsw->dev,
1637 "error: FW context dump failed\n");
1638 memset(&hsw->dx, 0, sizeof(hsw->dx));
1639 goto out;
1640 }
1641 }
1642 }
1643
1644out:
1645 sst_dsp_dma_put_channel(sst);
1646 return ret;
1647}
1648
1649static int sst_hsw_dx_state_restore(struct sst_hsw *hsw)
1650{
1651 struct sst_dsp *sst = hsw->dsp;
1652 u32 item, offset, size;
1653 int ret;
1654
1655 for (item = 0; item < hsw->dx.entries_no; item++) {
1656 if (hsw->dx.mem_info[item].source == SST_HSW_DX_TYPE_MEMORY_DUMP
1657 && hsw->dx.mem_info[item].offset > DSP_DRAM_ADDR_OFFSET
1658 && hsw->dx.mem_info[item].offset <
1659 DSP_DRAM_ADDR_OFFSET + SST_HSW_DX_CONTEXT_SIZE) {
1660
1661 offset = hsw->dx.mem_info[item].offset
1662 - DSP_DRAM_ADDR_OFFSET;
1663 size = (hsw->dx.mem_info[item].size + 3) & (~3);
1664
1665 ret = sst_dsp_dma_copyto(sst, sst->addr.lpe_base + offset,
1666 hsw->dx_context_paddr + offset, size);
1667 if (ret < 0) {
1668 dev_err(hsw->dev,
1669 "error: FW context restore failed\n");
1670 return ret;
1671 }
1672 }
1673 }
1674
1675 return 0;
1676}
1677
1678static void sst_hsw_drop_all(struct sst_hsw *hsw)
1679{
1680 struct ipc_message *msg, *tmp;
1681 unsigned long flags;
1682 int tx_drop_cnt = 0, rx_drop_cnt = 0;
1683
1684 /* drop all TX and Rx messages before we stall + reset DSP */
1685 spin_lock_irqsave(&hsw->dsp->spinlock, flags);
1686
1687 list_for_each_entry_safe(msg, tmp, &hsw->tx_list, list) {
1688 list_move(&msg->list, &hsw->empty_list);
1689 tx_drop_cnt++;
1690 }
1691
1692 list_for_each_entry_safe(msg, tmp, &hsw->rx_list, list) {
1693 list_move(&msg->list, &hsw->empty_list);
1694 rx_drop_cnt++;
1695 }
1696
1697 spin_unlock_irqrestore(&hsw->dsp->spinlock, flags);
1698
1699 if (tx_drop_cnt || rx_drop_cnt)
1700 dev_err(hsw->dev, "dropped IPC msg RX=%d, TX=%d\n",
1701 tx_drop_cnt, rx_drop_cnt);
1702}
1703
1704int sst_hsw_dsp_load(struct sst_hsw *hsw)
1705{
1706 struct sst_dsp *dsp = hsw->dsp;
1707 int ret;
1708
1709 dev_dbg(hsw->dev, "loading audio DSP....");
1710
1711 ret = sst_dsp_wake(dsp);
1712 if (ret < 0) {
1713 dev_err(hsw->dev, "error: failed to wake audio DSP\n");
1714 return -ENODEV;
1715 }
1716
1717 ret = sst_dsp_dma_get_channel(dsp, 0);
1718 if (ret < 0) {
		dev_err(hsw->dev, "error: can't allocate DMA channel %d\n", ret);
1720 return ret;
1721 }
1722
1723 ret = sst_fw_reload(hsw->sst_fw);
1724 if (ret < 0) {
1725 dev_err(hsw->dev, "error: SST FW reload failed\n");
1726 sst_dsp_dma_put_channel(dsp);
1727 return -ENOMEM;
1728 }
1729
1730 sst_dsp_dma_put_channel(dsp);
1731 return 0;
1732}
1733
1734static int sst_hsw_dsp_restore(struct sst_hsw *hsw)
1735{
1736 struct sst_dsp *dsp = hsw->dsp;
1737 int ret;
1738
1739 dev_dbg(hsw->dev, "restoring audio DSP....");
1740
1741 ret = sst_dsp_dma_get_channel(dsp, 0);
1742 if (ret < 0) {
		dev_err(hsw->dev, "error: can't allocate DMA channel %d\n", ret);
1744 return ret;
1745 }
1746
1747 ret = sst_hsw_dx_state_restore(hsw);
1748 if (ret < 0) {
1749 dev_err(hsw->dev, "error: SST FW context restore failed\n");
1750 sst_dsp_dma_put_channel(dsp);
1751 return -ENOMEM;
1752 }
1753 sst_dsp_dma_put_channel(dsp);
1754
1755 /* wait for DSP boot completion */
1756 sst_dsp_boot(dsp);
1757
1758 return ret;
1759}
1760
1761int sst_hsw_dsp_runtime_suspend(struct sst_hsw *hsw)
1762{
1763 int ret;
1764
1765 dev_dbg(hsw->dev, "audio dsp runtime suspend\n");
1766
1767 ret = sst_hsw_dx_set_state(hsw, SST_HSW_DX_STATE_D3, &hsw->dx);
1768 if (ret < 0)
1769 return ret;
1770
1771 sst_dsp_stall(hsw->dsp);
1772
1773 ret = sst_hsw_dx_state_dump(hsw);
1774 if (ret < 0)
1775 return ret;
1776
1777 sst_hsw_drop_all(hsw);
1778
1779 return 0;
1780}
1781
1782int sst_hsw_dsp_runtime_sleep(struct sst_hsw *hsw)
1783{
1784 sst_fw_unload(hsw->sst_fw);
1785 sst_block_free_scratch(hsw->dsp);
1786
1787 hsw->boot_complete = false;
1788
1789 sst_dsp_sleep(hsw->dsp);
1790
1791 return 0;
1792}
1793
1794int sst_hsw_dsp_runtime_resume(struct sst_hsw *hsw)
1795{
1796 struct device *dev = hsw->dev;
1797 int ret;
1798
1799 dev_dbg(dev, "audio dsp runtime resume\n");
1800
1801 if (hsw->boot_complete)
1802 return 1; /* tell caller no action is required */
1803
1804 ret = sst_hsw_dsp_restore(hsw);
1805 if (ret < 0)
1806 dev_err(dev, "error: audio DSP boot failure\n");
1807
1808 ret = wait_event_timeout(hsw->boot_wait, hsw->boot_complete,
1809 msecs_to_jiffies(IPC_BOOT_MSECS));
1810 if (ret == 0) {
Liam Girdwoodb891f622014-10-30 14:34:00 +00001811 dev_err(hsw->dev, "error: audio DSP boot timeout IPCD 0x%x IPCX 0x%x\n",
1812 sst_dsp_shim_read_unlocked(hsw->dsp, SST_IPCD),
1813 sst_dsp_shim_read_unlocked(hsw->dsp, SST_IPCX));
Liam Girdwoodaed3c7b2014-10-29 17:40:42 +00001814 return -EIO;
1815 }
1816
1817 /* Set ADSP SSP port settings */
1818 ret = sst_hsw_device_set_config(hsw, SST_HSW_DEVICE_SSP_0,
1819 SST_HSW_DEVICE_MCLK_FREQ_24_MHZ,
1820 SST_HSW_DEVICE_CLOCK_MASTER, 9);
1821 if (ret < 0)
1822 dev_err(dev, "error: SSP re-initialization failed\n");
1823
1824 return ret;
1825}
1826#endif
1827
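/*
 * Illustrative sketch (not built): how a platform driver's runtime PM
 * callbacks might chain the helpers above - suspend then sleep on the way
 * down, reload then resume on the way up. The callback names here are
 * hypothetical; the real callers live in the Haswell PCM/ACPI glue, not
 * in this file.
 */
#if 0
static int example_runtime_suspend(struct device *dev)
{
	struct sst_pdata *pdata = dev_get_platdata(dev);
	struct sst_hsw *hsw = pdata->dsp;
	int ret;

	ret = sst_hsw_dsp_runtime_suspend(hsw);
	if (ret < 0)
		return ret;

	return sst_hsw_dsp_runtime_sleep(hsw);
}

static int example_runtime_resume(struct device *dev)
{
	struct sst_pdata *pdata = dev_get_platdata(dev);
	struct sst_hsw *hsw = pdata->dsp;
	int ret;

	ret = sst_hsw_dsp_load(hsw);
	if (ret < 0)
		return ret;

	return sst_hsw_dsp_runtime_resume(hsw);
}
#endif
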
Mark Browna4b12992014-03-12 23:04:35 +00001828static int msg_empty_list_init(struct sst_hsw *hsw)
1829{
1830 int i;
1831
1832 hsw->msg = kzalloc(sizeof(struct ipc_message) *
1833 IPC_EMPTY_LIST_SIZE, GFP_KERNEL);
1834 if (hsw->msg == NULL)
1835 return -ENOMEM;
1836
1837 for (i = 0; i < IPC_EMPTY_LIST_SIZE; i++) {
1838 init_waitqueue_head(&hsw->msg[i].waitq);
1839 list_add(&hsw->msg[i].list, &hsw->empty_list);
1840 }
1841
1842 return 0;
1843}
1844
Mark Browna4b12992014-03-12 23:04:35 +00001845struct sst_dsp *sst_hsw_get_dsp(struct sst_hsw *hsw)
1846{
1847 return hsw->dsp;
1848}
1849
1850static struct sst_dsp_device hsw_dev = {
1851 .thread = hsw_irq_thread,
1852 .ops = &haswell_ops,
1853};
1854
1855int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
1856{
1857 struct sst_hsw_ipc_fw_version version;
1858 struct sst_hsw *hsw;
Mark Browna4b12992014-03-12 23:04:35 +00001859 int ret;
1860
1861 dev_dbg(dev, "initialising Audio DSP IPC\n");
1862
1863 hsw = devm_kzalloc(dev, sizeof(*hsw), GFP_KERNEL);
1864 if (hsw == NULL)
1865 return -ENOMEM;
1866
1867 hsw->dev = dev;
1868 INIT_LIST_HEAD(&hsw->stream_list);
1869 INIT_LIST_HEAD(&hsw->tx_list);
1870 INIT_LIST_HEAD(&hsw->rx_list);
1871 INIT_LIST_HEAD(&hsw->empty_list);
1872 init_waitqueue_head(&hsw->boot_wait);
1873 init_waitqueue_head(&hsw->wait_txq);
1874
1875 ret = msg_empty_list_init(hsw);
1876 if (ret < 0)
Imre Deak9cf0e452014-05-30 10:52:29 +03001877 return -ENOMEM;
Mark Browna4b12992014-03-12 23:04:35 +00001878
1879 /* start the IPC message thread */
1880 init_kthread_worker(&hsw->kworker);
1881 hsw->tx_thread = kthread_run(kthread_worker_fn,
Kees Cook35386322014-05-22 11:43:55 -07001882 &hsw->kworker, "%s",
Mark Browna4b12992014-03-12 23:04:35 +00001883 dev_name(hsw->dev));
1884 if (IS_ERR(hsw->tx_thread)) {
1885 ret = PTR_ERR(hsw->tx_thread);
1886 dev_err(hsw->dev, "error: failed to create message TX task\n");
Imre Deak9cf0e452014-05-30 10:52:29 +03001887 goto err_free_msg;
Mark Browna4b12992014-03-12 23:04:35 +00001888 }
1889 init_kthread_work(&hsw->kwork, ipc_tx_msgs);
1890
1891 hsw_dev.thread_context = hsw;
1892
1893 /* init SST shim */
1894 hsw->dsp = sst_dsp_new(dev, &hsw_dev, pdata);
1895 if (hsw->dsp == NULL) {
1896 ret = -ENODEV;
Imre Deak9cf0e452014-05-30 10:52:29 +03001897 goto dsp_err;
Mark Browna4b12992014-03-12 23:04:35 +00001898 }
1899
Liam Girdwoodaed3c7b2014-10-29 17:40:42 +00001900 /* allocate DMA buffer for context storage */
1901 hsw->dx_context = dma_alloc_coherent(hsw->dsp->dma_dev,
1902 SST_HSW_DX_CONTEXT_SIZE, &hsw->dx_context_paddr, GFP_KERNEL);
1903 if (hsw->dx_context == NULL) {
1904 ret = -ENOMEM;
1905 goto dma_err;
1906 }
1907
Mark Browna4b12992014-03-12 23:04:35 +00001908 /* keep the DSP in reset state for base FW loading */
1909 sst_dsp_reset(hsw->dsp);
1910
Liam Girdwoodaed3c7b2014-10-29 17:40:42 +00001911 hsw->sst_fw = sst_fw_new(hsw->dsp, pdata->fw, hsw);
1912 if (hsw->sst_fw == NULL) {
Mark Browna4b12992014-03-12 23:04:35 +00001913 ret = -ENODEV;
1914 dev_err(dev, "error: failed to load firmware\n");
1915 goto fw_err;
1916 }
1917
1918 /* wait for DSP boot completion */
1919 sst_dsp_boot(hsw->dsp);
1920 ret = wait_event_timeout(hsw->boot_wait, hsw->boot_complete,
1921 msecs_to_jiffies(IPC_BOOT_MSECS));
1922 if (ret == 0) {
1923 ret = -EIO;
Liam Girdwoodb891f622014-10-30 14:34:00 +00001924 dev_err(hsw->dev, "error: audio DSP boot timeout IPCD 0x%x IPCX 0x%x\n",
1925 sst_dsp_shim_read_unlocked(hsw->dsp, SST_IPCD),
1926 sst_dsp_shim_read_unlocked(hsw->dsp, SST_IPCX));
Mark Browna4b12992014-03-12 23:04:35 +00001927 goto boot_err;
1928 }
1929
1930 /* get the FW version */
1931 sst_hsw_fw_get_version(hsw, &version);
Mark Browna4b12992014-03-12 23:04:35 +00001932
	/* get the global mixer */
1934 ret = sst_hsw_mixer_get_info(hsw);
1935 if (ret < 0) {
1936 dev_err(hsw->dev, "error: failed to get stream info\n");
1937 goto boot_err;
1938 }
1939
1940 pdata->dsp = hsw;
1941 return 0;
1942
1943boot_err:
1944 sst_dsp_reset(hsw->dsp);
Liam Girdwoodaed3c7b2014-10-29 17:40:42 +00001945 sst_fw_free(hsw->sst_fw);
Mark Browna4b12992014-03-12 23:04:35 +00001946fw_err:
Liam Girdwoodaed3c7b2014-10-29 17:40:42 +00001947 dma_free_coherent(hsw->dsp->dma_dev, SST_HSW_DX_CONTEXT_SIZE,
1948 hsw->dx_context, hsw->dx_context_paddr);
1949dma_err:
Mark Browna4b12992014-03-12 23:04:35 +00001950 sst_dsp_free(hsw->dsp);
Imre Deak9cf0e452014-05-30 10:52:29 +03001951dsp_err:
1952 kthread_stop(hsw->tx_thread);
1953err_free_msg:
Mark Browna4b12992014-03-12 23:04:35 +00001954 kfree(hsw->msg);
Imre Deak9cf0e452014-05-30 10:52:29 +03001955
Mark Browna4b12992014-03-12 23:04:35 +00001956 return ret;
1957}
1958EXPORT_SYMBOL_GPL(sst_hsw_dsp_init);
1959
1960void sst_hsw_dsp_free(struct device *dev, struct sst_pdata *pdata)
1961{
1962 struct sst_hsw *hsw = pdata->dsp;
1963
1964 sst_dsp_reset(hsw->dsp);
1965 sst_fw_free_all(hsw->dsp);
Liam Girdwoodaed3c7b2014-10-29 17:40:42 +00001966 dma_free_coherent(hsw->dsp->dma_dev, SST_HSW_DX_CONTEXT_SIZE,
1967 hsw->dx_context, hsw->dx_context_paddr);
Mark Browna4b12992014-03-12 23:04:35 +00001968 sst_dsp_free(hsw->dsp);
Imre Deak9cf0e452014-05-30 10:52:29 +03001969 kthread_stop(hsw->tx_thread);
Mark Browna4b12992014-03-12 23:04:35 +00001970 kfree(hsw->msg);
1971}
1972EXPORT_SYMBOL_GPL(sst_hsw_dsp_free);