blob: 9e898b87a32e108561c289c5228bce27a13c3997 [file] [log] [blame]
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
12
13#include <linux/io.h>
14#include <linux/interrupt.h>
15#include <linux/types.h>
16#include <linux/spinlock.h>
17#include <linux/completion.h>
18#include <linux/platform_device.h>
19#include <linux/mailbox_controller.h>
20#include <linux/module.h>
21#include <linux/of_irq.h>
22#include <linux/kthread.h>
23#include <linux/workqueue.h>
24#include <linux/mailbox/qmp.h>
25
/* Protocol identity words written into the SMEM descriptor */
#define QMP_MAGIC	0x4d41494c /* MAIL */
#define QMP_VERSION	0x1
#define QMP_FEATURES	0x0
/* Timeout waiting for link/channel negotiation with the remote core */
#define QMP_TOUT_MS	5000
/* Timeout for a single tx before the client is notified with -ETIME */
#define QMP_TX_TOUT_MS	1000

/* Link and channel state values exchanged through the shared descriptor */
#define QMP_MBOX_LINK_DOWN		0xFFFF0000
#define QMP_MBOX_LINK_UP		0x0000FFFF
#define QMP_MBOX_CH_DISCONNECTED	0xFFFF0000
#define QMP_MBOX_CH_CONNECTED		0x0000FFFF

/* Mailbox copies must be whole 32-bit words; mask of disallowed low bits */
#define MSG_RAM_ALIGN_BYTES	3
/**
 * enum qmp_local_state - definition of the local state machine
 * @LINK_DISCONNECTED:		Init state, waiting for ucore to start
 * @LINK_NEGOTIATION:		Set local link state to up, wait for ucore ack
 * @LINK_CONNECTED:		Link state up, channel not connected
 * @LOCAL_CONNECTING:		Channel opening locally, wait for ucore ack
 * @LOCAL_CONNECTED:		Channel opened locally
 * @CHANNEL_CONNECTED:		Channel fully opened
 * @LOCAL_DISCONNECTING:	Channel closing locally, wait for ucore ack
 */
enum qmp_local_state {
	LINK_DISCONNECTED,
	LINK_NEGOTIATION,
	LINK_CONNECTED,
	LOCAL_CONNECTING,
	LOCAL_CONNECTED,
	CHANNEL_CONNECTED,
	LOCAL_DISCONNECTING,
};
58
/**
 * struct channel_desc - description of a core's link, channel and mailbox state
 * @link_state:		Current link state of core
 * @link_state_ack:	Ack for other core to use when link state changes
 * @ch_state:		Current channel state of core
 * @ch_state_ack:	Ack for other core to use when channel state changes
 * @mailbox_size:	Size of this core's mailbox
 * @mailbox_offset:	Location of core's mailbox from a base smem location
 *
 * Layout mirrors the shared-memory protocol; fields are read/written with
 * io accessors, so do not reorder or pad.
 */
struct channel_desc {
	u32 link_state;
	u32 link_state_ack;
	u32 ch_state;
	u32 ch_state_ack;
	u32 mailbox_size;
	u32 mailbox_offset;
};
76
/**
 * struct mbox_desc - description of the protocol's mailbox state
 * @magic:	Magic number field to be set by ucore
 * @version:	Version field to be set by ucore
 * @features:	Features field to be set by ucore
 * @ucore:	Channel descriptor to hold state of ucore
 * @mcore:	Channel descriptor to hold state of mcore
 * @reserved:	Reserved in case of future use
 *
 * This structure resides in SMEM and contains the control information for the
 * mailbox channel. Each core in the link will have one channel descriptor.
 */
struct mbox_desc {
	u32 magic;
	u32 version;
	u32 features;
	struct channel_desc ucore;
	struct channel_desc mcore;
	u32 reserved;
};
97
/**
 * struct qmp_core_version - local structure to hold version and features
 * @version:	Version field to indicate what version the ucore supports
 * @features:	Features field to indicate what features the ucore supports
 */
struct qmp_core_version {
	u32 version;
	u32 features;
};
107
/**
 * struct qmp_mbox - local information for managing a single mailbox
 * @list:		List head for adding mbox to linked list
 * @ctrl:		Controller for this mailbox
 * @priority:		Priority of mailbox in the linked list
 * @num_assigned:	Number of channels assigned for allocated pool
 * @num_shutdown:	Number of channels that have shutdown
 * @desc:		Reference to the mailbox descriptor in SMEM
 * @rx_disabled:	Disable rx if multiple clients are sending from this mbox
 * @tx_sent:		True if tx is sent and remote proc has not sent ack
 * @idx_in_flight:	current channel idx whose tx is in flight
 * @mcore_mbox_offset:	Offset of mcore mbox from the msgram start
 * @mcore_mbox_size:	Size of the mcore mbox
 * @rx_pkt:		buffer to pass to client, holds copied data from mailbox
 * @version:		Version and features received during link negotiation
 * @local_state:	Current state of the mailbox protocol
 * @state_lock:		Serialize mailbox state changes
 * @tx_lock:		Serialize access for writes to mailbox
 * @link_complete:	Use to block until link negotiation with remote proc
 * @ch_complete:	Use to block until the channel is fully opened
 * @dwork:		Delayed work to detect timed out tx
 * @mdev:		Edge-level device this mailbox belongs to
 */
struct qmp_mbox {
	struct list_head list;
	struct mbox_controller ctrl;
	int priority;
	u32 num_assigned;
	u32 num_shutdown;

	void __iomem *desc;
	bool rx_disabled;
	bool tx_sent;
	u32 idx_in_flight;
	u32 mcore_mbox_offset;
	u32 mcore_mbox_size;
	struct qmp_pkt rx_pkt;

	struct qmp_core_version version;
	enum qmp_local_state local_state;
	struct mutex state_lock;
	spinlock_t tx_lock;

	struct completion link_complete;
	struct completion ch_complete;
	struct delayed_work dwork;
	struct qmp_device *mdev;
};
156
/**
 * struct qmp_device - local information for managing a single qmp edge
 * @dev:		The device that corresponds to this edge
 * @name:		The name of this mailbox
 * @mboxes:		List of mailboxes served by this edge, priority ordered
 * @msgram:		Reference to the start of msgram
 * @tx_irq_reg:		Reference to the register to send an irq to remote proc
 * @rx_reset_reg:	Reference to the register to reset the rx irq, if
 *			applicable
 * @kwork:		kwork for rx handling
 * @kworker:		Handle to entity to process incoming data
 * @task:		Handle to task context used to run @kworker
 * @irq_mask:		Mask written to @tx_irq_reg to trigger irq
 * @rx_irq_line:	The incoming interrupt line
 * @tx_irq_count:	Number of tx interrupts triggered
 * @rx_irq_count:	Number of rx interrupts received
 */
struct qmp_device {
	struct device *dev;
	const char *name;
	struct list_head mboxes;

	void __iomem *msgram;
	void __iomem *tx_irq_reg;
	void __iomem *rx_reset_reg;

	struct kthread_work kwork;
	struct kthread_worker kworker;
	struct task_struct *task;

	u32 irq_mask;
	u32 rx_irq_line;
	u32 tx_irq_count;
	u32 rx_irq_count;
};
193
/**
 * send_irq() - send an irq to a remote entity as an event signal.
 * @mdev: Which remote entity that should receive the irq.
 */
static void send_irq(struct qmp_device *mdev)
{
	/*
	 * Any data associated with this event must be visible to the remote
	 * before the interrupt is triggered; hence the explicit wmb() paired
	 * with the relaxed register write below.
	 */
	wmb();
	writel_relaxed(mdev->irq_mask, mdev->tx_irq_reg);
	mdev->tx_irq_count++;
}
208
Chris Lew6d815562017-06-13 11:50:06 -0700209static void memcpy32_toio(void __iomem *dest, void *src, size_t size)
Chris Lew38722582016-11-21 18:30:24 -0800210{
211 u32 *dest_local = (u32 *)dest;
212 u32 *src_local = (u32 *)src;
213
214 WARN_ON(size & MSG_RAM_ALIGN_BYTES);
215 size /= sizeof(u32);
216 while (size--)
217 iowrite32(*src_local++, dest_local++);
218}
219
Chris Lew6d815562017-06-13 11:50:06 -0700220static void memcpy32_fromio(void *dest, void __iomem *src, size_t size)
Chris Lew38722582016-11-21 18:30:24 -0800221{
222 u32 *dest_local = (u32 *)dest;
223 u32 *src_local = (u32 *)src;
224
225 WARN_ON(size & MSG_RAM_ALIGN_BYTES);
226 size /= sizeof(u32);
227 while (size--)
228 *dest_local++ = ioread32(src_local++);
229}
230
/**
 * qmp_notify_timeout() - Notify client of tx timeout with -ETIME
 * @work: Structure for work that was scheduled.
 *
 * Runs QMP_TX_TOUT_MS after a tx was started. If the remote still has not
 * consumed the message, the mailbox slot is cleared and the in-flight
 * channel's client is completed with -ETIME.
 */
static void qmp_notify_timeout(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct qmp_mbox *mbox = container_of(dwork, struct qmp_mbox, dwork);
	struct mbox_chan *chan = &mbox->ctrl.chans[mbox->idx_in_flight];
	int err = -ETIME;
	unsigned long flags;

	spin_lock_irqsave(&mbox->tx_lock, flags);
	/* tx completed (ack raced with the timer); nothing to report */
	if (!mbox->tx_sent) {
		spin_unlock_irqrestore(&mbox->tx_lock, flags);
		return;
	}
	pr_err("%s: qmp tx timeout for %d\n", __func__, mbox->idx_in_flight);
	/* Zero the size word so the remote will not consume the stale tx */
	iowrite32(0, mbox->mdev->msgram + mbox->mcore_mbox_offset);
	mbox->tx_sent = false;
	spin_unlock_irqrestore(&mbox->tx_lock, flags);
	/* Notify outside the spinlock; client callbacks may take their own */
	mbox_chan_txdone(chan, err);
}
254
/* Arm the per-mailbox tx watchdog; fires qmp_notify_timeout() on expiry. */
static inline void qmp_schedule_tx_timeout(struct qmp_mbox *mbox)
{
	schedule_delayed_work(&mbox->dwork, msecs_to_jiffies(QMP_TX_TOUT_MS));
}
259
260/**
261 * set_ucore_link_ack() - set the link ack in the ucore channel desc.
262 * @mbox: the mailbox for the field that is being set.
263 * @state: the value to set the ack field to.
264 */
265static void set_ucore_link_ack(struct qmp_mbox *mbox, u32 state)
266{
267 u32 offset;
268
269 offset = offsetof(struct mbox_desc, ucore);
270 offset += offsetof(struct channel_desc, link_state_ack);
271 iowrite32(state, mbox->desc + offset);
272}
273
274/**
275 * set_ucore_ch_ack() - set the channel ack in the ucore channel desc.
276 * @mbox: the mailbox for the field that is being set.
277 * @state: the value to set the ack field to.
278 */
279static void set_ucore_ch_ack(struct qmp_mbox *mbox, u32 state)
280{
281 u32 offset;
282
283 offset = offsetof(struct mbox_desc, ucore);
284 offset += offsetof(struct channel_desc, ch_state_ack);
285 iowrite32(state, mbox->desc + offset);
286}
287
288/**
289 * set_mcore_ch() - set the channel state in the mcore channel desc.
290 * @mbox: the mailbox for the field that is being set.
291 * @state: the value to set the channel field to.
292 */
293static void set_mcore_ch(struct qmp_mbox *mbox, u32 state)
294{
295 u32 offset;
296
297 offset = offsetof(struct mbox_desc, mcore);
298 offset += offsetof(struct channel_desc, ch_state);
299 iowrite32(state, mbox->desc + offset);
300}
301
/**
 * qmp_startup() - Start qmp mailbox channel for communication. Waits for
 *			remote subsystem to open channel if link is not
 *			initated or until timeout.
 * @chan: mailbox channel that is being opened.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int qmp_startup(struct mbox_chan *chan)
{
	struct qmp_mbox *mbox = chan->con_priv;

	if (!mbox)
		return -EINVAL;

	mutex_lock(&mbox->state_lock);
	/* Link negotiation has not finished yet; caller should retry */
	if (!completion_done(&mbox->link_complete)) {
		mutex_unlock(&mbox->state_lock);
		return -EAGAIN;
	}

	set_mcore_ch(mbox, QMP_MBOX_CH_CONNECTED);
	mbox->local_state = LOCAL_CONNECTING;
	mutex_unlock(&mbox->state_lock);

	send_irq(mbox->mdev);
	/*
	 * NOTE(review): the result of the wait is deliberately ignored — on
	 * timeout the channel stays in LOCAL_CONNECTING and sends will fail
	 * with -EINVAL until the remote acks. Confirm this best-effort return
	 * is intended before treating the timeout as an error.
	 */
	wait_for_completion_interruptible_timeout(&mbox->ch_complete,
					msecs_to_jiffies(QMP_TOUT_MS));
	return 0;
}
332
Chris Lew38722582016-11-21 18:30:24 -0800333
/**
 * qmp_send_data() - Copy the data to the channel's mailbox and notify
 *			remote subsystem of new data. This function will
 *			return an error if the previous message sent has
 *			not been read. Cannot Sleep.
 * @chan:	mailbox channel that data is to be sent over.
 * @data:	Data to be sent to remote processor, should be in the format of
 *		a qmp_pkt.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int qmp_send_data(struct mbox_chan *chan, void *data)
{
	struct qmp_mbox *mbox = chan->con_priv;
	struct qmp_device *mdev;
	struct qmp_pkt *pkt = (struct qmp_pkt *)data;
	void __iomem *addr;
	unsigned long flags;
	int i;

	if (!mbox || !data || mbox->local_state != CHANNEL_CONNECTED)
		return -EINVAL;
	mdev = mbox->mdev;

	spin_lock_irqsave(&mbox->tx_lock, flags);
	addr = mdev->msgram + mbox->mcore_mbox_offset;
	/* Previous tx not yet acked by the remote; only one tx in flight */
	if (mbox->tx_sent) {
		spin_unlock_irqrestore(&mbox->tx_lock, flags);
		return -EAGAIN;
	}

	/* Payload plus the leading size word must fit in the mailbox */
	if (pkt->size + sizeof(pkt->size) > mbox->mcore_mbox_size) {
		spin_unlock_irqrestore(&mbox->tx_lock, flags);
		return -EINVAL;
	}

	/* Write payload first, then the size word which signals validity */
	memcpy32_toio(addr + sizeof(pkt->size), pkt->data, pkt->size);
	iowrite32(pkt->size, addr);
	mbox->tx_sent = true;
	/* Record which channel owns this tx so txdone targets it */
	for (i = 0; i < mbox->ctrl.num_chans; i++) {
		if (chan == &mbox->ctrl.chans[i])
			mbox->idx_in_flight = i;
	}
	send_irq(mdev);
	qmp_schedule_tx_timeout(mbox);
	spin_unlock_irqrestore(&mbox->tx_lock, flags);
	return 0;
}
382
/**
 * qmp_shutdown() - Disconnect this mailbox channel so the client does not
 *			receive anymore data and can reliquish control
 *			of the channel
 * @chan: mailbox channel to be shutdown.
 *
 * The mcore channel is only torn down once every assigned channel has been
 * shut down; earlier calls just account for the departing client.
 */
static void qmp_shutdown(struct mbox_chan *chan)
{
	struct qmp_mbox *mbox = chan->con_priv;

	mutex_lock(&mbox->state_lock);
	mbox->num_shutdown++;
	/* Other clients still hold channels; defer the real disconnect */
	if (mbox->num_shutdown < mbox->num_assigned) {
		mutex_unlock(&mbox->state_lock);
		return;
	}

	if (mbox->local_state != LINK_DISCONNECTED) {
		mbox->local_state = LOCAL_DISCONNECTING;
		set_mcore_ch(mbox, QMP_MBOX_CH_DISCONNECTED);
		send_irq(mbox->mdev);
	}
	/* Reset accounting so the pool can be reassigned from scratch */
	mbox->num_shutdown = 0;
	mbox->num_assigned = 0;
	mutex_unlock(&mbox->state_lock);
}
409
/**
 * qmp_last_tx_done() - qmp does not support polling operations, print
 *			error of unexpected usage and return true to
 *			resume operation.
 * @chan: Corresponding mailbox channel for requested last tx.
 *
 * Return: true
 */
static bool qmp_last_tx_done(struct mbox_chan *chan)
{
	pr_err("In %s, unexpected usage of last_tx_done\n", __func__);
	return true;
}
423
/**
 * qmp_recv_data() - received notification that data is available in the
 *			mailbox. Copy data from mailbox and pass to client.
 * @mbox:	mailbox device that received the notification.
 * @mbox_of:	offset of mailbox from msgram start.
 *
 * NOTE(review): rx is delivered to chans[idx_in_flight], i.e. the channel
 * whose tx was last in flight — mailboxes shared by multiple clients set
 * rx_disabled and never reach here. Verify that single-client assumption
 * against the callers.
 */
static void qmp_recv_data(struct qmp_mbox *mbox, u32 mbox_of)
{
	void __iomem *addr;
	struct qmp_pkt *pkt;

	addr = mbox->mdev->msgram + mbox_of;
	pkt = &mbox->rx_pkt;
	pkt->size = ioread32(addr);

	if (pkt->size > mbox->mcore_mbox_size)
		pr_err("%s: Invalid mailbox packet\n", __func__);
	else {
		memcpy32_fromio(pkt->data, addr + sizeof(pkt->size), pkt->size);
		mbox_chan_received_data(&mbox->ctrl.chans[mbox->idx_in_flight],
				pkt);
	}
	/* Clear the size word to ack consumption, then signal the remote */
	iowrite32(0, addr);
	send_irq(mbox->mdev);
}
449
/**
 * init_mcore_state() - initialize the mcore state of a mailbox.
 * @mbox: mailbox whose mcore descriptor is to be initialized.
 *
 * Publishes link-up / channel-disconnected plus this core's mailbox
 * geometry into the shared descriptor in one block copy.
 */
static void init_mcore_state(struct qmp_mbox *mbox)
{
	struct channel_desc mcore;
	u32 offset = offsetof(struct mbox_desc, mcore);

	mcore.link_state = QMP_MBOX_LINK_UP;
	mcore.link_state_ack = QMP_MBOX_LINK_DOWN;
	mcore.ch_state = QMP_MBOX_CH_DISCONNECTED;
	mcore.ch_state_ack = QMP_MBOX_CH_DISCONNECTED;
	mcore.mailbox_size = mbox->mcore_mbox_size;
	mcore.mailbox_offset = mbox->mcore_mbox_offset;
	memcpy32_toio(mbox->desc + offset, &mcore, sizeof(mcore));
}
467
/**
 * qmp_irq_handler() - handle irq from remote entitity.
 * @irq: irq number for the trggered interrupt.
 * @priv: private pointer to qmp mbox device.
 *
 * Acks the irq at the hardware (if a reset register exists) and defers all
 * protocol work to the kthread worker; nothing sleeps in irq context.
 */
static irqreturn_t qmp_irq_handler(int irq, void *priv)
{
	struct qmp_device *mdev = (struct qmp_device *)priv;

	if (mdev->rx_reset_reg)
		writel_relaxed(mdev->irq_mask, mdev->rx_reset_reg);

	kthread_queue_work(&mdev->kworker, &mdev->kwork);
	mdev->rx_irq_count++;

	return IRQ_HANDLED;
}
485
/**
 * __qmp_rx_worker() - Handle incoming messages from remote processor.
 * @mbox: mailbox device that received notification.
 *
 * Drives the local state machine in response to a remote interrupt:
 * link negotiation, channel open/close handshakes, rx delivery and
 * tx-done acknowledgment. Runs in kthread context under state_lock.
 */
static void __qmp_rx_worker(struct qmp_mbox *mbox)
{
	u32 msg_len, idx;
	struct mbox_desc desc;
	struct qmp_device *mdev = mbox->mdev;
	unsigned long flags;

	/* Snapshot the shared descriptor; ignore until ucore wrote magic */
	memcpy_fromio(&desc, mbox->desc, sizeof(desc));
	if (desc.magic != QMP_MAGIC)
		return;

	mutex_lock(&mbox->state_lock);
	switch (mbox->local_state) {
	case LINK_DISCONNECTED:
		/* First contact: record remote version, ack its link state */
		mbox->version.version = desc.version;
		mbox->version.features = desc.features;
		set_ucore_link_ack(mbox, desc.ucore.link_state);
		/* Remote may dictate our mailbox geometry */
		if (desc.mcore.mailbox_size) {
			mbox->mcore_mbox_size = desc.mcore.mailbox_size;
			mbox->mcore_mbox_offset = desc.mcore.mailbox_offset;
		}
		init_mcore_state(mbox);
		mbox->local_state = LINK_NEGOTIATION;
		mbox->rx_pkt.data = devm_kzalloc(mdev->dev,
						 desc.ucore.mailbox_size,
						 GFP_KERNEL);
		if (!mbox->rx_pkt.data) {
			pr_err("In %s: failed to allocate rx pkt\n", __func__);
			break;
		}
		send_irq(mdev);
		break;
	case LINK_NEGOTIATION:
		if (desc.mcore.link_state_ack != QMP_MBOX_LINK_UP ||
				desc.mcore.link_state != QMP_MBOX_LINK_UP) {
			pr_err("In %s: rx interrupt without negotiation ack\n",
					__func__);
			break;
		}
		/* Remote acked our link-up; unblock qmp_startup() waiters */
		mbox->local_state = LINK_CONNECTED;
		complete_all(&mbox->link_complete);
		break;
	case LINK_CONNECTED:
		/* Only a remote channel-state change is expected here */
		if (desc.ucore.ch_state == desc.ucore.ch_state_ack) {
			pr_err("In %s: rx interrupt without channel open\n",
					__func__);
			break;
		}
		set_ucore_ch_ack(mbox, desc.ucore.ch_state);
		send_irq(mdev);
		break;
	case LOCAL_CONNECTING:
		/* Remote acked our open request */
		if (desc.mcore.ch_state_ack == QMP_MBOX_CH_CONNECTED &&
				desc.mcore.ch_state == QMP_MBOX_CH_CONNECTED)
			mbox->local_state = LOCAL_CONNECTED;

		/* Ack any remote-side channel state change in the same pass */
		if (desc.ucore.ch_state != desc.ucore.ch_state_ack) {
			set_ucore_ch_ack(mbox, desc.ucore.ch_state);
			send_irq(mdev);
		}
		/* Both sides open: channel is fully connected */
		if (mbox->local_state == LOCAL_CONNECTED &&
				desc.mcore.ch_state == QMP_MBOX_CH_CONNECTED &&
				desc.ucore.ch_state == QMP_MBOX_CH_CONNECTED) {
			mbox->local_state = CHANNEL_CONNECTED;
			complete_all(&mbox->ch_complete);
		}
		break;
	case LOCAL_CONNECTED:
		if (desc.ucore.ch_state == desc.ucore.ch_state_ack) {
			pr_err("In %s: rx interrupt without remote channel open\n",
					__func__);
			break;
		}
		set_ucore_ch_ack(mbox, desc.ucore.ch_state);
		mbox->local_state = CHANNEL_CONNECTED;
		send_irq(mdev);
		complete_all(&mbox->ch_complete);
		break;
	case CHANNEL_CONNECTED:
		/* Remote closed its side; ack and drop to LOCAL_CONNECTED */
		if (desc.ucore.ch_state == QMP_MBOX_CH_DISCONNECTED) {
			set_ucore_ch_ack(mbox, desc.ucore.ch_state);
			mbox->local_state = LOCAL_CONNECTED;
			send_irq(mdev);
		}

		/* Non-zero size word in the ucore mailbox means rx data */
		msg_len = ioread32(mdev->msgram + desc.ucore.mailbox_offset);
		if (msg_len && !mbox->rx_disabled)
			qmp_recv_data(mbox, desc.ucore.mailbox_offset);

		/* Zeroed size word in our mailbox means the remote acked tx */
		spin_lock_irqsave(&mbox->tx_lock, flags);
		idx = mbox->idx_in_flight;
		if (mbox->tx_sent) {
			msg_len = ioread32(mdev->msgram +
						mbox->mcore_mbox_offset);
			if (msg_len == 0) {
				mbox->tx_sent = false;
				cancel_delayed_work(&mbox->dwork);
				/* Drop the lock for the client callback */
				spin_unlock_irqrestore(&mbox->tx_lock, flags);
				mbox_chan_txdone(&mbox->ctrl.chans[idx], 0);
				spin_lock_irqsave(&mbox->tx_lock, flags);
			}
		}
		spin_unlock_irqrestore(&mbox->tx_lock, flags);
		break;
	case LOCAL_DISCONNECTING:
		/* Remote acked our close; fall back to a bare link */
		if (desc.mcore.ch_state_ack == QMP_MBOX_CH_DISCONNECTED &&
				desc.mcore.ch_state == desc.mcore.ch_state_ack)
			mbox->local_state = LINK_CONNECTED;
		reinit_completion(&mbox->ch_complete);
		break;
	default:
		pr_err("In %s: Local Channel State corrupted\n", __func__);
	}
	mutex_unlock(&mbox->state_lock);
}
605
606static void rx_worker(struct kthread_work *work)
607{
608 struct qmp_device *mdev;
Chris Lew6d815562017-06-13 11:50:06 -0700609 struct qmp_mbox *mbox;
Chris Lew38722582016-11-21 18:30:24 -0800610
611 mdev = container_of(work, struct qmp_device, kwork);
Chris Lew6d815562017-06-13 11:50:06 -0700612 list_for_each_entry(mbox, &mdev->mboxes, list) {
613 __qmp_rx_worker(mbox);
614 }
Chris Lew38722582016-11-21 18:30:24 -0800615}
616
617/**
618 * qmp_mbox_of_xlate() - Returns a mailbox channel to be used for this mailbox
619 * device. Make sure the channel is not already in use.
620 * @mbox: Mailbox device controlls the requested channel.
621 * @spec: Device tree arguments to specify which channel is requested.
622 */
623static struct mbox_chan *qmp_mbox_of_xlate(struct mbox_controller *mbox,
624 const struct of_phandle_args *spec)
625{
Chris Lew6d815562017-06-13 11:50:06 -0700626 struct qmp_mbox *dev = container_of(mbox, struct qmp_mbox, ctrl);
627 struct mbox_chan *chan;
Chris Lew38722582016-11-21 18:30:24 -0800628
Chris Lew6d815562017-06-13 11:50:06 -0700629 if (dev->num_assigned >= mbox->num_chans || !dev->ctrl.chans) {
630 pr_err("%s: QMP out of channels\n", __func__);
631 return ERR_PTR(-ENOMEM);
Chris Lew38722582016-11-21 18:30:24 -0800632 }
Chris Lew6d815562017-06-13 11:50:06 -0700633
634 mutex_lock(&dev->state_lock);
635 chan = &dev->ctrl.chans[dev->num_assigned++];
636 mutex_unlock(&dev->state_lock);
637
638 return chan;
Chris Lew38722582016-11-21 18:30:24 -0800639}
640
/**
 * cleanup_workqueue() - Flush all work and stop the thread for this mailbox.
 * @mdev: mailbox device to cleanup.
 *
 * Flush must precede kthread_stop() so queued rx work completes before the
 * worker task is destroyed.
 */
static void cleanup_workqueue(struct qmp_device *mdev)
{
	kthread_flush_worker(&mdev->kworker);
	kthread_stop(mdev->task);
	mdev->task = NULL;
}
651
652static int qmp_mbox_remove(struct platform_device *pdev)
653{
654 struct qmp_device *mdev = platform_get_drvdata(pdev);
655 struct qmp_mbox *mbox = NULL;
656
657 disable_irq(mdev->rx_irq_line);
658 cleanup_workqueue(mdev);
659
660 list_for_each_entry(mbox, &mdev->mboxes, list) {
661 mbox_controller_unregister(&mbox->ctrl);
662 }
663 return 0;
664}
665
666/**
667 * get_mbox_num_chans() - Find how many mbox channels need to be allocated
668 *
669 * @node: device node for this mailbox.
670 *
671 * Return: the number of phandles referring to this device node
672 */
673static u32 get_mbox_num_chans(struct device_node *node)
674{
675 int i, j, ret;
676 u32 num_chans = 0;
677 struct device_node *np;
678 struct of_phandle_args p;
679
680 for_each_node_with_property(np, "mboxes") {
681 if (!of_device_is_available(np))
682 continue;
683 i = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
684 for (j = 0; j < i; j++) {
685 ret = of_parse_phandle_with_args(np, "mboxes",
686 "#mbox-cells", j, &p);
687 if (!ret && p.np == node) {
688 num_chans++;
689 break;
690 }
691 }
692 }
693 if (num_chans)
694 return num_chans;
695
696 return 1;
697}
698
/**
 * mdev_add_mbox() - Add a mailbox to qmp device based on priority
 *
 * @mdev: qmp device to add mailbox to.
 * @new: new mailbox to add to qmp device.
 *
 * The list is kept in descending priority order: @new is inserted before
 * the first existing mailbox whose priority is <= @new's, or at the tail.
 */
static void mdev_add_mbox(struct qmp_device *mdev, struct qmp_mbox *new)
{
	struct qmp_mbox *mbox;

	list_for_each_entry(mbox, &mdev->mboxes, list) {
		/* Skip strictly higher-priority entries */
		if (mbox->priority > new->priority)
			continue;
		/* list_add_tail on a member inserts @new before @mbox */
		list_add_tail(&new->list, &mbox->list);
		return;
	}
	list_add_tail(&new->list, &mdev->mboxes);
}
717
718static struct mbox_chan_ops qmp_mbox_ops = {
719 .startup = qmp_startup,
720 .shutdown = qmp_shutdown,
721 .send_data = qmp_send_data,
722 .last_tx_done = qmp_last_tx_done,
723};
724
/* OF compatible strings this driver binds against */
static const struct of_device_id qmp_mbox_match_table[] = {
	{ .compatible = "qcom,qmp-mbox" },
	{},
};
729
730/**
731 * qmp_mbox_init() - Parse the device tree for qmp mailbox and init structure
732 *
733 * @n: child device node representing a mailbox.
734 * @mbox: device structure for this edge.
Chris Lew38722582016-11-21 18:30:24 -0800735 *
736 * Return: 0 on succes or standard Linux error code.
737 */
Chris Lew6d815562017-06-13 11:50:06 -0700738static int qmp_mbox_init(struct device_node *n, struct qmp_device *mdev)
Chris Lew38722582016-11-21 18:30:24 -0800739{
Chris Lew6d815562017-06-13 11:50:06 -0700740 int rc, i;
741 char *key;
742 struct qmp_mbox *mbox;
743 struct mbox_chan *chans;
744 u32 mbox_of, mbox_size, desc_of, priority, num_chans;
745
746 key = "mbox-desc-offset";
747 rc = of_property_read_u32(n, key, &desc_of);
748 if (rc) {
749 pr_err("%s: missing key %s\n", __func__, key);
750 return 0;
751 }
752 key = "priority";
753 rc = of_property_read_u32(n, key, &priority);
754 if (rc) {
755 pr_err("%s: missing key %s\n", __func__, key);
756 return 0;
757 }
758 mbox = devm_kzalloc(mdev->dev, sizeof(*mbox), GFP_KERNEL);
759 if (!mbox)
760 return -ENOMEM;
761
762 rc = of_property_read_u32(n, "mbox-offset", &mbox_of);
763 if (!rc)
764 mbox->mcore_mbox_offset = mbox_of;
765 rc = of_property_read_u32(n, "mbox-size", &mbox_size);
766 if (!rc)
767 mbox->mcore_mbox_size = mbox_size;
768
769 mbox->mdev = mdev;
770 mbox->priority = priority;
771 mbox->desc = mdev->msgram + desc_of;
772 num_chans = get_mbox_num_chans(n);
773 mbox->rx_disabled = (num_chans > 1) ? true : false;
774 chans = devm_kzalloc(mdev->dev, sizeof(*chans) * num_chans, GFP_KERNEL);
775 if (!chans)
776 return -ENOMEM;
777
778 for (i = 0; i < num_chans; i++)
779 chans[i].con_priv = mbox;
780
781 mbox->ctrl.dev = mdev->dev;
782 mbox->ctrl.ops = &qmp_mbox_ops;
783 mbox->ctrl.chans = chans;
784 mbox->ctrl.num_chans = num_chans;
785 mbox->ctrl.txdone_irq = true;
786 mbox->ctrl.txdone_poll = false;
787 mbox->ctrl.of_xlate = qmp_mbox_of_xlate;
788
789 rc = mbox_controller_register(&mbox->ctrl);
790 if (rc) {
791 pr_err("%s: failed to register mbox controller %d\n", __func__,
792 rc);
793 return rc;
794 }
795 spin_lock_init(&mbox->tx_lock);
796 mutex_init(&mbox->state_lock);
797 mbox->local_state = LINK_DISCONNECTED;
798 init_completion(&mbox->link_complete);
799 init_completion(&mbox->ch_complete);
800 mbox->tx_sent = false;
801 mbox->num_assigned = 0;
802 INIT_DELAYED_WORK(&mbox->dwork, qmp_notify_timeout);
803
804 mdev_add_mbox(mdev, mbox);
805 return 0;
806}
807
808
809/**
810 * qmp_edge_init() - Parse the device tree information for QMP, map io
811 * memory and register for needed interrupts
812 * @pdev: platform device for this driver.
813 *
814 * Return: 0 on succes or standard Linux error code.
815 */
816static int qmp_edge_init(struct platform_device *pdev)
817{
818 struct qmp_device *mdev = platform_get_drvdata(pdev);
Chris Lew38722582016-11-21 18:30:24 -0800819 struct device_node *node = pdev->dev.of_node;
Chris Lew6d815562017-06-13 11:50:06 -0700820 struct resource *msgram_r, *tx_irq_reg_r;
Chris Lew38722582016-11-21 18:30:24 -0800821 char *key;
822 int rc;
Chris Lew38722582016-11-21 18:30:24 -0800823
824 key = "label";
Chris Lew6d815562017-06-13 11:50:06 -0700825 mdev->name = of_get_property(node, key, NULL);
826 if (!mdev->name) {
Chris Lew38722582016-11-21 18:30:24 -0800827 pr_err("%s: missing key %s\n", __func__, key);
828 return -ENODEV;
829 }
830
831 key = "msgram";
832 msgram_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
833 if (!msgram_r) {
834 pr_err("%s: missing key %s\n", __func__, key);
835 return -ENODEV;
836 }
837
838 key = "irq-reg-base";
839 tx_irq_reg_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
840 if (!tx_irq_reg_r) {
841 pr_err("%s: missing key %s\n", __func__, key);
842 return -ENODEV;
843 }
844
845 key = "qcom,irq-mask";
Chris Lew6d815562017-06-13 11:50:06 -0700846 rc = of_property_read_u32(node, key, &mdev->irq_mask);
Chris Lew38722582016-11-21 18:30:24 -0800847 if (rc) {
848 pr_err("%s: missing key %s\n", __func__, key);
849 return -ENODEV;
850 }
851
852 key = "interrupts";
Chris Lew6d815562017-06-13 11:50:06 -0700853 mdev->rx_irq_line = irq_of_parse_and_map(node, 0);
854 if (!mdev->rx_irq_line) {
Chris Lew38722582016-11-21 18:30:24 -0800855 pr_err("%s: missing key %s\n", __func__, key);
856 return -ENODEV;
857 }
858
Chris Lew6d815562017-06-13 11:50:06 -0700859 mdev->dev = &pdev->dev;
Chris Lew38722582016-11-21 18:30:24 -0800860 mdev->tx_irq_reg = devm_ioremap_nocache(&pdev->dev, tx_irq_reg_r->start,
861 resource_size(tx_irq_reg_r));
Chris Lew6d815562017-06-13 11:50:06 -0700862 mdev->msgram = devm_ioremap_nocache(&pdev->dev, msgram_r->start,
863 resource_size(msgram_r));
864 if (!mdev->msgram || !mdev->tx_irq_reg)
865 return -EIO;
Chris Lew38722582016-11-21 18:30:24 -0800866
Chris Lew6d815562017-06-13 11:50:06 -0700867 INIT_LIST_HEAD(&mdev->mboxes);
Chris Lew38722582016-11-21 18:30:24 -0800868 return 0;
869}
870
Chris Lew38722582016-11-21 18:30:24 -0800871static int qmp_mbox_probe(struct platform_device *pdev)
872{
Chris Lew6d815562017-06-13 11:50:06 -0700873 struct device_node *edge_node = pdev->dev.of_node;
Chris Lew38722582016-11-21 18:30:24 -0800874 struct qmp_device *mdev;
Chris Lew38722582016-11-21 18:30:24 -0800875 int ret = 0;
876
877 mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
878 if (!mdev)
879 return -ENOMEM;
Chris Lew38722582016-11-21 18:30:24 -0800880
Chris Lew6d815562017-06-13 11:50:06 -0700881 platform_set_drvdata(pdev, mdev);
882 ret = qmp_edge_init(pdev);
Chris Lew38722582016-11-21 18:30:24 -0800883 if (ret)
884 return ret;
885
Chris Lew6d815562017-06-13 11:50:06 -0700886 ret = qmp_mbox_init(edge_node, mdev);
887 if (ret)
888 return ret;
Chris Lew38722582016-11-21 18:30:24 -0800889
Chris Lew38722582016-11-21 18:30:24 -0800890 kthread_init_work(&mdev->kwork, rx_worker);
891 kthread_init_worker(&mdev->kworker);
892 mdev->task = kthread_run(kthread_worker_fn, &mdev->kworker, "qmp_%s",
893 mdev->name);
Chris Lew38722582016-11-21 18:30:24 -0800894
895 ret = devm_request_irq(&pdev->dev, mdev->rx_irq_line, qmp_irq_handler,
Chris Lewdc33e3a2018-04-04 10:19:32 -0700896 IRQF_TRIGGER_RISING | IRQF_SHARED,
Chris Lew6d815562017-06-13 11:50:06 -0700897 edge_node->name, mdev);
Chris Lew38722582016-11-21 18:30:24 -0800898 if (ret < 0) {
Chris Lew6d815562017-06-13 11:50:06 -0700899 qmp_mbox_remove(pdev);
Chris Lew38722582016-11-21 18:30:24 -0800900 pr_err("%s: request irq on %d failed: %d\n", __func__,
901 mdev->rx_irq_line, ret);
902 return ret;
903 }
904 ret = enable_irq_wake(mdev->rx_irq_line);
905 if (ret < 0)
906 pr_err("%s: enable_irq_wake on %d failed: %d\n", __func__,
907 mdev->rx_irq_line, ret);
908
Chris Lew6d815562017-06-13 11:50:06 -0700909 /* Trigger RX */
Chris Lew38722582016-11-21 18:30:24 -0800910 qmp_irq_handler(0, mdev);
911 return 0;
912}
913
/* Platform driver binding; devices are matched via qmp_mbox_match_table. */
static struct platform_driver qmp_mbox_driver = {
	.probe = qmp_mbox_probe,
	.remove = qmp_mbox_remove,
	.driver = {
		.name = "qmp_mbox",
		.owner = THIS_MODULE,
		.of_match_table = qmp_mbox_match_table,
	},
};
923
924static int __init qmp_init(void)
925{
926 int rc = 0;
927
928 rc = platform_driver_register(&qmp_mbox_driver);
929 if (rc)
930 pr_err("%s: qmp_mbox_driver reg failed %d\n", __func__, rc);
931 return rc;
932}
933arch_initcall(qmp_init);
934
935MODULE_DESCRIPTION("MSM QTI Mailbox Protocol");
936MODULE_LICENSE("GPL v2");