blob: 49875cd6836506bb46ff8b2b1e444246ee84f492 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor(SCP)
 * and the Application Processors(AP). The Message Handling Unit(MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018 ARM Ltd.
 */
16
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/semaphore.h>
#include <linux/slab.h>

#include "common.h"
29
/*
 * Bit-field layout of the 32-bit SCMI message header word:
 * message id in bits [7:0], message type in [9:8], protocol id in [17:10]
 * and the token/sequence id in [27:18].
 */
#define MSG_ID_SHIFT		0
#define MSG_ID_MASK		0xff
#define MSG_TYPE_SHIFT		8
#define MSG_TYPE_MASK		0x3
#define MSG_PROTOCOL_ID_SHIFT	10
#define MSG_PROTOCOL_ID_MASK	0xff
#define MSG_TOKEN_ID_SHIFT	18
#define MSG_TOKEN_ID_MASK	0x3ff
/* Extract the token/sequence id from a received 32-bit header word */
#define MSG_XTRACT_TOKEN(header)	\
	(((header) >> MSG_TOKEN_ID_SHIFT) & MSG_TOKEN_ID_MASK)
40
/*
 * Status codes returned by the SCMI platform firmware in a message's
 * status field; mapped to Linux error codes by scmi_to_linux_errno().
 */
enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
	/*
	 * NOTE(review): implicit enum increment makes SCMI_ERR_MAX == -9,
	 * NOT -11 — do not use it as a lower bound for valid error codes.
	 */
	SCMI_ERR_MAX
};
55
/* List of all SCMI devices (struct scmi_info instances) active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list, including the users refcount per entry */
static DEFINE_MUTEX(scmi_list_mutex);
60
/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	/* protect transfer allocation (xfer_alloc_table bit operations) */
	spinlock_t xfer_lock;
};
76
/**
 * struct scmi_desc - Description of SoC integration
 *
 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
 * @max_msg: Maximum number of messages that can be pending
 *	simultaneously in the system
 * @max_msg_size: Maximum size of data per message that can be handled.
 */
struct scmi_desc {
	int max_rx_timeout_ms;
	int max_msg;
	int max_msg_size;
};
90
/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @cl: Mailbox Client
 * @tx_chan: Transmit mailbox channel
 * @tx_payload: Transmit mailbox channel payload area
 * @minfo: Message info
 * @protocols_imp: list of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @node: list head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct mbox_client cl;
	struct mbox_chan *tx_chan;
	void __iomem *tx_payload;
	struct scmi_xfers_info minfo;
	u8 *protocols_imp;
	struct list_head node;
	int users;
};
121
/* Recover the enclosing scmi_info from an embedded mbox client / handle */
#define client_to_scmi_info(c)	container_of(c, struct scmi_info, cl)
#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
124
/*
 * Layout of the shared-memory mailbox payload area.
 *
 * SCMI specification requires all parameters, message headers, return
 * arguments or any protocol data to be expressed in little endian
 * format only.
 */
struct scmi_shared_mem {
	__le32 reserved;
	__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
	__le32 reserved1[2];
	__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
	__le32 length;		/* covers msg_header + payload, in bytes */
	__le32 msg_header;
	u8 msg_payload[0];	/* trailing variable-length message data */
};
142
/*
 * Map from -(enum scmi_error_codes) to Linux errno; entries MUST stay in
 * the exact order of the enum above since the index is the negated code.
 */
static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};
157
158static inline int scmi_to_linux_errno(int errno)
159{
160 if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
161 return scmi_linux_errmap[-errno];
162 return -EIO;
163}
164
165/**
166 * scmi_dump_header_dbg() - Helper to dump a message header.
167 *
168 * @dev: Device pointer corresponding to the SCMI entity
169 * @hdr: pointer to header.
170 */
171static inline void scmi_dump_header_dbg(struct device *dev,
172 struct scmi_msg_hdr *hdr)
173{
174 dev_dbg(dev, "Command ID: %x Sequence ID: %x Protocol: %x\n",
175 hdr->id, hdr->seq, hdr->protocol_id);
176}
177
178static void scmi_fetch_response(struct scmi_xfer *xfer,
179 struct scmi_shared_mem __iomem *mem)
180{
181 xfer->hdr.status = ioread32(mem->msg_payload);
182 /* Skip the length of header and statues in payload area i.e 8 bytes*/
183 xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);
184
185 /* Take a copy to the rx buffer.. */
186 memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
187}
188
/**
 * scmi_rx_callback() - mailbox client callback for receive messages
 *
 * @cl: client pointer
 * @m: mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void scmi_rx_callback(struct mbox_client *cl, void *m)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	struct scmi_info *info = client_to_scmi_info(cl);
	struct scmi_xfers_info *minfo = &info->minfo;
	struct device *dev = info->dev;
	struct scmi_shared_mem __iomem *mem = info->tx_payload;

	/* The token in the header is the index into the xfer_block array */
	xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));

	/*
	 * Are we even expecting this?  The bit is set by scmi_one_xfer_get()
	 * before the message is sent; an unset bit means a spurious or stale
	 * response, which is dropped here.
	 */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	scmi_dump_header_dbg(dev, &xfer->hdr);
	/* Is the message of valid length? */
	if (xfer->rx.len > info->desc->max_msg_size) {
		dev_err(dev, "unable to handle %zu xfer(max %d)\n",
			xfer->rx.len, info->desc->max_msg_size);
		return;
	}

	/* Copy out status/payload, then wake the waiter in scmi_do_xfer() */
	scmi_fetch_response(xfer, mem);
	complete(&xfer->done);
}
233
234/**
235 * pack_scmi_header() - packs and returns 32-bit header
236 *
237 * @hdr: pointer to header containing all the information on message id,
238 * protocol id and sequence id.
239 */
240static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
241{
242 return ((hdr->id & MSG_ID_MASK) << MSG_ID_SHIFT) |
243 ((hdr->seq & MSG_TOKEN_ID_MASK) << MSG_TOKEN_ID_SHIFT) |
244 ((hdr->protocol_id & MSG_PROTOCOL_ID_MASK) << MSG_PROTOCOL_ID_SHIFT);
245}
246
/**
 * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
 *
 * @cl: client pointer
 * @m: mailbox message (a struct scmi_xfer in disguise)
 *
 * This function prepares the shared memory which contains the header and the
 * payload. Invoked by the mailbox framework just before the doorbell ring.
 */
static void scmi_tx_prepare(struct mbox_client *cl, void *m)
{
	struct scmi_xfer *t = m;
	struct scmi_info *info = client_to_scmi_info(cl);
	struct scmi_shared_mem __iomem *mem = info->tx_payload;

	/* Mark channel busy + clear error */
	iowrite32(0x0, &mem->channel_status);
	/* Interrupt-driven completion unless the caller asked to poll */
	iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
		  &mem->flags);
	/* Length covers the 4-byte header word plus the payload */
	iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
	iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);
	/* tx.buf may be NULL for commands that carry no payload */
	if (t->tx.buf)
		memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
}
271
/**
 * scmi_one_xfer_get() - Allocate one message
 *
 * @handle: SCMI entity handle
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCMI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: pointer to an initialised xfer on success,
 *	   ERR_PTR(-ENOMEM) when all message slots are in use.
 */
static struct scmi_xfer *scmi_one_xfer_get(const struct scmi_handle *handle)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->minfo;

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/* The bitmap index doubles as the message sequence/token id */
	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	reinit_completion(&xfer->done);

	return xfer;
}
313
/**
 * scmi_one_xfer_put() - Release a message
 *
 * @handle: SCMI entity handle
 * @xfer: message that was reserved by scmi_one_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
void scmi_one_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	unsigned long flags;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->minfo;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}
337
/**
 * scmi_do_xfer() - Do one transfer
 *
 * @handle: SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	return corresponding error, else if all goes well,
 *	return 0.
 */
int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;

	/* Triggers scmi_tx_prepare() before ringing the doorbell */
	ret = mbox_send_message(info->tx_chan, xfer);
	if (ret < 0) {
		dev_dbg(dev, "mbox send fail %d\n", ret);
		return ret;
	}

	/* mbox_send_message returns non-negative value on success, so reset */
	ret = 0;

	/* And we wait for the response, completed by scmi_rx_callback() */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
	if (!wait_for_completion_timeout(&xfer->done, timeout)) {
		dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
			(void *)_RET_IP_);
		ret = -ETIMEDOUT;
	} else if (xfer->hdr.status) {
		/* Firmware replied with an SCMI error status */
		ret = scmi_to_linux_errno(xfer->hdr.status);
	}
	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(info->tx_chan, ret);

	return ret;
}
383
/**
 * scmi_one_xfer_init() - Allocate and initialise one message
 *
 * @handle: SCMI entity handle
 * @msg_id: Message identifier
 * @prot_id: Protocol identifier for the message
 * @tx_size: transmit message size
 * @rx_size: receive message size; 0 means "up to max_msg_size"
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_one_xfer_get and
 * initialise the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
int scmi_one_xfer_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_one_xfer_get(handle);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	/* rx_size of 0 means the caller accepts up to the channel maximum */
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	xfer->hdr.protocol_id = prot_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;
	return 0;
}
429
/**
 * scmi_version_get() - command to get the revision of the SCMI entity
 *
 * @handle: Handle to SCMI entity information
 * @protocol: Protocol identifier to query the version of
 * @version: Updated with the little-endian-converted version on success
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
		     u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = scmi_one_xfer_init(handle, PROTOCOL_VERSION, protocol, 0,
				 sizeof(*version), &t);
	if (ret)
		return ret;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	scmi_one_xfer_put(handle, t);
	return ret;
}
460
/**
 * scmi_setup_protocol_implemented() - record the implemented-protocols list
 *
 * @handle: SCMI entity handle
 * @prot_imp: array of implemented protocol identifiers, allocated and
 *	owned by the base protocol (see @protocols_imp in struct scmi_info)
 */
void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
				     u8 *prot_imp)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->protocols_imp = prot_imp;
}
468
469/**
Sudeep Hollaaa4f8862017-03-28 11:36:07 +0100470 * scmi_handle_get() - Get the SCMI handle for a device
471 *
472 * @dev: pointer to device for which we want SCMI handle
473 *
474 * NOTE: The function does not track individual clients of the framework
475 * and is expected to be maintained by caller of SCMI protocol library.
476 * scmi_handle_put must be balanced with successful scmi_handle_get
477 *
478 * Return: pointer to handle if successful, NULL on error
479 */
480struct scmi_handle *scmi_handle_get(struct device *dev)
481{
482 struct list_head *p;
483 struct scmi_info *info;
484 struct scmi_handle *handle = NULL;
485
486 mutex_lock(&scmi_list_mutex);
487 list_for_each(p, &scmi_list) {
488 info = list_entry(p, struct scmi_info, node);
489 if (dev->parent == info->dev) {
490 handle = &info->handle;
491 info->users++;
492 break;
493 }
494 }
495 mutex_unlock(&scmi_list_mutex);
496
497 return handle;
498}
499
/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 is successfully released
 *	if null was passed, it returns -EINVAL;
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	/* Warn (once) on an unbalanced put instead of underflowing users */
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}
527
/* Default SoC integration parameters for the generic "arm,scmi" compatible */
static const struct scmi_desc scmi_generic_desc = {
	.max_rx_timeout_ms = 30,	/* we may increase this if required */
	.max_msg = 20,		/* Limited by MBOX_TX_QUEUE_LEN */
	.max_msg_size = 128,
};
533
/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);
541
542static int scmi_xfer_info_init(struct scmi_info *sinfo)
543{
544 int i;
545 struct scmi_xfer *xfer;
546 struct device *dev = sinfo->dev;
547 const struct scmi_desc *desc = sinfo->desc;
548 struct scmi_xfers_info *info = &sinfo->minfo;
549
550 /* Pre-allocated messages, no more than what hdr.seq can support */
551 if (WARN_ON(desc->max_msg >= (MSG_TOKEN_ID_MASK + 1))) {
552 dev_err(dev, "Maximum message of %d exceeds supported %d\n",
553 desc->max_msg, MSG_TOKEN_ID_MASK + 1);
554 return -EINVAL;
555 }
556
557 info->xfer_block = devm_kcalloc(dev, desc->max_msg,
558 sizeof(*info->xfer_block), GFP_KERNEL);
559 if (!info->xfer_block)
560 return -ENOMEM;
561
562 info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
563 sizeof(long), GFP_KERNEL);
564 if (!info->xfer_alloc_table)
565 return -ENOMEM;
566
567 bitmap_zero(info->xfer_alloc_table, desc->max_msg);
568
569 /* Pre-initialize the buffer pointer to pre-allocated buffers */
570 for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
571 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
572 GFP_KERNEL);
573 if (!xfer->rx.buf)
574 return -ENOMEM;
575
576 xfer->tx.buf = xfer->rx.buf;
577 init_completion(&xfer->done);
578 }
579
580 spin_lock_init(&info->xfer_lock);
581
582 return 0;
583}
584
585static int scmi_mailbox_check(struct device_node *np)
586{
587 struct of_phandle_args arg;
588
589 return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg);
590}
591
592static int scmi_mbox_free_channel(struct scmi_info *info)
593{
594 if (!IS_ERR_OR_NULL(info->tx_chan)) {
595 mbox_free_channel(info->tx_chan);
596 info->tx_chan = NULL;
597 }
598
599 return 0;
600}
601
602static int scmi_remove(struct platform_device *pdev)
603{
604 int ret = 0;
605 struct scmi_info *info = platform_get_drvdata(pdev);
606
607 mutex_lock(&scmi_list_mutex);
608 if (info->users)
609 ret = -EBUSY;
610 else
611 list_del(&info->node);
612 mutex_unlock(&scmi_list_mutex);
613
614 if (!ret)
615 /* Safe to free channels since no more users */
616 return scmi_mbox_free_channel(info);
617
618 return ret;
619}
620
/**
 * scmi_mbox_chan_setup() - initialise the mailbox client and Tx channel
 *
 * @info: SCMI instance being probed
 *
 * Configures the mbox_client callbacks, maps the "shmem" payload area
 * described in the device tree and requests the transmit channel.
 *
 * Return: 0 on success, else a negative error (-EPROBE_DEFER is passed
 *	through silently when the mailbox controller is not yet up).
 */
static inline int scmi_mbox_chan_setup(struct scmi_info *info)
{
	int ret;
	struct resource res;
	resource_size_t size;
	struct device *dev = info->dev;
	struct device_node *shmem, *np = dev->of_node;
	struct mbox_client *cl;

	cl = &info->cl;
	cl->dev = dev;
	cl->rx_callback = scmi_rx_callback;
	cl->tx_prepare = scmi_tx_prepare;
	cl->tx_block = false;
	/* We call mbox_client_txdone() ourselves in scmi_do_xfer() */
	cl->knows_txdone = true;

	/* Locate and map the shared-memory payload area from the DT */
	shmem = of_parse_phandle(np, "shmem", 0);
	ret = of_address_to_resource(shmem, 0, &res);
	of_node_put(shmem);
	if (ret) {
		dev_err(dev, "failed to get SCMI Tx payload mem resource\n");
		return ret;
	}

	size = resource_size(&res);
	info->tx_payload = devm_ioremap(dev, res.start, size);
	if (!info->tx_payload) {
		dev_err(dev, "failed to ioremap SCMI Tx payload\n");
		return -EADDRNOTAVAIL;
	}

	/* Transmit channel is first entry i.e. index 0 */
	info->tx_chan = mbox_request_channel(cl, 0);
	if (IS_ERR(info->tx_chan)) {
		ret = PTR_ERR(info->tx_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to request SCMI Tx mailbox\n");
		return ret;
	}

	return 0;
}
663
/*
 * Platform driver probe: allocate and initialise one SCMI instance,
 * set up its mailbox channel, talk to the firmware via the base
 * protocol and, only once everything works, publish the instance on
 * scmi_list for scmi_handle_get() users.
 */
static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	/* Only mailbox method supported, check for the presence of one */
	if (scmi_mailbox_check(np)) {
		dev_err(dev, "no mailbox found in %pOF\n", np);
		return -EINVAL;
	}

	/* Match is guaranteed here: probe only runs for scmi_of_match ids */
	desc = of_match_device(scmi_of_match, dev)->data;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, info);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;

	ret = scmi_mbox_chan_setup(info);
	if (ret)
		return ret;

	/* First real exchange with the firmware; also fills in version */
	ret = scmi_base_protocol_init(handle);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
		scmi_mbox_free_channel(info);
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	return 0;
}
716
/* Platform driver glue; probe/remove defined above */
static struct platform_driver scmi_driver = {
	.driver = {
		   .name = "arm-scmi",
		   .of_match_table = scmi_of_match,
		   },
	.probe = scmi_probe,
	.remove = scmi_remove,
};

module_platform_driver(scmi_driver);
727
728MODULE_ALIAS("platform: arm-scmi");
729MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
730MODULE_DESCRIPTION("ARM SCMI protocol driver");
731MODULE_LICENSE("GPL v2");