/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
12
13#include <linux/kernel.h>
14#include <linux/errno.h>
15#include <linux/slab.h>
16#include <linux/init.h>
17#include <linux/completion.h>
18#include <linux/idr.h>
19#include <linux/pm_runtime.h>
20#include <linux/slimbus/slimbus.h>
21
/* Pack logical address, flow direction and port number into a port handle */
#define SLIM_PORT_HDL(la, f, p) ((la)<<24 | (f) << 16 | (p))

/* Unpack the fields encoded by SLIM_PORT_HDL() */
#define SLIM_HDL_TO_LA(hdl) ((u32)((hdl) & 0xFF000000) >> 24)
#define SLIM_HDL_TO_FLOW(hdl) (((u32)(hdl) & 0xFF0000) >> 16)
#define SLIM_HDL_TO_PORT(hdl) ((u32)(hdl) & 0xFF)

/* Channel handles: low byte is the channel index; for a group handle the
 * next byte carries the number of channels in the group.
 */
#define SLIM_HDL_TO_CHIDX(hdl) ((u16)(hdl) & 0xFF)
#define SLIM_GRP_TO_NCHAN(hdl) ((u16)(hdl >> 8) & 0xFF)

/* Encode a port owned by a slave (la) or by the manager (la == 0xFF) */
#define SLIM_SLAVE_PORT(p, la) (((la)<<16) | (p))
#define SLIM_MGR_PORT(p) ((0xFF << 16) | (p))
#define SLIM_LA_MANAGER 0xFF

/* Flags marking the first/last channel handle of a channel group */
#define SLIM_START_GRP (1 << 8)
#define SLIM_END_GRP (1 << 9)

/* Slot budget per superframe for interval-coefficient 3 and 1 channels */
#define SLIM_MAX_INTR_COEFF_3 (SLIM_SL_PER_SUPERFRAME/3)
#define SLIM_MAX_INTR_COEFF_1 SLIM_SL_PER_SUPERFRAME

/* slim_lock guards ctrl_idr (controller number allocation) */
static DEFINE_MUTEX(slim_lock);
static DEFINE_IDR(ctrl_idr);
static struct device_type slim_dev_type;
static struct device_type slim_ctrl_type;

/* Declare an on-stack transaction addressed to a single logical address */
#define DEFINE_SLIM_LDEST_TXN(name, mc, len, rl, rbuf, wbuf, la) \
	struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_LOGICALADDR, 0,\
					len, 0, la, false, rbuf, wbuf, NULL, }

/* Declare an on-stack transaction broadcast to every device on the bus */
#define DEFINE_SLIM_BCAST_TXN(name, mc, len, rl, rbuf, wbuf, la) \
	struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_BROADCAST, 0,\
					len, 0, la, false, rbuf, wbuf, NULL, }
53
54static const struct slim_device_id *slim_match(const struct slim_device_id *id,
55 const struct slim_device *slim_dev)
56{
57 while (id->name[0]) {
58 if (strcmp(slim_dev->name, id->name) == 0)
59 return id;
60 id++;
61 }
62 return NULL;
63}
64
/*
 * slim_get_device_id: Return the id-table entry matching @sdev's name.
 * @sdev: Device whose bound driver's id_table is searched.
 * Returns NULL if no entry matches.
 * NOTE(review): assumes sdev->dev.driver is non-NULL - only call this on
 * a device that is bound to a driver.
 */
const struct slim_device_id *slim_get_device_id(const struct slim_device *sdev)
{
	const struct slim_driver *sdrv = to_slim_driver(sdev->dev.driver);

	return slim_match(sdrv->id_table, sdev);
}
EXPORT_SYMBOL(slim_get_device_id);
72
73static int slim_device_match(struct device *dev, struct device_driver *driver)
74{
75 struct slim_device *slim_dev;
76 struct slim_driver *drv = to_slim_driver(driver);
77
78 if (dev->type == &slim_dev_type)
79 slim_dev = to_slim_device(dev);
80 else
81 return 0;
82 if (drv->id_table)
83 return slim_match(drv->id_table, slim_dev) != NULL;
84
85 if (driver->name)
86 return strcmp(slim_dev->name, driver->name) == 0;
87 return 0;
88}
89
#ifdef CONFIG_PM_SLEEP
/* Invoke the driver's legacy (non-dev_pm_ops) suspend hook, if any */
static int slim_legacy_suspend(struct device *dev, pm_message_t mesg)
{
	struct slim_driver *sbdrv;

	if (dev->type != &slim_dev_type || !dev->driver)
		return 0;

	sbdrv = to_slim_driver(dev->driver);
	return sbdrv->suspend ? sbdrv->suspend(to_slim_device(dev), mesg) : 0;
}

/* Invoke the driver's legacy resume hook, if any */
static int slim_legacy_resume(struct device *dev)
{
	struct slim_driver *sbdrv;

	if (dev->type != &slim_dev_type || !dev->driver)
		return 0;

	sbdrv = to_slim_driver(dev->driver);
	return sbdrv->resume ? sbdrv->resume(to_slim_device(dev)) : 0;
}

/* System suspend: use dev_pm_ops when the driver provides them,
 * otherwise fall back to the legacy callback.
 */
static int slim_pm_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm ? pm_generic_suspend(dev) :
		    slim_legacy_suspend(dev, PMSG_SUSPEND);
}

/* System resume counterpart of slim_pm_suspend() */
static int slim_pm_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	return pm ? pm_generic_resume(dev) : slim_legacy_resume(dev);
}

#else
#define slim_pm_suspend NULL
#define slim_pm_resume NULL
#endif
151
/* Bus-level PM ops; runtime PM delegates to the generic driver callbacks */
static const struct dev_pm_ops slimbus_pm = {
	.suspend = slim_pm_suspend,
	.resume = slim_pm_resume,
	SET_RUNTIME_PM_OPS(
		pm_generic_suspend,
		pm_generic_resume,
		NULL
		)
};
/* The slimbus bus type registered with the driver core */
struct bus_type slimbus_type = {
	.name = "slimbus",
	.match = slim_device_match,
	.pm = &slimbus_pm,
};
EXPORT_SYMBOL(slimbus_type);

/* Bus root device registered in slimbus_init() */
struct device slimbus_dev = {
	.init_name = "slimbus",
};
171
/* Module teardown: undo slimbus_init() in reverse order */
static void __exit slimbus_exit(void)
{
	device_unregister(&slimbus_dev);
	bus_unregister(&slimbus_type);
}
177
178static int __init slimbus_init(void)
179{
180 int retval;
181
182 retval = bus_register(&slimbus_type);
183 if (!retval)
184 retval = device_register(&slimbus_dev);
185
186 if (retval)
187 bus_unregister(&slimbus_type);
188
189 return retval;
190}
191postcore_initcall(slimbus_init);
192module_exit(slimbus_exit);
193
/*
 * Bus probe callback: run the client driver's probe; on success, if the
 * driver registered a device_up handler, queue the device's report work
 * so the notification is delivered from the controller workqueue.
 * Returns -ENODEV when the driver has no probe callback.
 */
static int slim_drv_probe(struct device *dev)
{
	const struct slim_driver *sdrv = to_slim_driver(dev->driver);
	struct slim_device *sbdev = to_slim_device(dev);
	struct slim_controller *ctrl = sbdev->ctrl;

	if (sdrv->probe) {
		int ret;

		ret = sdrv->probe(sbdev);
		if (ret)
			return ret;
		/* defer device_up() delivery to slim_report() */
		if (sdrv->device_up)
			queue_work(ctrl->wq, &sbdev->wd);
		return 0;
	}
	return -ENODEV;
}
212
213static int slim_drv_remove(struct device *dev)
214{
215 const struct slim_driver *sdrv = to_slim_driver(dev->driver);
216 struct slim_device *sbdev = to_slim_device(dev);
217
218 sbdev->notified = false;
219 if (sdrv->remove)
220 return sdrv->remove(to_slim_device(dev));
221 return -ENODEV;
222}
223
224static void slim_drv_shutdown(struct device *dev)
225{
226 const struct slim_driver *sdrv = to_slim_driver(dev->driver);
227
228 if (sdrv->shutdown)
229 sdrv->shutdown(to_slim_device(dev));
230}
231
232/*
233 * slim_driver_register: Client driver registration with slimbus
234 * @drv:Client driver to be associated with client-device.
235 * This API will register the client driver with the slimbus
236 * It is called from the driver's module-init function.
237 */
238int slim_driver_register(struct slim_driver *drv)
239{
240 drv->driver.bus = &slimbus_type;
241 if (drv->probe)
242 drv->driver.probe = slim_drv_probe;
243
244 if (drv->remove)
245 drv->driver.remove = slim_drv_remove;
246
247 if (drv->shutdown)
248 drv->driver.shutdown = slim_drv_shutdown;
249
250 return driver_register(&drv->driver);
251}
252EXPORT_SYMBOL(slim_driver_register);
253
254/*
255 * slim_driver_unregister: Undo effects of slim_driver_register
256 * @drv: Client driver to be unregistered
257 */
258void slim_driver_unregister(struct slim_driver *drv)
259{
260 if (drv)
261 driver_unregister(&drv->driver);
262}
263EXPORT_SYMBOL(slim_driver_unregister);
264
/* No sysfs attribute groups for controllers yet */
#define slim_ctrl_attr_gr NULL

/*
 * Device-core release callback for a controller: signals the waiter in
 * slim_del_controller() that the last reference is gone.
 */
static void slim_ctrl_release(struct device *dev)
{
	struct slim_controller *ctrl = to_slim_controller(dev);

	complete(&ctrl->dev_released);
}

static struct device_type slim_ctrl_type = {
	.groups = slim_ctrl_attr_gr,
	.release = slim_ctrl_release,
};
278
279static struct slim_controller *slim_ctrl_get(struct slim_controller *ctrl)
280{
281 if (!ctrl || !get_device(&ctrl->dev))
282 return NULL;
283
284 return ctrl;
285}
286
287static void slim_ctrl_put(struct slim_controller *ctrl)
288{
289 if (ctrl)
290 put_device(&ctrl->dev);
291}
292
/* No sysfs attribute groups or uevent handler for slave devices yet */
#define slim_device_attr_gr NULL
#define slim_device_uevent NULL
/*
 * Device-core release callback for a slave device: drops the controller
 * reference taken in slim_add_device().
 */
static void slim_dev_release(struct device *dev)
{
	struct slim_device *sbdev = to_slim_device(dev);

	slim_ctrl_put(sbdev->ctrl);
}

static struct device_type slim_dev_type = {
	.groups = slim_device_attr_gr,
	.uevent = slim_device_uevent,
	.release = slim_dev_release,
};
307
308static void slim_report(struct work_struct *work)
309{
310 struct slim_driver *sbdrv;
311 struct slim_device *sbdev =
312 container_of(work, struct slim_device, wd);
313 if (!sbdev->dev.driver)
314 return;
315 /* check if device-up or down needs to be called */
316 if ((!sbdev->reported && !sbdev->notified) ||
317 (sbdev->reported && sbdev->notified))
318 return;
319
320 sbdrv = to_slim_driver(sbdev->dev.driver);
321 /*
322 * address no longer valid, means device reported absent, whereas
323 * address valid, means device reported present
324 */
325 if (sbdev->notified && !sbdev->reported) {
326 sbdev->notified = false;
327 if (sbdrv->device_down)
328 sbdrv->device_down(sbdev);
329 } else if (!sbdev->notified && sbdev->reported) {
330 sbdev->notified = true;
331 if (sbdrv->device_up)
332 sbdrv->device_up(sbdev);
333 }
334}
335
336/*
337 * slim_add_device: Add a new device without register board info.
338 * @ctrl: Controller to which this device is to be added to.
339 * Called when device doesn't have an explicit client-driver to be probed, or
340 * the client-driver is a module installed dynamically.
341 */
342int slim_add_device(struct slim_controller *ctrl, struct slim_device *sbdev)
343{
344 sbdev->dev.bus = &slimbus_type;
345 sbdev->dev.parent = ctrl->dev.parent;
346 sbdev->dev.type = &slim_dev_type;
347 sbdev->dev.driver = NULL;
348 sbdev->ctrl = ctrl;
349 slim_ctrl_get(ctrl);
350 dev_set_name(&sbdev->dev, "%s", sbdev->name);
351 mutex_init(&sbdev->sldev_reconf);
352 INIT_LIST_HEAD(&sbdev->mark_define);
353 INIT_LIST_HEAD(&sbdev->mark_suspend);
354 INIT_LIST_HEAD(&sbdev->mark_removal);
355 INIT_WORK(&sbdev->wd, slim_report);
356 mutex_lock(&ctrl->m_ctrl);
357 list_add_tail(&sbdev->dev_list, &ctrl->devs);
358 mutex_unlock(&ctrl->m_ctrl);
359 /* probe slave on this controller */
360 return device_register(&sbdev->dev);
361}
362EXPORT_SYMBOL(slim_add_device);
363
/* Pairs a slim_boardinfo entry with its node on the global board_list */
struct sbi_boardinfo {
	struct list_head list;
	struct slim_boardinfo board_info;
};

/* Board-info entries and registered controllers, both guarded by board_lock */
static LIST_HEAD(board_list);
static LIST_HEAD(slim_ctrl_list);
static DEFINE_MUTEX(board_lock);
372
373/* If controller is not present, only add to boards list */
374static void slim_match_ctrl_to_boardinfo(struct slim_controller *ctrl,
375 struct slim_boardinfo *bi)
376{
377 int ret;
378
379 if (ctrl->nr != bi->bus_num)
380 return;
381
382 ret = slim_add_device(ctrl, bi->slim_slave);
383 if (ret != 0)
384 dev_err(ctrl->dev.parent, "can't create new device for %s\n",
385 bi->slim_slave->name);
386}
387
388/*
389 * slim_register_board_info: Board-initialization routine.
390 * @info: List of all devices on all controllers present on the board.
391 * @n: number of entries.
392 * API enumerates respective devices on corresponding controller.
393 * Called from board-init function.
394 */
395int slim_register_board_info(struct slim_boardinfo const *info, unsigned int n)
396{
397 struct sbi_boardinfo *bi;
398 int i;
399
400 bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
401 if (!bi)
402 return -ENOMEM;
403
404 for (i = 0; i < n; i++, bi++, info++) {
405 struct slim_controller *ctrl;
406
407 memcpy(&bi->board_info, info, sizeof(*info));
408 mutex_lock(&board_lock);
409 list_add_tail(&bi->list, &board_list);
410 list_for_each_entry(ctrl, &slim_ctrl_list, list)
411 slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info);
412 mutex_unlock(&board_lock);
413 }
414 return 0;
415}
416EXPORT_SYMBOL(slim_register_board_info);
417
418/*
419 * slim_ctrl_add_boarddevs: Add devices registered by board-info
420 * @ctrl: Controller to which these devices are to be added to.
421 * This API is called by controller when it is up and running.
422 * If devices on a controller were registered before controller,
423 * this will make sure that they get probed when controller is up.
424 */
425void slim_ctrl_add_boarddevs(struct slim_controller *ctrl)
426{
427 struct sbi_boardinfo *bi;
428
429 mutex_lock(&board_lock);
430 list_add_tail(&ctrl->list, &slim_ctrl_list);
431 list_for_each_entry(bi, &board_list, list)
432 slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info);
433 mutex_unlock(&board_lock);
434}
435EXPORT_SYMBOL(slim_ctrl_add_boarddevs);
436
437/*
438 * slim_busnum_to_ctrl: Map bus number to controller
439 * @busnum: Bus number
440 * Returns controller representing this bus number
441 */
442struct slim_controller *slim_busnum_to_ctrl(u32 bus_num)
443{
444 struct slim_controller *ctrl;
445
446 mutex_lock(&board_lock);
447 list_for_each_entry(ctrl, &slim_ctrl_list, list)
448 if (bus_num == ctrl->nr) {
449 mutex_unlock(&board_lock);
450 return ctrl;
451 }
452 mutex_unlock(&board_lock);
453 return NULL;
454}
455EXPORT_SYMBOL(slim_busnum_to_ctrl);
456
457static int slim_register_controller(struct slim_controller *ctrl)
458{
459 int ret = 0;
460
461 /* Can't register until after driver model init */
462 if (WARN_ON(!slimbus_type.p)) {
463 ret = -EPROBE_DEFER;
464 goto out_list;
465 }
466
467 dev_set_name(&ctrl->dev, "sb-%d", ctrl->nr);
468 ctrl->dev.bus = &slimbus_type;
469 ctrl->dev.type = &slim_ctrl_type;
470 ctrl->num_dev = 0;
471 if (!ctrl->min_cg)
472 ctrl->min_cg = SLIM_MIN_CLK_GEAR;
473 if (!ctrl->max_cg)
474 ctrl->max_cg = SLIM_MAX_CLK_GEAR;
475 spin_lock_init(&ctrl->txn_lock);
476 mutex_init(&ctrl->m_ctrl);
477 mutex_init(&ctrl->sched.m_reconf);
478 ret = device_register(&ctrl->dev);
479 if (ret)
480 goto out_list;
481
482 dev_dbg(&ctrl->dev, "Bus [%s] registered:dev:%p\n", ctrl->name,
483 &ctrl->dev);
484
485 if (ctrl->nports) {
486 ctrl->ports = kcalloc(ctrl->nports, sizeof(struct slim_port),
487 GFP_KERNEL);
488 if (!ctrl->ports) {
489 ret = -ENOMEM;
490 goto err_port_failed;
491 }
492 }
493 if (ctrl->nchans) {
494 ctrl->chans = kcalloc(ctrl->nchans, sizeof(struct slim_ich),
495 GFP_KERNEL);
496 if (!ctrl->chans) {
497 ret = -ENOMEM;
498 goto err_chan_failed;
499 }
500
501 ctrl->sched.chc1 = kcalloc(ctrl->nchans,
502 sizeof(struct slim_ich *), GFP_KERNEL);
503 if (!ctrl->sched.chc1) {
504 kfree(ctrl->chans);
505 ret = -ENOMEM;
506 goto err_chan_failed;
507 }
508 ctrl->sched.chc3 = kcalloc(ctrl->nchans,
509 sizeof(struct slim_ich *), GFP_KERNEL);
510 if (!ctrl->sched.chc3) {
511 kfree(ctrl->sched.chc1);
512 kfree(ctrl->chans);
513 ret = -ENOMEM;
514 goto err_chan_failed;
515 }
516 }
517#ifdef DEBUG
518 ctrl->sched.slots = kzalloc(SLIM_SL_PER_SUPERFRAME, GFP_KERNEL);
519#endif
520 init_completion(&ctrl->pause_comp);
521
522 INIT_LIST_HEAD(&ctrl->devs);
523 ctrl->wq = create_singlethread_workqueue(dev_name(&ctrl->dev));
524 if (!ctrl->wq)
525 goto err_workq_failed;
526
527 return 0;
528
529err_workq_failed:
530 kfree(ctrl->sched.chc3);
531 kfree(ctrl->sched.chc1);
532 kfree(ctrl->chans);
533err_chan_failed:
534 kfree(ctrl->ports);
535err_port_failed:
536 device_unregister(&ctrl->dev);
537out_list:
538 mutex_lock(&slim_lock);
539 idr_remove(&ctrl_idr, ctrl->nr);
540 mutex_unlock(&slim_lock);
541 return ret;
542}
543
/* slim_remove_device: Remove the effect of slim_add_device() */
void slim_remove_device(struct slim_device *sbdev)
{
	struct slim_controller *ctrl = sbdev->ctrl;

	/* unlink from the controller's list before tearing down the device */
	mutex_lock(&ctrl->m_ctrl);
	list_del_init(&sbdev->dev_list);
	mutex_unlock(&ctrl->m_ctrl);
	device_unregister(&sbdev->dev);
}
EXPORT_SYMBOL(slim_remove_device);
555
556static void slim_ctrl_remove_device(struct slim_controller *ctrl,
557 struct slim_boardinfo *bi)
558{
559 if (ctrl->nr == bi->bus_num)
560 slim_remove_device(bi->slim_slave);
561}
562
563/*
564 * slim_del_controller: Controller tear-down.
565 * Controller added with the above API is teared down using this API.
566 */
567int slim_del_controller(struct slim_controller *ctrl)
568{
569 struct slim_controller *found;
570 struct sbi_boardinfo *bi;
571
572 /* First make sure that this bus was added */
573 mutex_lock(&slim_lock);
574 found = idr_find(&ctrl_idr, ctrl->nr);
575 mutex_unlock(&slim_lock);
576 if (found != ctrl)
577 return -EINVAL;
578
579 /* Remove all clients */
580 mutex_lock(&board_lock);
581 list_for_each_entry(bi, &board_list, list)
582 slim_ctrl_remove_device(ctrl, &bi->board_info);
583 mutex_unlock(&board_lock);
584
585 init_completion(&ctrl->dev_released);
586 device_unregister(&ctrl->dev);
587
588 wait_for_completion(&ctrl->dev_released);
589 list_del(&ctrl->list);
590 destroy_workqueue(ctrl->wq);
591 /* free bus id */
592 mutex_lock(&slim_lock);
593 idr_remove(&ctrl_idr, ctrl->nr);
594 mutex_unlock(&slim_lock);
595
596 kfree(ctrl->sched.chc1);
597 kfree(ctrl->sched.chc3);
598#ifdef DEBUG
599 kfree(ctrl->sched.slots);
600#endif
601 kfree(ctrl->chans);
602 kfree(ctrl->ports);
603
604 return 0;
605}
606EXPORT_SYMBOL(slim_del_controller);
607
608/*
609 * slim_add_numbered_controller: Controller bring-up.
610 * @ctrl: Controller to be registered.
611 * A controller is registered with the framework using this API. ctrl->nr is the
612 * desired number with which slimbus framework registers the controller.
613 * Function will return -EBUSY if the number is in use.
614 */
615int slim_add_numbered_controller(struct slim_controller *ctrl)
616{
617 int id;
618
619 mutex_lock(&slim_lock);
620 id = idr_alloc(&ctrl_idr, ctrl, ctrl->nr, ctrl->nr + 1, GFP_KERNEL);
621 mutex_unlock(&slim_lock);
622
623 if (id < 0)
624 return id;
625
626 ctrl->nr = id;
627 return slim_register_controller(ctrl);
628}
629EXPORT_SYMBOL(slim_add_numbered_controller);
630
631/*
632 * slim_report_absent: Controller calls this function when a device
633 * reports absent, OR when the device cannot be communicated with
634 * @sbdev: Device that cannot be reached, or sent report absent
635 */
636void slim_report_absent(struct slim_device *sbdev)
637{
638 struct slim_controller *ctrl;
639 int i;
640
641 if (!sbdev)
642 return;
643 ctrl = sbdev->ctrl;
644 if (!ctrl)
645 return;
646 /* invalidate logical addresses */
647 mutex_lock(&ctrl->m_ctrl);
648 for (i = 0; i < ctrl->num_dev; i++) {
649 if (sbdev->laddr == ctrl->addrt[i].laddr)
650 ctrl->addrt[i].valid = false;
651 }
652 mutex_unlock(&ctrl->m_ctrl);
653 sbdev->reported = false;
654 queue_work(ctrl->wq, &sbdev->wd);
655}
656EXPORT_SYMBOL(slim_report_absent);
657
static int slim_remove_ch(struct slim_controller *ctrl, struct slim_ich *slc);
/*
 * slim_framer_booted: This function is called by controller after the active
 * framer has booted (using Bus Reset sequence, or after it has shutdown and has
 * come back up). Components, devices on the bus may be in undefined state,
 * and this function triggers their drivers to do the needful
 * to bring them back in Reset state so that they can acquire sync, report
 * present and be operational again.
 */
void slim_framer_booted(struct slim_controller *ctrl)
{
	struct slim_device *sbdev;
	struct list_head *pos, *next;
	int i;

	if (!ctrl)
		return;

	/* Since framer has rebooted, reset all data channels */
	mutex_lock(&ctrl->sched.m_reconf);
	for (i = 0; i < ctrl->nchans; i++) {
		struct slim_ich *slc = &ctrl->chans[i];

		if (slc->state > SLIM_CH_DEFINED)
			slim_remove_ch(ctrl, slc);
	}
	mutex_unlock(&ctrl->sched.m_reconf);
	mutex_lock(&ctrl->m_ctrl);
	list_for_each_safe(pos, next, &ctrl->devs) {
		struct slim_driver *sbdrv;

		sbdev = list_entry(pos, struct slim_device, dev_list);
		/*
		 * m_ctrl is dropped around the callback - presumably so
		 * reset_device() can call bus APIs taking the same mutex.
		 * NOTE(review): the list may mutate while unlocked even
		 * with list_for_each_safe; confirm devices cannot be
		 * removed concurrently with a framer reboot.
		 */
		mutex_unlock(&ctrl->m_ctrl);
		if (sbdev && sbdev->dev.driver) {
			sbdrv = to_slim_driver(sbdev->dev.driver);
			if (sbdrv->reset_device)
				sbdrv->reset_device(sbdev);
		}
		mutex_lock(&ctrl->m_ctrl);
	}
	mutex_unlock(&ctrl->m_ctrl);
}
EXPORT_SYMBOL(slim_framer_booted);
701
702/*
703 * slim_msg_response: Deliver Message response received from a device to the
704 * framework.
705 * @ctrl: Controller handle
706 * @reply: Reply received from the device
707 * @len: Length of the reply
708 * @tid: Transaction ID received with which framework can associate reply.
709 * Called by controller to inform framework about the response received.
710 * This helps in making the API asynchronous, and controller-driver doesn't need
711 * to manage 1 more table other than the one managed by framework mapping TID
712 * with buffers
713 */
714void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 len)
715{
716 int i;
717 unsigned long flags;
718 bool async;
719 struct slim_msg_txn *txn;
720
721 spin_lock_irqsave(&ctrl->txn_lock, flags);
722 txn = ctrl->txnt[tid];
723 if (txn == NULL || txn->rbuf == NULL) {
724 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
725 if (txn == NULL)
726 dev_err(&ctrl->dev, "Got response to invalid TID:%d, len:%d",
727 tid, len);
728 else
729 dev_err(&ctrl->dev, "Invalid client buffer passed\n");
730 return;
731 }
732 async = txn->async;
733 for (i = 0; i < len; i++)
734 txn->rbuf[i] = reply[i];
735 if (txn->comp)
736 complete(txn->comp);
737 ctrl->txnt[tid] = NULL;
738 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
739 if (async)
740 kfree(txn);
741}
742EXPORT_SYMBOL(slim_msg_response);
743
/*
 * Submit a transaction to the controller. When @need_tid is true (reads),
 * allocate the lowest free TID slot in ctrl->txnt so the eventual reply can
 * be matched back to @txn by slim_msg_response().
 * Returns -ENOMEM when the TID space is exhausted (last_tid capped at 255).
 */
static int slim_processtxn(struct slim_controller *ctrl,
				struct slim_msg_txn *txn, bool need_tid)
{
	u8 i = 0;
	int ret = 0;
	unsigned long flags;

	if (need_tid) {
		spin_lock_irqsave(&ctrl->txn_lock, flags);
		/* reuse a freed slot below the high-water mark if possible */
		for (i = 0; i < ctrl->last_tid; i++) {
			if (ctrl->txnt[i] == NULL)
				break;
		}
		if (i >= ctrl->last_tid) {
			if (ctrl->last_tid == 255) {
				spin_unlock_irqrestore(&ctrl->txn_lock, flags);
				return -ENOMEM;
			}
			/* no free slot: grow the high-water mark */
			ctrl->last_tid++;
		}
		ctrl->txnt[i] = txn;
		txn->tid = i;
		spin_unlock_irqrestore(&ctrl->txn_lock, flags);
	}

	ret = ctrl->xfer_msg(ctrl, txn);
	return ret;
}
772
773static int ctrl_getlogical_addr(struct slim_controller *ctrl, const u8 *eaddr,
774 u8 e_len, u8 *entry)
775{
776 u8 i;
777
778 for (i = 0; i < ctrl->num_dev; i++) {
779 if (ctrl->addrt[i].valid &&
780 memcmp(ctrl->addrt[i].eaddr, eaddr, e_len) == 0) {
781 *entry = i;
782 return 0;
783 }
784 }
785 return -ENXIO;
786}
787
788/*
789 * slim_assign_laddr: Assign logical address to a device enumerated.
790 * @ctrl: Controller with which device is enumerated.
791 * @e_addr: 6-byte elemental address of the device.
792 * @e_len: buffer length for e_addr
793 * @laddr: Return logical address (if valid flag is false)
794 * @valid: true if laddr holds a valid address that controller wants to
795 * set for this enumeration address. Otherwise framework sets index into
796 * address table as logical address.
797 * Called by controller in response to REPORT_PRESENT. Framework will assign
798 * a logical address to this enumeration address.
799 * Function returns -EXFULL to indicate that all logical addresses are already
800 * taken.
801 */
802int slim_assign_laddr(struct slim_controller *ctrl, const u8 *e_addr,
803 u8 e_len, u8 *laddr, bool valid)
804{
805 int ret;
806 u8 i = 0;
807 bool exists = false;
808 struct slim_device *sbdev;
809 struct list_head *pos, *next;
810 void *new_addrt = NULL;
811
812 mutex_lock(&ctrl->m_ctrl);
813 /* already assigned */
814 if (ctrl_getlogical_addr(ctrl, e_addr, e_len, &i) == 0) {
815 *laddr = ctrl->addrt[i].laddr;
816 exists = true;
817 } else {
818 if (ctrl->num_dev >= 254) {
819 ret = -EXFULL;
820 goto ret_assigned_laddr;
821 }
822 for (i = 0; i < ctrl->num_dev; i++) {
823 if (ctrl->addrt[i].valid == false)
824 break;
825 }
826 if (i == ctrl->num_dev) {
827 new_addrt = krealloc(ctrl->addrt,
828 (ctrl->num_dev + 1) *
829 sizeof(struct slim_addrt),
830 GFP_KERNEL);
831 if (!new_addrt) {
832 ret = -ENOMEM;
833 goto ret_assigned_laddr;
834 }
835 ctrl->addrt = new_addrt;
836 ctrl->num_dev++;
837 }
838 memcpy(ctrl->addrt[i].eaddr, e_addr, e_len);
839 ctrl->addrt[i].valid = true;
840 /* Preferred address is index into table */
841 if (!valid)
842 *laddr = i;
843 }
844
845 ret = ctrl->set_laddr(ctrl, (const u8 *)&ctrl->addrt[i].eaddr, 6,
846 *laddr);
847 if (ret) {
848 ctrl->addrt[i].valid = false;
849 goto ret_assigned_laddr;
850 }
851 ctrl->addrt[i].laddr = *laddr;
852
853 dev_dbg(&ctrl->dev, "setting slimbus l-addr:%x\n", *laddr);
854ret_assigned_laddr:
855 mutex_unlock(&ctrl->m_ctrl);
856 if (exists || ret)
857 return ret;
858
859 pr_info("slimbus:%d laddr:0x%x, EAPC:0x%x:0x%x", ctrl->nr, *laddr,
860 e_addr[1], e_addr[2]);
861 mutex_lock(&ctrl->m_ctrl);
862 list_for_each_safe(pos, next, &ctrl->devs) {
863 sbdev = list_entry(pos, struct slim_device, dev_list);
864 if (memcmp(sbdev->e_addr, e_addr, 6) == 0) {
865 struct slim_driver *sbdrv;
866
867 sbdev->laddr = *laddr;
868 sbdev->reported = true;
869 if (sbdev->dev.driver) {
870 sbdrv = to_slim_driver(sbdev->dev.driver);
871 if (sbdrv->device_up)
872 queue_work(ctrl->wq, &sbdev->wd);
873 }
874 break;
875 }
876 }
877 mutex_unlock(&ctrl->m_ctrl);
878 return 0;
879}
880EXPORT_SYMBOL(slim_assign_laddr);
881
882/*
883 * slim_get_logical_addr: Return the logical address of a slimbus device.
884 * @sb: client handle requesting the adddress.
885 * @e_addr: Elemental address of the device.
886 * @e_len: Length of e_addr
887 * @laddr: output buffer to store the address
888 * context: can sleep
889 * -EINVAL is returned in case of invalid parameters, and -ENXIO is returned if
890 * the device with this elemental address is not found.
891 */
892int slim_get_logical_addr(struct slim_device *sb, const u8 *e_addr,
893 u8 e_len, u8 *laddr)
894{
895 int ret = 0;
896 u8 entry;
897 struct slim_controller *ctrl = sb->ctrl;
898
899 if (!ctrl || !laddr || !e_addr || e_len != 6)
900 return -EINVAL;
901 mutex_lock(&ctrl->m_ctrl);
902 ret = ctrl_getlogical_addr(ctrl, e_addr, e_len, &entry);
903 if (!ret)
904 *laddr = ctrl->addrt[entry].laddr;
905 mutex_unlock(&ctrl->m_ctrl);
906 if (ret == -ENXIO && ctrl->get_laddr) {
907 ret = ctrl->get_laddr(ctrl, e_addr, e_len, laddr);
908 if (!ret)
909 ret = slim_assign_laddr(ctrl, e_addr, e_len, laddr,
910 true);
911 }
912 return ret;
913}
914EXPORT_SYMBOL(slim_get_logical_addr);
915
916static int slim_ele_access_sanity(struct slim_ele_access *msg, int oper,
917 u8 *rbuf, const u8 *wbuf, u8 len)
918{
919 if (!msg || msg->num_bytes > 16 || msg->start_offset + len > 0xC00)
920 return -EINVAL;
921 switch (oper) {
922 case SLIM_MSG_MC_REQUEST_VALUE:
923 case SLIM_MSG_MC_REQUEST_INFORMATION:
924 if (rbuf == NULL)
925 return -EINVAL;
926 return 0;
927 case SLIM_MSG_MC_CHANGE_VALUE:
928 case SLIM_MSG_MC_CLEAR_INFORMATION:
929 if (wbuf == NULL)
930 return -EINVAL;
931 return 0;
932 case SLIM_MSG_MC_REQUEST_CHANGE_VALUE:
933 case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION:
934 if (rbuf == NULL || wbuf == NULL)
935 return -EINVAL;
936 return 0;
937 default:
938 return -EINVAL;
939 }
940}
941
/*
 * Map a 3-bit slice code (0-7) to the number of elements it represents;
 * out-of-range codes yield 0.
 */
static u16 slim_slicecodefromsize(u32 req)
{
	static const u8 codetosize[8] = {1, 2, 3, 4, 6, 8, 12, 16};

	return (req < 8) ? codetosize[req] : 0;
}
951
/*
 * Map a transfer size in bytes (clamped to 1..16) to the slice code used
 * in the element-code field.
 */
static u16 slim_slicesize(u32 code)
{
	static const u8 sizetocode[16] = {
		0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7
	};
	u32 clamped = code;

	if (clamped == 0)
		clamped = 1;
	else if (clamped > 16)
		clamped = 16;
	return sizetocode[clamped - 1];
}
962
963
964/* Message APIs Unicast message APIs used by slimbus slave drivers */
965
966/*
967 * Message API access routines.
968 * @sb: client handle requesting elemental message reads, writes.
969 * @msg: Input structure for start-offset, number of bytes to read.
970 * @rbuf: data buffer to be filled with values read.
971 * @len: data buffer size
972 * @wbuf: data buffer containing value/information to be written
973 * context: can sleep
974 * Returns:
975 * -EINVAL: Invalid parameters
976 * -ETIMEDOUT: If controller could not complete the request. This may happen if
977 * the bus lines are not clocked, controller is not powered-on, slave with
978 * given address is not enumerated/responding.
979 */
980int slim_request_val_element(struct slim_device *sb,
981 struct slim_ele_access *msg, u8 *buf, u8 len)
982{
983 struct slim_controller *ctrl = sb->ctrl;
984
985 if (!ctrl)
986 return -EINVAL;
987 return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_VALUE, buf,
988 NULL, len);
989}
990EXPORT_SYMBOL(slim_request_val_element);
991
992int slim_request_inf_element(struct slim_device *sb,
993 struct slim_ele_access *msg, u8 *buf, u8 len)
994{
995 struct slim_controller *ctrl = sb->ctrl;
996
997 if (!ctrl)
998 return -EINVAL;
999 return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_INFORMATION,
1000 buf, NULL, len);
1001}
1002EXPORT_SYMBOL(slim_request_inf_element);
1003
1004int slim_change_val_element(struct slim_device *sb, struct slim_ele_access *msg,
1005 const u8 *buf, u8 len)
1006{
1007 struct slim_controller *ctrl = sb->ctrl;
1008
1009 if (!ctrl)
1010 return -EINVAL;
1011 return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CHANGE_VALUE, NULL, buf,
1012 len);
1013}
1014EXPORT_SYMBOL(slim_change_val_element);
1015
1016int slim_clear_inf_element(struct slim_device *sb, struct slim_ele_access *msg,
1017 u8 *buf, u8 len)
1018{
1019 struct slim_controller *ctrl = sb->ctrl;
1020
1021 if (!ctrl)
1022 return -EINVAL;
1023 return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CLEAR_INFORMATION, NULL,
1024 buf, len);
1025}
1026EXPORT_SYMBOL(slim_clear_inf_element);
1027
1028int slim_request_change_val_element(struct slim_device *sb,
1029 struct slim_ele_access *msg, u8 *rbuf,
1030 const u8 *wbuf, u8 len)
1031{
1032 struct slim_controller *ctrl = sb->ctrl;
1033
1034 if (!ctrl)
1035 return -EINVAL;
1036 return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_CHANGE_VALUE,
1037 rbuf, wbuf, len);
1038}
1039EXPORT_SYMBOL(slim_request_change_val_element);
1040
1041int slim_request_clear_inf_element(struct slim_device *sb,
1042 struct slim_ele_access *msg, u8 *rbuf,
1043 const u8 *wbuf, u8 len)
1044{
1045 struct slim_controller *ctrl = sb->ctrl;
1046
1047 if (!ctrl)
1048 return -EINVAL;
1049 return slim_xfer_msg(ctrl, sb, msg,
1050 SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION,
1051 rbuf, wbuf, len);
1052}
1053EXPORT_SYMBOL(slim_request_clear_inf_element);
1054
1055/*
1056 * Broadcast message API:
1057 * call this API directly with sbdev = NULL.
1058 * For broadcast reads, make sure that buffers are big-enough to incorporate
1059 * replies from all logical addresses.
1060 * All controllers may not support broadcast
1061 */
1062int slim_xfer_msg(struct slim_controller *ctrl, struct slim_device *sbdev,
1063 struct slim_ele_access *msg, u16 mc, u8 *rbuf,
1064 const u8 *wbuf, u8 len)
1065{
1066 DECLARE_COMPLETION_ONSTACK(complete);
1067 DEFINE_SLIM_LDEST_TXN(txn_stack, mc, len, 6, rbuf, wbuf, sbdev->laddr);
1068 struct slim_msg_txn *txn;
1069 int ret;
1070 u16 sl, cur;
1071
1072 if (msg->comp && rbuf) {
1073 txn = kmalloc(sizeof(struct slim_msg_txn),
1074 GFP_KERNEL);
1075 if (IS_ERR_OR_NULL(txn))
1076 return PTR_ERR(txn);
1077 *txn = txn_stack;
1078 txn->async = true;
1079 txn->comp = msg->comp;
1080 } else {
1081 txn = &txn_stack;
1082 if (rbuf)
1083 txn->comp = &complete;
1084 }
1085
1086 ret = slim_ele_access_sanity(msg, mc, rbuf, wbuf, len);
1087 if (ret)
1088 goto xfer_err;
1089
1090 sl = slim_slicesize(len);
1091 dev_dbg(&ctrl->dev, "SB xfer msg:os:%x, len:%d, MC:%x, sl:%x\n",
1092 msg->start_offset, len, mc, sl);
1093
1094 cur = slim_slicecodefromsize(sl);
1095 txn->ec = ((sl | (1 << 3)) | ((msg->start_offset & 0xFFF) << 4));
1096
1097 if (wbuf)
1098 txn->rl += len;
1099 if (rbuf) {
1100 unsigned long flags;
1101
1102 txn->rl++;
1103 ret = slim_processtxn(ctrl, txn, true);
1104
1105 /* sync read */
1106 if (!ret && !msg->comp) {
1107 ret = wait_for_completion_timeout(&complete, HZ);
1108 if (!ret) {
1109 dev_err(&ctrl->dev, "slimbus Read timed out");
1110 spin_lock_irqsave(&ctrl->txn_lock, flags);
1111 /* Invalidate the transaction */
1112 ctrl->txnt[txn->tid] = NULL;
1113 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
1114 ret = -ETIMEDOUT;
1115 } else
1116 ret = 0;
1117 } else if (ret < 0 && !msg->comp) {
1118 dev_err(&ctrl->dev, "slimbus Read error");
1119 spin_lock_irqsave(&ctrl->txn_lock, flags);
1120 /* Invalidate the transaction */
1121 ctrl->txnt[txn->tid] = NULL;
1122 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
1123 }
1124
1125 } else
1126 ret = slim_processtxn(ctrl, txn, false);
1127xfer_err:
1128 return ret;
1129}
1130EXPORT_SYMBOL(slim_xfer_msg);
1131
1132/*
1133 * User message:
1134 * slim_user_msg: Send user message that is interpreted by destination device
1135 * @sb: Client handle sending the message
1136 * @la: Destination device for this user message
 * @mt: Message Type (Source-referred, or Destination-referred)
1138 * @mc: Message Code
1139 * @msg: Message structure (start offset, number of bytes) to be sent
1140 * @buf: data buffer to be sent
1141 * @len: data buffer size in bytes
1142 */
1143int slim_user_msg(struct slim_device *sb, u8 la, u8 mt, u8 mc,
1144 struct slim_ele_access *msg, u8 *buf, u8 len)
1145{
1146 if (!sb || !sb->ctrl || !msg || mt == SLIM_MSG_MT_CORE)
1147 return -EINVAL;
1148 if (!sb->ctrl->xfer_user_msg)
1149 return -EPROTONOSUPPORT;
1150 return sb->ctrl->xfer_user_msg(sb->ctrl, la, mt, mc, msg, buf, len);
1151}
1152EXPORT_SYMBOL(slim_user_msg);
1153
1154/*
1155 * Queue bulk of message writes:
1156 * slim_bulk_msg_write: Write bulk of messages (e.g. downloading FW)
1157 * @sb: Client handle sending these messages
1158 * @la: Destination device for these messages
1159 * @mt: Message Type
1160 * @mc: Message Code
1161 * @msgs: List of messages to be written in bulk
1162 * @n: Number of messages in the list
1163 * @cb: Callback if client needs this to be non-blocking
1164 * @ctx: Context for this callback
1165 * If supported by controller, this message list will be sent in bulk to the HW
1166 * If the client specifies this to be non-blocking, the callback will be
1167 * called from atomic context.
1168 */
1169int slim_bulk_msg_write(struct slim_device *sb, u8 mt, u8 mc,
1170 struct slim_val_inf msgs[], int n,
1171 int (*comp_cb)(void *ctx, int err), void *ctx)
1172{
Karthikeyan Ramasubramanianf005ce72017-01-25 11:58:17 -07001173 int i, ret = 0;
Sagar Dhariabe37c9c2016-11-28 23:06:58 -07001174
Karthikeyan Ramasubramanianf005ce72017-01-25 11:58:17 -07001175 if (!sb || !sb->ctrl || !msgs || n <= 0)
Sagar Dhariabe37c9c2016-11-28 23:06:58 -07001176 return -EINVAL;
1177 if (!sb->ctrl->xfer_bulk_wr) {
1178 pr_warn("controller does not support bulk WR, serializing");
1179 for (i = 0; i < n; i++) {
1180 struct slim_ele_access ele;
1181
1182 ele.comp = NULL;
1183 ele.start_offset = msgs[i].start_offset;
1184 ele.num_bytes = msgs[i].num_bytes;
1185 ret = slim_xfer_msg(sb->ctrl, sb, &ele, mc,
1186 msgs[i].rbuf, msgs[i].wbuf,
1187 ele.num_bytes);
1188 if (ret)
1189 return ret;
1190 }
1191 return ret;
1192 }
1193 return sb->ctrl->xfer_bulk_wr(sb->ctrl, sb->laddr, mt, mc, msgs, n,
1194 comp_cb, ctx);
1195}
1196EXPORT_SYMBOL(slim_bulk_msg_write);
1197
1198/*
1199 * slim_alloc_mgrports: Allocate port on manager side.
1200 * @sb: device/client handle.
1201 * @req: Port request type.
1202 * @nports: Number of ports requested
1203 * @rh: output buffer to store the port handles
1204 * @hsz: size of buffer storing handles
1205 * context: can sleep
1206 * This port will be typically used by SW. e.g. client driver wants to receive
1207 * some data from audio codec HW using a data channel.
1208 * Port allocated using this API will be used to receive the data.
1209 * If half-duplex ports are requested, two adjacent ports are allocated for
1210 * 1 half-duplex port. So the handle-buffer size should be twice the number
1211 * of half-duplex ports to be allocated.
1212 * -EDQUOT is returned if all ports are in use.
1213 */
int slim_alloc_mgrports(struct slim_device *sb, enum slim_port_req req,
				int nports, u32 *rh, int hsz)
{
	int i, j;
	int ret = -EINVAL;
	int nphysp = nports;	/* number of physical ports to reserve */
	struct slim_controller *ctrl = sb->ctrl;

	if (!rh || !ctrl)
		return -EINVAL;
	/* One half-duplex port consumes two adjacent physical ports */
	if (req == SLIM_REQ_HALF_DUP)
		nphysp *= 2;
	/* Handle buffer must hold one u32 per physical port */
	if (hsz/sizeof(u32) < nphysp)
		return -EINVAL;
	mutex_lock(&ctrl->m_ctrl);

	/* Find a free port (or free contiguous run) satisfying the request */
	for (i = 0; i < ctrl->nports; i++) {
		bool multiok = true;

		if (ctrl->ports[i].state != SLIM_P_FREE)
			continue;
		/* Start half duplex channel at even port */
		if (req == SLIM_REQ_HALF_DUP && (i % 2))
			continue;
		/* Allocate ports contiguously for multi-ch */
		if (ctrl->nports < (i + nphysp)) {
			/* Not enough ports remain for a contiguous run */
			i = ctrl->nports;
			break;
		}
		if (req == SLIM_REQ_MULTI_CH) {
			multiok = true;
			for (j = i; j < i + nphysp; j++) {
				if (ctrl->ports[j].state != SLIM_P_FREE) {
					multiok = false;
					break;
				}
			}
			if (!multiok)
				continue;
		}
		break;
	}
	if (i >= ctrl->nports) {
		ret = -EDQUOT;
		goto alloc_err;
	}
	ret = 0;
	/* Claim ports [i, i + nphysp) and emit a handle for each */
	for (j = i; j < i + nphysp; j++) {
		ctrl->ports[j].state = SLIM_P_UNCFG;
		ctrl->ports[j].req = req;
		/* Half-duplex pair: even port is source, odd port is sink */
		if (req == SLIM_REQ_HALF_DUP && (j % 2))
			ctrl->ports[j].flow = SLIM_SINK;
		else
			ctrl->ports[j].flow = SLIM_SRC;
		if (ctrl->alloc_port)
			ret = ctrl->alloc_port(ctrl, j);
		if (ret) {
			/*
			 * Roll back the state of every port claimed so far.
			 * NOTE(review): ports already handed to alloc_port
			 * are not released via dealloc_port here — confirm
			 * controllers tolerate that on failure.
			 */
			for (; j >= i; j--)
				ctrl->ports[j].state = SLIM_P_FREE;
			goto alloc_err;
		}
		*rh++ = SLIM_PORT_HDL(SLIM_LA_MANAGER, 0, j);
	}
alloc_err:
	mutex_unlock(&ctrl->m_ctrl);
	return ret;
}
EXPORT_SYMBOL(slim_alloc_mgrports);
1282
/*
 * Deallocate the port(s) allocated using the API above.
 * The operation is all-or-nothing: if any handle is invalid or still
 * connected to a channel, ports already freed in this call are rolled back
 * to the un-configured state and an error is returned.
 */
int slim_dealloc_mgrports(struct slim_device *sb, u32 *hdl, int nports)
{
	int i;
	struct slim_controller *ctrl = sb->ctrl;

	if (!ctrl || !hdl)
		return -EINVAL;

	mutex_lock(&ctrl->m_ctrl);

	for (i = 0; i < nports; i++) {
		u8 pn;

		pn = SLIM_HDL_TO_PORT(hdl[i]);

		/* Reject bad port numbers and ports still wired to a channel */
		if (pn >= ctrl->nports || ctrl->ports[pn].state == SLIM_P_CFG) {
			int j, ret;

			if (pn >= ctrl->nports) {
				dev_err(&ctrl->dev, "invalid port number");
				ret = -EINVAL;
			} else {
				dev_err(&ctrl->dev,
					"Can't dealloc connected port:%d", i);
				ret = -EISCONN;
			}
			/* Roll back ports freed earlier in this call */
			for (j = i - 1; j >= 0; j--) {
				pn = SLIM_HDL_TO_PORT(hdl[j]);
				ctrl->ports[pn].state = SLIM_P_UNCFG;
			}
			mutex_unlock(&ctrl->m_ctrl);
			return ret;
		}
		if (ctrl->dealloc_port)
			ctrl->dealloc_port(ctrl, pn);
		ctrl->ports[pn].state = SLIM_P_FREE;
	}
	mutex_unlock(&ctrl->m_ctrl);
	return 0;
}
EXPORT_SYMBOL(slim_dealloc_mgrports);
1325
1326/*
1327 * slim_config_mgrports: Configure manager side ports
1328 * @sb: device/client handle.
1329 * @ph: array of port handles for which this configuration is valid
1330 * @nports: Number of ports in ph
1331 * @cfg: configuration requested for port(s)
1332 * Configure port settings if they are different than the default ones.
1333 * Returns success if the config could be applied. Returns -EISCONN if the
1334 * port is in use
1335 */
1336int slim_config_mgrports(struct slim_device *sb, u32 *ph, int nports,
1337 struct slim_port_cfg *cfg)
1338{
1339 int i;
1340 struct slim_controller *ctrl;
1341
1342 if (!sb || !ph || !nports || !sb->ctrl || !cfg)
1343 return -EINVAL;
1344
1345 ctrl = sb->ctrl;
1346 mutex_lock(&ctrl->sched.m_reconf);
1347 for (i = 0; i < nports; i++) {
1348 u8 pn = SLIM_HDL_TO_PORT(ph[i]);
1349
1350 if (ctrl->ports[pn].state == SLIM_P_CFG)
1351 return -EISCONN;
1352 ctrl->ports[pn].cfg = *cfg;
1353 }
1354 mutex_unlock(&ctrl->sched.m_reconf);
1355 return 0;
1356}
1357EXPORT_SYMBOL(slim_config_mgrports);
1358
1359/*
1360 * slim_get_slaveport: Get slave port handle
1361 * @la: slave device logical address.
1362 * @idx: port index at slave
1363 * @rh: return handle
1364 * @flw: Flow type (source or destination)
1365 * This API only returns a slave port's representation as expected by slimbus
1366 * driver. This port is not managed by the slimbus driver. Caller is expected
1367 * to have visibility of this port since it's a device-port.
1368 */
1369int slim_get_slaveport(u8 la, int idx, u32 *rh, enum slim_port_flow flw)
1370{
1371 if (rh == NULL)
1372 return -EINVAL;
1373 *rh = SLIM_PORT_HDL(la, flw, idx);
1374 return 0;
1375}
1376EXPORT_SYMBOL(slim_get_slaveport);
1377
1378static int connect_port_ch(struct slim_controller *ctrl, u8 ch, u32 ph,
1379 enum slim_port_flow flow)
1380{
1381 int ret;
1382 u8 buf[2];
1383 u32 la = SLIM_HDL_TO_LA(ph);
1384 u8 pn = (u8)SLIM_HDL_TO_PORT(ph);
1385 DEFINE_SLIM_LDEST_TXN(txn, 0, 2, 6, NULL, buf, la);
1386
1387 if (flow == SLIM_SRC)
1388 txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
1389 else
1390 txn.mc = SLIM_MSG_MC_CONNECT_SINK;
1391 buf[0] = pn;
1392 buf[1] = ctrl->chans[ch].chan;
Karthikeyan Ramasubramanian170679c2017-02-27 15:11:56 -07001393 if (la == SLIM_LA_MANAGER) {
Sagar Dhariabe37c9c2016-11-28 23:06:58 -07001394 ctrl->ports[pn].flow = flow;
Karthikeyan Ramasubramanian170679c2017-02-27 15:11:56 -07001395 ctrl->ports[pn].ch = &ctrl->chans[ch].prop;
1396 }
Sagar Dhariabe37c9c2016-11-28 23:06:58 -07001397 ret = slim_processtxn(ctrl, &txn, false);
1398 if (!ret && la == SLIM_LA_MANAGER)
1399 ctrl->ports[pn].state = SLIM_P_CFG;
1400 return ret;
1401}
1402
1403static int disconnect_port_ch(struct slim_controller *ctrl, u32 ph)
1404{
1405 int ret;
1406 u32 la = SLIM_HDL_TO_LA(ph);
1407 u8 pn = (u8)SLIM_HDL_TO_PORT(ph);
1408 DEFINE_SLIM_LDEST_TXN(txn, 0, 1, 5, NULL, &pn, la);
1409
1410 txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
1411 ret = slim_processtxn(ctrl, &txn, false);
1412 if (ret)
1413 return ret;
1414 if (la == SLIM_LA_MANAGER) {
1415 ctrl->ports[pn].state = SLIM_P_UNCFG;
1416 ctrl->ports[pn].cfg.watermark = 0;
1417 ctrl->ports[pn].cfg.port_opts = 0;
1418 ctrl->ports[pn].ch = NULL;
1419 }
1420 return 0;
1421}
1422
1423/*
1424 * slim_connect_src: Connect source port to channel.
1425 * @sb: client handle
1426 * @srch: source handle to be connected to this channel
1427 * @chanh: Channel with which the ports need to be associated with.
1428 * Per slimbus specification, a channel may have 1 source port.
1429 * Channel specified in chanh needs to be allocated first.
1430 * Returns -EALREADY if source is already configured for this channel.
1431 * Returns -ENOTCONN if channel is not allocated
1432 * Returns -EINVAL if invalid direction is specified for non-manager port,
1433 * or if the manager side port number is out of bounds, or in incorrect state
1434 */
1435int slim_connect_src(struct slim_device *sb, u32 srch, u16 chanh)
1436{
1437 struct slim_controller *ctrl = sb->ctrl;
1438 int ret;
1439 u8 chan = SLIM_HDL_TO_CHIDX(chanh);
1440 struct slim_ich *slc = &ctrl->chans[chan];
1441 enum slim_port_flow flow = SLIM_HDL_TO_FLOW(srch);
1442 u8 la = SLIM_HDL_TO_LA(srch);
1443 u8 pn = SLIM_HDL_TO_PORT(srch);
1444
1445 /* manager ports don't have direction when they are allocated */
1446 if (la != SLIM_LA_MANAGER && flow != SLIM_SRC)
1447 return -EINVAL;
1448
1449 mutex_lock(&ctrl->sched.m_reconf);
1450
1451 if (la == SLIM_LA_MANAGER) {
1452 if (pn >= ctrl->nports ||
1453 ctrl->ports[pn].state != SLIM_P_UNCFG) {
1454 ret = -EINVAL;
1455 goto connect_src_err;
1456 }
1457 }
1458
1459 if (slc->state == SLIM_CH_FREE) {
1460 ret = -ENOTCONN;
1461 goto connect_src_err;
1462 }
1463 /*
1464 * Once channel is removed, its ports can be considered disconnected
1465 * So its ports can be reassigned. Source port is zeroed
1466 * when channel is deallocated.
1467 */
1468 if (slc->srch) {
1469 ret = -EALREADY;
1470 goto connect_src_err;
1471 }
Sagar Dhariabe37c9c2016-11-28 23:06:58 -07001472 ret = connect_port_ch(ctrl, chan, srch, SLIM_SRC);
1473
1474 if (!ret)
1475 slc->srch = srch;
1476
1477connect_src_err:
1478 mutex_unlock(&ctrl->sched.m_reconf);
1479 return ret;
1480}
1481EXPORT_SYMBOL(slim_connect_src);
1482
1483/*
1484 * slim_connect_sink: Connect sink port(s) to channel.
1485 * @sb: client handle
1486 * @sinkh: sink handle(s) to be connected to this channel
1487 * @nsink: number of sinks
1488 * @chanh: Channel with which the ports need to be associated with.
1489 * Per slimbus specification, a channel may have multiple sink-ports.
1490 * Channel specified in chanh needs to be allocated first.
1491 * Returns -EALREADY if sink is already configured for this channel.
1492 * Returns -ENOTCONN if channel is not allocated
1493 * Returns -EINVAL if invalid parameters are passed, or invalid direction is
1494 * specified for non-manager port, or if the manager side port number is out of
1495 * bounds, or in incorrect state
1496 */
int slim_connect_sink(struct slim_device *sb, u32 *sinkh, int nsink, u16 chanh)
{
	struct slim_controller *ctrl = sb->ctrl;
	int j;
	int ret = 0;
	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
	struct slim_ich *slc = &ctrl->chans[chan];
	void *new_sinkh = NULL;

	if (!sinkh || !nsink)
		return -EINVAL;

	mutex_lock(&ctrl->sched.m_reconf);

	/*
	 * Once channel is removed, its ports can be considered disconnected
	 * So its ports can be reassigned. Sink ports are freed when channel
	 * is deallocated.
	 */
	if (slc->state == SLIM_CH_FREE) {
		ret = -ENOTCONN;
		goto connect_sink_err;
	}

	/* Validate and connect each requested sink port to the channel */
	for (j = 0; j < nsink; j++) {
		enum slim_port_flow flow = SLIM_HDL_TO_FLOW(sinkh[j]);
		u8 la = SLIM_HDL_TO_LA(sinkh[j]);
		u8 pn = SLIM_HDL_TO_PORT(sinkh[j]);

		/* Non-manager ports must be explicit sinks; manager ports
		 * must be in range and still un-configured.
		 */
		if (la != SLIM_LA_MANAGER && flow != SLIM_SINK)
			ret = -EINVAL;
		else if (la == SLIM_LA_MANAGER &&
				(pn >= ctrl->nports ||
				ctrl->ports[pn].state != SLIM_P_UNCFG))
			ret = -EINVAL;
		else
			ret = connect_port_ch(ctrl, chan, sinkh[j], SLIM_SINK);

		if (ret) {
			/* Undo the connections made earlier in this call */
			for (j = j - 1; j >= 0; j--)
				disconnect_port_ch(ctrl, sinkh[j]);
			goto connect_sink_err;
		}
	}

	/* Grow the channel's sink-handle array to append the new handles */
	new_sinkh = krealloc(slc->sinkh, (sizeof(u32) * (slc->nsink + nsink)),
				GFP_KERNEL);
	if (!new_sinkh) {
		ret = -ENOMEM;
		/* Allocation failed: disconnect everything connected above */
		for (j = 0; j < nsink; j++)
			disconnect_port_ch(ctrl, sinkh[j]);
		goto connect_sink_err;
	}

	slc->sinkh = new_sinkh;
	memcpy(slc->sinkh + slc->nsink, sinkh, (sizeof(u32) * nsink));
	slc->nsink += nsink;

connect_sink_err:
	mutex_unlock(&ctrl->sched.m_reconf);
	return ret;
}
EXPORT_SYMBOL(slim_connect_sink);
1560
1561/*
1562 * slim_disconnect_ports: Disconnect port(s) from channel
1563 * @sb: client handle
1564 * @ph: ports to be disconnected
1565 * @nph: number of ports.
1566 * Disconnects ports from a channel.
1567 */
1568int slim_disconnect_ports(struct slim_device *sb, u32 *ph, int nph)
1569{
1570 struct slim_controller *ctrl = sb->ctrl;
1571 int i;
1572
1573 mutex_lock(&ctrl->sched.m_reconf);
1574
1575 for (i = 0; i < nph; i++)
1576 disconnect_port_ch(ctrl, ph[i]);
1577 mutex_unlock(&ctrl->sched.m_reconf);
1578 return 0;
1579}
1580EXPORT_SYMBOL(slim_disconnect_ports);
1581
1582/*
1583 * slim_port_xfer: Schedule buffer to be transferred/received using port-handle.
1584 * @sb: client handle
1585 * @ph: port-handle
1586 * @iobuf: buffer to be transferred or populated
1587 * @len: buffer size.
1588 * @comp: completion signal to indicate transfer done or error.
1589 * context: can sleep
1590 * Returns number of bytes transferred/received if used synchronously.
1591 * Will return 0 if used asynchronously.
1592 * Client will call slim_port_get_xfer_status to get error and/or number of
1593 * bytes transferred if used asynchronously.
1594 */
1595int slim_port_xfer(struct slim_device *sb, u32 ph, phys_addr_t iobuf, u32 len,
1596 struct completion *comp)
1597{
1598 struct slim_controller *ctrl = sb->ctrl;
1599 u8 pn = SLIM_HDL_TO_PORT(ph);
1600
1601 dev_dbg(&ctrl->dev, "port xfer: num:%d", pn);
1602 return ctrl->port_xfer(ctrl, pn, iobuf, len, comp);
1603}
1604EXPORT_SYMBOL(slim_port_xfer);
1605
1606/*
1607 * slim_port_get_xfer_status: Poll for port transfers, or get transfer status
1608 * after completion is done.
1609 * @sb: client handle
1610 * @ph: port-handle
1611 * @done_buf: return pointer (iobuf from slim_port_xfer) which is processed.
1612 * @done_len: Number of bytes transferred.
 * This can be called when port_xfer completion is signalled.
1614 * The API will return port transfer error (underflow/overflow/disconnect)
1615 * and/or done_len will reflect number of bytes transferred. Note that
1616 * done_len may be valid even if port error (overflow/underflow) has happened.
1617 * e.g. If the transfer was scheduled with a few bytes to be transferred and
1618 * client has not supplied more data to be transferred, done_len will indicate
1619 * number of bytes transferred with underflow error. To avoid frequent underflow
1620 * errors, multiple transfers can be queued (e.g. ping-pong buffers) so that
1621 * channel has data to be transferred even if client is not ready to transfer
1622 * data all the time. done_buf will indicate address of the last buffer
1623 * processed from the multiple transfers.
1624 */
1625enum slim_port_err slim_port_get_xfer_status(struct slim_device *sb, u32 ph,
1626 phys_addr_t *done_buf, u32 *done_len)
1627{
1628 struct slim_controller *ctrl = sb->ctrl;
1629 u8 pn = SLIM_HDL_TO_PORT(ph);
1630 u32 la = SLIM_HDL_TO_LA(ph);
1631 enum slim_port_err err;
1632
1633 dev_dbg(&ctrl->dev, "get status port num:%d", pn);
1634 /*
1635 * Framework only has insight into ports managed by ported device
1636 * used by the manager and not slave
1637 */
1638 if (la != SLIM_LA_MANAGER) {
1639 if (done_buf)
1640 *done_buf = 0;
1641 if (done_len)
1642 *done_len = 0;
1643 return SLIM_P_NOT_OWNED;
1644 }
1645 err = ctrl->port_xfer_status(ctrl, pn, done_buf, done_len);
1646 if (err == SLIM_P_INPROGRESS)
1647 err = ctrl->ports[pn].err;
1648 return err;
1649}
1650EXPORT_SYMBOL(slim_port_get_xfer_status);
1651
/*
 * slim_add_ch: Insert a channel into the controller's scheduling array
 * (chc1 for coefficient-1 channels, chc3 for coefficient-3), keeping the
 * array sorted by root exponent ascending, then segment length descending,
 * and account for the slots the channel will consume.
 */
static void slim_add_ch(struct slim_controller *ctrl, struct slim_ich *slc)
{
	struct slim_ich **arr;
	int i, j;
	int *len;
	int sl = slc->seglen << slc->rootexp;	/* slot cost of this channel */
	/* Channel is already active and other end is transmitting data */
	if (slc->state >= SLIM_CH_ACTIVE)
		return;
	if (slc->coeff == SLIM_COEFF_1) {
		arr = ctrl->sched.chc1;
		len = &ctrl->sched.num_cc1;
	} else {
		arr = ctrl->sched.chc3;
		len = &ctrl->sched.num_cc3;
		/* coefficient-3 channels cost 3x the slots */
		sl *= 3;
	}

	*len += 1;

	/* Insert the channel based on rootexp and seglen */
	for (i = 0; i < *len - 1; i++) {
		/*
		 * Primary key: exp low to high.
		 * Secondary key: seglen: high to low
		 */
		if ((slc->rootexp > arr[i]->rootexp) ||
			((slc->rootexp == arr[i]->rootexp) &&
			(slc->seglen < arr[i]->seglen)))
			continue;
		else
			break;
	}
	/* Shift the tail right to open position i, then place the channel */
	for (j = *len - 1; j > i; j--)
		arr[j] = arr[j - 1];
	arr[i] = slc;
	/* Track slot usage unless the controller does its own accounting */
	if (!ctrl->allocbw)
		ctrl->sched.usedslots += sl;
}
1691
/*
 * slim_remove_ch: Remove a channel from the controller's scheduling array
 * and reset its definition and port bookkeeping. Manager-owned ports are
 * marked un-configured; slave-owned ports are left for the client to
 * disconnect. Returns -EXFULL if the channel is not in the array.
 */
static int slim_remove_ch(struct slim_controller *ctrl, struct slim_ich *slc)
{
	struct slim_ich **arr;
	int i;
	u32 la, ph;
	int *len;

	/* Pick the scheduling array matching the channel's coefficient */
	if (slc->coeff == SLIM_COEFF_1) {
		arr = ctrl->sched.chc1;
		len = &ctrl->sched.num_cc1;
	} else {
		arr = ctrl->sched.chc3;
		len = &ctrl->sched.num_cc3;
	}

	for (i = 0; i < *len; i++) {
		if (arr[i] == slc)
			break;
	}
	if (i >= *len)
		return -EXFULL;
	/* Close the gap left by the removed entry */
	for (; i < *len - 1; i++)
		arr[i] = arr[i + 1];
	*len -= 1;
	arr[*len] = NULL;

	/* Channel drops back to "allocated but not defined" */
	slc->state = SLIM_CH_ALLOCATED;
	slc->def = 0;
	slc->newintr = 0;
	slc->newoff = 0;
	for (i = 0; i < slc->nsink; i++) {
		ph = slc->sinkh[i];
		la = SLIM_HDL_TO_LA(ph);
		/*
		 * For ports managed by manager's ported device, no need to send
		 * disconnect. It is client's responsibility to call disconnect
		 * on ports owned by the slave device
		 */
		if (la == SLIM_LA_MANAGER) {
			ctrl->ports[SLIM_HDL_TO_PORT(ph)].state = SLIM_P_UNCFG;
			ctrl->ports[SLIM_HDL_TO_PORT(ph)].ch = NULL;
		}
	}

	/* Reset the manager-owned source port, if any */
	ph = slc->srch;
	la = SLIM_HDL_TO_LA(ph);
	if (la == SLIM_LA_MANAGER) {
		u8 pn = SLIM_HDL_TO_PORT(ph);

		ctrl->ports[pn].state = SLIM_P_UNCFG;
		ctrl->ports[pn].cfg.watermark = 0;
		ctrl->ports[pn].cfg.port_opts = 0;
	}

	/* Release the sink-handle array and clear the port records */
	kfree(slc->sinkh);
	slc->sinkh = NULL;
	slc->srch = 0;
	slc->nsink = 0;
	return 0;
}
1752
/*
 * slim_calc_prrate: Compute the presence-rate byte for a channel.
 * Per the encoding below: low 3 bits carry the exponent, bit 3 is set for
 * the 11.025 kHz rate family, bit 4 is set for a coefficient-1 4 kHz rate,
 * and bit 7 is set when the requested rate is represented exactly.
 * Returns 0 for async protocols (no presence rate) or when the exponent
 * exceeds 7 and the rate cannot be encoded.
 */
static u32 slim_calc_prrate(struct slim_controller *ctrl, struct slim_ch *prop)
{
	u32 rate = 0, rate4k = 0, rate11k = 0;
	u32 exp = 0;
	u32 pr = 0;
	bool exact = true;
	bool done = false;
	enum slim_ch_rate ratefam;

	/* Async protocols carry no presence rate */
	if (prop->prot >= SLIM_ASYNC_SMPLX)
		return 0;
	if (prop->baser == SLIM_RATE_1HZ) {
		/* Try to express a raw Hz rate in the 4 kHz family first */
		rate = prop->ratem / 4000;
		rate4k = rate;
		if (rate * 4000 == prop->ratem)
			ratefam = SLIM_RATE_4000HZ;
		else {
			/* Then the 11.025 kHz family */
			rate = prop->ratem / 11025;
			rate11k = rate;
			if (rate * 11025 == prop->ratem)
				ratefam = SLIM_RATE_11025HZ;
			else
				ratefam = SLIM_RATE_1HZ;
		}
	} else {
		ratefam = prop->baser;
		rate = prop->ratem;
	}
	if (ratefam == SLIM_RATE_1HZ) {
		/* Neither family matched exactly: round up to the nearer one */
		exact = false;
		if ((rate4k + 1) * 4000 < (rate11k + 1) * 11025) {
			rate = rate4k + 1;
			ratefam = SLIM_RATE_4000HZ;
		} else {
			rate = rate11k + 1;
			ratefam = SLIM_RATE_11025HZ;
		}
	}
	/* convert rate to coeff-exp */
	while (!done) {
		while ((rate & 0x1) != 0x1) {
			rate >>= 1;
			exp++;
		}
		if (rate > 3) {
			/* roundup if not exact */
			rate++;
			exact = false;
		} else
			done = true;
	}
	if (ratefam == SLIM_RATE_4000HZ) {
		if (rate == 1)
			pr = 0x10;
		else {
			pr = 0;
			exp++;
		}
	} else {
		pr = 8;
		exp++;
	}
	/* Exponent only fits in 3 bits; otherwise the rate is unencodable */
	if (exp <= 7) {
		pr |= exp;
		if (exact)
			pr |= 0x80;
	} else
		pr = 0;
	return pr;
}
1823
/*
 * slim_nextdefine_ch: Derive a channel's scheduling parameters (presence
 * rate, root exponent, segment length, coefficient) from its requested
 * rate and protocol before the channel is scheduled.
 * Returns -EDQUOT if the channel rate exceeds the superframe capacity,
 * -EPROTONOSUPPORT if hard-isochronous is requested for an inexact rate,
 * and -EIO if the computed exponent is out of range for the coefficient.
 */
static int slim_nextdefine_ch(struct slim_device *sb, u8 chan)
{
	struct slim_controller *ctrl = sb->ctrl;
	u32 chrate = 0;
	u32 exp = 0;
	u32 coeff = 0;
	bool exact = true;
	bool done = false;
	int ret = 0;
	struct slim_ich *slc = &ctrl->chans[chan];
	struct slim_ch *prop = &slc->prop;

	slc->prrate = slim_calc_prrate(ctrl, prop);
	dev_dbg(&ctrl->dev, "ch:%d, chan PR rate:%x\n", chan, slc->prrate);
	/* Absolute channel rate in Hz */
	if (prop->baser == SLIM_RATE_4000HZ)
		chrate = 4000 * prop->ratem;
	else if (prop->baser == SLIM_RATE_11025HZ)
		chrate = 11025 * prop->ratem;
	else
		chrate = prop->ratem;
	/* max allowed sample freq = 768 seg/frame */
	if (chrate > 3600000)
		return -EDQUOT;
	/* Compute the rate coefficient relative to the active framer */
	if (prop->baser == SLIM_RATE_4000HZ &&
			ctrl->a_framer->superfreq == 4000)
		coeff = prop->ratem;
	else if (prop->baser == SLIM_RATE_11025HZ &&
			ctrl->a_framer->superfreq == 3675)
		coeff = 3 * prop->ratem;
	else {
		u32 tempr = 0;

		tempr = chrate * SLIM_CL_PER_SUPERFRAME_DIV8;
		coeff = tempr / ctrl->a_framer->rootfreq;
		if (coeff * ctrl->a_framer->rootfreq != tempr) {
			/* Round up; the rate cannot be represented exactly */
			coeff++;
			exact = false;
		}
	}

	/* convert coeff to coeff-exponent */
	exp = 0;
	while (!done) {
		while ((coeff & 0x1) != 0x1) {
			coeff >>= 1;
			exp++;
		}
		if (coeff > 3) {
			coeff++;
			exact = false;
		} else
			done = true;
	}
	/* Hard-iso requires the rate to be exact; auto-iso degrades to push */
	if (prop->prot == SLIM_HARD_ISO && !exact)
		return -EPROTONOSUPPORT;
	else if (prop->prot == SLIM_AUTO_ISO) {
		if (exact)
			prop->prot = SLIM_HARD_ISO;
		else
			prop->prot = SLIM_PUSH;
	}
	slc->rootexp = exp;
	slc->seglen = prop->sampleszbits/SLIM_CL_PER_SL;
	/* Non-hard-iso protocols carry one extra slot per segment */
	if (prop->prot != SLIM_HARD_ISO)
		slc->seglen++;
	/* Extended/async protocols carry one more */
	if (prop->prot >= SLIM_EXT_SMPLX)
		slc->seglen++;
	/* convert coeff to enum */
	if (coeff == 1) {
		if (exp > 9)
			ret = -EIO;
		coeff = SLIM_COEFF_1;
	} else {
		if (exp > 8)
			ret = -EIO;
		coeff = SLIM_COEFF_3;
	}
	slc->coeff = coeff;

	return ret;
}
1905
1906/*
1907 * slim_alloc_ch: Allocate a slimbus channel and return its handle.
1908 * @sb: client handle.
1909 * @chanh: return channel handle
1910 * Slimbus channels are limited to 256 per specification.
1911 * -EXFULL is returned if all channels are in use.
1912 * Although slimbus specification supports 256 channels, a controller may not
1913 * support that many channels.
1914 */
1915int slim_alloc_ch(struct slim_device *sb, u16 *chanh)
1916{
1917 struct slim_controller *ctrl = sb->ctrl;
1918 u16 i;
1919
1920 if (!ctrl)
1921 return -EINVAL;
1922 mutex_lock(&ctrl->sched.m_reconf);
1923 for (i = 0; i < ctrl->nchans; i++) {
1924 if (ctrl->chans[i].state == SLIM_CH_FREE)
1925 break;
1926 }
1927 if (i >= ctrl->nchans) {
1928 mutex_unlock(&ctrl->sched.m_reconf);
1929 return -EXFULL;
1930 }
1931 *chanh = i;
1932 ctrl->chans[i].nextgrp = 0;
1933 ctrl->chans[i].state = SLIM_CH_ALLOCATED;
1934 ctrl->chans[i].chan = (u8)(ctrl->reserved + i);
1935
1936 mutex_unlock(&ctrl->sched.m_reconf);
1937 return 0;
1938}
1939EXPORT_SYMBOL(slim_alloc_ch);
1940
1941/*
1942 * slim_query_ch: Get reference-counted handle for a channel number. Every
1943 * channel is reference counted by upto one as producer and the others as
1944 * consumer)
1945 * @sb: client handle
1946 * @chan: slimbus channel number
1947 * @chanh: return channel handle
 * If the requested channel number is not in use, it is allocated, and its
 * reference count is set to one. If the channel was already allocated, this API
1950 * will return handle to that channel and reference count is incremented.
1951 * -EXFULL is returned if all channels are in use
1952 */
int slim_query_ch(struct slim_device *sb, u8 ch, u16 *chanh)
{
	struct slim_controller *ctrl = sb->ctrl;
	u16 i, j;
	int ret = 0;

	if (!ctrl || !chanh)
		return -EINVAL;
	mutex_lock(&ctrl->sched.m_reconf);
	/* start with modulo number */
	i = ch % ctrl->nchans;

	/* Pass 1: find an entry already holding this channel number */
	for (j = 0; j < ctrl->nchans; j++) {
		if (ctrl->chans[i].chan == ch) {
			*chanh = i;
			ctrl->chans[i].ref++;
			/* Revive a freed entry that still holds this number */
			if (ctrl->chans[i].state == SLIM_CH_FREE)
				ctrl->chans[i].state = SLIM_CH_ALLOCATED;
			goto query_out;
		}
		i = (i + 1) % ctrl->nchans;
	}

	/* Channel not in table yet */
	ret = -EXFULL;
	/* Pass 2: claim the first free entry for this channel number */
	for (j = 0; j < ctrl->nchans; j++) {
		if (ctrl->chans[i].state == SLIM_CH_FREE) {
			ctrl->chans[i].state =
				SLIM_CH_ALLOCATED;
			*chanh = i;
			ctrl->chans[i].ref++;
			ctrl->chans[i].chan = ch;
			ctrl->chans[i].nextgrp = 0;
			ret = 0;
			break;
		}
		i = (i + 1) % ctrl->nchans;
	}
query_out:
	mutex_unlock(&ctrl->sched.m_reconf);
	dev_dbg(&ctrl->dev, "query ch:%d,hdl:%d,ref:%d,ret:%d",
				ch, i, ctrl->chans[i].ref, ret);
	return ret;
}
EXPORT_SYMBOL(slim_query_ch);
1998
1999/*
2000 * slim_dealloc_ch: Deallocate channel allocated using the API above
2001 * -EISCONN is returned if the channel is tried to be deallocated without
2002 * being removed first.
2003 * -ENOTCONN is returned if deallocation is tried on a channel that's not
2004 * allocated.
2005 */
2006int slim_dealloc_ch(struct slim_device *sb, u16 chanh)
2007{
2008 struct slim_controller *ctrl = sb->ctrl;
2009 u8 chan = SLIM_HDL_TO_CHIDX(chanh);
2010 struct slim_ich *slc = &ctrl->chans[chan];
2011
2012 if (!ctrl)
2013 return -EINVAL;
2014
2015 mutex_lock(&ctrl->sched.m_reconf);
2016 if (slc->state == SLIM_CH_FREE) {
2017 mutex_unlock(&ctrl->sched.m_reconf);
2018 return -ENOTCONN;
2019 }
2020 if (slc->ref > 1) {
2021 slc->ref--;
2022 mutex_unlock(&ctrl->sched.m_reconf);
2023 dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
2024 slc->chan, chanh, slc->ref);
2025 return 0;
2026 }
2027 if (slc->state >= SLIM_CH_PENDING_ACTIVE) {
2028 dev_err(&ctrl->dev, "Channel:%d should be removed first", chan);
2029 mutex_unlock(&ctrl->sched.m_reconf);
2030 return -EISCONN;
2031 }
2032 slc->ref--;
2033 slc->state = SLIM_CH_FREE;
2034 mutex_unlock(&ctrl->sched.m_reconf);
2035 dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
2036 slc->chan, chanh, slc->ref);
2037 return 0;
2038}
2039EXPORT_SYMBOL(slim_dealloc_ch);
2040
2041/*
2042 * slim_get_ch_state: Channel state.
2043 * This API returns the channel's state (active, suspended, inactive etc)
2044 */
2045enum slim_ch_state slim_get_ch_state(struct slim_device *sb, u16 chanh)
2046{
2047 u8 chan = SLIM_HDL_TO_CHIDX(chanh);
2048 struct slim_ich *slc = &sb->ctrl->chans[chan];
2049
2050 return slc->state;
2051}
2052EXPORT_SYMBOL(slim_get_ch_state);
2053
2054/*
2055 * slim_define_ch: Define a channel.This API defines channel parameters for a
2056 * given channel.
2057 * @sb: client handle.
2058 * @prop: slim_ch structure with channel parameters desired to be used.
2059 * @chanh: list of channels to be defined.
2060 * @nchan: number of channels in a group (1 if grp is false)
2061 * @grp: Are the channels grouped
2062 * @grph: return group handle if grouping of channels is desired.
2063 * Channels can be grouped if multiple channels use same parameters
2064 * (e.g. 5.1 audio has 6 channels with same parameters. They will all be grouped
2065 * and given 1 handle for simplicity and avoid repeatedly calling the API)
2066 * -EISCONN is returned if channel is already used with different parameters.
2067 * -ENXIO is returned if the channel is not yet allocated.
2068 */
int slim_define_ch(struct slim_device *sb, struct slim_ch *prop, u16 *chanh,
			u8 nchan, bool grp, u16 *grph)
{
	struct slim_controller *ctrl = sb->ctrl;
	int i, ret = 0;

	if (!ctrl || !chanh || !prop || !nchan)
		return -EINVAL;
	mutex_lock(&ctrl->sched.m_reconf);
	for (i = 0; i < nchan; i++) {
		u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
		struct slim_ich *slc = &ctrl->chans[chan];

		dev_dbg(&ctrl->dev, "define_ch: ch:%d, state:%d", chan,
				(int)ctrl->chans[chan].state);
		/* Channel must have been allocated/queried first */
		if (slc->state < SLIM_CH_ALLOCATED) {
			ret = -ENXIO;
			goto err_define_ch;
		}
		/*
		 * A shared (ref >= 2), already-defined channel may only be
		 * re-defined with identical rate parameters.
		 */
		if (slc->state >= SLIM_CH_DEFINED && slc->ref >= 2) {
			if (prop->ratem != slc->prop.ratem ||
			prop->sampleszbits != slc->prop.sampleszbits ||
			prop->baser != slc->prop.baser) {
				ret = -EISCONN;
				goto err_define_ch;
			}
		} else if (slc->state > SLIM_CH_DEFINED) {
			/* Channel is already in use beyond definition stage */
			ret = -EISCONN;
			goto err_define_ch;
		} else {
			ctrl->chans[chan].prop = *prop;
			ret = slim_nextdefine_ch(sb, chan);
			if (ret)
				goto err_define_ch;
		}
		/* Link group members; mark the first and last channel */
		if (i < (nchan - 1))
			ctrl->chans[chan].nextgrp = chanh[i + 1];
		if (i == 0)
			ctrl->chans[chan].nextgrp |= SLIM_START_GRP;
		if (i == (nchan - 1))
			ctrl->chans[chan].nextgrp |= SLIM_END_GRP;
	}

	/* Group handle packs the member count and the first channel index */
	if (grp)
		*grph = ((nchan << 8) | SLIM_HDL_TO_CHIDX(chanh[0]));
	for (i = 0; i < nchan; i++) {
		u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
		struct slim_ich *slc = &ctrl->chans[chan];

		if (slc->state == SLIM_CH_ALLOCATED)
			slc->state = SLIM_CH_DEFINED;
	}
err_define_ch:
	dev_dbg(&ctrl->dev, "define_ch: ch:%d, ret:%d", *chanh, ret);
	mutex_unlock(&ctrl->sched.m_reconf);
	return ret;
}
EXPORT_SYMBOL(slim_define_ch);
2127
/*
 * getsubfrmcoding: Compute the superframe sub-frame coding value for the
 * requested control-space width (*ctrlw) and subframe length (*subfrml),
 * both in slots. The requested values are normalized to the nearest
 * supported widths (written back through the pointers), and *msgsl is set
 * to the slots per superframe left for messaging after framing and guide
 * slots are subtracted. When the control width equals the subframe length,
 * both are normalized to 8 and code 0 is returned.
 */
static u32 getsubfrmcoding(u32 *ctrlw, u32 *subfrml, u32 *msgsl)
{
	u32 code = 0;

	/* Control space covers the entire subframe */
	if (*ctrlw == *subfrml) {
		*ctrlw = 8;
		*subfrml = 8;
		*msgsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME
				- SLIM_GDE_SLOTS_PER_SUPERFRAME;
		return 0;
	}
	/* Subframe length selects the low code bits and base message slots */
	if (*subfrml == 6) {
		code = 0;
		*msgsl = 256;
	} else if (*subfrml == 8) {
		code = 1;
		*msgsl = 192;
	} else if (*subfrml == 24) {
		code = 2;
		*msgsl = 64;
	} else { /* 32 */
		code = 3;
		*msgsl = 48;
	}

	/* Fold the (normalized) control width into the coding */
	if (*ctrlw < 8) {
		if (*ctrlw >= 6) {
			*ctrlw = 6;
			code |= 0x14;
		} else {
			if (*ctrlw == 5)
				*ctrlw = 4;
			code |= (*ctrlw << 2);
		}
	} else {
		code -= 2;
		if (*ctrlw >= 24) {
			*ctrlw = 24;
			code |= 0x1e;
		} else if (*ctrlw >= 16) {
			*ctrlw = 16;
			code |= 0x1c;
		} else if (*ctrlw >= 12) {
			*ctrlw = 12;
			code |= 0x1a;
		} else {
			*ctrlw = 8;
			code |= 0x18;
		}
	}

	/* Message slots scale with control width, minus framing overhead */
	*msgsl = (*msgsl * *ctrlw) - SLIM_FRM_SLOTS_PER_SUPERFRAME -
				SLIM_GDE_SLOTS_PER_SUPERFRAME;
	return code;
}
2183
2184static void shiftsegoffsets(struct slim_controller *ctrl, struct slim_ich **ach,
2185 int sz, u32 shft)
2186{
2187 int i;
2188 u32 oldoff;
2189
2190 for (i = 0; i < sz; i++) {
2191 struct slim_ich *slc;
2192
2193 if (ach[i] == NULL)
2194 continue;
2195 slc = ach[i];
2196 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2197 continue;
2198 oldoff = slc->newoff;
2199 slc->newoff += shft;
2200 /* seg. offset must be <= interval */
2201 if (slc->newoff >= slc->newintr)
2202 slc->newoff -= slc->newintr;
2203 }
2204}
2205
/*
 * slim_sched_4k_coeff1_chans: Place all coefficient-1 (4k family) channels
 * whose (shifted) root exponent matches the interval currently being
 * scheduled.
 * @ctrl: controller whose channel schedule is being computed
 * @slc: in/out cursor into ctrl->sched.chc1; advanced past every channel
 *	 placed by this call
 * @coeff: in/out index corresponding to @slc
 * @opensl1: two-entry open-slot accounting for the halves of the interval
 * @expshft: clock-gear dependent shift added to each channel's rootexp
 * @curintr: interval (in slots) currently being allocated
 * @curmaxsl: slots available per half interval (curintr / 2)
 * @curexp: exponent corresponding to @curintr
 * @finalexp: largest root exponent among all coeff-1 channels
 * Returns 0 on success, -EXFULL when a segment cannot fit, -EINVAL on
 * NULL pointer arguments.
 */
static inline int slim_sched_4k_coeff1_chans(struct slim_controller *ctrl,
		struct slim_ich **slc, int *coeff, int *opensl1,
		u32 expshft, u32 curintr, u32 curmaxsl,
		int curexp, int finalexp)
{
	int coeff1;
	struct slim_ich *slc1;

	if (unlikely(!coeff || !slc || !ctrl || !opensl1))
		return -EINVAL;

	coeff1 = *coeff;
	slc1 = *slc;
	while ((coeff1 < ctrl->sched.num_cc1) &&
	       (curexp == (int)(slc1->rootexp + expshft))) {
		/* channels being removed are skipped, not scheduled */
		if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
			coeff1++;
			slc1 = ctrl->sched.chc1[coeff1];
			continue;
		}
		/*
		 * Prefer the second half-bucket when it has at least as much
		 * room, or when this is the deepest interval and the first
		 * bucket is still untouched.
		 */
		if (opensl1[1] >= opensl1[0] ||
			(finalexp == (int)slc1->rootexp &&
			 curintr <= 24 && opensl1[0] == curmaxsl)) {
			opensl1[1] -= slc1->seglen;
			slc1->newoff = curmaxsl + opensl1[1];
			/* overflow of bucket 1 borrows from bucket 0 */
			if (opensl1[1] < 0 && opensl1[0] == curmaxsl) {
				opensl1[0] += opensl1[1];
				opensl1[1] = 0;
				if (opensl1[0] < 0) {
					dev_dbg(&ctrl->dev,
						"reconfig failed:%d\n",
						__LINE__);
					return -EXFULL;
				}
			}
		} else {
			if (slc1->seglen > opensl1[0]) {
				dev_dbg(&ctrl->dev,
					"reconfig failed:%d\n", __LINE__);
				return -EXFULL;
			}
			slc1->newoff = opensl1[0] - slc1->seglen;
			opensl1[0] = slc1->newoff;
		}
		slc1->newintr = curintr;
		coeff1++;
		/*
		 * NOTE(review): when coeff1 reaches num_cc1 this load reads
		 * one entry past the populated part of chc1 before the loop
		 * condition stops — presumably the array is sized to
		 * tolerate this; confirm against chc1 allocation.
		 */
		slc1 = ctrl->sched.chc1[coeff1];
	}
	*coeff = coeff1;
	*slc = slc1;
	return 0;
}
2258
/*
 * slim_sched_chans: Compute the TDM segment offset/interval for every
 * active data channel at the given clock gear.
 * @sb: client whose reconfiguration triggered this pass
 * @clkgear: clock gear to schedule at
 * @ctrlw: out: control channel width (slots)
 * @subfrml: out: subframe length (slots)
 * Channels are grouped by rate-coefficient family: chc1 holds the 4k
 * (coefficient 1) family, chc3 the 12k (coefficient 3) family.  When any
 * coeff-3 channel exists the coeff-3 family is scheduled first and coeff-1
 * channels are interleaved into its leftover slots.  Only 'new*' fields of
 * each channel are written; nothing is committed to hardware here.
 * Returns 0 on success, -EXFULL if the demand cannot fit at this gear.
 */
static int slim_sched_chans(struct slim_device *sb, u32 clkgear,
			u32 *ctrlw, u32 *subfrml)
{
	int coeff1, coeff3;
	enum slim_ch_coeff bias;
	struct slim_controller *ctrl = sb->ctrl;
	int last1 = ctrl->sched.num_cc1 - 1;
	int last3 = ctrl->sched.num_cc3 - 1;

	/*
	 * Find first channels with coeff 1 & 3 as starting points for
	 * scheduling
	 */
	for (coeff3 = 0; coeff3 < ctrl->sched.num_cc3; coeff3++) {
		struct slim_ich *slc = ctrl->sched.chc3[coeff3];

		if (slc->state == SLIM_CH_PENDING_REMOVAL)
			continue;
		else
			break;
	}
	for (coeff1 = 0; coeff1 < ctrl->sched.num_cc1; coeff1++) {
		struct slim_ich *slc = ctrl->sched.chc1[coeff1];

		if (slc->state == SLIM_CH_PENDING_REMOVAL)
			continue;
		else
			break;
	}
	/* nothing left to schedule: default 8/8 subframe coding */
	if (coeff3 == ctrl->sched.num_cc3 && coeff1 == ctrl->sched.num_cc1) {
		*ctrlw = 8;
		*subfrml = 8;
		return 0;
	} else if (coeff3 == ctrl->sched.num_cc3)
		bias = SLIM_COEFF_1;
	else
		bias = SLIM_COEFF_3;

	/*
	 * Find last chan in coeff1, 3 list, we will use to know when we
	 * have done scheduling all coeff1 channels
	 */
	while (last1 >= 0) {
		if (ctrl->sched.chc1[last1] != NULL &&
			(ctrl->sched.chc1[last1])->state !=
			SLIM_CH_PENDING_REMOVAL)
			break;
		last1--;
	}
	while (last3 >= 0) {
		if (ctrl->sched.chc3[last3] != NULL &&
			(ctrl->sched.chc3[last3])->state !=
			SLIM_CH_PENDING_REMOVAL)
			break;
		last3--;
	}

	if (bias == SLIM_COEFF_1) {
		/* only 4k-family channels exist */
		struct slim_ich *slc1 = ctrl->sched.chc1[coeff1];
		u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
		int curexp, finalexp;
		u32 curintr, curmaxsl;
		int opensl1[2];
		int maxctrlw1;
		int ret;

		finalexp = (ctrl->sched.chc1[last1])->rootexp;
		curexp = (int)expshft - 1;

		curintr = (SLIM_MAX_INTR_COEFF_1 * 2) >> (curexp + 1);
		curmaxsl = curintr >> 1;
		opensl1[0] = opensl1[1] = curmaxsl;

		/* halve the interval each pass until all channels placed */
		while ((coeff1 < ctrl->sched.num_cc1) || (curintr > 24)) {
			curintr >>= 1;
			curmaxsl >>= 1;

			/* update 4K family open slot records */
			if (opensl1[1] < opensl1[0])
				opensl1[1] -= curmaxsl;
			else
				opensl1[1] = opensl1[0] - curmaxsl;
			opensl1[0] = curmaxsl;
			if (opensl1[1] < 0) {
				opensl1[0] += opensl1[1];
				opensl1[1] = 0;
			}
			if (opensl1[0] <= 0) {
				dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
						__LINE__);
				return -EXFULL;
			}
			curexp++;
			/* schedule 4k family channels */
			ret = slim_sched_4k_coeff1_chans(ctrl, &slc1, &coeff1,
					opensl1, expshft, curintr, curmaxsl,
					curexp, finalexp);
			if (ret)
				return ret;
		}
		/* Leave some slots for messaging space */
		if (opensl1[1] <= 0 && opensl1[0] <= 0)
			return -EXFULL;
		/* make bucket 0 the larger one; shift offsets to match */
		if (opensl1[1] > opensl1[0]) {
			int temp = opensl1[0];

			opensl1[0] = opensl1[1];
			opensl1[1] = temp;
			shiftsegoffsets(ctrl, ctrl->sched.chc1,
					ctrl->sched.num_cc1, curmaxsl);
		}
		/* choose subframe mode to maximize bw */
		maxctrlw1 = opensl1[0];
		if (opensl1[0] == curmaxsl)
			maxctrlw1 += opensl1[1];
		if (curintr >= 24) {
			*subfrml = 24;
			*ctrlw = maxctrlw1;
		} else if (curintr == 12) {
			if (maxctrlw1 > opensl1[1] * 4) {
				*subfrml = 24;
				*ctrlw = maxctrlw1;
			} else {
				*subfrml = 6;
				*ctrlw = opensl1[1];
			}
		} else {
			*subfrml = 6;
			*ctrlw = maxctrlw1;
		}
	} else {
		/* 12k family present: schedule coeff-3 first, fit coeff-1
		 * into the remaining slots of each third of the interval
		 */
		struct slim_ich *slc1 = NULL;
		struct slim_ich *slc3 = ctrl->sched.chc3[coeff3];
		u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
		int curexp, finalexp, exp1;
		u32 curintr, curmaxsl;
		int opensl3[2];
		int opensl1[6];
		bool opensl1valid = false;
		int maxctrlw1, maxctrlw3, i;

		finalexp = (ctrl->sched.chc3[last3])->rootexp;
		if (last1 >= 0) {
			slc1 = ctrl->sched.chc1[coeff1];
			exp1 = (ctrl->sched.chc1[last1])->rootexp;
			if (exp1 > finalexp)
				finalexp = exp1;
		}
		curexp = (int)expshft - 1;

		curintr = (SLIM_MAX_INTR_COEFF_3 * 2) >> (curexp + 1);
		curmaxsl = curintr >> 1;
		opensl3[0] = opensl3[1] = curmaxsl;

		while (coeff1 < ctrl->sched.num_cc1 ||
			coeff3 < ctrl->sched.num_cc3 ||
			curintr > 32) {
			curintr >>= 1;
			curmaxsl >>= 1;

			/* update 12k family open slot records */
			if (opensl3[1] < opensl3[0])
				opensl3[1] -= curmaxsl;
			else
				opensl3[1] = opensl3[0] - curmaxsl;
			opensl3[0] = curmaxsl;
			if (opensl3[1] < 0) {
				opensl3[0] += opensl3[1];
				opensl3[1] = 0;
			}
			if (opensl3[0] <= 0) {
				dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
						__LINE__);
				return -EXFULL;
			}
			curexp++;

			/* schedule 12k family channels */
			while (coeff3 < ctrl->sched.num_cc3 &&
				curexp == (int)slc3->rootexp + expshft) {
				if (slc3->state == SLIM_CH_PENDING_REMOVAL) {
					coeff3++;
					slc3 = ctrl->sched.chc3[coeff3];
					continue;
				}
				opensl1valid = false;
				if (opensl3[1] >= opensl3[0] ||
					(finalexp == (int)slc3->rootexp &&
					 curintr <= 32 &&
					 opensl3[0] == curmaxsl &&
					 last1 < 0)) {
					opensl3[1] -= slc3->seglen;
					slc3->newoff = curmaxsl + opensl3[1];
					if (opensl3[1] < 0 &&
						opensl3[0] == curmaxsl) {
						opensl3[0] += opensl3[1];
						opensl3[1] = 0;
					}
					if (opensl3[0] < 0) {
						dev_dbg(&ctrl->dev,
						"reconfig failed:%d\n",
						__LINE__);
						return -EXFULL;
					}
				} else {
					if (slc3->seglen > opensl3[0]) {
						dev_dbg(&ctrl->dev,
						"reconfig failed:%d\n",
						__LINE__);
						return -EXFULL;
					}
					slc3->newoff = opensl3[0] -
							slc3->seglen;
					opensl3[0] = slc3->newoff;
				}
				slc3->newintr = curintr;
				coeff3++;
				slc3 = ctrl->sched.chc3[coeff3];
			}
			/* update 4k openslot records */
			if (opensl1valid == false) {
				/* mirror the coeff-3 buckets into all
				 * three thirds of the coeff-1 view
				 */
				for (i = 0; i < 3; i++) {
					opensl1[i * 2] = opensl3[0];
					opensl1[(i * 2) + 1] = opensl3[1];
				}
			} else {
				int opensl1p[6];

				memcpy(opensl1p, opensl1, sizeof(opensl1));
				for (i = 0; i < 3; i++) {
					if (opensl1p[i] < opensl1p[i + 3])
						opensl1[(i * 2) + 1] =
							opensl1p[i];
					else
						opensl1[(i * 2) + 1] =
							opensl1p[i + 3];
				}
				for (i = 0; i < 3; i++) {
					opensl1[(i * 2) + 1] -= curmaxsl;
					opensl1[i * 2] = curmaxsl;
					if (opensl1[(i * 2) + 1] < 0) {
						opensl1[i * 2] +=
							opensl1[(i * 2) + 1];
						opensl1[(i * 2) + 1] = 0;
					}
					if (opensl1[i * 2] < 0) {
						dev_dbg(&ctrl->dev,
						"reconfig failed:%d\n",
						__LINE__);
						return -EXFULL;
					}
				}
			}
			/* schedule 4k family channels */
			while (coeff1 < ctrl->sched.num_cc1 &&
				curexp == (int)slc1->rootexp + expshft) {
				/* searchorder effective when opensl valid */
				static const int srcho[] = { 5, 2, 4, 1, 3, 0 };
				int maxopensl = 0;
				int maxi = 0;

				if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
					coeff1++;
					slc1 = ctrl->sched.chc1[coeff1];
					continue;
				}
				opensl1valid = true;
				/* place into the most-open sixth bucket */
				for (i = 0; i < 6; i++) {
					if (opensl1[srcho[i]] > maxopensl) {
						maxopensl = opensl1[srcho[i]];
						maxi = srcho[i];
					}
				}
				opensl1[maxi] -= slc1->seglen;
				slc1->newoff = (curmaxsl * maxi) +
						opensl1[maxi];
				if (opensl1[maxi] < 0 && (maxi & 1) == 1 &&
				    opensl1[maxi - 1] == curmaxsl) {
					opensl1[maxi - 1] += opensl1[maxi];
					if (opensl3[0] > opensl1[maxi - 1])
						opensl3[0] = opensl1[maxi - 1];
					opensl3[1] = 0;
					opensl1[maxi] = 0;
					if (opensl1[maxi - 1] < 0) {
						dev_dbg(&ctrl->dev,
						"reconfig failed:%d\n",
						__LINE__);
						return -EXFULL;
					}
				} else if (opensl1[maxi] < 0) {
					dev_dbg(&ctrl->dev,
						"reconfig failed:%d\n",
						__LINE__);
					return -EXFULL;
				} else if (opensl3[maxi & 1] > opensl1[maxi]) {
					opensl3[maxi & 1] = opensl1[maxi];
				}
				slc1->newintr = curintr * 3;
				coeff1++;
				slc1 = ctrl->sched.chc1[coeff1];
			}
		}
		/* Leave some slots for messaging space */
		if (opensl3[1] <= 0 && opensl3[0] <= 0)
			return -EXFULL;
		/* swap 1st and 2nd bucket if 2nd bucket has more open slots */
		if (opensl3[1] > opensl3[0]) {
			int temp = opensl3[0];

			opensl3[0] = opensl3[1];
			opensl3[1] = temp;
			temp = opensl1[5];
			opensl1[5] = opensl1[4];
			opensl1[4] = opensl1[3];
			opensl1[3] = opensl1[2];
			opensl1[2] = opensl1[1];
			opensl1[1] = opensl1[0];
			opensl1[0] = temp;
			shiftsegoffsets(ctrl, ctrl->sched.chc1,
					ctrl->sched.num_cc1, curmaxsl);
			shiftsegoffsets(ctrl, ctrl->sched.chc3,
					ctrl->sched.num_cc3, curmaxsl);
		}
		/* subframe mode to maximize BW */
		maxctrlw3 = opensl3[0];
		maxctrlw1 = opensl1[0];
		if (opensl3[0] == curmaxsl)
			maxctrlw3 += opensl3[1];
		for (i = 0; i < 5 && opensl1[i] == curmaxsl; i++)
			maxctrlw1 += opensl1[i + 1];
		if (curintr >= 32) {
			*subfrml = 32;
			*ctrlw = maxctrlw3;
		} else if (curintr == 16) {
			if (maxctrlw3 > (opensl3[1] * 4)) {
				*subfrml = 32;
				*ctrlw = maxctrlw3;
			} else {
				*subfrml = 8;
				*ctrlw = opensl3[1];
			}
		} else {
			if ((maxctrlw1 * 8) >= (maxctrlw3 * 24)) {
				*subfrml = 24;
				*ctrlw = maxctrlw1;
			} else {
				*subfrml = 8;
				*ctrlw = maxctrlw3;
			}
		}
	}
	return 0;
}
2612
2613#ifdef DEBUG
2614static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
2615 u32 subfrml, u32 clkgear)
2616{
2617 int sl, i;
2618 int cc1 = 0;
2619 int cc3 = 0;
2620 struct slim_ich *slc = NULL;
2621
2622 if (!ctrl->sched.slots)
2623 return 0;
2624 memset(ctrl->sched.slots, 0, SLIM_SL_PER_SUPERFRAME);
2625 dev_dbg(&ctrl->dev, "Clock gear is:%d\n", clkgear);
2626 for (sl = 0; sl < SLIM_SL_PER_SUPERFRAME; sl += subfrml) {
2627 for (i = 0; i < ctrlw; i++)
2628 ctrl->sched.slots[sl + i] = 33;
2629 }
2630 while (cc1 < ctrl->sched.num_cc1) {
2631 slc = ctrl->sched.chc1[cc1];
2632 if (slc == NULL) {
2633 dev_err(&ctrl->dev, "SLC1 null in verify: chan%d\n",
2634 cc1);
2635 return -EIO;
2636 }
2637 dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
2638 (slc - ctrl->chans), slc->newoff,
2639 slc->newintr, slc->seglen);
2640
2641 if (slc->state != SLIM_CH_PENDING_REMOVAL) {
2642 for (sl = slc->newoff;
2643 sl < SLIM_SL_PER_SUPERFRAME;
2644 sl += slc->newintr) {
2645 for (i = 0; i < slc->seglen; i++) {
2646 if (ctrl->sched.slots[sl + i])
2647 return -EXFULL;
2648 ctrl->sched.slots[sl + i] = cc1 + 1;
2649 }
2650 }
2651 }
2652 cc1++;
2653 }
2654 while (cc3 < ctrl->sched.num_cc3) {
2655 slc = ctrl->sched.chc3[cc3];
2656 if (slc == NULL) {
2657 dev_err(&ctrl->dev, "SLC3 null in verify: chan%d\n",
2658 cc3);
2659 return -EIO;
2660 }
2661 dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
2662 (slc - ctrl->chans), slc->newoff,
2663 slc->newintr, slc->seglen);
2664 if (slc->state != SLIM_CH_PENDING_REMOVAL) {
2665 for (sl = slc->newoff;
2666 sl < SLIM_SL_PER_SUPERFRAME;
2667 sl += slc->newintr) {
2668 for (i = 0; i < slc->seglen; i++) {
2669 if (ctrl->sched.slots[sl + i])
2670 return -EXFULL;
2671 ctrl->sched.slots[sl + i] = cc3 + 1;
2672 }
2673 }
2674 }
2675 cc3++;
2676 }
2677
2678 return 0;
2679}
2680#else
2681static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
2682 u32 subfrml, u32 clkgear)
2683{
2684 return 0;
2685}
2686#endif
2687
/*
 * slim_sort_chan_grp: Order the new segment offsets of a channel group so
 * they ascend along the group chain (linked via nextgrp).
 * @ctrl: controller owning the channel table
 * @slc: first channel of the group (carries SLIM_START_GRP)
 * Bubble-sort style: each outer pass walks the chain swapping adjacent
 * newoff values; only channels that are active or becoming active
 * participate.  'last' shrinks to the position of the group's end once
 * SLIM_END_GRP is seen.
 */
static void slim_sort_chan_grp(struct slim_controller *ctrl,
				struct slim_ich *slc)
{
	u8 last = (u8)-1;	/* upper bound of the unsorted region */
	u8 second = 0;

	for (; last > 0; last--) {
		struct slim_ich *slc1 = slc;
		struct slim_ich *slc2;
		u8 next = SLIM_HDL_TO_CHIDX(slc1->nextgrp);

		slc2 = &ctrl->chans[next];
		for (second = 1; second <= last && slc2 &&
		     (slc2->state == SLIM_CH_ACTIVE ||
		      slc2->state == SLIM_CH_PENDING_ACTIVE); second++) {
			/* swap only the offsets; chain order stays fixed */
			if (slc1->newoff > slc2->newoff) {
				u32 temp = slc2->newoff;

				slc2->newoff = slc1->newoff;
				slc1->newoff = temp;
			}
			if (slc2->nextgrp & SLIM_END_GRP) {
				last = second;
				break;
			}
			slc1 = slc2;
			next = SLIM_HDL_TO_CHIDX(slc1->nextgrp);
			slc2 = &ctrl->chans[next];
		}
		/*
		 * NOTE(review): slc2 always points into ctrl->chans[], so
		 * this NULL check looks unreachable — confirm before
		 * relying on it to terminate the outer loop.
		 */
		if (slc2 == NULL)
			last = second - 1;
	}
}
2721
2722
2723static int slim_allocbw(struct slim_device *sb, int *subfrmc, int *clkgear)
2724{
2725 u32 msgsl = 0;
2726 u32 ctrlw = 0;
2727 u32 subfrml = 0;
2728 int ret = -EIO;
2729 struct slim_controller *ctrl = sb->ctrl;
2730 u32 usedsl = ctrl->sched.usedslots + ctrl->sched.pending_msgsl;
2731 u32 availsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME -
2732 SLIM_GDE_SLOTS_PER_SUPERFRAME;
2733 *clkgear = SLIM_MAX_CLK_GEAR;
2734
2735 dev_dbg(&ctrl->dev, "used sl:%u, availlable sl:%u\n", usedsl, availsl);
2736 dev_dbg(&ctrl->dev, "pending:chan sl:%u, :msg sl:%u, clkgear:%u\n",
2737 ctrl->sched.usedslots,
2738 ctrl->sched.pending_msgsl, *clkgear);
2739 /*
2740 * If number of slots are 0, that means channels are inactive.
2741 * It is very likely that the manager will call clock pause very soon.
2742 * By making sure that bus is in MAX_GEAR, clk pause sequence will take
2743 * minimum amount of time.
2744 */
2745 if (ctrl->sched.usedslots != 0) {
2746 while ((usedsl * 2 <= availsl) && (*clkgear > ctrl->min_cg)) {
2747 *clkgear -= 1;
2748 usedsl *= 2;
2749 }
2750 }
2751
2752 /*
2753 * Try scheduling data channels at current clock gear, if all channels
2754 * can be scheduled, or reserved BW can't be satisfied, increase clock
2755 * gear and try again
2756 */
2757 for (; *clkgear <= ctrl->max_cg; (*clkgear)++) {
2758 ret = slim_sched_chans(sb, *clkgear, &ctrlw, &subfrml);
2759
2760 if (ret == 0) {
2761 *subfrmc = getsubfrmcoding(&ctrlw, &subfrml, &msgsl);
2762 if ((msgsl >> (ctrl->max_cg - *clkgear) <
2763 ctrl->sched.pending_msgsl) &&
2764 (*clkgear < ctrl->max_cg))
2765 continue;
2766 else
2767 break;
2768 }
2769 }
2770 if (ret == 0) {
2771 int i;
2772 /* Sort channel-groups */
2773 for (i = 0; i < ctrl->sched.num_cc1; i++) {
2774 struct slim_ich *slc = ctrl->sched.chc1[i];
2775
2776 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2777 continue;
2778 if ((slc->nextgrp & SLIM_START_GRP) &&
2779 !(slc->nextgrp & SLIM_END_GRP)) {
2780 slim_sort_chan_grp(ctrl, slc);
2781 }
2782 }
2783 for (i = 0; i < ctrl->sched.num_cc3; i++) {
2784 struct slim_ich *slc = ctrl->sched.chc3[i];
2785
2786 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2787 continue;
2788 if ((slc->nextgrp & SLIM_START_GRP) &&
2789 !(slc->nextgrp & SLIM_END_GRP)) {
2790 slim_sort_chan_grp(ctrl, slc);
2791 }
2792 }
2793
2794 ret = slim_verifychansched(ctrl, ctrlw, subfrml, *clkgear);
2795 }
2796
2797 return ret;
2798}
2799
2800static void slim_change_existing_chans(struct slim_controller *ctrl, int coeff)
2801{
2802 struct slim_ich **arr;
2803 int len, i;
2804
2805 if (coeff == SLIM_COEFF_1) {
2806 arr = ctrl->sched.chc1;
2807 len = ctrl->sched.num_cc1;
2808 } else {
2809 arr = ctrl->sched.chc3;
2810 len = ctrl->sched.num_cc3;
2811 }
2812 for (i = 0; i < len; i++) {
2813 struct slim_ich *slc = arr[i];
2814
2815 if (slc->state == SLIM_CH_ACTIVE ||
Stephen Boyd6a95fe72017-03-01 17:06:32 -08002816 slc->state == SLIM_CH_SUSPENDED) {
Sagar Dhariabe37c9c2016-11-28 23:06:58 -07002817 slc->offset = slc->newoff;
2818 slc->interval = slc->newintr;
Stephen Boyd6a95fe72017-03-01 17:06:32 -08002819 }
Sagar Dhariabe37c9c2016-11-28 23:06:58 -07002820 }
2821}
/*
 * slim_chan_changes: Apply or roll back this client's pending channel
 * changes after the reconfiguration sequence finishes.
 * @sb: client whose mark_define/mark_removal/mark_suspend lists are drained
 * @revert: false to commit (reconfig succeeded), true to undo (it failed)
 * Every pending node is freed in all cases; only the resulting channel
 * state and bandwidth accounting differ between commit and revert.
 */
static void slim_chan_changes(struct slim_device *sb, bool revert)
{
	struct slim_controller *ctrl = sb->ctrl;

	/* channels queued to be defined/activated */
	while (!list_empty(&sb->mark_define)) {
		struct slim_ich *slc;
		struct slim_pending_ch *pch =
				list_entry(sb->mark_define.next,
					struct slim_pending_ch, pending);
		slc = &ctrl->chans[pch->chan];
		if (revert) {
			if (slc->state == SLIM_CH_PENDING_ACTIVE) {
				u32 sl = slc->seglen << slc->rootexp;

				/* coeff-3 segments repeat 3x per superframe */
				if (slc->coeff == SLIM_COEFF_3)
					sl *= 3;
				if (!ctrl->allocbw)
					ctrl->sched.usedslots -= sl;
				slim_remove_ch(ctrl, slc);
				slc->state = SLIM_CH_DEFINED;
			}
		} else {
			slc->state = SLIM_CH_ACTIVE;
			slc->def++;
		}
		list_del_init(&pch->pending);
		kfree(pch);
	}

	/* channels queued for removal */
	while (!list_empty(&sb->mark_removal)) {
		struct slim_pending_ch *pch =
				list_entry(sb->mark_removal.next,
					struct slim_pending_ch, pending);
		struct slim_ich *slc = &ctrl->chans[pch->chan];
		u32 sl = slc->seglen << slc->rootexp;

		/* still referenced (or reverting): give the slots back */
		if (revert || slc->def > 0) {
			if (slc->coeff == SLIM_COEFF_3)
				sl *= 3;
			if (!ctrl->allocbw)
				ctrl->sched.usedslots += sl;
			if (revert)
				slc->def++;
			slc->state = SLIM_CH_ACTIVE;
		} else
			slim_remove_ch(ctrl, slc);
		list_del_init(&pch->pending);
		kfree(pch);
	}

	/* channels queued for suspend */
	while (!list_empty(&sb->mark_suspend)) {
		struct slim_pending_ch *pch =
				list_entry(sb->mark_suspend.next,
					struct slim_pending_ch, pending);
		struct slim_ich *slc = &ctrl->chans[pch->chan];

		if (revert)
			slc->state = SLIM_CH_ACTIVE;
		list_del_init(&pch->pending);
		kfree(pch);
	}
	/* Change already active channel if reconfig succeeded */
	if (!revert) {
		slim_change_existing_chans(ctrl, SLIM_COEFF_1);
		slim_change_existing_chans(ctrl, SLIM_COEFF_3);
	}
}
2889
2890/*
2891 * slim_reconfigure_now: Request reconfiguration now.
2892 * @sb: client handle
 * This API does what the commit flag in other scheduling APIs does.
2894 * -EXFULL is returned if there is no space in TDM to reserve the
2895 * bandwidth. -EBUSY is returned if reconfiguration request is already in
2896 * progress.
2897 */
int slim_reconfigure_now(struct slim_device *sb)
{
	u8 i;
	u8 wbuf[4];
	u32 clkgear, subframe;
	u32 curexp;
	int ret;
	struct slim_controller *ctrl = sb->ctrl;
	u32 expshft;
	u32 segdist;
	struct slim_pending_ch *pch;
	/* broadcast BEGIN_RECONFIGURATION; txn is reused for each step */
	DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION, 0, 3,
				NULL, NULL, sb->laddr);

	mutex_lock(&ctrl->sched.m_reconf);
	/*
	 * If there are no pending changes from this client, avoid sending
	 * the reconfiguration sequence
	 */
	if (sb->pending_msgsl == sb->cur_msgsl &&
		list_empty(&sb->mark_define) &&
		list_empty(&sb->mark_suspend)) {
		struct list_head *pos, *next;

		list_for_each_safe(pos, next, &sb->mark_removal) {
			struct slim_ich *slc;

			pch = list_entry(pos, struct slim_pending_ch, pending);
			slc = &ctrl->chans[pch->chan];
			if (slc->def > 0)
				slc->def--;
			/* Disconnect source port to free it up */
			if (SLIM_HDL_TO_LA(slc->srch) == sb->laddr)
				slc->srch = 0;
			/*
			 * If controller overrides BW allocation,
			 * delete this in remove channel itself
			 */
			if (slc->def != 0 && !ctrl->allocbw) {
				list_del(&pch->pending);
				kfree(pch);
			}
		}
		if (list_empty(&sb->mark_removal)) {
			mutex_unlock(&ctrl->sched.m_reconf);
			pr_info("SLIM_CL: skip reconfig sequence");
			return 0;
		}
	}

	/* stage all pending changes into the controller's bookkeeping */
	ctrl->sched.pending_msgsl += sb->pending_msgsl - sb->cur_msgsl;
	list_for_each_entry(pch, &sb->mark_define, pending) {
		struct slim_ich *slc = &ctrl->chans[pch->chan];

		slim_add_ch(ctrl, slc);
		if (slc->state < SLIM_CH_ACTIVE)
			slc->state = SLIM_CH_PENDING_ACTIVE;
	}

	list_for_each_entry(pch, &sb->mark_removal, pending) {
		struct slim_ich *slc = &ctrl->chans[pch->chan];
		u32 sl = slc->seglen << slc->rootexp;

		if (slc->coeff == SLIM_COEFF_3)
			sl *= 3;
		if (!ctrl->allocbw)
			ctrl->sched.usedslots -= sl;
		slc->state = SLIM_CH_PENDING_REMOVAL;
	}
	list_for_each_entry(pch, &sb->mark_suspend, pending) {
		struct slim_ich *slc = &ctrl->chans[pch->chan];

		slc->state = SLIM_CH_SUSPENDED;
	}

	/*
	 * Controller can override default channel scheduling algorithm.
	 * (e.g. if controller needs to use fixed channel scheduling based
	 * on number of channels)
	 */
	if (ctrl->allocbw)
		ret = ctrl->allocbw(sb, &subframe, &clkgear);
	else
		ret = slim_allocbw(sb, &subframe, &clkgear);

	if (!ret) {
		ret = slim_processtxn(ctrl, &txn, false);
		dev_dbg(&ctrl->dev, "sending begin_reconfig:ret:%d\n", ret);
	}

	/* announce new subframe mode / clock gear only if they changed */
	if (!ret && subframe != ctrl->sched.subfrmcode) {
		wbuf[0] = (u8)(subframe & 0xFF);
		txn.mc = SLIM_MSG_MC_NEXT_SUBFRAME_MODE;
		txn.len = 1;
		txn.rl = 4;
		txn.wbuf = wbuf;
		ret = slim_processtxn(ctrl, &txn, false);
		dev_dbg(&ctrl->dev, "sending subframe:%d,ret:%d\n",
				(int)wbuf[0], ret);
	}
	if (!ret && clkgear != ctrl->clkgear) {
		wbuf[0] = (u8)(clkgear & 0xFF);
		txn.mc = SLIM_MSG_MC_NEXT_CLOCK_GEAR;
		txn.len = 1;
		txn.rl = 4;
		txn.wbuf = wbuf;
		ret = slim_processtxn(ctrl, &txn, false);
		dev_dbg(&ctrl->dev, "sending clkgear:%d,ret:%d\n",
				(int)wbuf[0], ret);
	}
	if (ret)
		goto revert_reconfig;

	expshft = SLIM_MAX_CLK_GEAR - clkgear;
	/* activate/remove channel */
	list_for_each_entry(pch, &sb->mark_define, pending) {
		struct slim_ich *slc = &ctrl->chans[pch->chan];
		/* Define content */
		wbuf[0] = slc->chan;
		wbuf[1] = slc->prrate;
		wbuf[2] = slc->prop.dataf | (slc->prop.auxf << 4);
		wbuf[3] = slc->prop.sampleszbits / SLIM_CL_PER_SL;
		txn.mc = SLIM_MSG_MC_NEXT_DEFINE_CONTENT;
		txn.len = 4;
		txn.rl = 7;
		txn.wbuf = wbuf;
		dev_dbg(&ctrl->dev, "define content, activate:%x, %x, %x, %x\n",
				wbuf[0], wbuf[1], wbuf[2], wbuf[3]);
		/* Right now, channel link bit is not supported */
		ret = slim_processtxn(ctrl, &txn, false);
		if (ret)
			goto revert_reconfig;

		txn.mc = SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL;
		txn.len = 1;
		txn.rl = 4;
		ret = slim_processtxn(ctrl, &txn, false);
		if (ret)
			goto revert_reconfig;
	}

	list_for_each_entry(pch, &sb->mark_removal, pending) {
		struct slim_ich *slc = &ctrl->chans[pch->chan];

		dev_dbg(&ctrl->dev, "remove chan:%x\n", pch->chan);
		wbuf[0] = slc->chan;
		txn.mc = SLIM_MSG_MC_NEXT_REMOVE_CHANNEL;
		txn.len = 1;
		txn.rl = 4;
		txn.wbuf = wbuf;
		ret = slim_processtxn(ctrl, &txn, false);
		if (ret)
			goto revert_reconfig;
	}
	list_for_each_entry(pch, &sb->mark_suspend, pending) {
		struct slim_ich *slc = &ctrl->chans[pch->chan];

		dev_dbg(&ctrl->dev, "suspend chan:%x\n", pch->chan);
		wbuf[0] = slc->chan;
		txn.mc = SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL;
		txn.len = 1;
		txn.rl = 4;
		txn.wbuf = wbuf;
		ret = slim_processtxn(ctrl, &txn, false);
		if (ret)
			goto revert_reconfig;
	}

	/* Define CC1 channel */
	for (i = 0; i < ctrl->sched.num_cc1; i++) {
		struct slim_ich *slc = ctrl->sched.chc1[i];

		if (slc->state == SLIM_CH_PENDING_REMOVAL)
			continue;
		curexp = slc->rootexp + expshft;
		segdist = (slc->newoff << curexp) & 0x1FF;
		expshft = SLIM_MAX_CLK_GEAR - clkgear;
		dev_dbg(&ctrl->dev, "new-intr:%d, old-intr:%d, dist:%d\n",
				slc->newintr, slc->interval, segdist);
		dev_dbg(&ctrl->dev, "new-off:%d, old-off:%d\n",
				slc->newoff, slc->offset);

		/* re-send NEXT_DEFINE_CHANNEL only if placement changed */
		if (slc->state < SLIM_CH_ACTIVE || slc->def < slc->ref ||
			slc->newintr != slc->interval ||
			slc->newoff != slc->offset) {
			segdist |= 0x200;
			segdist >>= curexp;
			segdist |= (slc->newoff << (curexp + 1)) & 0xC00;
			wbuf[0] = slc->chan;
			wbuf[1] = (u8)(segdist & 0xFF);
			wbuf[2] = (u8)((segdist & 0xF00) >> 8) |
					(slc->prop.prot << 4);
			wbuf[3] = slc->seglen;
			txn.mc = SLIM_MSG_MC_NEXT_DEFINE_CHANNEL;
			txn.len = 4;
			txn.rl = 7;
			txn.wbuf = wbuf;
			ret = slim_processtxn(ctrl, &txn, false);
			if (ret)
				goto revert_reconfig;
		}
	}

	/* Define CC3 channels */
	for (i = 0; i < ctrl->sched.num_cc3; i++) {
		struct slim_ich *slc = ctrl->sched.chc3[i];

		if (slc->state == SLIM_CH_PENDING_REMOVAL)
			continue;
		curexp = slc->rootexp + expshft;
		segdist = (slc->newoff << curexp) & 0x1FF;
		expshft = SLIM_MAX_CLK_GEAR - clkgear;
		dev_dbg(&ctrl->dev, "new-intr:%d, old-intr:%d, dist:%d\n",
				slc->newintr, slc->interval, segdist);
		dev_dbg(&ctrl->dev, "new-off:%d, old-off:%d\n",
				slc->newoff, slc->offset);

		if (slc->state < SLIM_CH_ACTIVE || slc->def < slc->ref ||
			slc->newintr != slc->interval ||
			slc->newoff != slc->offset) {
			segdist |= 0x200;
			segdist >>= curexp;
			segdist |= 0xC00;
			wbuf[0] = slc->chan;
			wbuf[1] = (u8)(segdist & 0xFF);
			wbuf[2] = (u8)((segdist & 0xF00) >> 8) |
					(slc->prop.prot << 4);
			wbuf[3] = (u8)(slc->seglen);
			txn.mc = SLIM_MSG_MC_NEXT_DEFINE_CHANNEL;
			txn.len = 4;
			txn.rl = 7;
			txn.wbuf = wbuf;
			ret = slim_processtxn(ctrl, &txn, false);
			if (ret)
				goto revert_reconfig;
		}
	}
	/* commit everything announced above in one shot */
	txn.mc = SLIM_MSG_MC_RECONFIGURE_NOW;
	txn.len = 0;
	txn.rl = 3;
	txn.wbuf = NULL;
	ret = slim_processtxn(ctrl, &txn, false);
	dev_dbg(&ctrl->dev, "reconfig now:ret:%d\n", ret);
	if (!ret) {
		ctrl->sched.subfrmcode = subframe;
		ctrl->clkgear = clkgear;
		ctrl->sched.msgsl = ctrl->sched.pending_msgsl;
		sb->cur_msgsl = sb->pending_msgsl;
		slim_chan_changes(sb, false);
		mutex_unlock(&ctrl->sched.m_reconf);
		return 0;
	}

revert_reconfig:
	/* Revert channel changes */
	slim_chan_changes(sb, true);
	mutex_unlock(&ctrl->sched.m_reconf);
	return ret;
}
EXPORT_SYMBOL(slim_reconfigure_now);
3158
3159static int add_pending_ch(struct list_head *listh, u8 chan)
3160{
3161 struct slim_pending_ch *pch;
3162
3163 pch = kmalloc(sizeof(struct slim_pending_ch), GFP_KERNEL);
3164 if (!pch)
3165 return -ENOMEM;
3166 pch->chan = chan;
3167 list_add_tail(&pch->pending, listh);
3168 return 0;
3169}
3170
3171/*
3172 * slim_control_ch: Channel control API.
3173 * @sb: client handle
3174 * @chanh: group or channel handle to be controlled
3175 * @chctrl: Control command (activate/suspend/remove)
3176 * @commit: flag to indicate whether the control should take effect right-away.
3177 * This API activates, removes or suspends a channel (or group of channels)
3178 * chanh indicates the channel or group handle (returned by the define_ch API).
3179 * Reconfiguration may be time-consuming since it can change all other active
3180 * channel allocations on the bus, change in clock gear used by the slimbus,
3181 * and change in the control space width used for messaging.
3182 * commit makes sure that multiple channels can be activated/deactivated before
3183 * reconfiguration is started.
3184 * -EXFULL is returned if there is no space in TDM to reserve the bandwidth.
3185 * -EISCONN/-ENOTCONN is returned if the channel is already connected or not
3186 * yet defined.
3187 * -EINVAL is returned if individual control of a grouped-channel is attempted.
3188 */
int slim_control_ch(struct slim_device *sb, u16 chanh,
			enum slim_ch_control chctrl, bool commit)
{
	struct slim_controller *ctrl = sb->ctrl;
	int ret = 0;
	/* Get rid of the group flag in MSB if any */
	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
	u8 nchan = 0;
	struct slim_ich *slc = &ctrl->chans[chan];

	/* only a group head (or a lone channel marked as one) is accepted */
	if (!(slc->nextgrp & SLIM_START_GRP))
		return -EINVAL;

	/* sldev_reconf serializes this client; m_reconf guards the lists */
	mutex_lock(&sb->sldev_reconf);
	mutex_lock(&ctrl->sched.m_reconf);
	/* walk every channel of the group via the nextgrp chain */
	do {
		struct slim_pending_ch *pch;
		u8 add_mark_removal = true;

		slc = &ctrl->chans[chan];
		dev_dbg(&ctrl->dev, "chan:%d,ctrl:%d,def:%d", chan, chctrl,
					slc->def);
		if (slc->state < SLIM_CH_DEFINED) {
			ret = -ENOTCONN;
			break;
		}
		if (chctrl == SLIM_CH_SUSPEND) {
			ret = add_pending_ch(&sb->mark_suspend, chan);
			if (ret)
				break;
		} else if (chctrl == SLIM_CH_ACTIVATE) {
			if (slc->state > SLIM_CH_ACTIVE) {
				ret = -EISCONN;
				break;
			}
			ret = add_pending_ch(&sb->mark_define, chan);
			if (ret)
				break;
		} else {
			if (slc->state < SLIM_CH_ACTIVE) {
				ret = -ENOTCONN;
				break;
			}
			/* If channel removal request comes when pending
			 * in the mark_define, remove it from the define
			 * list instead of adding it to removal list
			 */
			if (!list_empty(&sb->mark_define)) {
				struct list_head *pos, *next;

				list_for_each_safe(pos, next,
						&sb->mark_define) {
					pch = list_entry(pos,
						struct slim_pending_ch,
						pending);
					if (pch->chan == chan) {
						list_del(&pch->pending);
						kfree(pch);
						add_mark_removal = false;
						break;
					}
				}
			}
			if (add_mark_removal == true) {
				ret = add_pending_ch(&sb->mark_removal, chan);
				if (ret)
					break;
			}
		}

		nchan++;
		if (nchan < SLIM_GRP_TO_NCHAN(chanh))
			chan = SLIM_HDL_TO_CHIDX(slc->nextgrp);
	} while (nchan < SLIM_GRP_TO_NCHAN(chanh));
	mutex_unlock(&ctrl->sched.m_reconf);
	/* on partial failure the queued entries are reverted here */
	if (!ret && commit == true)
		ret = slim_reconfigure_now(sb);
	mutex_unlock(&sb->sldev_reconf);
	return ret;
}
EXPORT_SYMBOL(slim_control_ch);
3270
3271/*
3272 * slim_reservemsg_bw: Request to reserve bandwidth for messages.
3273 * @sb: client handle
3274 * @bw_bps: message bandwidth in bits per second to be requested
3275 * @commit: indicates whether the reconfiguration needs to be acted upon.
3276 * This API call can be grouped with slim_control_ch API call with only one of
3277 * the APIs specifying the commit flag to avoid reconfiguration being called too
3278 * frequently. -EXFULL is returned if there is no space in TDM to reserve the
3279 * bandwidth. -EBUSY is returned if reconfiguration is requested, but a request
3280 * is already in progress.
3281 */
3282int slim_reservemsg_bw(struct slim_device *sb, u32 bw_bps, bool commit)
3283{
3284 struct slim_controller *ctrl = sb->ctrl;
3285 int ret = 0;
3286 int sl;
3287
3288 mutex_lock(&sb->sldev_reconf);
3289 if ((bw_bps >> 3) >= ctrl->a_framer->rootfreq)
3290 sl = SLIM_SL_PER_SUPERFRAME;
3291 else {
3292 sl = (bw_bps * (SLIM_CL_PER_SUPERFRAME_DIV8/SLIM_CL_PER_SL/2) +
3293 (ctrl->a_framer->rootfreq/2 - 1)) /
3294 (ctrl->a_framer->rootfreq/2);
3295 }
3296 dev_dbg(&ctrl->dev, "request:bw:%d, slots:%d, current:%d\n", bw_bps, sl,
3297 sb->cur_msgsl);
3298 sb->pending_msgsl = sl;
3299 if (commit == true)
3300 ret = slim_reconfigure_now(sb);
3301 mutex_unlock(&sb->sldev_reconf);
3302 return ret;
3303}
3304EXPORT_SYMBOL(slim_reservemsg_bw);
3305
3306/*
3307 * slim_ctrl_clk_pause: Called by slimbus controller to request clock to be
 * paused or woken up out of clock pause
3310 * @ctrl: controller requesting bus to be paused or woken up
3311 * @wakeup: Wakeup this controller from clock pause.
3312 * @restart: Restart time value per spec used for clock pause. This value
3313 * isn't used when controller is to be woken up.
3314 * This API executes clock pause reconfiguration sequence if wakeup is false.
 * If wakeup is true, controller's wakeup callback is called. After a
 * successful pause, the Slimbus clock is idle and can be disabled by the
 * controller later.
3317 */
int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup, u8 restart)
{
	int ret = 0;
	int i;
	/* Broadcast BEGIN_RECONFIGURATION txn used to start the pause sequence */
	DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_CLK_PAUSE_SEQ_FLG |
				SLIM_MSG_MC_BEGIN_RECONFIGURATION, 0, 3,
				NULL, NULL, 0);

	/* restart value is only meaningful when entering pause, and must be valid */
	if (wakeup == false && restart > SLIM_CLK_UNSPECIFIED)
		return -EINVAL;
	mutex_lock(&ctrl->m_ctrl);
	if (wakeup) {
		/* Nothing to do if the clock never entered pause */
		if (ctrl->clk_state == SLIM_CLK_ACTIVE) {
			mutex_unlock(&ctrl->m_ctrl);
			return 0;
		}
		/*
		 * Wait for any in-flight pause sequence to finish signalling
		 * pause_comp before attempting wakeup. Note: this consumes
		 * the completion.
		 */
		wait_for_completion(&ctrl->pause_comp);
		/*
		 * Slimbus framework will call controller wakeup
		 * Controller should make sure that it sets active framer
		 * out of clock pause by doing appropriate setting
		 */
		if (ctrl->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup)
			ret = ctrl->wakeup(ctrl);
		/*
		 * If wakeup fails, make sure that next attempt can succeed.
		 * Since we already consumed pause_comp, complete it so
		 * that next wakeup isn't blocked forever
		 */
		if (!ret)
			ctrl->clk_state = SLIM_CLK_ACTIVE;
		else
			complete(&ctrl->pause_comp);
		mutex_unlock(&ctrl->m_ctrl);
		return ret;
	}

	/* Entering clock pause: dispatch on the current pause state machine */
	switch (ctrl->clk_state) {
	case SLIM_CLK_ENTERING_PAUSE:
	case SLIM_CLK_PAUSE_FAILED:
		/*
		 * If controller is already trying to enter clock pause,
		 * let it finish.
		 * In case of error, retry
		 * In both cases, previous clock pause has signalled
		 * completion.
		 */
		wait_for_completion(&ctrl->pause_comp);
		/* retry upon failure */
		if (ctrl->clk_state == SLIM_CLK_PAUSE_FAILED) {
			ctrl->clk_state = SLIM_CLK_ACTIVE;
		} else {
			mutex_unlock(&ctrl->m_ctrl);
			/*
			 * Signal completion so that wakeup can wait on
			 * it.
			 */
			complete(&ctrl->pause_comp);
			return 0;
		}
		break;
	case SLIM_CLK_PAUSED:
		/* already paused */
		mutex_unlock(&ctrl->m_ctrl);
		return 0;
	case SLIM_CLK_ACTIVE:
	default:
		break;
	}
	/* Pending response for a message */
	for (i = 0; i < ctrl->last_tid; i++) {
		if (ctrl->txnt[i]) {
			ret = -EBUSY;
			pr_info("slim_clk_pause: txn-rsp for %d pending", i);
			mutex_unlock(&ctrl->m_ctrl);
			return -EBUSY;
		}
	}
	/*
	 * Mark the transition before dropping m_ctrl so concurrent callers
	 * take the ENTERING_PAUSE branch above and wait on pause_comp.
	 */
	ctrl->clk_state = SLIM_CLK_ENTERING_PAUSE;
	mutex_unlock(&ctrl->m_ctrl);

	mutex_lock(&ctrl->sched.m_reconf);
	/* Data channels active */
	if (ctrl->sched.usedslots) {
		pr_info("slim_clk_pause: data channel active");
		ret = -EBUSY;
		goto clk_pause_ret;
	}

	/* Step 1 of the pause sequence: BEGIN_RECONFIGURATION (set up above) */
	ret = slim_processtxn(ctrl, &txn, false);
	if (ret)
		goto clk_pause_ret;

	/* Step 2: NEXT_PAUSE_CLOCK carrying the restart-time value */
	txn.mc = SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_NEXT_PAUSE_CLOCK;
	txn.len = 1;
	txn.rl = 4;
	txn.wbuf = &restart;
	ret = slim_processtxn(ctrl, &txn, false);
	if (ret)
		goto clk_pause_ret;

	/* Step 3: RECONFIGURE_NOW commits the pause */
	txn.mc = SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_RECONFIGURE_NOW;
	txn.len = 0;
	txn.rl = 3;
	txn.wbuf = NULL;
	ret = slim_processtxn(ctrl, &txn, false);
	if (ret)
		goto clk_pause_ret;

clk_pause_ret:
	/*
	 * Record the outcome and signal pause_comp in every case, so that
	 * a waiting wakeup (or a retry of clock pause) is never blocked.
	 */
	if (ret)
		ctrl->clk_state = SLIM_CLK_PAUSE_FAILED;
	else
		ctrl->clk_state = SLIM_CLK_PAUSED;
	complete(&ctrl->pause_comp);
	mutex_unlock(&ctrl->sched.m_reconf);
	return ret;
}
3436EXPORT_SYMBOL(slim_ctrl_clk_pause);
3437
3438MODULE_LICENSE("GPL v2");
3439MODULE_DESCRIPTION("Slimbus module");
3440MODULE_ALIAS("platform:slimbus");