1/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/errno.h>
15#include <linux/slab.h>
16#include <linux/init.h>
17#include <linux/completion.h>
18#include <linux/idr.h>
19#include <linux/pm_runtime.h>
20#include <linux/slimbus/slimbus.h>
21
22#define SLIM_PORT_HDL(la, f, p) ((la)<<24 | (f) << 16 | (p))
23
24#define SLIM_HDL_TO_LA(hdl) ((u32)((hdl) & 0xFF000000) >> 24)
25#define SLIM_HDL_TO_FLOW(hdl) (((u32)(hdl) & 0xFF0000) >> 16)
26#define SLIM_HDL_TO_PORT(hdl) ((u32)(hdl) & 0xFF)
27
28#define SLIM_HDL_TO_CHIDX(hdl) ((u16)(hdl) & 0xFF)
29#define SLIM_GRP_TO_NCHAN(hdl) ((u16)(hdl >> 8) & 0xFF)
30
31#define SLIM_SLAVE_PORT(p, la) (((la)<<16) | (p))
32#define SLIM_MGR_PORT(p) ((0xFF << 16) | (p))
33#define SLIM_LA_MANAGER 0xFF
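
/*
 * Worked example (illustrative, not part of the original file): a handle for
 * manager port 3 flowing as a source is SLIM_PORT_HDL(SLIM_LA_MANAGER,
 * SLIM_SRC, 3); the logical address lands in bits 31:24, the flow in bits
 * 23:16 and the port number in the low byte, so SLIM_HDL_TO_LA(),
 * SLIM_HDL_TO_FLOW() and SLIM_HDL_TO_PORT() recover the three fields.
 */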
34
35#define SLIM_START_GRP (1 << 8)
36#define SLIM_END_GRP (1 << 9)
37
38#define SLIM_MAX_INTR_COEFF_3 (SLIM_SL_PER_SUPERFRAME/3)
39#define SLIM_MAX_INTR_COEFF_1 SLIM_SL_PER_SUPERFRAME
40
41static DEFINE_MUTEX(slim_lock);
42static DEFINE_IDR(ctrl_idr);
43static struct device_type slim_dev_type;
44static struct device_type slim_ctrl_type;
45
46#define DEFINE_SLIM_LDEST_TXN(name, mc, len, rl, rbuf, wbuf, la) \
47 struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_LOGICALADDR, 0,\
48 len, 0, la, false, rbuf, wbuf, NULL, }
49
50#define DEFINE_SLIM_BCAST_TXN(name, mc, len, rl, rbuf, wbuf, la) \
51 struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_BROADCAST, 0,\
52 len, 0, la, false, rbuf, wbuf, NULL, }
53
54static const struct slim_device_id *slim_match(const struct slim_device_id *id,
55 const struct slim_device *slim_dev)
56{
57 while (id->name[0]) {
58 if (strcmp(slim_dev->name, id->name) == 0)
59 return id;
60 id++;
61 }
62 return NULL;
63}
64
65const struct slim_device_id *slim_get_device_id(const struct slim_device *sdev)
66{
67 const struct slim_driver *sdrv = to_slim_driver(sdev->dev.driver);
68
69 return slim_match(sdrv->id_table, sdev);
70}
71EXPORT_SYMBOL(slim_get_device_id);
72
73static int slim_device_match(struct device *dev, struct device_driver *driver)
74{
75 struct slim_device *slim_dev;
76 struct slim_driver *drv = to_slim_driver(driver);
77
78 if (dev->type == &slim_dev_type)
79 slim_dev = to_slim_device(dev);
80 else
81 return 0;
82 if (drv->id_table)
83 return slim_match(drv->id_table, slim_dev) != NULL;
84
85 if (driver->name)
86 return strcmp(slim_dev->name, driver->name) == 0;
87 return 0;
88}
89
90#ifdef CONFIG_PM_SLEEP
91static int slim_legacy_suspend(struct device *dev, pm_message_t mesg)
92{
93 struct slim_device *slim_dev = NULL;
94 struct slim_driver *driver;
95
96 if (dev->type == &slim_dev_type)
97 slim_dev = to_slim_device(dev);
98
99 if (!slim_dev || !dev->driver)
100 return 0;
101
102 driver = to_slim_driver(dev->driver);
103 if (!driver->suspend)
104 return 0;
105
106 return driver->suspend(slim_dev, mesg);
107}
108
109static int slim_legacy_resume(struct device *dev)
110{
111 struct slim_device *slim_dev = NULL;
112 struct slim_driver *driver;
113
114 if (dev->type == &slim_dev_type)
115 slim_dev = to_slim_device(dev);
116
117 if (!slim_dev || !dev->driver)
118 return 0;
119
120 driver = to_slim_driver(dev->driver);
121 if (!driver->resume)
122 return 0;
123
124 return driver->resume(slim_dev);
125}
126
127static int slim_pm_suspend(struct device *dev)
128{
129 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
130
131 if (pm)
132 return pm_generic_suspend(dev);
133 else
134 return slim_legacy_suspend(dev, PMSG_SUSPEND);
135}
136
137static int slim_pm_resume(struct device *dev)
138{
139 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
140
141 if (pm)
142 return pm_generic_resume(dev);
143 else
144 return slim_legacy_resume(dev);
145}
146
147#else
148#define slim_pm_suspend NULL
149#define slim_pm_resume NULL
150#endif
151
152static const struct dev_pm_ops slimbus_pm = {
153 .suspend = slim_pm_suspend,
154 .resume = slim_pm_resume,
155 SET_RUNTIME_PM_OPS(
156 pm_generic_suspend,
157 pm_generic_resume,
158 NULL
159 )
160};
161struct bus_type slimbus_type = {
162 .name = "slimbus",
163 .match = slim_device_match,
164 .pm = &slimbus_pm,
165};
166EXPORT_SYMBOL(slimbus_type);
167
168struct device slimbus_dev = {
169 .init_name = "slimbus",
170};
171
172static void __exit slimbus_exit(void)
173{
174 device_unregister(&slimbus_dev);
175 bus_unregister(&slimbus_type);
176}
177
178static int __init slimbus_init(void)
179{
180 int retval;
181
182 retval = bus_register(&slimbus_type);
183 if (!retval)
184 retval = device_register(&slimbus_dev);
185
186 if (retval)
187 bus_unregister(&slimbus_type);
188
189 return retval;
190}
191postcore_initcall(slimbus_init);
192module_exit(slimbus_exit);
193
194static int slim_drv_probe(struct device *dev)
195{
196 const struct slim_driver *sdrv = to_slim_driver(dev->driver);
197 struct slim_device *sbdev = to_slim_device(dev);
198 struct slim_controller *ctrl = sbdev->ctrl;
199
200 if (sdrv->probe) {
201 int ret;
202
203 ret = sdrv->probe(sbdev);
204 if (ret)
205 return ret;
206 if (sdrv->device_up)
207 queue_work(ctrl->wq, &sbdev->wd);
208 return 0;
209 }
210 return -ENODEV;
211}
212
213static int slim_drv_remove(struct device *dev)
214{
215 const struct slim_driver *sdrv = to_slim_driver(dev->driver);
216 struct slim_device *sbdev = to_slim_device(dev);
217
218 sbdev->notified = false;
219 if (sdrv->remove)
220 return sdrv->remove(to_slim_device(dev));
221 return -ENODEV;
222}
223
224static void slim_drv_shutdown(struct device *dev)
225{
226 const struct slim_driver *sdrv = to_slim_driver(dev->driver);
227
228 if (sdrv->shutdown)
229 sdrv->shutdown(to_slim_device(dev));
230}
231
232/*
233 * slim_driver_register: Client driver registration with slimbus
234 * @drv: Client driver to be associated with client-device.
235 * This API registers the client driver with the slimbus framework.
236 * It is called from the driver's module-init function.
237 */
238int slim_driver_register(struct slim_driver *drv)
239{
240 drv->driver.bus = &slimbus_type;
241 if (drv->probe)
242 drv->driver.probe = slim_drv_probe;
243
244 if (drv->remove)
245 drv->driver.remove = slim_drv_remove;
246
247 if (drv->shutdown)
248 drv->driver.shutdown = slim_drv_shutdown;
249
250 return driver_register(&drv->driver);
251}
252EXPORT_SYMBOL(slim_driver_register);
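
/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * client driver registering with the slimbus framework. The "foo" names,
 * the device-name string and the callback body are hypothetical.
 *
 *      static int foo_probe(struct slim_device *sbdev)
 *      {
 *              dev_info(&sbdev->dev, "foo client probed\n");
 *              return 0;
 *      }
 *
 *      static struct slim_driver foo_driver = {
 *              .probe  = foo_probe,
 *              .driver = {
 *                      .name  = "foo-slim-client",
 *                      .owner = THIS_MODULE,
 *              },
 *      };
 *
 *      static int __init foo_init(void)
 *      {
 *              return slim_driver_register(&foo_driver);
 *      }
 *      module_init(foo_init);
 */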
253
254/*
255 * slim_driver_unregister: Undo effects of slim_driver_register
256 * @drv: Client driver to be unregistered
257 */
258void slim_driver_unregister(struct slim_driver *drv)
259{
260 if (drv)
261 driver_unregister(&drv->driver);
262}
263EXPORT_SYMBOL(slim_driver_unregister);
264
265#define slim_ctrl_attr_gr NULL
266
267static void slim_ctrl_release(struct device *dev)
268{
269 struct slim_controller *ctrl = to_slim_controller(dev);
270
271 complete(&ctrl->dev_released);
272}
273
274static struct device_type slim_ctrl_type = {
275 .groups = slim_ctrl_attr_gr,
276 .release = slim_ctrl_release,
277};
278
279static struct slim_controller *slim_ctrl_get(struct slim_controller *ctrl)
280{
281 if (!ctrl || !get_device(&ctrl->dev))
282 return NULL;
283
284 return ctrl;
285}
286
287static void slim_ctrl_put(struct slim_controller *ctrl)
288{
289 if (ctrl)
290 put_device(&ctrl->dev);
291}
292
293#define slim_device_attr_gr NULL
294#define slim_device_uevent NULL
295static void slim_dev_release(struct device *dev)
296{
297 struct slim_device *sbdev = to_slim_device(dev);
298
299 slim_ctrl_put(sbdev->ctrl);
300}
301
302static struct device_type slim_dev_type = {
303 .groups = slim_device_attr_gr,
304 .uevent = slim_device_uevent,
305 .release = slim_dev_release,
306};
307
308static void slim_report(struct work_struct *work)
309{
310 struct slim_driver *sbdrv;
311 struct slim_device *sbdev =
312 container_of(work, struct slim_device, wd);
313 if (!sbdev->dev.driver)
314 return;
315 /* check if device-up or down needs to be called */
316 if ((!sbdev->reported && !sbdev->notified) ||
317 (sbdev->reported && sbdev->notified))
318 return;
319
320 sbdrv = to_slim_driver(sbdev->dev.driver);
321 /*
322 * address no longer valid, means device reported absent, whereas
323 * address valid, means device reported present
324 */
325 if (sbdev->notified && !sbdev->reported) {
326 sbdev->notified = false;
327 if (sbdrv->device_down)
328 sbdrv->device_down(sbdev);
329 } else if (!sbdev->notified && sbdev->reported) {
330 sbdev->notified = true;
331 if (sbdrv->device_up)
332 sbdrv->device_up(sbdev);
333 }
334}
335
336static void slim_device_reset(struct work_struct *work)
337{
338 struct slim_driver *sbdrv;
339 struct slim_device *sbdev =
340 container_of(work, struct slim_device, device_reset);
341
342 if (!sbdev->dev.driver)
343 return;
344
345 sbdrv = to_slim_driver(sbdev->dev.driver);
346 if (sbdrv && sbdrv->reset_device)
347 sbdrv->reset_device(sbdev);
348}
349
350/*
351 * slim_add_device: Add a new device without registering board info.
352 * @ctrl: Controller to which this device is to be added to.
353 * Called when device doesn't have an explicit client-driver to be probed, or
354 * the client-driver is a module installed dynamically.
355 */
356int slim_add_device(struct slim_controller *ctrl, struct slim_device *sbdev)
357{
358 sbdev->dev.bus = &slimbus_type;
359 sbdev->dev.parent = ctrl->dev.parent;
360 sbdev->dev.type = &slim_dev_type;
361 sbdev->dev.driver = NULL;
362 sbdev->ctrl = ctrl;
363 slim_ctrl_get(ctrl);
364 dev_set_name(&sbdev->dev, "%s", sbdev->name);
365 mutex_init(&sbdev->sldev_reconf);
366 INIT_LIST_HEAD(&sbdev->mark_define);
367 INIT_LIST_HEAD(&sbdev->mark_suspend);
368 INIT_LIST_HEAD(&sbdev->mark_removal);
369 INIT_WORK(&sbdev->wd, slim_report);
370 INIT_WORK(&sbdev->device_reset, slim_device_reset);
371 mutex_lock(&ctrl->m_ctrl);
372 list_add_tail(&sbdev->dev_list, &ctrl->devs);
373 mutex_unlock(&ctrl->m_ctrl);
374 /* probe slave on this controller */
375 return device_register(&sbdev->dev);
376}
377EXPORT_SYMBOL(slim_add_device);
378
379struct sbi_boardinfo {
380 struct list_head list;
381 struct slim_boardinfo board_info;
382};
383
384static LIST_HEAD(board_list);
385static LIST_HEAD(slim_ctrl_list);
386static DEFINE_MUTEX(board_lock);
387
388/* If controller is not present, only add to boards list */
389static void slim_match_ctrl_to_boardinfo(struct slim_controller *ctrl,
390 struct slim_boardinfo *bi)
391{
392 int ret;
393
394 if (ctrl->nr != bi->bus_num)
395 return;
396
397 ret = slim_add_device(ctrl, bi->slim_slave);
398 if (ret != 0)
399 dev_err(ctrl->dev.parent, "can't create new device for %s\n",
400 bi->slim_slave->name);
401}
402
403/*
404 * slim_register_board_info: Board-initialization routine.
405 * @info: List of all devices on all controllers present on the board.
406 * @n: number of entries.
407 * API enumerates respective devices on corresponding controller.
408 * Called from board-init function.
409 */
410int slim_register_board_info(struct slim_boardinfo const *info, unsigned int n)
411{
412 struct sbi_boardinfo *bi;
413 int i;
414
415 bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
416 if (!bi)
417 return -ENOMEM;
418
419 for (i = 0; i < n; i++, bi++, info++) {
420 struct slim_controller *ctrl;
421
422 memcpy(&bi->board_info, info, sizeof(*info));
423 mutex_lock(&board_lock);
424 list_add_tail(&bi->list, &board_list);
425 list_for_each_entry(ctrl, &slim_ctrl_list, list)
426 slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info);
427 mutex_unlock(&board_lock);
428 }
429 return 0;
430}
431EXPORT_SYMBOL(slim_register_board_info);
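
/*
 * Example (illustrative sketch, not part of the original file): registering
 * one slave on bus 0 from board/machine init code. The device name and the
 * enumeration address bytes are hypothetical.
 *
 *      static struct slim_device foo_slave = {
 *              .name   = "foo-slim-client",
 *              .e_addr = {0x00, 0x01, 0x30, 0x02, 0x17, 0x02},
 *      };
 *
 *      static struct slim_boardinfo foo_board_info[] = {
 *              { .bus_num = 0, .slim_slave = &foo_slave },
 *      };
 *
 *      slim_register_board_info(foo_board_info,
 *                               ARRAY_SIZE(foo_board_info));
 */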
432
433/*
434 * slim_ctrl_add_boarddevs: Add devices registered by board-info
435 * @ctrl: Controller to which these devices are to be added to.
436 * This API is called by controller when it is up and running.
437 * If devices on a controller were registered before the controller itself,
438 * this makes sure that they get probed when the controller comes up.
439 */
440void slim_ctrl_add_boarddevs(struct slim_controller *ctrl)
441{
442 struct sbi_boardinfo *bi;
443
444 mutex_lock(&board_lock);
445 list_add_tail(&ctrl->list, &slim_ctrl_list);
446 list_for_each_entry(bi, &board_list, list)
447 slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info);
448 mutex_unlock(&board_lock);
449}
450EXPORT_SYMBOL(slim_ctrl_add_boarddevs);
451
452/*
453 * slim_busnum_to_ctrl: Map bus number to controller
454 * @busnum: Bus number
455 * Returns controller representing this bus number
456 */
457struct slim_controller *slim_busnum_to_ctrl(u32 bus_num)
458{
459 struct slim_controller *ctrl;
460
461 mutex_lock(&board_lock);
462 list_for_each_entry(ctrl, &slim_ctrl_list, list)
463 if (bus_num == ctrl->nr) {
464 mutex_unlock(&board_lock);
465 return ctrl;
466 }
467 mutex_unlock(&board_lock);
468 return NULL;
469}
470EXPORT_SYMBOL(slim_busnum_to_ctrl);
471
472static int slim_register_controller(struct slim_controller *ctrl)
473{
474 int ret = 0;
475
476 /* Can't register until after driver model init */
477 if (WARN_ON(!slimbus_type.p)) {
478 ret = -EPROBE_DEFER;
479 goto out_list;
480 }
481
482 dev_set_name(&ctrl->dev, "sb-%d", ctrl->nr);
483 ctrl->dev.bus = &slimbus_type;
484 ctrl->dev.type = &slim_ctrl_type;
485 ctrl->num_dev = 0;
486 if (!ctrl->min_cg)
487 ctrl->min_cg = SLIM_MIN_CLK_GEAR;
488 if (!ctrl->max_cg)
489 ctrl->max_cg = SLIM_MAX_CLK_GEAR;
490 spin_lock_init(&ctrl->txn_lock);
491 mutex_init(&ctrl->m_ctrl);
492 mutex_init(&ctrl->sched.m_reconf);
493 ret = device_register(&ctrl->dev);
494 if (ret)
495 goto out_list;
496
497 dev_dbg(&ctrl->dev, "Bus [%s] registered:dev:%p\n", ctrl->name,
498 &ctrl->dev);
499
500 if (ctrl->nports) {
501 ctrl->ports = kcalloc(ctrl->nports, sizeof(struct slim_port),
502 GFP_KERNEL);
503 if (!ctrl->ports) {
504 ret = -ENOMEM;
505 goto err_port_failed;
506 }
507 }
508 if (ctrl->nchans) {
509 ctrl->chans = kcalloc(ctrl->nchans, sizeof(struct slim_ich),
510 GFP_KERNEL);
511 if (!ctrl->chans) {
512 ret = -ENOMEM;
513 goto err_chan_failed;
514 }
515
516 ctrl->sched.chc1 = kcalloc(ctrl->nchans,
517 sizeof(struct slim_ich *), GFP_KERNEL);
518 if (!ctrl->sched.chc1) {
519 kfree(ctrl->chans);
520 ret = -ENOMEM;
521 goto err_chan_failed;
522 }
523 ctrl->sched.chc3 = kcalloc(ctrl->nchans,
524 sizeof(struct slim_ich *), GFP_KERNEL);
525 if (!ctrl->sched.chc3) {
526 kfree(ctrl->sched.chc1);
527 kfree(ctrl->chans);
528 ret = -ENOMEM;
529 goto err_chan_failed;
530 }
531 }
532#ifdef DEBUG
533 ctrl->sched.slots = kzalloc(SLIM_SL_PER_SUPERFRAME, GFP_KERNEL);
534#endif
535 init_completion(&ctrl->pause_comp);
536
537 INIT_LIST_HEAD(&ctrl->devs);
538 ctrl->wq = create_singlethread_workqueue(dev_name(&ctrl->dev));
539 if (!ctrl->wq) {
540 ret = -ENOMEM; goto err_workq_failed; }
541
542 return 0;
543
544err_workq_failed:
545 kfree(ctrl->sched.chc3);
546 kfree(ctrl->sched.chc1);
547 kfree(ctrl->chans);
548err_chan_failed:
549 kfree(ctrl->ports);
550err_port_failed:
551 device_unregister(&ctrl->dev);
552out_list:
553 mutex_lock(&slim_lock);
554 idr_remove(&ctrl_idr, ctrl->nr);
555 mutex_unlock(&slim_lock);
556 return ret;
557}
558
559/* slim_remove_device: Remove the effect of slim_add_device() */
560void slim_remove_device(struct slim_device *sbdev)
561{
562 struct slim_controller *ctrl = sbdev->ctrl;
563
564 mutex_lock(&ctrl->m_ctrl);
565 list_del_init(&sbdev->dev_list);
566 mutex_unlock(&ctrl->m_ctrl);
567 device_unregister(&sbdev->dev);
568}
569EXPORT_SYMBOL(slim_remove_device);
570
571static void slim_ctrl_remove_device(struct slim_controller *ctrl,
572 struct slim_boardinfo *bi)
573{
574 if (ctrl->nr == bi->bus_num)
575 slim_remove_device(bi->slim_slave);
576}
577
578/*
579 * slim_del_controller: Controller tear-down.
580 * A controller added with the above API is torn down using this API.
581 */
582int slim_del_controller(struct slim_controller *ctrl)
583{
584 struct slim_controller *found;
585 struct sbi_boardinfo *bi;
586
587 /* First make sure that this bus was added */
588 mutex_lock(&slim_lock);
589 found = idr_find(&ctrl_idr, ctrl->nr);
590 mutex_unlock(&slim_lock);
591 if (found != ctrl)
592 return -EINVAL;
593
594 /* Remove all clients */
595 mutex_lock(&board_lock);
596 list_for_each_entry(bi, &board_list, list)
597 slim_ctrl_remove_device(ctrl, &bi->board_info);
598 mutex_unlock(&board_lock);
599
600 init_completion(&ctrl->dev_released);
601 device_unregister(&ctrl->dev);
602
603 wait_for_completion(&ctrl->dev_released);
604 list_del(&ctrl->list);
605 destroy_workqueue(ctrl->wq);
606 /* free bus id */
607 mutex_lock(&slim_lock);
608 idr_remove(&ctrl_idr, ctrl->nr);
609 mutex_unlock(&slim_lock);
610
611 kfree(ctrl->sched.chc1);
612 kfree(ctrl->sched.chc3);
613#ifdef DEBUG
614 kfree(ctrl->sched.slots);
615#endif
616 kfree(ctrl->chans);
617 kfree(ctrl->ports);
618
619 return 0;
620}
621EXPORT_SYMBOL(slim_del_controller);
622
623/*
624 * slim_add_numbered_controller: Controller bring-up.
625 * @ctrl: Controller to be registered.
626 * A controller is registered with the framework using this API. ctrl->nr is the
627 * desired number with which slimbus framework registers the controller.
628 * Function will return -EBUSY if the number is in use.
629 */
630int slim_add_numbered_controller(struct slim_controller *ctrl)
631{
632 int id;
633
634 mutex_lock(&slim_lock);
635 id = idr_alloc(&ctrl_idr, ctrl, ctrl->nr, ctrl->nr + 1, GFP_KERNEL);
636 mutex_unlock(&slim_lock);
637
638 if (id < 0)
639 return id;
640
641 ctrl->nr = id;
642 return slim_register_controller(ctrl);
643}
644EXPORT_SYMBOL(slim_add_numbered_controller);
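
/*
 * Example (illustrative sketch, not part of the original file): the core of a
 * controller probe. The "foo" names and field values are hypothetical; a real
 * controller also fills in xfer_msg, set_laddr and the port/channel ops
 * before registering.
 *
 *      static struct slim_controller foo_ctrl;
 *
 *      static int foo_ctrl_probe(struct platform_device *pdev)
 *      {
 *              int ret;
 *
 *              foo_ctrl.nr = 0;
 *              foo_ctrl.nchans = 256;
 *              foo_ctrl.nports = 8;
 *              foo_ctrl.dev.parent = &pdev->dev;
 *              ret = slim_add_numbered_controller(&foo_ctrl);
 *              if (!ret)
 *                      slim_ctrl_add_boarddevs(&foo_ctrl);
 *              return ret;
 *      }
 */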
645
646/*
647 * slim_report_absent: Controller calls this function when a device
648 * reports absent, OR when the device cannot be communicated with
649 * @sbdev: Device that cannot be reached, or sent report absent
650 */
651void slim_report_absent(struct slim_device *sbdev)
652{
653 struct slim_controller *ctrl;
654 int i;
655
656 if (!sbdev)
657 return;
658 ctrl = sbdev->ctrl;
659 if (!ctrl)
660 return;
661 /* invalidate logical addresses */
662 mutex_lock(&ctrl->m_ctrl);
663 for (i = 0; i < ctrl->num_dev; i++) {
664 if (sbdev->laddr == ctrl->addrt[i].laddr)
665 ctrl->addrt[i].valid = false;
666 }
667 mutex_unlock(&ctrl->m_ctrl);
668 sbdev->reported = false;
669 queue_work(ctrl->wq, &sbdev->wd);
670}
671EXPORT_SYMBOL(slim_report_absent);
672
673static int slim_remove_ch(struct slim_controller *ctrl, struct slim_ich *slc);
674/*
675 * slim_framer_booted: This function is called by controller after the active
676 * framer has booted (using the Bus Reset sequence, or after it has shut down and
677 * come back up). Components and devices on the bus may be in an undefined state;
678 * this function triggers their drivers to do whatever is needed
679 * to bring them back to the Reset state so that they can acquire sync, report
680 * present and be operational again.
681 */
682void slim_framer_booted(struct slim_controller *ctrl)
683{
684 struct slim_device *sbdev;
685 struct list_head *pos, *next;
686 int i;
687
688 if (!ctrl)
689 return;
690
691 /* Since framer has rebooted, reset all data channels */
692 mutex_lock(&ctrl->sched.m_reconf);
693 for (i = 0; i < ctrl->nchans; i++) {
694 struct slim_ich *slc = &ctrl->chans[i];
695
696 if (slc->state > SLIM_CH_DEFINED)
697 slim_remove_ch(ctrl, slc);
698 }
699 mutex_unlock(&ctrl->sched.m_reconf);
700 mutex_lock(&ctrl->m_ctrl);
701 list_for_each_safe(pos, next, &ctrl->devs) {
702 sbdev = list_entry(pos, struct slim_device, dev_list);
703 if (sbdev)
704 queue_work(ctrl->wq, &sbdev->device_reset);
705 }
706 mutex_unlock(&ctrl->m_ctrl);
707}
708EXPORT_SYMBOL(slim_framer_booted);
709
710/*
711 * slim_msg_response: Deliver Message response received from a device to the
712 * framework.
713 * @ctrl: Controller handle
714 * @reply: Reply received from the device
715 * @len: Length of the reply
716 * @tid: Transaction ID received with which framework can associate reply.
717 * Called by controller to inform framework about the response received.
718 * This helps in making the API asynchronous, and controller-driver doesn't need
719 * to manage 1 more table other than the one managed by framework mapping TID
720 * with buffers
721 */
722void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 len)
723{
724 int i;
725 unsigned long flags;
726 bool async;
727 struct slim_msg_txn *txn;
728
729 spin_lock_irqsave(&ctrl->txn_lock, flags);
730 txn = ctrl->txnt[tid];
731 if (txn == NULL || txn->rbuf == NULL) {
732 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
733 if (txn == NULL)
734 dev_err(&ctrl->dev, "Got response to invalid TID:%d, len:%d",
735 tid, len);
736 else
737 dev_err(&ctrl->dev, "Invalid client buffer passed\n");
738 return;
739 }
740 async = txn->async;
741 for (i = 0; i < len; i++)
742 txn->rbuf[i] = reply[i];
743 if (txn->comp)
744 complete(txn->comp);
745 ctrl->txnt[tid] = NULL;
746 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
747 if (async)
748 kfree(txn);
749}
750EXPORT_SYMBOL(slim_msg_response);
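
/*
 * Example (illustrative sketch, not part of the original file): how a
 * controller's RX path could hand a read reply back to the framework. The
 * packet layout and the way tid/len are parsed are hypothetical.
 *
 *      static void foo_ctrl_rx_reply(struct slim_controller *ctrl, u8 *pkt)
 *      {
 *              u8 tid = pkt[0];
 *              u8 len = pkt[1];
 *
 *              slim_msg_response(ctrl, &pkt[2], tid, len);
 *      }
 */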
751
752static int slim_processtxn(struct slim_controller *ctrl,
753 struct slim_msg_txn *txn, bool need_tid)
754{
755 u8 i = 0;
756 int ret = 0;
757 unsigned long flags;
758
759 if (need_tid) {
760 spin_lock_irqsave(&ctrl->txn_lock, flags);
761 for (i = 0; i < ctrl->last_tid; i++) {
762 if (ctrl->txnt[i] == NULL)
763 break;
764 }
765 if (i >= ctrl->last_tid) {
766 if (ctrl->last_tid == 255) {
767 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
768 return -ENOMEM;
769 }
770 ctrl->last_tid++;
771 }
772 ctrl->txnt[i] = txn;
773 txn->tid = i;
774 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
775 }
776
777 ret = ctrl->xfer_msg(ctrl, txn);
778 return ret;
779}
780
781static int ctrl_getlogical_addr(struct slim_controller *ctrl, const u8 *eaddr,
782 u8 e_len, u8 *entry)
783{
784 u8 i;
785
786 for (i = 0; i < ctrl->num_dev; i++) {
787 if (ctrl->addrt[i].valid &&
788 memcmp(ctrl->addrt[i].eaddr, eaddr, e_len) == 0) {
789 *entry = i;
790 return 0;
791 }
792 }
793 return -ENXIO;
794}
795
796/*
797 * slim_assign_laddr: Assign logical address to a device enumerated.
798 * @ctrl: Controller with which device is enumerated.
799 * @e_addr: 6-byte elemental address of the device.
800 * @e_len: buffer length for e_addr
801 * @laddr: Return logical address (if valid flag is false)
802 * @valid: true if laddr holds a valid address that controller wants to
803 * set for this enumeration address. Otherwise framework sets index into
804 * address table as logical address.
805 * Called by controller in response to REPORT_PRESENT. Framework will assign
806 * a logical address to this enumeration address.
807 * Function returns -EXFULL to indicate that all logical addresses are already
808 * taken.
809 */
810int slim_assign_laddr(struct slim_controller *ctrl, const u8 *e_addr,
811 u8 e_len, u8 *laddr, bool valid)
812{
813 int ret;
814 u8 i = 0;
815 bool exists = false;
816 struct slim_device *sbdev;
817 struct list_head *pos, *next;
818 void *new_addrt = NULL;
819
820 mutex_lock(&ctrl->m_ctrl);
821 /* already assigned */
822 if (ctrl_getlogical_addr(ctrl, e_addr, e_len, &i) == 0) {
823 *laddr = ctrl->addrt[i].laddr;
824 exists = true;
825 } else {
826 if (ctrl->num_dev >= 254) {
827 ret = -EXFULL;
828 goto ret_assigned_laddr;
829 }
830 for (i = 0; i < ctrl->num_dev; i++) {
831 if (ctrl->addrt[i].valid == false)
832 break;
833 }
834 if (i == ctrl->num_dev) {
835 new_addrt = krealloc(ctrl->addrt,
836 (ctrl->num_dev + 1) *
837 sizeof(struct slim_addrt),
838 GFP_KERNEL);
839 if (!new_addrt) {
840 ret = -ENOMEM;
841 goto ret_assigned_laddr;
842 }
843 ctrl->addrt = new_addrt;
844 ctrl->num_dev++;
845 }
846 memcpy(ctrl->addrt[i].eaddr, e_addr, e_len);
847 ctrl->addrt[i].valid = true;
848 /* Preferred address is index into table */
849 if (!valid)
850 *laddr = i;
851 }
852
853 ret = ctrl->set_laddr(ctrl, (const u8 *)&ctrl->addrt[i].eaddr, 6,
854 *laddr);
855 if (ret) {
856 ctrl->addrt[i].valid = false;
857 goto ret_assigned_laddr;
858 }
859 ctrl->addrt[i].laddr = *laddr;
860
861 dev_dbg(&ctrl->dev, "setting slimbus l-addr:%x\n", *laddr);
862ret_assigned_laddr:
863 mutex_unlock(&ctrl->m_ctrl);
864 if (exists || ret)
865 return ret;
866
867 pr_info("slimbus:%d laddr:0x%x, EAPC:0x%x:0x%x", ctrl->nr, *laddr,
868 e_addr[1], e_addr[2]);
869 mutex_lock(&ctrl->m_ctrl);
870 list_for_each_safe(pos, next, &ctrl->devs) {
871 sbdev = list_entry(pos, struct slim_device, dev_list);
872 if (memcmp(sbdev->e_addr, e_addr, 6) == 0) {
873 struct slim_driver *sbdrv;
874
875 sbdev->laddr = *laddr;
876 sbdev->reported = true;
877 if (sbdev->dev.driver) {
878 sbdrv = to_slim_driver(sbdev->dev.driver);
879 if (sbdrv->device_up)
880 queue_work(ctrl->wq, &sbdev->wd);
881 }
882 break;
883 }
884 }
885 mutex_unlock(&ctrl->m_ctrl);
886 return 0;
887}
888EXPORT_SYMBOL(slim_assign_laddr);
889
890/*
891 * slim_get_logical_addr: Return the logical address of a slimbus device.
892 * @sb: client handle requesting the address.
893 * @e_addr: Elemental address of the device.
894 * @e_len: Length of e_addr
895 * @laddr: output buffer to store the address
896 * context: can sleep
897 * -EINVAL is returned in case of invalid parameters, and -ENXIO is returned if
898 * the device with this elemental address is not found.
899 */
900int slim_get_logical_addr(struct slim_device *sb, const u8 *e_addr,
901 u8 e_len, u8 *laddr)
902{
903 int ret = 0;
904 u8 entry;
905 struct slim_controller *ctrl = sb->ctrl;
906
907 if (!ctrl || !laddr || !e_addr || e_len != 6)
908 return -EINVAL;
909 mutex_lock(&ctrl->m_ctrl);
910 ret = ctrl_getlogical_addr(ctrl, e_addr, e_len, &entry);
911 if (!ret)
912 *laddr = ctrl->addrt[entry].laddr;
913 mutex_unlock(&ctrl->m_ctrl);
914 if (ret == -ENXIO && ctrl->get_laddr) {
915 ret = ctrl->get_laddr(ctrl, e_addr, e_len, laddr);
916 if (!ret)
917 ret = slim_assign_laddr(ctrl, e_addr, e_len, laddr,
918 true);
919 }
920 return ret;
921}
922EXPORT_SYMBOL(slim_get_logical_addr);
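
/*
 * Example (illustrative sketch, not part of the original file): a client
 * resolving its own logical address after being told the device is up. The
 * "foo" callback name is hypothetical; the enumeration address comes from
 * the client's own slim_device.
 *
 *      static void foo_device_up(struct slim_device *sbdev)
 *      {
 *              u8 laddr;
 *
 *              if (!slim_get_logical_addr(sbdev, sbdev->e_addr, 6, &laddr))
 *                      dev_info(&sbdev->dev, "logical address 0x%x\n", laddr);
 *      }
 */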
923
924static int slim_ele_access_sanity(struct slim_ele_access *msg, int oper,
925 u8 *rbuf, const u8 *wbuf, u8 len)
926{
927 if (!msg || msg->num_bytes > 16 || msg->start_offset + len > 0xC00)
928 return -EINVAL;
929 switch (oper) {
930 case SLIM_MSG_MC_REQUEST_VALUE:
931 case SLIM_MSG_MC_REQUEST_INFORMATION:
932 if (rbuf == NULL)
933 return -EINVAL;
934 return 0;
935 case SLIM_MSG_MC_CHANGE_VALUE:
936 case SLIM_MSG_MC_CLEAR_INFORMATION:
937 if (wbuf == NULL)
938 return -EINVAL;
939 return 0;
940 case SLIM_MSG_MC_REQUEST_CHANGE_VALUE:
941 case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION:
942 if (rbuf == NULL || wbuf == NULL)
943 return -EINVAL;
944 return 0;
945 default:
946 return -EINVAL;
947 }
948}
949
950static u16 slim_slicecodefromsize(u32 req)
951{
952 u8 codetosize[8] = {1, 2, 3, 4, 6, 8, 12, 16};
953
954 if (req >= 8)
955 return 0;
956 else
957 return codetosize[req];
958}
959
960static u16 slim_slicesize(u32 code)
961{
962 u8 sizetocode[16] = {0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7};
963
964 if (code == 0)
965 code = 1;
966 if (code > 16)
967 code = 16;
968 return sizetocode[code - 1];
969}
970
971
972/* Message APIs Unicast message APIs used by slimbus slave drivers */
973
974/*
975 * Message API access routines.
976 * @sb: client handle requesting elemental message reads, writes.
977 * @msg: Input structure for start-offset, number of bytes to read.
978 * @rbuf: data buffer to be filled with values read.
979 * @len: data buffer size
980 * @wbuf: data buffer containing value/information to be written
981 * context: can sleep
982 * Returns:
983 * -EINVAL: Invalid parameters
984 * -ETIMEDOUT: If controller could not complete the request. This may happen if
985 * the bus lines are not clocked, controller is not powered-on, slave with
986 * given address is not enumerated/responding.
987 */
988int slim_request_val_element(struct slim_device *sb,
989 struct slim_ele_access *msg, u8 *buf, u8 len)
990{
991 struct slim_controller *ctrl = sb->ctrl;
992
993 if (!ctrl)
994 return -EINVAL;
995 return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_VALUE, buf,
996 NULL, len);
997}
998EXPORT_SYMBOL(slim_request_val_element);
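
/*
 * Example (illustrative sketch, not part of the original file): a blocking
 * 2-byte value-element read at a hypothetical register offset; leaving
 * msg.comp as NULL makes the call synchronous. "sb" is the client's
 * slim_device handle.
 *
 *      struct slim_ele_access msg = {
 *              .start_offset = 0x800,
 *              .num_bytes    = 2,
 *              .comp         = NULL,
 *      };
 *      u8 rbuf[2];
 *      int ret;
 *
 *      ret = slim_request_val_element(sb, &msg, rbuf, 2);
 */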
999
1000int slim_request_inf_element(struct slim_device *sb,
1001 struct slim_ele_access *msg, u8 *buf, u8 len)
1002{
1003 struct slim_controller *ctrl = sb->ctrl;
1004
1005 if (!ctrl)
1006 return -EINVAL;
1007 return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_INFORMATION,
1008 buf, NULL, len);
1009}
1010EXPORT_SYMBOL(slim_request_inf_element);
1011
1012int slim_change_val_element(struct slim_device *sb, struct slim_ele_access *msg,
1013 const u8 *buf, u8 len)
1014{
1015 struct slim_controller *ctrl = sb->ctrl;
1016
1017 if (!ctrl)
1018 return -EINVAL;
1019 return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CHANGE_VALUE, NULL, buf,
1020 len);
1021}
1022EXPORT_SYMBOL(slim_change_val_element);
1023
1024int slim_clear_inf_element(struct slim_device *sb, struct slim_ele_access *msg,
1025 u8 *buf, u8 len)
1026{
1027 struct slim_controller *ctrl = sb->ctrl;
1028
1029 if (!ctrl)
1030 return -EINVAL;
1031 return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CLEAR_INFORMATION, NULL,
1032 buf, len);
1033}
1034EXPORT_SYMBOL(slim_clear_inf_element);
1035
1036int slim_request_change_val_element(struct slim_device *sb,
1037 struct slim_ele_access *msg, u8 *rbuf,
1038 const u8 *wbuf, u8 len)
1039{
1040 struct slim_controller *ctrl = sb->ctrl;
1041
1042 if (!ctrl)
1043 return -EINVAL;
1044 return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_CHANGE_VALUE,
1045 rbuf, wbuf, len);
1046}
1047EXPORT_SYMBOL(slim_request_change_val_element);
1048
1049int slim_request_clear_inf_element(struct slim_device *sb,
1050 struct slim_ele_access *msg, u8 *rbuf,
1051 const u8 *wbuf, u8 len)
1052{
1053 struct slim_controller *ctrl = sb->ctrl;
1054
1055 if (!ctrl)
1056 return -EINVAL;
1057 return slim_xfer_msg(ctrl, sb, msg,
1058 SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION,
1059 rbuf, wbuf, len);
1060}
1061EXPORT_SYMBOL(slim_request_clear_inf_element);
1062
1063/*
1064 * Broadcast message API:
1065 * call this API directly with sbdev = NULL.
1066 * For broadcast reads, make sure that buffers are big-enough to incorporate
1067 * replies from all logical addresses.
1068 * All controllers may not support broadcast
1069 */
1070int slim_xfer_msg(struct slim_controller *ctrl, struct slim_device *sbdev,
1071 struct slim_ele_access *msg, u16 mc, u8 *rbuf,
1072 const u8 *wbuf, u8 len)
1073{
1074 DECLARE_COMPLETION_ONSTACK(complete);
1075 DEFINE_SLIM_LDEST_TXN(txn_stack, mc, len, 6, rbuf, wbuf, sbdev->laddr);
1076 struct slim_msg_txn *txn;
1077 int ret;
1078 u16 sl, cur;
1079
1080 if (msg->comp && rbuf) {
1081 txn = kmalloc(sizeof(struct slim_msg_txn),
1082 GFP_KERNEL);
1083 if (!txn)
1084 return -ENOMEM;
1085 *txn = txn_stack;
1086 txn->async = true;
1087 txn->comp = msg->comp;
1088 } else {
1089 txn = &txn_stack;
1090 if (rbuf)
1091 txn->comp = &complete;
1092 }
1093
1094 ret = slim_ele_access_sanity(msg, mc, rbuf, wbuf, len);
1095 if (ret)
1096 goto xfer_err;
1097
1098 sl = slim_slicesize(len);
1099 dev_dbg(&ctrl->dev, "SB xfer msg:os:%x, len:%d, MC:%x, sl:%x\n",
1100 msg->start_offset, len, mc, sl);
1101
1102 cur = slim_slicecodefromsize(sl);
1103 txn->ec = ((sl | (1 << 3)) | ((msg->start_offset & 0xFFF) << 4));
1104
1105 if (wbuf)
1106 txn->rl += len;
1107 if (rbuf) {
1108 unsigned long flags;
1109
1110 txn->rl++;
1111 ret = slim_processtxn(ctrl, txn, true);
1112
1113 /* sync read */
1114 if (!ret && !msg->comp) {
1115 ret = wait_for_completion_timeout(&complete, HZ);
1116 if (!ret) {
1117 dev_err(&ctrl->dev, "slimbus Read timed out");
1118 spin_lock_irqsave(&ctrl->txn_lock, flags);
1119 /* Invalidate the transaction */
1120 ctrl->txnt[txn->tid] = NULL;
1121 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
1122 ret = -ETIMEDOUT;
1123 } else
1124 ret = 0;
1125 } else if (ret < 0 && !msg->comp) {
1126 dev_err(&ctrl->dev, "slimbus Read error");
1127 spin_lock_irqsave(&ctrl->txn_lock, flags);
1128 /* Invalidate the transaction */
1129 ctrl->txnt[txn->tid] = NULL;
1130 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
1131 }
1132
1133 } else
1134 ret = slim_processtxn(ctrl, txn, false);
1135xfer_err:
1136 return ret;
1137}
1138EXPORT_SYMBOL(slim_xfer_msg);
1139
1140/*
1141 * User message:
1142 * slim_user_msg: Send user message that is interpreted by destination device
1143 * @sb: Client handle sending the message
1144 * @la: Destination device for this user message
1145 * @mt: Message Type (Source-referred, or Destination-referred)
1146 * @mc: Message Code
1147 * @msg: Message structure (start offset, number of bytes) to be sent
1148 * @buf: data buffer to be sent
1149 * @len: data buffer size in bytes
1150 */
1151int slim_user_msg(struct slim_device *sb, u8 la, u8 mt, u8 mc,
1152 struct slim_ele_access *msg, u8 *buf, u8 len)
1153{
1154 if (!sb || !sb->ctrl || !msg || mt == SLIM_MSG_MT_CORE)
1155 return -EINVAL;
1156 if (!sb->ctrl->xfer_user_msg)
1157 return -EPROTONOSUPPORT;
1158 return sb->ctrl->xfer_user_msg(sb->ctrl, la, mt, mc, msg, buf, len);
1159}
1160EXPORT_SYMBOL(slim_user_msg);
1161
1162/*
1163 * Queue bulk of message writes:
1164 * slim_bulk_msg_write: Write bulk of messages (e.g. downloading FW)
1165 * @sb: Client handle sending these messages
1166 * @la: Destination device for these messages
1167 * @mt: Message Type
1168 * @mc: Message Code
1169 * @msgs: List of messages to be written in bulk
1170 * @n: Number of messages in the list
1171 * @cb: Callback if client needs this to be non-blocking
1172 * @ctx: Context for this callback
1173 * If supported by controller, this message list will be sent in bulk to the HW
1174 * If the client specifies this to be non-blocking, the callback will be
1175 * called from atomic context.
1176 */
1177int slim_bulk_msg_write(struct slim_device *sb, u8 mt, u8 mc,
1178 struct slim_val_inf msgs[], int n,
1179 int (*comp_cb)(void *ctx, int err), void *ctx)
1180{
1181 int i, ret = 0;
1182
1183 if (!sb || !sb->ctrl || !msgs || n <= 0)
1184 return -EINVAL;
1185 if (!sb->ctrl->xfer_bulk_wr) {
1186 pr_warn("controller does not support bulk WR, serializing");
1187 for (i = 0; i < n; i++) {
1188 struct slim_ele_access ele;
1189
1190 ele.comp = NULL;
1191 ele.start_offset = msgs[i].start_offset;
1192 ele.num_bytes = msgs[i].num_bytes;
1193 ret = slim_xfer_msg(sb->ctrl, sb, &ele, mc,
1194 msgs[i].rbuf, msgs[i].wbuf,
1195 ele.num_bytes);
1196 if (ret)
1197 return ret;
1198 }
1199 return ret;
1200 }
1201 return sb->ctrl->xfer_bulk_wr(sb->ctrl, sb->laddr, mt, mc, msgs, n,
1202 comp_cb, ctx);
1203}
1204EXPORT_SYMBOL(slim_bulk_msg_write);
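
/*
 * Example (illustrative sketch, not part of the original file): writing two
 * value elements in one bulk call (e.g. during a firmware download). Offsets
 * and payloads are hypothetical; per the description above, a NULL callback
 * requests blocking behaviour.
 *
 *      u8 w0[4] = {0x01, 0x02, 0x03, 0x04};
 *      u8 w1[4] = {0x05, 0x06, 0x07, 0x08};
 *      struct slim_val_inf msgs[] = {
 *              { .start_offset = 0x400, .num_bytes = 4, .wbuf = w0 },
 *              { .start_offset = 0x404, .num_bytes = 4, .wbuf = w1 },
 *      };
 *      int ret;
 *
 *      ret = slim_bulk_msg_write(sb, SLIM_MSG_MT_CORE,
 *                                SLIM_MSG_MC_CHANGE_VALUE, msgs,
 *                                ARRAY_SIZE(msgs), NULL, NULL);
 */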
1205
1206/*
1207 * slim_alloc_mgrports: Allocate port on manager side.
1208 * @sb: device/client handle.
1209 * @req: Port request type.
1210 * @nports: Number of ports requested
1211 * @rh: output buffer to store the port handles
1212 * @hsz: size of buffer storing handles
1213 * context: can sleep
1214 * This port will be typically used by SW. e.g. client driver wants to receive
1215 * some data from audio codec HW using a data channel.
1216 * Port allocated using this API will be used to receive the data.
1217 * If half-duplex ports are requested, two adjacent ports are allocated for
1218 * 1 half-duplex port. So the handle-buffer size should be twice the number
1219 * of half-duplex ports to be allocated.
1220 * -EDQUOT is returned if all ports are in use.
1221 */
1222int slim_alloc_mgrports(struct slim_device *sb, enum slim_port_req req,
1223 int nports, u32 *rh, int hsz)
1224{
1225 int i, j;
1226 int ret = -EINVAL;
1227 int nphysp = nports;
1228 struct slim_controller *ctrl = sb->ctrl;
1229
1230 if (!rh || !ctrl)
1231 return -EINVAL;
1232 if (req == SLIM_REQ_HALF_DUP)
1233 nphysp *= 2;
1234 if (hsz/sizeof(u32) < nphysp)
1235 return -EINVAL;
1236 mutex_lock(&ctrl->m_ctrl);
1237
1238 for (i = 0; i < ctrl->nports; i++) {
1239 bool multiok = true;
1240
1241 if (ctrl->ports[i].state != SLIM_P_FREE)
1242 continue;
1243 /* Start half duplex channel at even port */
1244 if (req == SLIM_REQ_HALF_DUP && (i % 2))
1245 continue;
1246 /* Allocate ports contiguously for multi-ch */
1247 if (ctrl->nports < (i + nphysp)) {
1248 i = ctrl->nports;
1249 break;
1250 }
1251 if (req == SLIM_REQ_MULTI_CH) {
1252 multiok = true;
1253 for (j = i; j < i + nphysp; j++) {
1254 if (ctrl->ports[j].state != SLIM_P_FREE) {
1255 multiok = false;
1256 break;
1257 }
1258 }
1259 if (!multiok)
1260 continue;
1261 }
1262 break;
1263 }
1264 if (i >= ctrl->nports) {
1265 ret = -EDQUOT;
1266 goto alloc_err;
1267 }
1268 ret = 0;
1269 for (j = i; j < i + nphysp; j++) {
1270 ctrl->ports[j].state = SLIM_P_UNCFG;
1271 ctrl->ports[j].req = req;
1272 if (req == SLIM_REQ_HALF_DUP && (j % 2))
1273 ctrl->ports[j].flow = SLIM_SINK;
1274 else
1275 ctrl->ports[j].flow = SLIM_SRC;
1276 if (ctrl->alloc_port)
1277 ret = ctrl->alloc_port(ctrl, j);
1278 if (ret) {
1279 for (; j >= i; j--)
1280 ctrl->ports[j].state = SLIM_P_FREE;
1281 goto alloc_err;
1282 }
1283 *rh++ = SLIM_PORT_HDL(SLIM_LA_MANAGER, 0, j);
1284 }
1285alloc_err:
1286 mutex_unlock(&ctrl->m_ctrl);
1287 return ret;
1288}
1289EXPORT_SYMBOL(slim_alloc_mgrports);
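
/*
 * Example (illustrative sketch, not part of the original file): allocating
 * two contiguous manager ports for a multi-channel use case. The handles
 * returned in mgrph[] feed slim_connect_src()/slim_connect_sink() later.
 *
 *      u32 mgrph[2];
 *      int ret;
 *
 *      ret = slim_alloc_mgrports(sb, SLIM_REQ_MULTI_CH, 2,
 *                                mgrph, sizeof(mgrph));
 */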
1290
1291/* Deallocate the port(s) allocated using the API above */
1292int slim_dealloc_mgrports(struct slim_device *sb, u32 *hdl, int nports)
1293{
1294 int i;
1295 struct slim_controller *ctrl = sb->ctrl;
1296
1297 if (!ctrl || !hdl)
1298 return -EINVAL;
1299
1300 mutex_lock(&ctrl->m_ctrl);
1301
1302 for (i = 0; i < nports; i++) {
1303 u8 pn;
1304
1305 pn = SLIM_HDL_TO_PORT(hdl[i]);
1306
1307 if (pn >= ctrl->nports || ctrl->ports[pn].state == SLIM_P_CFG) {
1308 int j, ret;
1309
1310 if (pn >= ctrl->nports) {
1311 dev_err(&ctrl->dev, "invalid port number");
1312 ret = -EINVAL;
1313 } else {
1314 dev_err(&ctrl->dev,
1315 "Can't dealloc connected port:%d", i);
1316 ret = -EISCONN;
1317 }
1318 for (j = i - 1; j >= 0; j--) {
1319 pn = SLIM_HDL_TO_PORT(hdl[j]);
1320 ctrl->ports[pn].state = SLIM_P_UNCFG;
1321 }
1322 mutex_unlock(&ctrl->m_ctrl);
1323 return ret;
1324 }
1325 if (ctrl->dealloc_port)
1326 ctrl->dealloc_port(ctrl, pn);
1327 ctrl->ports[pn].state = SLIM_P_FREE;
1328 }
1329 mutex_unlock(&ctrl->m_ctrl);
1330 return 0;
1331}
1332EXPORT_SYMBOL(slim_dealloc_mgrports);
1333
1334/*
1335 * slim_config_mgrports: Configure manager side ports
1336 * @sb: device/client handle.
1337 * @ph: array of port handles for which this configuration is valid
1338 * @nports: Number of ports in ph
1339 * @cfg: configuration requested for port(s)
1340 * Configure port settings if they are different than the default ones.
1341 * Returns success if the config could be applied. Returns -EISCONN if the
1342 * port is in use
1343 */
1344int slim_config_mgrports(struct slim_device *sb, u32 *ph, int nports,
1345 struct slim_port_cfg *cfg)
1346{
1347 int i;
1348 struct slim_controller *ctrl;
1349
1350 if (!sb || !ph || !nports || !sb->ctrl || !cfg)
1351 return -EINVAL;
1352
1353 ctrl = sb->ctrl;
1354 mutex_lock(&ctrl->sched.m_reconf);
1355 for (i = 0; i < nports; i++) {
1356 u8 pn = SLIM_HDL_TO_PORT(ph[i]);
1357
1358 if (ctrl->ports[pn].state == SLIM_P_CFG) {
1359 mutex_unlock(&ctrl->sched.m_reconf); return -EISCONN; }
1360 ctrl->ports[pn].cfg = *cfg;
1361 }
1362 mutex_unlock(&ctrl->sched.m_reconf);
1363 return 0;
1364}
1365EXPORT_SYMBOL(slim_config_mgrports);
1366
1367/*
1368 * slim_get_slaveport: Get slave port handle
1369 * @la: slave device logical address.
1370 * @idx: port index at slave
1371 * @rh: return handle
1372 * @flw: Flow type (source or destination)
1373 * This API only returns a slave port's representation as expected by slimbus
1374 * driver. This port is not managed by the slimbus driver. Caller is expected
1375 * to have visibility of this port since it's a device-port.
1376 */
1377int slim_get_slaveport(u8 la, int idx, u32 *rh, enum slim_port_flow flw)
1378{
1379 if (rh == NULL)
1380 return -EINVAL;
1381 *rh = SLIM_PORT_HDL(la, flw, idx);
1382 return 0;
1383}
1384EXPORT_SYMBOL(slim_get_slaveport);
1385
1386static int connect_port_ch(struct slim_controller *ctrl, u8 ch, u32 ph,
1387 enum slim_port_flow flow)
1388{
1389 int ret;
1390 u8 buf[2];
1391 u32 la = SLIM_HDL_TO_LA(ph);
1392 u8 pn = (u8)SLIM_HDL_TO_PORT(ph);
1393 DEFINE_SLIM_LDEST_TXN(txn, 0, 2, 6, NULL, buf, la);
1394
1395 if (flow == SLIM_SRC)
1396 txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
1397 else
1398 txn.mc = SLIM_MSG_MC_CONNECT_SINK;
1399 buf[0] = pn;
1400 buf[1] = ctrl->chans[ch].chan;
1401 if (la == SLIM_LA_MANAGER) {
1402 ctrl->ports[pn].flow = flow;
1403 ctrl->ports[pn].ch = &ctrl->chans[ch].prop;
1404 }
1405 ret = slim_processtxn(ctrl, &txn, false);
1406 if (!ret && la == SLIM_LA_MANAGER)
1407 ctrl->ports[pn].state = SLIM_P_CFG;
1408 return ret;
1409}
1410
1411static int disconnect_port_ch(struct slim_controller *ctrl, u32 ph)
1412{
1413 int ret;
1414 u32 la = SLIM_HDL_TO_LA(ph);
1415 u8 pn = (u8)SLIM_HDL_TO_PORT(ph);
1416 DEFINE_SLIM_LDEST_TXN(txn, 0, 1, 5, NULL, &pn, la);
1417
1418 txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
1419 ret = slim_processtxn(ctrl, &txn, false);
1420 if (ret)
1421 return ret;
1422 if (la == SLIM_LA_MANAGER) {
1423 ctrl->ports[pn].state = SLIM_P_UNCFG;
1424 ctrl->ports[pn].cfg.watermark = 0;
1425 ctrl->ports[pn].cfg.port_opts = 0;
1426 ctrl->ports[pn].ch = NULL;
1427 }
1428 return 0;
1429}
1430
1431/*
1432 * slim_connect_src: Connect source port to channel.
1433 * @sb: client handle
1434 * @srch: source handle to be connected to this channel
1435 * @chanh: Channel with which the ports need to be associated with.
1436 * Per slimbus specification, a channel may have 1 source port.
1437 * Channel specified in chanh needs to be allocated first.
1438 * Returns -EALREADY if source is already configured for this channel.
1439 * Returns -ENOTCONN if channel is not allocated
1440 * Returns -EINVAL if invalid direction is specified for non-manager port,
1441 * or if the manager side port number is out of bounds, or in incorrect state
1442 */
1443int slim_connect_src(struct slim_device *sb, u32 srch, u16 chanh)
1444{
1445 struct slim_controller *ctrl = sb->ctrl;
1446 int ret;
1447 u8 chan = SLIM_HDL_TO_CHIDX(chanh);
1448 struct slim_ich *slc = &ctrl->chans[chan];
1449 enum slim_port_flow flow = SLIM_HDL_TO_FLOW(srch);
1450 u8 la = SLIM_HDL_TO_LA(srch);
1451 u8 pn = SLIM_HDL_TO_PORT(srch);
1452
1453 /* manager ports don't have direction when they are allocated */
1454 if (la != SLIM_LA_MANAGER && flow != SLIM_SRC)
1455 return -EINVAL;
1456
1457 mutex_lock(&ctrl->sched.m_reconf);
1458
1459 if (la == SLIM_LA_MANAGER) {
1460 if (pn >= ctrl->nports ||
1461 ctrl->ports[pn].state != SLIM_P_UNCFG) {
1462 ret = -EINVAL;
1463 goto connect_src_err;
1464 }
1465 }
1466
1467 if (slc->state == SLIM_CH_FREE) {
1468 ret = -ENOTCONN;
1469 goto connect_src_err;
1470 }
1471 /*
1472 * Once channel is removed, its ports can be considered disconnected
1473 * So its ports can be reassigned. Source port is zeroed
1474 * when channel is deallocated.
1475 */
1476 if (slc->srch) {
1477 ret = -EALREADY;
1478 goto connect_src_err;
1479 }
1480 ret = connect_port_ch(ctrl, chan, srch, SLIM_SRC);
1481
1482 if (!ret)
1483 slc->srch = srch;
1484
1485connect_src_err:
1486 mutex_unlock(&ctrl->sched.m_reconf);
1487 return ret;
1488}
1489EXPORT_SYMBOL(slim_connect_src);
1490
1491/*
1492 * slim_connect_sink: Connect sink port(s) to channel.
1493 * @sb: client handle
1494 * @sinkh: sink handle(s) to be connected to this channel
1495 * @nsink: number of sinks
1496 * @chanh: Channel with which the ports need to be associated with.
1497 * Per slimbus specification, a channel may have multiple sink-ports.
1498 * Channel specified in chanh needs to be allocated first.
1499 * Returns -EALREADY if sink is already configured for this channel.
1500 * Returns -ENOTCONN if channel is not allocated
1501 * Returns -EINVAL if invalid parameters are passed, or invalid direction is
1502 * specified for non-manager port, or if the manager side port number is out of
1503 * bounds, or in incorrect state
1504 */
1505int slim_connect_sink(struct slim_device *sb, u32 *sinkh, int nsink, u16 chanh)
1506{
1507 struct slim_controller *ctrl = sb->ctrl;
1508 int j;
1509 int ret = 0;
1510 u8 chan = SLIM_HDL_TO_CHIDX(chanh);
1511 struct slim_ich *slc = &ctrl->chans[chan];
1512 void *new_sinkh = NULL;
1513
1514 if (!sinkh || !nsink)
1515 return -EINVAL;
1516
1517 mutex_lock(&ctrl->sched.m_reconf);
1518
1519 /*
1520 * Once channel is removed, its ports can be considered disconnected
1521 * So its ports can be reassigned. Sink ports are freed when channel
1522 * is deallocated.
1523 */
1524 if (slc->state == SLIM_CH_FREE) {
1525 ret = -ENOTCONN;
1526 goto connect_sink_err;
1527 }
1528
1529 for (j = 0; j < nsink; j++) {
1530 enum slim_port_flow flow = SLIM_HDL_TO_FLOW(sinkh[j]);
1531 u8 la = SLIM_HDL_TO_LA(sinkh[j]);
1532 u8 pn = SLIM_HDL_TO_PORT(sinkh[j]);
1533
1534 if (la != SLIM_LA_MANAGER && flow != SLIM_SINK)
1535 ret = -EINVAL;
1536 else if (la == SLIM_LA_MANAGER &&
1537 (pn >= ctrl->nports ||
1538 ctrl->ports[pn].state != SLIM_P_UNCFG))
1539 ret = -EINVAL;
1540 else
1541 ret = connect_port_ch(ctrl, chan, sinkh[j], SLIM_SINK);
1542
1543 if (ret) {
1544 for (j = j - 1; j >= 0; j--)
1545 disconnect_port_ch(ctrl, sinkh[j]);
1546 goto connect_sink_err;
1547 }
1548 }
1549
1550 new_sinkh = krealloc(slc->sinkh, (sizeof(u32) * (slc->nsink + nsink)),
1551 GFP_KERNEL);
1552 if (!new_sinkh) {
1553 ret = -ENOMEM;
1554 for (j = 0; j < nsink; j++)
1555 disconnect_port_ch(ctrl, sinkh[j]);
1556 goto connect_sink_err;
1557 }
1558
1559 slc->sinkh = new_sinkh;
1560 memcpy(slc->sinkh + slc->nsink, sinkh, (sizeof(u32) * nsink));
1561 slc->nsink += nsink;
1562
1563connect_sink_err:
1564 mutex_unlock(&ctrl->sched.m_reconf);
1565 return ret;
1566}
1567EXPORT_SYMBOL(slim_connect_sink);
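
/*
 * Example (illustrative sketch, not part of the original file): wiring one
 * data channel from a manager port to a slave port. Slave port index 0 is
 * hypothetical, and mgrph[0] is assumed to come from slim_alloc_mgrports();
 * the channel still has to be defined and activated through the
 * channel-control APIs afterwards.
 *
 *      u16 chanh;
 *      u32 sinkph;
 *      int ret;
 *
 *      ret = slim_alloc_ch(sb, &chanh);
 *      if (!ret)
 *              ret = slim_get_slaveport(sb->laddr, 0, &sinkph, SLIM_SINK);
 *      if (!ret)
 *              ret = slim_connect_src(sb, mgrph[0], chanh);
 *      if (!ret)
 *              ret = slim_connect_sink(sb, &sinkph, 1, chanh);
 */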
1568
1569/*
1570 * slim_disconnect_ports: Disconnect port(s) from channel
1571 * @sb: client handle
1572 * @ph: ports to be disconnected
1573 * @nph: number of ports.
1574 * Disconnects ports from a channel.
1575 */
1576int slim_disconnect_ports(struct slim_device *sb, u32 *ph, int nph)
1577{
1578 struct slim_controller *ctrl = sb->ctrl;
1579 int i;
1580
1581 mutex_lock(&ctrl->sched.m_reconf);
1582
1583 for (i = 0; i < nph; i++)
1584 disconnect_port_ch(ctrl, ph[i]);
1585 mutex_unlock(&ctrl->sched.m_reconf);
1586 return 0;
1587}
1588EXPORT_SYMBOL(slim_disconnect_ports);
1589
1590/*
1591 * slim_port_xfer: Schedule buffer to be transferred/received using port-handle.
1592 * @sb: client handle
1593 * @ph: port-handle
1594 * @iobuf: buffer to be transferred or populated
1595 * @len: buffer size.
1596 * @comp: completion signal to indicate transfer done or error.
1597 * context: can sleep
1598 * Returns number of bytes transferred/received if used synchronously.
1599 * Will return 0 if used asynchronously.
1600 * Client will call slim_port_get_xfer_status to get error and/or number of
1601 * bytes transferred if used asynchronously.
1602 */
1603int slim_port_xfer(struct slim_device *sb, u32 ph, void *buf, u32 len,
1604 struct completion *comp)
1605{
1606 struct slim_controller *ctrl = sb->ctrl;
1607 u8 pn = SLIM_HDL_TO_PORT(ph);
1608
1609 dev_dbg(&ctrl->dev, "port xfer: num:%d", pn);
1610 return ctrl->port_xfer(ctrl, pn, buf, len, comp);
1611}
1612EXPORT_SYMBOL(slim_port_xfer);
1613
1614/*
1615 * slim_port_get_xfer_status: Poll for port transfers, or get transfer status
1616 * after completion is done.
1617 * @sb: client handle
1618 * @ph: port-handle
1619 * @done_buf: return pointer (iobuf from slim_port_xfer) which is processed.
1620 * @done_len: Number of bytes transferred.
1621 * This can be called when the port_xfer completion is signalled.
1622 * The API will return port transfer error (underflow/overflow/disconnect)
1623 * and/or done_len will reflect number of bytes transferred. Note that
1624 * done_len may be valid even if port error (overflow/underflow) has happened.
1625 * e.g. If the transfer was scheduled with a few bytes to be transferred and
1626 * client has not supplied more data to be transferred, done_len will indicate
1627 * number of bytes transferred with underflow error. To avoid frequent underflow
1628 * errors, multiple transfers can be queued (e.g. ping-pong buffers) so that
1629 * channel has data to be transferred even if client is not ready to transfer
1630 * data all the time. done_buf will indicate address of the last buffer
1631 * processed from the multiple transfers.
1632 */
1633enum slim_port_err slim_port_get_xfer_status(struct slim_device *sb, u32 ph,
1634 phys_addr_t *done_buf, u32 *done_len)
1635{
1636 struct slim_controller *ctrl = sb->ctrl;
1637 u8 pn = SLIM_HDL_TO_PORT(ph);
1638 u32 la = SLIM_HDL_TO_LA(ph);
1639 enum slim_port_err err;
1640
1641 dev_dbg(&ctrl->dev, "get status port num:%d", pn);
1642 /*
1643 * Framework only has insight into ports managed by the manager's ported
1644 * device, not into ports owned by a slave
1645 */
1646 if (la != SLIM_LA_MANAGER) {
1647 if (done_buf)
1648 *done_buf = 0;
1649 if (done_len)
1650 *done_len = 0;
1651 return SLIM_P_NOT_OWNED;
1652 }
1653 err = ctrl->port_xfer_status(ctrl, pn, done_buf, done_len);
1654 if (err == SLIM_P_INPROGRESS)
1655 err = ctrl->ports[pn].err;
1656 return err;
1657}
1658EXPORT_SYMBOL(slim_port_get_xfer_status);
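
/*
 * Example (illustrative sketch, not part of the original file): queueing a
 * buffer on a manager port and checking the result once the completion
 * fires. "buf" and "size" stand for a client-owned buffer and its length
 * (hypothetical), and mgrph[0] is a manager port handle from
 * slim_alloc_mgrports().
 *
 *      DECLARE_COMPLETION_ONSTACK(done);
 *      phys_addr_t done_buf;
 *      u32 done_len;
 *      enum slim_port_err perr;
 *      int ret;
 *
 *      ret = slim_port_xfer(sb, mgrph[0], buf, size, &done);
 *      if (!ret) {
 *              wait_for_completion(&done);
 *              perr = slim_port_get_xfer_status(sb, mgrph[0],
 *                                               &done_buf, &done_len);
 *      }
 */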
1659
1660static void slim_add_ch(struct slim_controller *ctrl, struct slim_ich *slc)
1661{
1662 struct slim_ich **arr;
1663 int i, j;
1664 int *len;
1665 int sl = slc->seglen << slc->rootexp;
1666 /* Channel is already active and other end is transmitting data */
1667 if (slc->state >= SLIM_CH_ACTIVE)
1668 return;
1669 if (slc->coeff == SLIM_COEFF_1) {
1670 arr = ctrl->sched.chc1;
1671 len = &ctrl->sched.num_cc1;
1672 } else {
1673 arr = ctrl->sched.chc3;
1674 len = &ctrl->sched.num_cc3;
1675 sl *= 3;
1676 }
1677
1678 *len += 1;
1679
1680 /* Insert the channel based on rootexp and seglen */
1681 for (i = 0; i < *len - 1; i++) {
1682 /*
1683 * Primary key: exp low to high.
1684 * Secondary key: seglen: high to low
1685 */
1686 if ((slc->rootexp > arr[i]->rootexp) ||
1687 ((slc->rootexp == arr[i]->rootexp) &&
1688 (slc->seglen < arr[i]->seglen)))
1689 continue;
1690 else
1691 break;
1692 }
1693 for (j = *len - 1; j > i; j--)
1694 arr[j] = arr[j - 1];
1695 arr[i] = slc;
1696 if (!ctrl->allocbw)
1697 ctrl->sched.usedslots += sl;
1698}
1699
1700static int slim_remove_ch(struct slim_controller *ctrl, struct slim_ich *slc)
1701{
1702 struct slim_ich **arr;
1703 int i;
1704 u32 la, ph;
1705 int *len;
1706
1707 if (slc->coeff == SLIM_COEFF_1) {
1708 arr = ctrl->sched.chc1;
1709 len = &ctrl->sched.num_cc1;
1710 } else {
1711 arr = ctrl->sched.chc3;
1712 len = &ctrl->sched.num_cc3;
1713 }
1714
1715 for (i = 0; i < *len; i++) {
1716 if (arr[i] == slc)
1717 break;
1718 }
1719 if (i >= *len)
1720 return -EXFULL;
1721 for (; i < *len - 1; i++)
1722 arr[i] = arr[i + 1];
1723 *len -= 1;
1724 arr[*len] = NULL;
1725
1726 slc->state = SLIM_CH_ALLOCATED;
1727 slc->def = 0;
1728 slc->newintr = 0;
1729 slc->newoff = 0;
1730 for (i = 0; i < slc->nsink; i++) {
1731 ph = slc->sinkh[i];
1732 la = SLIM_HDL_TO_LA(ph);
1733 /*
1734 * For ports managed by manager's ported device, no need to send
1735 * disconnect. It is client's responsibility to call disconnect
1736 * on ports owned by the slave device
1737 */
1738 if (la == SLIM_LA_MANAGER) {
1739 ctrl->ports[SLIM_HDL_TO_PORT(ph)].state = SLIM_P_UNCFG;
1740 ctrl->ports[SLIM_HDL_TO_PORT(ph)].ch = NULL;
1741 }
1742 }
1743
1744 ph = slc->srch;
1745 la = SLIM_HDL_TO_LA(ph);
1746 if (la == SLIM_LA_MANAGER) {
1747 u8 pn = SLIM_HDL_TO_PORT(ph);
1748
1749 ctrl->ports[pn].state = SLIM_P_UNCFG;
1750 ctrl->ports[pn].cfg.watermark = 0;
1751 ctrl->ports[pn].cfg.port_opts = 0;
1752 }
1753
1754 kfree(slc->sinkh);
1755 slc->sinkh = NULL;
1756 slc->srch = 0;
1757 slc->nsink = 0;
1758 return 0;
1759}
1760
1761static u32 slim_calc_prrate(struct slim_controller *ctrl, struct slim_ch *prop)
1762{
1763 u32 rate = 0, rate4k = 0, rate11k = 0;
1764 u32 exp = 0;
1765 u32 pr = 0;
1766 bool exact = true;
1767 bool done = false;
1768 enum slim_ch_rate ratefam;
1769
1770 if (prop->prot >= SLIM_ASYNC_SMPLX)
1771 return 0;
1772 if (prop->baser == SLIM_RATE_1HZ) {
1773 rate = prop->ratem / 4000;
1774 rate4k = rate;
1775 if (rate * 4000 == prop->ratem)
1776 ratefam = SLIM_RATE_4000HZ;
1777 else {
1778 rate = prop->ratem / 11025;
1779 rate11k = rate;
1780 if (rate * 11025 == prop->ratem)
1781 ratefam = SLIM_RATE_11025HZ;
1782 else
1783 ratefam = SLIM_RATE_1HZ;
1784 }
1785 } else {
1786 ratefam = prop->baser;
1787 rate = prop->ratem;
1788 }
1789 if (ratefam == SLIM_RATE_1HZ) {
1790 exact = false;
1791 if ((rate4k + 1) * 4000 < (rate11k + 1) * 11025) {
1792 rate = rate4k + 1;
1793 ratefam = SLIM_RATE_4000HZ;
1794 } else {
1795 rate = rate11k + 1;
1796 ratefam = SLIM_RATE_11025HZ;
1797 }
1798 }
1799 /* convert rate to coeff-exp */
1800 while (!done) {
1801 while ((rate & 0x1) != 0x1) {
1802 rate >>= 1;
1803 exp++;
1804 }
1805 if (rate > 3) {
1806 /* roundup if not exact */
1807 rate++;
1808 exact = false;
1809 } else
1810 done = true;
1811 }
1812 if (ratefam == SLIM_RATE_4000HZ) {
1813 if (rate == 1)
1814 pr = 0x10;
1815 else {
1816 pr = 0;
1817 exp++;
1818 }
1819 } else {
1820 pr = 8;
1821 exp++;
1822 }
1823 if (exp <= 7) {
1824 pr |= exp;
1825 if (exact)
1826 pr |= 0x80;
1827 } else
1828 pr = 0;
1829 return pr;
1830}
1831
1832static int slim_nextdefine_ch(struct slim_device *sb, u8 chan)
1833{
1834 struct slim_controller *ctrl = sb->ctrl;
1835 u32 chrate = 0;
1836 u32 exp = 0;
1837 u32 coeff = 0;
1838 bool exact = true;
1839 bool done = false;
1840 int ret = 0;
1841 struct slim_ich *slc = &ctrl->chans[chan];
1842 struct slim_ch *prop = &slc->prop;
1843
1844 slc->prrate = slim_calc_prrate(ctrl, prop);
1845 dev_dbg(&ctrl->dev, "ch:%d, chan PR rate:%x\n", chan, slc->prrate);
1846 if (prop->baser == SLIM_RATE_4000HZ)
1847 chrate = 4000 * prop->ratem;
1848 else if (prop->baser == SLIM_RATE_11025HZ)
1849 chrate = 11025 * prop->ratem;
1850 else
1851 chrate = prop->ratem;
1852 /* max allowed sample freq = 768 seg/frame */
1853 if (chrate > 3600000)
1854 return -EDQUOT;
1855 if (prop->baser == SLIM_RATE_4000HZ &&
1856 ctrl->a_framer->superfreq == 4000)
1857 coeff = prop->ratem;
1858 else if (prop->baser == SLIM_RATE_11025HZ &&
1859 ctrl->a_framer->superfreq == 3675)
1860 coeff = 3 * prop->ratem;
1861 else {
1862 u32 tempr = 0;
1863
1864 tempr = chrate * SLIM_CL_PER_SUPERFRAME_DIV8;
1865 coeff = tempr / ctrl->a_framer->rootfreq;
1866 if (coeff * ctrl->a_framer->rootfreq != tempr) {
1867 coeff++;
1868 exact = false;
1869 }
1870 }
1871
1872 /* convert coeff to coeff-exponent */
1873 exp = 0;
1874 while (!done) {
1875 while ((coeff & 0x1) != 0x1) {
1876 coeff >>= 1;
1877 exp++;
1878 }
1879 if (coeff > 3) {
1880 coeff++;
1881 exact = false;
1882 } else
1883 done = true;
1884 }
1885 if (prop->prot == SLIM_HARD_ISO && !exact)
1886 return -EPROTONOSUPPORT;
1887 else if (prop->prot == SLIM_AUTO_ISO) {
1888 if (exact)
1889 prop->prot = SLIM_HARD_ISO;
1890 else
1891 prop->prot = SLIM_PUSH;
1892 }
1893 slc->rootexp = exp;
1894 slc->seglen = prop->sampleszbits/SLIM_CL_PER_SL;
1895 if (prop->prot != SLIM_HARD_ISO)
1896 slc->seglen++;
1897 if (prop->prot >= SLIM_EXT_SMPLX)
1898 slc->seglen++;
1899 /* convert coeff to enum */
1900 if (coeff == 1) {
1901 if (exp > 9)
1902 ret = -EIO;
1903 coeff = SLIM_COEFF_1;
1904 } else {
1905 if (exp > 8)
1906 ret = -EIO;
1907 coeff = SLIM_COEFF_3;
1908 }
1909 slc->coeff = coeff;
1910
1911 return ret;
1912}
1913
1914/*
1915 * slim_alloc_ch: Allocate a slimbus channel and return its handle.
1916 * @sb: client handle.
1917 * @chanh: return channel handle
1918 * The slimbus specification supports up to 256 channels; a given controller,
1919 * however, may support fewer than that.
1920 * -EXFULL is returned if all channels that the controller supports are
1921 * already in use.
1922 */
1923int slim_alloc_ch(struct slim_device *sb, u16 *chanh)
1924{
1925 struct slim_controller *ctrl = sb->ctrl;
1926 u16 i;
1927
1928 if (!ctrl)
1929 return -EINVAL;
1930 mutex_lock(&ctrl->sched.m_reconf);
1931 for (i = 0; i < ctrl->nchans; i++) {
1932 if (ctrl->chans[i].state == SLIM_CH_FREE)
1933 break;
1934 }
1935 if (i >= ctrl->nchans) {
1936 mutex_unlock(&ctrl->sched.m_reconf);
1937 return -EXFULL;
1938 }
1939 *chanh = i;
1940 ctrl->chans[i].nextgrp = 0;
1941 ctrl->chans[i].state = SLIM_CH_ALLOCATED;
1942 ctrl->chans[i].chan = (u8)(ctrl->reserved + i);
1943
1944 mutex_unlock(&ctrl->sched.m_reconf);
1945 return 0;
1946}
1947EXPORT_SYMBOL(slim_alloc_ch);
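
/*
 * Usage sketch (illustrative only; "codec_sb" is a hypothetical client handle
 * obtained at probe time):
 *
 *	u16 chanh;
 *	int err = slim_alloc_ch(codec_sb, &chanh);
 *
 * A return of -EXFULL means every channel this controller supports is in use.
 */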
1948
1949/*
1950 * slim_query_ch: Get reference-counted handle for a channel number. Every
1951 * channel is reference counted by upto one as producer and the others as
1952 * consumer)
1953 * @sb: client handle
1954 * @chan: slimbus channel number
1955 * @chanh: return channel handle
1956 * If request channel number is not in use, it is allocated, and reference
1957 * count is set to one. If the channel was was already allocated, this API
1958 * will return handle to that channel and reference count is incremented.
1959 * -EXFULL is returned if all channels are in use
1960 */
1961int slim_query_ch(struct slim_device *sb, u8 ch, u16 *chanh)
1962{
1963 struct slim_controller *ctrl = sb->ctrl;
1964 u16 i, j;
1965 int ret = 0;
1966
1967 if (!ctrl || !chanh)
1968 return -EINVAL;
1969 mutex_lock(&ctrl->sched.m_reconf);
1970 /* start with modulo number */
1971 i = ch % ctrl->nchans;
1972
1973 for (j = 0; j < ctrl->nchans; j++) {
1974 if (ctrl->chans[i].chan == ch) {
1975 *chanh = i;
1976 ctrl->chans[i].ref++;
1977 if (ctrl->chans[i].state == SLIM_CH_FREE)
1978 ctrl->chans[i].state = SLIM_CH_ALLOCATED;
1979 goto query_out;
1980 }
1981 i = (i + 1) % ctrl->nchans;
1982 }
1983
1984 /* Channel not in table yet */
1985 ret = -EXFULL;
1986 for (j = 0; j < ctrl->nchans; j++) {
1987 if (ctrl->chans[i].state == SLIM_CH_FREE) {
1988 ctrl->chans[i].state =
1989 SLIM_CH_ALLOCATED;
1990 *chanh = i;
1991 ctrl->chans[i].ref++;
1992 ctrl->chans[i].chan = ch;
1993 ctrl->chans[i].nextgrp = 0;
1994 ret = 0;
1995 break;
1996 }
1997 i = (i + 1) % ctrl->nchans;
1998 }
1999query_out:
2000 mutex_unlock(&ctrl->sched.m_reconf);
2001 dev_dbg(&ctrl->dev, "query ch:%d,hdl:%d,ref:%d,ret:%d",
2002 ch, i, ctrl->chans[i].ref, ret);
2003 return ret;
2004}
2005EXPORT_SYMBOL(slim_query_ch);
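
/*
 * Usage sketch (illustrative only; the channel number 140 is a made-up value
 * that the producer and consumer have agreed on out of band):
 *
 *	u16 chanh;
 *	int err = slim_query_ch(codec_sb, 140, &chanh);
 *
 * Both endpoints can call this with the same channel number; the second call
 * returns the same handle with the reference count bumped.
 */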
2006
2007/*
2008 * slim_dealloc_ch: Deallocate channel allocated using the API above
2009 * -EISCONN is returned if an attempt is made to deallocate a channel
2010 * without removing it first.
2011 * -ENOTCONN is returned if deallocation is tried on a channel that's not
2012 * allocated.
2013 */
2014int slim_dealloc_ch(struct slim_device *sb, u16 chanh)
2015{
2016 struct slim_controller *ctrl = sb->ctrl;
2017 u8 chan = SLIM_HDL_TO_CHIDX(chanh);
2018 struct slim_ich *slc = &ctrl->chans[chan];
2019
2020 if (!ctrl)
2021 return -EINVAL;
2022
2023 mutex_lock(&ctrl->sched.m_reconf);
2024 if (slc->state == SLIM_CH_FREE) {
2025 mutex_unlock(&ctrl->sched.m_reconf);
2026 return -ENOTCONN;
2027 }
2028 if (slc->ref > 1) {
2029 slc->ref--;
2030 mutex_unlock(&ctrl->sched.m_reconf);
2031 dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
2032 slc->chan, chanh, slc->ref);
2033 return 0;
2034 }
2035 if (slc->state >= SLIM_CH_PENDING_ACTIVE) {
2036 dev_err(&ctrl->dev, "Channel:%d should be removed first", chan);
2037 mutex_unlock(&ctrl->sched.m_reconf);
2038 return -EISCONN;
2039 }
2040 slc->ref--;
2041 slc->state = SLIM_CH_FREE;
2042 mutex_unlock(&ctrl->sched.m_reconf);
2043 dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
2044 slc->chan, chanh, slc->ref);
2045 return 0;
2046}
2047EXPORT_SYMBOL(slim_dealloc_ch);
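
/*
 * Usage sketch (illustrative only): once the channel has been removed via
 * slim_control_ch() and the removal committed, the handle can be released:
 *
 *	err = slim_dealloc_ch(codec_sb, chanh);
 *
 * Calling this while the channel is still active returns -EISCONN.
 */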
2048
2049/*
2050 * slim_get_ch_state: Channel state.
2051 * This API returns the channel's state (active, suspended, inactive etc)
2052 */
2053enum slim_ch_state slim_get_ch_state(struct slim_device *sb, u16 chanh)
2054{
2055 u8 chan = SLIM_HDL_TO_CHIDX(chanh);
2056 struct slim_ich *slc = &sb->ctrl->chans[chan];
2057
2058 return slc->state;
2059}
2060EXPORT_SYMBOL(slim_get_ch_state);
2061
2062/*
2063 * slim_define_ch: Define a channel. This API defines channel parameters for
2064 * a given channel.
2065 * @sb: client handle.
2066 * @prop: slim_ch structure with channel parameters desired to be used.
2067 * @chanh: list of channels to be defined.
2068 * @nchan: number of channels in a group (1 if grp is false)
2069 * @grp: Are the channels grouped
2070 * @grph: return group handle if grouping of channels is desired.
2071 * Channels can be grouped if multiple channels use the same parameters
2072 * (e.g. the 6 channels of 5.1 audio share parameters, so they can be grouped
2073 * under one handle to avoid calling this API repeatedly for each of them).
2074 * -EISCONN is returned if channel is already used with different parameters.
2075 * -ENXIO is returned if the channel is not yet allocated.
2076 */
2077int slim_define_ch(struct slim_device *sb, struct slim_ch *prop, u16 *chanh,
2078 u8 nchan, bool grp, u16 *grph)
2079{
2080 struct slim_controller *ctrl = sb->ctrl;
2081 int i, ret = 0;
2082
2083 if (!ctrl || !chanh || !prop || !nchan)
2084 return -EINVAL;
2085 mutex_lock(&ctrl->sched.m_reconf);
2086 for (i = 0; i < nchan; i++) {
2087 u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
2088 struct slim_ich *slc = &ctrl->chans[chan];
2089
2090 dev_dbg(&ctrl->dev, "define_ch: ch:%d, state:%d", chan,
2091 (int)ctrl->chans[chan].state);
2092 if (slc->state < SLIM_CH_ALLOCATED) {
2093 ret = -ENXIO;
2094 goto err_define_ch;
2095 }
2096 if (slc->state >= SLIM_CH_DEFINED && slc->ref >= 2) {
2097 if (prop->ratem != slc->prop.ratem ||
2098 prop->sampleszbits != slc->prop.sampleszbits ||
2099 prop->baser != slc->prop.baser) {
2100 ret = -EISCONN;
2101 goto err_define_ch;
2102 }
2103 } else if (slc->state > SLIM_CH_DEFINED) {
2104 ret = -EISCONN;
2105 goto err_define_ch;
2106 } else {
2107 ctrl->chans[chan].prop = *prop;
2108 ret = slim_nextdefine_ch(sb, chan);
2109 if (ret)
2110 goto err_define_ch;
2111 }
2112 if (i < (nchan - 1))
2113 ctrl->chans[chan].nextgrp = chanh[i + 1];
2114 if (i == 0)
2115 ctrl->chans[chan].nextgrp |= SLIM_START_GRP;
2116 if (i == (nchan - 1))
2117 ctrl->chans[chan].nextgrp |= SLIM_END_GRP;
2118 }
2119
2120 if (grp)
2121 *grph = ((nchan << 8) | SLIM_HDL_TO_CHIDX(chanh[0]));
2122 for (i = 0; i < nchan; i++) {
2123 u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
2124 struct slim_ich *slc = &ctrl->chans[chan];
2125
2126 if (slc->state == SLIM_CH_ALLOCATED)
2127 slc->state = SLIM_CH_DEFINED;
2128 }
2129err_define_ch:
2130 dev_dbg(&ctrl->dev, "define_ch: ch:%d, ret:%d", *chanh, ret);
2131 mutex_unlock(&ctrl->sched.m_reconf);
2132 return ret;
2133}
2134EXPORT_SYMBOL(slim_define_ch);
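
/*
 * Usage sketch (illustrative only): defining two 48 kHz, 16-bit channels as
 * one group. Only the slim_ch fields referenced above are shown; dataf/auxf
 * settings are omitted and depend on the client.
 *
 *	struct slim_ch prop = {
 *		.baser = SLIM_RATE_4000HZ,
 *		.ratem = 12,			// 12 * 4000 Hz = 48 kHz
 *		.sampleszbits = 16,
 *		.prot = SLIM_AUTO_ISO,
 *	};
 *	u16 chans[2] = {left_ch, right_ch};	// handles from slim_alloc_ch()
 *	u16 grph;
 *	int err = slim_define_ch(codec_sb, &prop, chans, 2, true, &grph);
 */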
2135
2136static u32 getsubfrmcoding(u32 *ctrlw, u32 *subfrml, u32 *msgsl)
2137{
2138 u32 code = 0;
2139
2140 if (*ctrlw == *subfrml) {
2141 *ctrlw = 8;
2142 *subfrml = 8;
2143 *msgsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME
2144 - SLIM_GDE_SLOTS_PER_SUPERFRAME;
2145 return 0;
2146 }
2147 if (*subfrml == 6) {
2148 code = 0;
2149 *msgsl = 256;
2150 } else if (*subfrml == 8) {
2151 code = 1;
2152 *msgsl = 192;
2153 } else if (*subfrml == 24) {
2154 code = 2;
2155 *msgsl = 64;
2156 } else { /* 32 */
2157 code = 3;
2158 *msgsl = 48;
2159 }
2160
2161 if (*ctrlw < 8) {
2162 if (*ctrlw >= 6) {
2163 *ctrlw = 6;
2164 code |= 0x14;
2165 } else {
2166 if (*ctrlw == 5)
2167 *ctrlw = 4;
2168 code |= (*ctrlw << 2);
2169 }
2170 } else {
2171 code -= 2;
2172 if (*ctrlw >= 24) {
2173 *ctrlw = 24;
2174 code |= 0x1e;
2175 } else if (*ctrlw >= 16) {
2176 *ctrlw = 16;
2177 code |= 0x1c;
2178 } else if (*ctrlw >= 12) {
2179 *ctrlw = 12;
2180 code |= 0x1a;
2181 } else {
2182 *ctrlw = 8;
2183 code |= 0x18;
2184 }
2185 }
2186
2187 *msgsl = (*msgsl * *ctrlw) - SLIM_FRM_SLOTS_PER_SUPERFRAME -
2188 SLIM_GDE_SLOTS_PER_SUPERFRAME;
2189 return code;
2190}
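
/*
 * Worked example (traced from the function above, purely illustrative): with
 * *ctrlw == 8 and *subfrml == 24 on entry, the subframe length picks code 2
 * and 64 control segments per superframe. The control width is not below 8,
 * so code is reduced by 2 and, since 8 is less than 12, the final else leaves
 * *ctrlw at 8 and ORs in 0x18; the returned coding is therefore 0x18 and
 * *msgsl becomes 64 * 8 minus the framer and guide slot overhead.
 */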
2191
2192static void shiftsegoffsets(struct slim_controller *ctrl, struct slim_ich **ach,
2193 int sz, u32 shft)
2194{
2195 int i;
2196 u32 oldoff;
2197
2198 for (i = 0; i < sz; i++) {
2199 struct slim_ich *slc;
2200
2201 if (ach[i] == NULL)
2202 continue;
2203 slc = ach[i];
2204 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2205 continue;
2206 oldoff = slc->newoff;
2207 slc->newoff += shft;
2208		/* seg. offset must be < interval */
2209 if (slc->newoff >= slc->newintr)
2210 slc->newoff -= slc->newintr;
2211 }
2212}
2213
2214static inline int slim_sched_4k_coeff1_chans(struct slim_controller *ctrl,
2215 struct slim_ich **slc, int *coeff, int *opensl1,
2216 u32 expshft, u32 curintr, u32 curmaxsl,
2217 int curexp, int finalexp)
2218{
2219 int coeff1;
2220 struct slim_ich *slc1;
2221
2222 if (unlikely(!coeff || !slc || !ctrl || !opensl1))
2223 return -EINVAL;
2224
2225 coeff1 = *coeff;
2226 slc1 = *slc;
2227 while ((coeff1 < ctrl->sched.num_cc1) &&
2228 (curexp == (int)(slc1->rootexp + expshft))) {
2229 if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
2230 coeff1++;
2231 slc1 = ctrl->sched.chc1[coeff1];
2232 continue;
2233 }
2234 if (opensl1[1] >= opensl1[0] ||
2235 (finalexp == (int)slc1->rootexp &&
2236 curintr <= 24 && opensl1[0] == curmaxsl)) {
2237 opensl1[1] -= slc1->seglen;
2238 slc1->newoff = curmaxsl + opensl1[1];
2239 if (opensl1[1] < 0 && opensl1[0] == curmaxsl) {
2240 opensl1[0] += opensl1[1];
2241 opensl1[1] = 0;
2242 if (opensl1[0] < 0) {
2243 dev_dbg(&ctrl->dev,
2244 "reconfig failed:%d\n",
2245 __LINE__);
2246 return -EXFULL;
2247 }
2248 }
2249 } else {
2250 if (slc1->seglen > opensl1[0]) {
2251 dev_dbg(&ctrl->dev,
2252 "reconfig failed:%d\n", __LINE__);
2253 return -EXFULL;
2254 }
2255 slc1->newoff = opensl1[0] - slc1->seglen;
2256 opensl1[0] = slc1->newoff;
2257 }
2258 slc1->newintr = curintr;
2259 coeff1++;
2260 slc1 = ctrl->sched.chc1[coeff1];
2261 }
2262 *coeff = coeff1;
2263 *slc = slc1;
2264 return 0;
2265}
2266
2267static int slim_sched_chans(struct slim_device *sb, u32 clkgear,
2268 u32 *ctrlw, u32 *subfrml)
2269{
2270 int coeff1, coeff3;
2271 enum slim_ch_coeff bias;
2272 struct slim_controller *ctrl = sb->ctrl;
2273 int last1 = ctrl->sched.num_cc1 - 1;
2274 int last3 = ctrl->sched.num_cc3 - 1;
2275
2276 /*
2277 * Find first channels with coeff 1 & 3 as starting points for
2278 * scheduling
2279 */
2280 for (coeff3 = 0; coeff3 < ctrl->sched.num_cc3; coeff3++) {
2281 struct slim_ich *slc = ctrl->sched.chc3[coeff3];
2282
2283 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2284 continue;
2285 else
2286 break;
2287 }
2288 for (coeff1 = 0; coeff1 < ctrl->sched.num_cc1; coeff1++) {
2289 struct slim_ich *slc = ctrl->sched.chc1[coeff1];
2290
2291 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2292 continue;
2293 else
2294 break;
2295 }
2296 if (coeff3 == ctrl->sched.num_cc3 && coeff1 == ctrl->sched.num_cc1) {
2297 *ctrlw = 8;
2298 *subfrml = 8;
2299 return 0;
2300 } else if (coeff3 == ctrl->sched.num_cc3)
2301 bias = SLIM_COEFF_1;
2302 else
2303 bias = SLIM_COEFF_3;
2304
2305 /*
2306	 * Find the last chan in the coeff1 and coeff3 lists; we use these to know
2307	 * when we have finished scheduling all coeff1 and coeff3 channels
2308 */
2309 while (last1 >= 0) {
2310 if (ctrl->sched.chc1[last1] != NULL &&
2311 (ctrl->sched.chc1[last1])->state !=
2312 SLIM_CH_PENDING_REMOVAL)
2313 break;
2314 last1--;
2315 }
2316 while (last3 >= 0) {
2317 if (ctrl->sched.chc3[last3] != NULL &&
2318 (ctrl->sched.chc3[last3])->state !=
2319 SLIM_CH_PENDING_REMOVAL)
2320 break;
2321 last3--;
2322 }
2323
2324 if (bias == SLIM_COEFF_1) {
2325 struct slim_ich *slc1 = ctrl->sched.chc1[coeff1];
2326 u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
2327 int curexp, finalexp;
2328 u32 curintr, curmaxsl;
2329 int opensl1[2];
2330 int maxctrlw1;
2331 int ret;
2332
2333 finalexp = (ctrl->sched.chc1[last1])->rootexp;
2334 curexp = (int)expshft - 1;
2335
2336 curintr = (SLIM_MAX_INTR_COEFF_1 * 2) >> (curexp + 1);
2337 curmaxsl = curintr >> 1;
2338 opensl1[0] = opensl1[1] = curmaxsl;
2339
2340 while ((coeff1 < ctrl->sched.num_cc1) || (curintr > 24)) {
2341 curintr >>= 1;
2342 curmaxsl >>= 1;
2343
2344 /* update 4K family open slot records */
2345 if (opensl1[1] < opensl1[0])
2346 opensl1[1] -= curmaxsl;
2347 else
2348 opensl1[1] = opensl1[0] - curmaxsl;
2349 opensl1[0] = curmaxsl;
2350 if (opensl1[1] < 0) {
2351 opensl1[0] += opensl1[1];
2352 opensl1[1] = 0;
2353 }
2354 if (opensl1[0] <= 0) {
2355 dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
2356 __LINE__);
2357 return -EXFULL;
2358 }
2359 curexp++;
2360 /* schedule 4k family channels */
2361 ret = slim_sched_4k_coeff1_chans(ctrl, &slc1, &coeff1,
2362 opensl1, expshft, curintr, curmaxsl,
2363 curexp, finalexp);
2364 if (ret)
2365 return ret;
2366 }
2367 /* Leave some slots for messaging space */
2368 if (opensl1[1] <= 0 && opensl1[0] <= 0)
2369 return -EXFULL;
2370 if (opensl1[1] > opensl1[0]) {
2371 int temp = opensl1[0];
2372
2373 opensl1[0] = opensl1[1];
2374 opensl1[1] = temp;
2375 shiftsegoffsets(ctrl, ctrl->sched.chc1,
2376 ctrl->sched.num_cc1, curmaxsl);
2377 }
2378 /* choose subframe mode to maximize bw */
2379 maxctrlw1 = opensl1[0];
2380 if (opensl1[0] == curmaxsl)
2381 maxctrlw1 += opensl1[1];
2382 if (curintr >= 24) {
2383 *subfrml = 24;
2384 *ctrlw = maxctrlw1;
2385 } else if (curintr == 12) {
2386 if (maxctrlw1 > opensl1[1] * 4) {
2387 *subfrml = 24;
2388 *ctrlw = maxctrlw1;
2389 } else {
2390 *subfrml = 6;
2391 *ctrlw = opensl1[1];
2392 }
2393 } else {
2394 *subfrml = 6;
2395 *ctrlw = maxctrlw1;
2396 }
2397 } else {
2398 struct slim_ich *slc1 = NULL;
2399 struct slim_ich *slc3 = ctrl->sched.chc3[coeff3];
2400 u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
2401 int curexp, finalexp, exp1;
2402 u32 curintr, curmaxsl;
2403 int opensl3[2];
2404 int opensl1[6];
2405 bool opensl1valid = false;
2406 int maxctrlw1, maxctrlw3, i;
2407
2408		/* initialize array to zero */
2409 memset(opensl1, 0x0, sizeof(opensl1));
2410		finalexp = (ctrl->sched.chc3[last3])->rootexp;
2411 if (last1 >= 0) {
2412 slc1 = ctrl->sched.chc1[coeff1];
2413 exp1 = (ctrl->sched.chc1[last1])->rootexp;
2414 if (exp1 > finalexp)
2415 finalexp = exp1;
2416 }
2417 curexp = (int)expshft - 1;
2418
2419 curintr = (SLIM_MAX_INTR_COEFF_3 * 2) >> (curexp + 1);
2420 curmaxsl = curintr >> 1;
2421 opensl3[0] = opensl3[1] = curmaxsl;
2422
2423 while (coeff1 < ctrl->sched.num_cc1 ||
2424 coeff3 < ctrl->sched.num_cc3 ||
2425 curintr > 32) {
2426 curintr >>= 1;
2427 curmaxsl >>= 1;
2428
2429 /* update 12k family open slot records */
2430 if (opensl3[1] < opensl3[0])
2431 opensl3[1] -= curmaxsl;
2432 else
2433 opensl3[1] = opensl3[0] - curmaxsl;
2434 opensl3[0] = curmaxsl;
2435 if (opensl3[1] < 0) {
2436 opensl3[0] += opensl3[1];
2437 opensl3[1] = 0;
2438 }
2439 if (opensl3[0] <= 0) {
2440 dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
2441 __LINE__);
2442 return -EXFULL;
2443 }
2444 curexp++;
2445
2446 /* schedule 12k family channels */
2447 while (coeff3 < ctrl->sched.num_cc3 &&
2448 curexp == (int)slc3->rootexp + expshft) {
2449 if (slc3->state == SLIM_CH_PENDING_REMOVAL) {
2450 coeff3++;
2451 slc3 = ctrl->sched.chc3[coeff3];
2452 continue;
2453 }
2454 opensl1valid = false;
2455 if (opensl3[1] >= opensl3[0] ||
2456 (finalexp == (int)slc3->rootexp &&
2457 curintr <= 32 &&
2458 opensl3[0] == curmaxsl &&
2459 last1 < 0)) {
2460 opensl3[1] -= slc3->seglen;
2461 slc3->newoff = curmaxsl + opensl3[1];
2462 if (opensl3[1] < 0 &&
2463 opensl3[0] == curmaxsl) {
2464 opensl3[0] += opensl3[1];
2465 opensl3[1] = 0;
2466 }
2467 if (opensl3[0] < 0) {
2468 dev_dbg(&ctrl->dev,
2469 "reconfig failed:%d\n",
2470 __LINE__);
2471 return -EXFULL;
2472 }
2473 } else {
2474 if (slc3->seglen > opensl3[0]) {
2475 dev_dbg(&ctrl->dev,
2476 "reconfig failed:%d\n",
2477 __LINE__);
2478 return -EXFULL;
2479 }
2480 slc3->newoff = opensl3[0] -
2481 slc3->seglen;
2482 opensl3[0] = slc3->newoff;
2483 }
2484 slc3->newintr = curintr;
2485 coeff3++;
2486 slc3 = ctrl->sched.chc3[coeff3];
2487 }
2488 /* update 4k openslot records */
2489 if (opensl1valid == false) {
2490 for (i = 0; i < 3; i++) {
2491 opensl1[i * 2] = opensl3[0];
2492 opensl1[(i * 2) + 1] = opensl3[1];
2493 }
2494 } else {
2495 int opensl1p[6];
2496
2497 memcpy(opensl1p, opensl1, sizeof(opensl1));
2498 for (i = 0; i < 3; i++) {
2499 if (opensl1p[i] < opensl1p[i + 3])
2500 opensl1[(i * 2) + 1] =
2501 opensl1p[i];
2502 else
2503 opensl1[(i * 2) + 1] =
2504 opensl1p[i + 3];
2505 }
2506 for (i = 0; i < 3; i++) {
2507 opensl1[(i * 2) + 1] -= curmaxsl;
2508 opensl1[i * 2] = curmaxsl;
2509 if (opensl1[(i * 2) + 1] < 0) {
2510 opensl1[i * 2] +=
2511 opensl1[(i * 2) + 1];
2512 opensl1[(i * 2) + 1] = 0;
2513 }
2514 if (opensl1[i * 2] < 0) {
2515 dev_dbg(&ctrl->dev,
2516 "reconfig failed:%d\n",
2517 __LINE__);
2518 return -EXFULL;
2519 }
2520 }
2521 }
2522 /* schedule 4k family channels */
2523			while (coeff1 < ctrl->sched.num_cc1 && slc1 &&
2524				curexp == (int)slc1->rootexp + expshft) {
2525 /* searchorder effective when opensl valid */
2526 static const int srcho[] = { 5, 2, 4, 1, 3, 0 };
2527 int maxopensl = 0;
2528 int maxi = 0;
2529
2530 if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
2531 coeff1++;
2532 slc1 = ctrl->sched.chc1[coeff1];
2533 continue;
2534 }
2535 opensl1valid = true;
2536 for (i = 0; i < 6; i++) {
2537 if (opensl1[srcho[i]] > maxopensl) {
2538 maxopensl = opensl1[srcho[i]];
2539 maxi = srcho[i];
2540 }
2541 }
2542 opensl1[maxi] -= slc1->seglen;
2543 slc1->newoff = (curmaxsl * maxi) +
2544 opensl1[maxi];
2545 if (opensl1[maxi] < 0 && (maxi & 1) == 1 &&
2546 opensl1[maxi - 1] == curmaxsl) {
2547 opensl1[maxi - 1] += opensl1[maxi];
2548 if (opensl3[0] > opensl1[maxi - 1])
2549 opensl3[0] = opensl1[maxi - 1];
2550 opensl3[1] = 0;
2551 opensl1[maxi] = 0;
2552 if (opensl1[maxi - 1] < 0) {
2553 dev_dbg(&ctrl->dev,
2554 "reconfig failed:%d\n",
2555 __LINE__);
2556 return -EXFULL;
2557 }
2558 } else if (opensl1[maxi] < 0) {
2559 dev_dbg(&ctrl->dev,
2560 "reconfig failed:%d\n",
2561 __LINE__);
2562 return -EXFULL;
2563 } else if (opensl3[maxi & 1] > opensl1[maxi]) {
2564 opensl3[maxi & 1] = opensl1[maxi];
2565 }
2566 slc1->newintr = curintr * 3;
2567 coeff1++;
2568 slc1 = ctrl->sched.chc1[coeff1];
2569 }
2570 }
2571 /* Leave some slots for messaging space */
2572 if (opensl3[1] <= 0 && opensl3[0] <= 0)
2573 return -EXFULL;
2574 /* swap 1st and 2nd bucket if 2nd bucket has more open slots */
2575 if (opensl3[1] > opensl3[0]) {
2576 int temp = opensl3[0];
2577
2578 opensl3[0] = opensl3[1];
2579 opensl3[1] = temp;
2580 temp = opensl1[5];
2581 opensl1[5] = opensl1[4];
2582 opensl1[4] = opensl1[3];
2583 opensl1[3] = opensl1[2];
2584 opensl1[2] = opensl1[1];
2585 opensl1[1] = opensl1[0];
2586 opensl1[0] = temp;
2587 shiftsegoffsets(ctrl, ctrl->sched.chc1,
2588 ctrl->sched.num_cc1, curmaxsl);
2589 shiftsegoffsets(ctrl, ctrl->sched.chc3,
2590 ctrl->sched.num_cc3, curmaxsl);
2591 }
2592 /* subframe mode to maximize BW */
2593 maxctrlw3 = opensl3[0];
2594 maxctrlw1 = opensl1[0];
2595 if (opensl3[0] == curmaxsl)
2596 maxctrlw3 += opensl3[1];
2597 for (i = 0; i < 5 && opensl1[i] == curmaxsl; i++)
2598 maxctrlw1 += opensl1[i + 1];
2599 if (curintr >= 32) {
2600 *subfrml = 32;
2601 *ctrlw = maxctrlw3;
2602 } else if (curintr == 16) {
2603 if (maxctrlw3 > (opensl3[1] * 4)) {
2604 *subfrml = 32;
2605 *ctrlw = maxctrlw3;
2606 } else {
2607 *subfrml = 8;
2608 *ctrlw = opensl3[1];
2609 }
2610 } else {
2611 if ((maxctrlw1 * 8) >= (maxctrlw3 * 24)) {
2612 *subfrml = 24;
2613 *ctrlw = maxctrlw1;
2614 } else {
2615 *subfrml = 8;
2616 *ctrlw = maxctrlw3;
2617 }
2618 }
2619 }
2620 return 0;
2621}
2622
2623#ifdef DEBUG
2624static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
2625 u32 subfrml, u32 clkgear)
2626{
2627 int sl, i;
2628 int cc1 = 0;
2629 int cc3 = 0;
2630 struct slim_ich *slc = NULL;
2631
2632 if (!ctrl->sched.slots)
2633 return 0;
2634 memset(ctrl->sched.slots, 0, SLIM_SL_PER_SUPERFRAME);
2635 dev_dbg(&ctrl->dev, "Clock gear is:%d\n", clkgear);
2636 for (sl = 0; sl < SLIM_SL_PER_SUPERFRAME; sl += subfrml) {
2637 for (i = 0; i < ctrlw; i++)
2638 ctrl->sched.slots[sl + i] = 33;
2639 }
2640 while (cc1 < ctrl->sched.num_cc1) {
2641 slc = ctrl->sched.chc1[cc1];
2642 if (slc == NULL) {
2643 dev_err(&ctrl->dev, "SLC1 null in verify: chan%d\n",
2644 cc1);
2645 return -EIO;
2646 }
2647 dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
2648 (slc - ctrl->chans), slc->newoff,
2649 slc->newintr, slc->seglen);
2650
2651 if (slc->state != SLIM_CH_PENDING_REMOVAL) {
2652 for (sl = slc->newoff;
2653 sl < SLIM_SL_PER_SUPERFRAME;
2654 sl += slc->newintr) {
2655 for (i = 0; i < slc->seglen; i++) {
2656 if (ctrl->sched.slots[sl + i])
2657 return -EXFULL;
2658 ctrl->sched.slots[sl + i] = cc1 + 1;
2659 }
2660 }
2661 }
2662 cc1++;
2663 }
2664 while (cc3 < ctrl->sched.num_cc3) {
2665 slc = ctrl->sched.chc3[cc3];
2666 if (slc == NULL) {
2667 dev_err(&ctrl->dev, "SLC3 null in verify: chan%d\n",
2668 cc3);
2669 return -EIO;
2670 }
2671 dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
2672 (slc - ctrl->chans), slc->newoff,
2673 slc->newintr, slc->seglen);
2674 if (slc->state != SLIM_CH_PENDING_REMOVAL) {
2675 for (sl = slc->newoff;
2676 sl < SLIM_SL_PER_SUPERFRAME;
2677 sl += slc->newintr) {
2678 for (i = 0; i < slc->seglen; i++) {
2679 if (ctrl->sched.slots[sl + i])
2680 return -EXFULL;
2681 ctrl->sched.slots[sl + i] = cc3 + 1;
2682 }
2683 }
2684 }
2685 cc3++;
2686 }
2687
2688 return 0;
2689}
2690#else
2691static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
2692 u32 subfrml, u32 clkgear)
2693{
2694 return 0;
2695}
2696#endif
2697
2698static void slim_sort_chan_grp(struct slim_controller *ctrl,
2699 struct slim_ich *slc)
2700{
2701 u8 last = (u8)-1;
2702 u8 second = 0;
2703
2704 for (; last > 0; last--) {
2705 struct slim_ich *slc1 = slc;
2706 struct slim_ich *slc2;
2707 u8 next = SLIM_HDL_TO_CHIDX(slc1->nextgrp);
2708
2709 slc2 = &ctrl->chans[next];
2710 for (second = 1; second <= last && slc2 &&
2711 (slc2->state == SLIM_CH_ACTIVE ||
2712 slc2->state == SLIM_CH_PENDING_ACTIVE); second++) {
2713 if (slc1->newoff > slc2->newoff) {
2714 u32 temp = slc2->newoff;
2715
2716 slc2->newoff = slc1->newoff;
2717 slc1->newoff = temp;
2718 }
2719 if (slc2->nextgrp & SLIM_END_GRP) {
2720 last = second;
2721 break;
2722 }
2723 slc1 = slc2;
2724 next = SLIM_HDL_TO_CHIDX(slc1->nextgrp);
2725 slc2 = &ctrl->chans[next];
2726 }
2727 if (slc2 == NULL)
2728 last = second - 1;
2729 }
2730}
2731
2732
2733static int slim_allocbw(struct slim_device *sb, int *subfrmc, int *clkgear)
2734{
2735 u32 msgsl = 0;
2736 u32 ctrlw = 0;
2737 u32 subfrml = 0;
2738 int ret = -EIO;
2739 struct slim_controller *ctrl = sb->ctrl;
2740 u32 usedsl = ctrl->sched.usedslots + ctrl->sched.pending_msgsl;
2741 u32 availsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME -
2742 SLIM_GDE_SLOTS_PER_SUPERFRAME;
2743 *clkgear = SLIM_MAX_CLK_GEAR;
2744
2745	dev_dbg(&ctrl->dev, "used sl:%u, available sl:%u\n", usedsl, availsl);
2746 dev_dbg(&ctrl->dev, "pending:chan sl:%u, :msg sl:%u, clkgear:%u\n",
2747 ctrl->sched.usedslots,
2748 ctrl->sched.pending_msgsl, *clkgear);
2749 /*
2750	 * If the number of used slots is 0, all channels are inactive.
2751	 * It is very likely that the manager will call clock pause very soon.
2752	 * By making sure that the bus is in MAX_GEAR, the clock pause sequence
2753	 * will take the minimum amount of time.
2754 */
2755 if (ctrl->sched.usedslots != 0) {
2756 while ((usedsl * 2 <= availsl) && (*clkgear > ctrl->min_cg)) {
2757 *clkgear -= 1;
2758 usedsl *= 2;
2759 }
2760 }
2761
2762 /*
2763 * Try scheduling data channels at current clock gear, if all channels
2764 * can be scheduled, or reserved BW can't be satisfied, increase clock
2765 * gear and try again
2766 */
2767 for (; *clkgear <= ctrl->max_cg; (*clkgear)++) {
2768 ret = slim_sched_chans(sb, *clkgear, &ctrlw, &subfrml);
2769
2770 if (ret == 0) {
2771 *subfrmc = getsubfrmcoding(&ctrlw, &subfrml, &msgsl);
2772 if ((msgsl >> (ctrl->max_cg - *clkgear) <
2773 ctrl->sched.pending_msgsl) &&
2774 (*clkgear < ctrl->max_cg))
2775 continue;
2776 else
2777 break;
2778 }
2779 }
2780 if (ret == 0) {
2781 int i;
2782 /* Sort channel-groups */
2783 for (i = 0; i < ctrl->sched.num_cc1; i++) {
2784 struct slim_ich *slc = ctrl->sched.chc1[i];
2785
2786 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2787 continue;
2788 if ((slc->nextgrp & SLIM_START_GRP) &&
2789 !(slc->nextgrp & SLIM_END_GRP)) {
2790 slim_sort_chan_grp(ctrl, slc);
2791 }
2792 }
2793 for (i = 0; i < ctrl->sched.num_cc3; i++) {
2794 struct slim_ich *slc = ctrl->sched.chc3[i];
2795
2796 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2797 continue;
2798 if ((slc->nextgrp & SLIM_START_GRP) &&
2799 !(slc->nextgrp & SLIM_END_GRP)) {
2800 slim_sort_chan_grp(ctrl, slc);
2801 }
2802 }
2803
2804 ret = slim_verifychansched(ctrl, ctrlw, subfrml, *clkgear);
2805 }
2806
2807 return ret;
2808}
2809
2810static void slim_change_existing_chans(struct slim_controller *ctrl, int coeff)
2811{
2812 struct slim_ich **arr;
2813 int len, i;
2814
2815 if (coeff == SLIM_COEFF_1) {
2816 arr = ctrl->sched.chc1;
2817 len = ctrl->sched.num_cc1;
2818 } else {
2819 arr = ctrl->sched.chc3;
2820 len = ctrl->sched.num_cc3;
2821 }
2822 for (i = 0; i < len; i++) {
2823 struct slim_ich *slc = arr[i];
2824
2825 if (slc->state == SLIM_CH_ACTIVE ||
2826		    slc->state == SLIM_CH_SUSPENDED) {
2827			slc->offset = slc->newoff;
2828			slc->interval = slc->newintr;
2829		}
2830	}
2831}
2832static void slim_chan_changes(struct slim_device *sb, bool revert)
2833{
2834 struct slim_controller *ctrl = sb->ctrl;
2835
2836 while (!list_empty(&sb->mark_define)) {
2837 struct slim_ich *slc;
2838 struct slim_pending_ch *pch =
2839 list_entry(sb->mark_define.next,
2840 struct slim_pending_ch, pending);
2841 slc = &ctrl->chans[pch->chan];
2842 if (revert) {
2843 if (slc->state == SLIM_CH_PENDING_ACTIVE) {
2844 u32 sl = slc->seglen << slc->rootexp;
2845
2846 if (slc->coeff == SLIM_COEFF_3)
2847 sl *= 3;
2848 if (!ctrl->allocbw)
2849 ctrl->sched.usedslots -= sl;
2850 slim_remove_ch(ctrl, slc);
2851 slc->state = SLIM_CH_DEFINED;
2852 }
2853 } else {
2854 slc->state = SLIM_CH_ACTIVE;
2855 slc->def++;
2856 }
2857 list_del_init(&pch->pending);
2858 kfree(pch);
2859 }
2860
2861 while (!list_empty(&sb->mark_removal)) {
2862 struct slim_pending_ch *pch =
2863 list_entry(sb->mark_removal.next,
2864 struct slim_pending_ch, pending);
2865 struct slim_ich *slc = &ctrl->chans[pch->chan];
2866 u32 sl = slc->seglen << slc->rootexp;
2867
2868 if (revert || slc->def > 0) {
2869 if (slc->coeff == SLIM_COEFF_3)
2870 sl *= 3;
2871 if (!ctrl->allocbw)
2872 ctrl->sched.usedslots += sl;
2873 if (revert)
2874 slc->def++;
2875 slc->state = SLIM_CH_ACTIVE;
2876 } else
2877 slim_remove_ch(ctrl, slc);
2878 list_del_init(&pch->pending);
2879 kfree(pch);
2880 }
2881
2882 while (!list_empty(&sb->mark_suspend)) {
2883 struct slim_pending_ch *pch =
2884 list_entry(sb->mark_suspend.next,
2885 struct slim_pending_ch, pending);
2886 struct slim_ich *slc = &ctrl->chans[pch->chan];
2887
2888 if (revert)
2889 slc->state = SLIM_CH_ACTIVE;
2890 list_del_init(&pch->pending);
2891 kfree(pch);
2892 }
2893 /* Change already active channel if reconfig succeeded */
2894 if (!revert) {
2895 slim_change_existing_chans(ctrl, SLIM_COEFF_1);
2896 slim_change_existing_chans(ctrl, SLIM_COEFF_3);
2897 }
2898}
2899
2900/*
2901 * slim_reconfigure_now: Request reconfiguration now.
2902 * @sb: client handle
2903 * This API does what the commit flag in other scheduling APIs does.
2904 * -EXFULL is returned if there is no space in TDM to reserve the
2905 * bandwidth. -EBUSY is returned if a reconfiguration request is already in
2906 * progress.
2907 */
2908int slim_reconfigure_now(struct slim_device *sb)
2909{
2910 u8 i;
2911 u8 wbuf[4];
2912 u32 clkgear, subframe;
2913 u32 curexp;
2914 int ret;
2915 struct slim_controller *ctrl = sb->ctrl;
2916 u32 expshft;
2917 u32 segdist;
2918 struct slim_pending_ch *pch;
2919 DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION, 0, 3,
2920 NULL, NULL, sb->laddr);
2921
2922 mutex_lock(&ctrl->sched.m_reconf);
2923 /*
2924 * If there are no pending changes from this client, avoid sending
2925 * the reconfiguration sequence
2926 */
2927 if (sb->pending_msgsl == sb->cur_msgsl &&
2928 list_empty(&sb->mark_define) &&
2929 list_empty(&sb->mark_suspend)) {
2930 struct list_head *pos, *next;
2931
2932 list_for_each_safe(pos, next, &sb->mark_removal) {
2933 struct slim_ich *slc;
2934
2935 pch = list_entry(pos, struct slim_pending_ch, pending);
2936 slc = &ctrl->chans[pch->chan];
2937 if (slc->def > 0)
2938 slc->def--;
2939 /* Disconnect source port to free it up */
2940 if (SLIM_HDL_TO_LA(slc->srch) == sb->laddr)
2941 slc->srch = 0;
2942 /*
2943 * If controller overrides BW allocation,
2944 * delete this in remove channel itself
2945 */
2946 if (slc->def != 0 && !ctrl->allocbw) {
2947 list_del(&pch->pending);
2948 kfree(pch);
2949 }
2950 }
2951 if (list_empty(&sb->mark_removal)) {
2952 mutex_unlock(&ctrl->sched.m_reconf);
2953 pr_info("SLIM_CL: skip reconfig sequence");
2954 return 0;
2955 }
2956 }
2957
2958 ctrl->sched.pending_msgsl += sb->pending_msgsl - sb->cur_msgsl;
2959 list_for_each_entry(pch, &sb->mark_define, pending) {
2960 struct slim_ich *slc = &ctrl->chans[pch->chan];
2961
2962 slim_add_ch(ctrl, slc);
2963 if (slc->state < SLIM_CH_ACTIVE)
2964 slc->state = SLIM_CH_PENDING_ACTIVE;
2965 }
2966
2967 list_for_each_entry(pch, &sb->mark_removal, pending) {
2968 struct slim_ich *slc = &ctrl->chans[pch->chan];
2969 u32 sl = slc->seglen << slc->rootexp;
2970
2971 if (slc->coeff == SLIM_COEFF_3)
2972 sl *= 3;
2973 if (!ctrl->allocbw)
2974 ctrl->sched.usedslots -= sl;
2975 slc->state = SLIM_CH_PENDING_REMOVAL;
2976 }
2977 list_for_each_entry(pch, &sb->mark_suspend, pending) {
2978 struct slim_ich *slc = &ctrl->chans[pch->chan];
2979
2980 slc->state = SLIM_CH_SUSPENDED;
2981 }
2982
2983 /*
2984 * Controller can override default channel scheduling algorithm.
2985 * (e.g. if controller needs to use fixed channel scheduling based
2986 * on number of channels)
2987 */
2988 if (ctrl->allocbw)
2989 ret = ctrl->allocbw(sb, &subframe, &clkgear);
2990 else
2991 ret = slim_allocbw(sb, &subframe, &clkgear);
2992
2993 if (!ret) {
2994 ret = slim_processtxn(ctrl, &txn, false);
2995 dev_dbg(&ctrl->dev, "sending begin_reconfig:ret:%d\n", ret);
2996 }
2997
2998 if (!ret && subframe != ctrl->sched.subfrmcode) {
2999 wbuf[0] = (u8)(subframe & 0xFF);
3000 txn.mc = SLIM_MSG_MC_NEXT_SUBFRAME_MODE;
3001 txn.len = 1;
3002 txn.rl = 4;
3003 txn.wbuf = wbuf;
3004 ret = slim_processtxn(ctrl, &txn, false);
3005 dev_dbg(&ctrl->dev, "sending subframe:%d,ret:%d\n",
3006 (int)wbuf[0], ret);
3007 }
3008 if (!ret && clkgear != ctrl->clkgear) {
3009 wbuf[0] = (u8)(clkgear & 0xFF);
3010 txn.mc = SLIM_MSG_MC_NEXT_CLOCK_GEAR;
3011 txn.len = 1;
3012 txn.rl = 4;
3013 txn.wbuf = wbuf;
3014 ret = slim_processtxn(ctrl, &txn, false);
3015 dev_dbg(&ctrl->dev, "sending clkgear:%d,ret:%d\n",
3016 (int)wbuf[0], ret);
3017 }
3018 if (ret)
3019 goto revert_reconfig;
3020
3021 expshft = SLIM_MAX_CLK_GEAR - clkgear;
3022 /* activate/remove channel */
3023 list_for_each_entry(pch, &sb->mark_define, pending) {
3024 struct slim_ich *slc = &ctrl->chans[pch->chan];
3025 /* Define content */
3026 wbuf[0] = slc->chan;
3027 wbuf[1] = slc->prrate;
3028 wbuf[2] = slc->prop.dataf | (slc->prop.auxf << 4);
3029 wbuf[3] = slc->prop.sampleszbits / SLIM_CL_PER_SL;
3030 txn.mc = SLIM_MSG_MC_NEXT_DEFINE_CONTENT;
3031 txn.len = 4;
3032 txn.rl = 7;
3033 txn.wbuf = wbuf;
3034 dev_dbg(&ctrl->dev, "define content, activate:%x, %x, %x, %x\n",
3035 wbuf[0], wbuf[1], wbuf[2], wbuf[3]);
3036 /* Right now, channel link bit is not supported */
3037 ret = slim_processtxn(ctrl, &txn, false);
3038 if (ret)
3039 goto revert_reconfig;
3040
3041 txn.mc = SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL;
3042 txn.len = 1;
3043 txn.rl = 4;
3044 ret = slim_processtxn(ctrl, &txn, false);
3045 if (ret)
3046 goto revert_reconfig;
3047 }
3048
3049 list_for_each_entry(pch, &sb->mark_removal, pending) {
3050 struct slim_ich *slc = &ctrl->chans[pch->chan];
3051
3052 dev_dbg(&ctrl->dev, "remove chan:%x\n", pch->chan);
3053 wbuf[0] = slc->chan;
3054 txn.mc = SLIM_MSG_MC_NEXT_REMOVE_CHANNEL;
3055 txn.len = 1;
3056 txn.rl = 4;
3057 txn.wbuf = wbuf;
3058 ret = slim_processtxn(ctrl, &txn, false);
3059 if (ret)
3060 goto revert_reconfig;
3061 }
3062 list_for_each_entry(pch, &sb->mark_suspend, pending) {
3063 struct slim_ich *slc = &ctrl->chans[pch->chan];
3064
3065 dev_dbg(&ctrl->dev, "suspend chan:%x\n", pch->chan);
3066 wbuf[0] = slc->chan;
3067 txn.mc = SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL;
3068 txn.len = 1;
3069 txn.rl = 4;
3070 txn.wbuf = wbuf;
3071 ret = slim_processtxn(ctrl, &txn, false);
3072 if (ret)
3073 goto revert_reconfig;
3074 }
3075
3076 /* Define CC1 channel */
3077 for (i = 0; i < ctrl->sched.num_cc1; i++) {
3078 struct slim_ich *slc = ctrl->sched.chc1[i];
3079
3080 if (slc->state == SLIM_CH_PENDING_REMOVAL)
3081 continue;
3082 curexp = slc->rootexp + expshft;
3083 segdist = (slc->newoff << curexp) & 0x1FF;
3084 expshft = SLIM_MAX_CLK_GEAR - clkgear;
3085 dev_dbg(&ctrl->dev, "new-intr:%d, old-intr:%d, dist:%d\n",
3086 slc->newintr, slc->interval, segdist);
3087 dev_dbg(&ctrl->dev, "new-off:%d, old-off:%d\n",
3088 slc->newoff, slc->offset);
3089
3090 if (slc->state < SLIM_CH_ACTIVE || slc->def < slc->ref ||
3091 slc->newintr != slc->interval ||
3092 slc->newoff != slc->offset) {
3093 segdist |= 0x200;
3094 segdist >>= curexp;
3095 segdist |= (slc->newoff << (curexp + 1)) & 0xC00;
3096 wbuf[0] = slc->chan;
3097 wbuf[1] = (u8)(segdist & 0xFF);
3098 wbuf[2] = (u8)((segdist & 0xF00) >> 8) |
3099 (slc->prop.prot << 4);
3100 wbuf[3] = slc->seglen;
3101 txn.mc = SLIM_MSG_MC_NEXT_DEFINE_CHANNEL;
3102 txn.len = 4;
3103 txn.rl = 7;
3104 txn.wbuf = wbuf;
3105 ret = slim_processtxn(ctrl, &txn, false);
3106 if (ret)
3107 goto revert_reconfig;
3108 }
3109 }
3110
3111 /* Define CC3 channels */
3112 for (i = 0; i < ctrl->sched.num_cc3; i++) {
3113 struct slim_ich *slc = ctrl->sched.chc3[i];
3114
3115 if (slc->state == SLIM_CH_PENDING_REMOVAL)
3116 continue;
3117 curexp = slc->rootexp + expshft;
3118 segdist = (slc->newoff << curexp) & 0x1FF;
3119 expshft = SLIM_MAX_CLK_GEAR - clkgear;
3120 dev_dbg(&ctrl->dev, "new-intr:%d, old-intr:%d, dist:%d\n",
3121 slc->newintr, slc->interval, segdist);
3122 dev_dbg(&ctrl->dev, "new-off:%d, old-off:%d\n",
3123 slc->newoff, slc->offset);
3124
3125 if (slc->state < SLIM_CH_ACTIVE || slc->def < slc->ref ||
3126 slc->newintr != slc->interval ||
3127 slc->newoff != slc->offset) {
3128 segdist |= 0x200;
3129 segdist >>= curexp;
3130 segdist |= 0xC00;
3131 wbuf[0] = slc->chan;
3132 wbuf[1] = (u8)(segdist & 0xFF);
3133 wbuf[2] = (u8)((segdist & 0xF00) >> 8) |
3134 (slc->prop.prot << 4);
3135 wbuf[3] = (u8)(slc->seglen);
3136 txn.mc = SLIM_MSG_MC_NEXT_DEFINE_CHANNEL;
3137 txn.len = 4;
3138 txn.rl = 7;
3139 txn.wbuf = wbuf;
3140 ret = slim_processtxn(ctrl, &txn, false);
3141 if (ret)
3142 goto revert_reconfig;
3143 }
3144 }
3145 txn.mc = SLIM_MSG_MC_RECONFIGURE_NOW;
3146 txn.len = 0;
3147 txn.rl = 3;
3148 txn.wbuf = NULL;
3149 ret = slim_processtxn(ctrl, &txn, false);
3150 dev_dbg(&ctrl->dev, "reconfig now:ret:%d\n", ret);
3151 if (!ret) {
3152 ctrl->sched.subfrmcode = subframe;
3153 ctrl->clkgear = clkgear;
3154 ctrl->sched.msgsl = ctrl->sched.pending_msgsl;
3155 sb->cur_msgsl = sb->pending_msgsl;
3156 slim_chan_changes(sb, false);
3157 mutex_unlock(&ctrl->sched.m_reconf);
3158 return 0;
3159 }
3160
3161revert_reconfig:
3162 /* Revert channel changes */
3163 slim_chan_changes(sb, true);
3164 mutex_unlock(&ctrl->sched.m_reconf);
3165 return ret;
3166}
3167EXPORT_SYMBOL(slim_reconfigure_now);
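
/*
 * Usage sketch (illustrative only; codec_sb, grph and the bandwidth figure
 * are placeholders): several scheduling requests can be batched and then
 * committed with a single reconfiguration sequence:
 *
 *	slim_reservemsg_bw(codec_sb, 48000, false);	// no commit yet
 *	slim_control_ch(codec_sb, grph, SLIM_CH_ACTIVATE, false);
 *	err = slim_reconfigure_now(codec_sb);		// single commit
 */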
3168
3169static int add_pending_ch(struct list_head *listh, u8 chan)
3170{
3171 struct slim_pending_ch *pch;
3172
3173 pch = kmalloc(sizeof(struct slim_pending_ch), GFP_KERNEL);
3174 if (!pch)
3175 return -ENOMEM;
3176 pch->chan = chan;
3177 list_add_tail(&pch->pending, listh);
3178 return 0;
3179}
3180
3181/*
3182 * slim_control_ch: Channel control API.
3183 * @sb: client handle
3184 * @chanh: group or channel handle to be controlled
3185 * @chctrl: Control command (activate/suspend/remove)
3186 * @commit: flag to indicate whether the control should take effect right-away.
3187 * This API activates, removes or suspends a channel (or group of channels).
3188 * chanh indicates the channel or group handle (returned by the define_ch API).
3189 * Reconfiguration may be time-consuming since it can change all other active
3190 * channel allocations on the bus, the clock gear used by the slimbus, and the
3191 * control space width used for messaging.
3192 * commit makes sure that multiple channels can be activated/deactivated before
3193 * reconfiguration is started.
3194 * -EXFULL is returned if there is no space in TDM to reserve the bandwidth.
3195 * -EISCONN/-ENOTCONN is returned if the channel is already connected or not
3196 * yet defined.
3197 * -EINVAL is returned if individual control of a grouped-channel is attempted.
3198 */
3199int slim_control_ch(struct slim_device *sb, u16 chanh,
3200 enum slim_ch_control chctrl, bool commit)
3201{
3202 struct slim_controller *ctrl = sb->ctrl;
3203 int ret = 0;
3204 /* Get rid of the group flag in MSB if any */
3205 u8 chan = SLIM_HDL_TO_CHIDX(chanh);
3206 u8 nchan = 0;
3207 struct slim_ich *slc = &ctrl->chans[chan];
3208
3209 if (!(slc->nextgrp & SLIM_START_GRP))
3210 return -EINVAL;
3211
3212 mutex_lock(&sb->sldev_reconf);
3213 mutex_lock(&ctrl->sched.m_reconf);
3214 do {
3215 struct slim_pending_ch *pch;
3216 u8 add_mark_removal = true;
3217
3218 slc = &ctrl->chans[chan];
3219 dev_dbg(&ctrl->dev, "chan:%d,ctrl:%d,def:%d", chan, chctrl,
3220 slc->def);
3221 if (slc->state < SLIM_CH_DEFINED) {
3222 ret = -ENOTCONN;
3223 break;
3224 }
3225 if (chctrl == SLIM_CH_SUSPEND) {
3226 ret = add_pending_ch(&sb->mark_suspend, chan);
3227 if (ret)
3228 break;
3229 } else if (chctrl == SLIM_CH_ACTIVATE) {
3230 if (slc->state > SLIM_CH_ACTIVE) {
3231 ret = -EISCONN;
3232 break;
3233 }
3234 ret = add_pending_ch(&sb->mark_define, chan);
3235 if (ret)
3236 break;
3237 } else {
3238 if (slc->state < SLIM_CH_ACTIVE) {
3239 ret = -ENOTCONN;
3240 break;
3241 }
3242			/* If a channel removal request arrives while the channel
3243			 * is still pending in mark_define, remove it from the
3244			 * define list instead of adding it to the removal list
3245			 */
3246 if (!list_empty(&sb->mark_define)) {
3247 struct list_head *pos, *next;
3248
3249 list_for_each_safe(pos, next,
3250 &sb->mark_define) {
3251 pch = list_entry(pos,
3252 struct slim_pending_ch,
3253 pending);
3254 if (pch->chan == chan) {
3255 list_del(&pch->pending);
3256 kfree(pch);
3257 add_mark_removal = false;
3258 break;
3259 }
3260 }
3261 }
3262 if (add_mark_removal == true) {
3263 ret = add_pending_ch(&sb->mark_removal, chan);
3264 if (ret)
3265 break;
3266 }
3267 }
3268
3269 nchan++;
3270 if (nchan < SLIM_GRP_TO_NCHAN(chanh))
3271 chan = SLIM_HDL_TO_CHIDX(slc->nextgrp);
3272 } while (nchan < SLIM_GRP_TO_NCHAN(chanh));
3273 mutex_unlock(&ctrl->sched.m_reconf);
3274 if (!ret && commit == true)
3275 ret = slim_reconfigure_now(sb);
3276 mutex_unlock(&sb->sldev_reconf);
3277 return ret;
3278}
3279EXPORT_SYMBOL(slim_control_ch);
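
/*
 * Usage sketch (illustrative only; "grph" is a group handle returned by
 * slim_define_ch()):
 *
 *	// activate the whole group and commit the reconfiguration right away
 *	err = slim_control_ch(codec_sb, grph, SLIM_CH_ACTIVATE, true);
 *
 *	// later, suspend it without committing, e.g. to batch other changes
 *	err = slim_control_ch(codec_sb, grph, SLIM_CH_SUSPEND, false);
 */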
3280
3281/*
3282 * slim_reservemsg_bw: Request to reserve bandwidth for messages.
3283 * @sb: client handle
3284 * @bw_bps: message bandwidth in bits per second to be requested
3285 * @commit: indicates whether the reconfiguration needs to be acted upon.
3286 * This API call can be grouped with slim_control_ch API call with only one of
3287 * the APIs specifying the commit flag to avoid reconfiguration being called too
3288 * frequently. -EXFULL is returned if there is no space in TDM to reserve the
3289 * bandwidth. -EBUSY is returned if reconfiguration is requested, but a request
3290 * is already in progress.
3291 */
3292int slim_reservemsg_bw(struct slim_device *sb, u32 bw_bps, bool commit)
3293{
3294 struct slim_controller *ctrl = sb->ctrl;
3295 int ret = 0;
3296 int sl;
3297
3298 mutex_lock(&sb->sldev_reconf);
3299 if ((bw_bps >> 3) >= ctrl->a_framer->rootfreq)
3300 sl = SLIM_SL_PER_SUPERFRAME;
3301 else {
3302 sl = (bw_bps * (SLIM_CL_PER_SUPERFRAME_DIV8/SLIM_CL_PER_SL/2) +
3303 (ctrl->a_framer->rootfreq/2 - 1)) /
3304 (ctrl->a_framer->rootfreq/2);
3305 }
3306 dev_dbg(&ctrl->dev, "request:bw:%d, slots:%d, current:%d\n", bw_bps, sl,
3307 sb->cur_msgsl);
3308 sb->pending_msgsl = sl;
3309 if (commit == true)
3310 ret = slim_reconfigure_now(sb);
3311 mutex_unlock(&sb->sldev_reconf);
3312 return ret;
3313}
3314EXPORT_SYMBOL(slim_reservemsg_bw);
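
/*
 * Usage sketch (illustrative only; the bandwidth figure is a placeholder):
 *
 *	// reserve roughly 24 kbps of message bandwidth and commit immediately
 *	err = slim_reservemsg_bw(codec_sb, 24000, true);
 */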
3315
3316/*
3317 * slim_ctrl_clk_pause: Called by a slimbus controller to request that the
3318 * bus clock be paused, or to wake the bus up out of clock pause
3320 * @ctrl: controller requesting bus to be paused or woken up
3321 * @wakeup: Wakeup this controller from clock pause.
3322 * @restart: Restart time value per spec used for clock pause. This value
3323 * isn't used when the controller is to be woken up.
3324 * This API executes clock pause reconfiguration sequence if wakeup is false.
3325 * If wakeup is true, the controller's wakeup callback is invoked instead.
3326 * Once paused, the slimbus clock is idle and the controller can disable it.
3327 */
3328int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup, u8 restart)
3329{
3330 int ret = 0;
3331 int i;
3332 DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_CLK_PAUSE_SEQ_FLG |
3333 SLIM_MSG_MC_BEGIN_RECONFIGURATION, 0, 3,
3334 NULL, NULL, 0);
3335
3336 if (wakeup == false && restart > SLIM_CLK_UNSPECIFIED)
3337 return -EINVAL;
3338 mutex_lock(&ctrl->m_ctrl);
3339 if (wakeup) {
3340 if (ctrl->clk_state == SLIM_CLK_ACTIVE) {
3341 mutex_unlock(&ctrl->m_ctrl);
3342 return 0;
3343 }
3344 wait_for_completion(&ctrl->pause_comp);
3345 /*
3346		 * The slimbus framework will call the controller's wakeup callback.
3347		 * The controller should make sure that it brings the active framer
3348		 * out of clock pause by applying the appropriate settings
3349 */
3350 if (ctrl->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup)
3351 ret = ctrl->wakeup(ctrl);
3352 /*
3353 * If wakeup fails, make sure that next attempt can succeed.
3354 * Since we already consumed pause_comp, complete it so
3355 * that next wakeup isn't blocked forever
3356 */
3357 if (!ret)
3358 ctrl->clk_state = SLIM_CLK_ACTIVE;
3359 else
3360 complete(&ctrl->pause_comp);
3361 mutex_unlock(&ctrl->m_ctrl);
3362 return ret;
3363 }
3364
3365 switch (ctrl->clk_state) {
3366 case SLIM_CLK_ENTERING_PAUSE:
3367 case SLIM_CLK_PAUSE_FAILED:
3368 /*
3369 * If controller is already trying to enter clock pause,
3370 * let it finish.
3371 * In case of error, retry
3372 * In both cases, previous clock pause has signalled
3373 * completion.
3374 */
3375 wait_for_completion(&ctrl->pause_comp);
3376 /* retry upon failure */
3377 if (ctrl->clk_state == SLIM_CLK_PAUSE_FAILED) {
3378 ctrl->clk_state = SLIM_CLK_ACTIVE;
3379 } else {
3380 mutex_unlock(&ctrl->m_ctrl);
3381 /*
3382 * Signal completion so that wakeup can wait on
3383 * it.
3384 */
3385 complete(&ctrl->pause_comp);
3386 return 0;
3387 }
3388 break;
3389 case SLIM_CLK_PAUSED:
3390 /* already paused */
3391 mutex_unlock(&ctrl->m_ctrl);
3392 return 0;
3393 case SLIM_CLK_ACTIVE:
3394 default:
3395 break;
3396 }
3397 /* Pending response for a message */
3398 for (i = 0; i < ctrl->last_tid; i++) {
3399 if (ctrl->txnt[i]) {
3400 ret = -EBUSY;
3401 pr_info("slim_clk_pause: txn-rsp for %d pending", i);
3402 mutex_unlock(&ctrl->m_ctrl);
3403 return -EBUSY;
3404 }
3405 }
3406 ctrl->clk_state = SLIM_CLK_ENTERING_PAUSE;
3407 mutex_unlock(&ctrl->m_ctrl);
3408
3409 mutex_lock(&ctrl->sched.m_reconf);
3410 /* Data channels active */
3411 if (ctrl->sched.usedslots) {
3412 pr_info("slim_clk_pause: data channel active");
3413 ret = -EBUSY;
3414 goto clk_pause_ret;
3415 }
3416
3417 ret = slim_processtxn(ctrl, &txn, false);
3418 if (ret)
3419 goto clk_pause_ret;
3420
3421 txn.mc = SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_NEXT_PAUSE_CLOCK;
3422 txn.len = 1;
3423 txn.rl = 4;
3424 txn.wbuf = &restart;
3425 ret = slim_processtxn(ctrl, &txn, false);
3426 if (ret)
3427 goto clk_pause_ret;
3428
3429 txn.mc = SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_RECONFIGURE_NOW;
3430 txn.len = 0;
3431 txn.rl = 3;
3432 txn.wbuf = NULL;
3433 ret = slim_processtxn(ctrl, &txn, false);
3434 if (ret)
3435 goto clk_pause_ret;
3436
3437clk_pause_ret:
3438 if (ret)
3439 ctrl->clk_state = SLIM_CLK_PAUSE_FAILED;
3440 else
3441 ctrl->clk_state = SLIM_CLK_PAUSED;
3442 complete(&ctrl->pause_comp);
3443 mutex_unlock(&ctrl->sched.m_reconf);
3444 return ret;
3445}
3446EXPORT_SYMBOL(slim_ctrl_clk_pause);
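
/*
 * Usage sketch (illustrative only): a controller driver would typically call
 * this from its runtime-PM hooks:
 *
 *	// runtime suspend: enter clock pause with an unspecified restart time
 *	ret = slim_ctrl_clk_pause(ctrl, false, SLIM_CLK_UNSPECIFIED);
 *
 *	// runtime resume: wake the bus; the restart value is ignored here
 *	ret = slim_ctrl_clk_pause(ctrl, true, 0);
 */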
3447
3448MODULE_LICENSE("GPL v2");
3449MODULE_DESCRIPTION("Slimbus module");
3450MODULE_ALIAS("platform:slimbus");