/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/idr.h>
#include <linux/pm_runtime.h>
#include <linux/slimbus/slimbus.h>

#define SLIM_PORT_HDL(la, f, p) ((la)<<24 | (f) << 16 | (p))

#define SLIM_HDL_TO_LA(hdl) ((u32)((hdl) & 0xFF000000) >> 24)
#define SLIM_HDL_TO_FLOW(hdl) (((u32)(hdl) & 0xFF0000) >> 16)
#define SLIM_HDL_TO_PORT(hdl) ((u32)(hdl) & 0xFF)

#define SLIM_HDL_TO_CHIDX(hdl) ((u16)(hdl) & 0xFF)
#define SLIM_GRP_TO_NCHAN(hdl) ((u16)(hdl >> 8) & 0xFF)

#define SLIM_SLAVE_PORT(p, la) (((la)<<16) | (p))
#define SLIM_MGR_PORT(p) ((0xFF << 16) | (p))
#define SLIM_LA_MANAGER 0xFF
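
/*
 * Note on port handles (illustrative, assuming only the macros above): a
 * handle packs logical address, flow and port number as
 * (la << 24 | flow << 16 | port). For example, port 2 on the manager's
 * ported device:
 *
 *	u32 hdl = SLIM_PORT_HDL(SLIM_LA_MANAGER, SLIM_SRC, 2);
 *	u8 pn = SLIM_HDL_TO_PORT(hdl);	(pn == 2)
 *	u8 la = SLIM_HDL_TO_LA(hdl);	(la == SLIM_LA_MANAGER)
 */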

#define SLIM_START_GRP (1 << 8)
#define SLIM_END_GRP (1 << 9)

#define SLIM_MAX_INTR_COEFF_3 (SLIM_SL_PER_SUPERFRAME/3)
#define SLIM_MAX_INTR_COEFF_1 SLIM_SL_PER_SUPERFRAME

static DEFINE_MUTEX(slim_lock);
static DEFINE_IDR(ctrl_idr);
static struct device_type slim_dev_type;
static struct device_type slim_ctrl_type;

static const struct slim_device_id *slim_match(const struct slim_device_id *id,
					const struct slim_device *slim_dev)
{
	while (id->name[0]) {
		if (strncmp(slim_dev->name, id->name, SLIMBUS_NAME_SIZE) == 0)
			return id;
		id++;
	}
	return NULL;
}

static int slim_device_match(struct device *dev, struct device_driver *driver)
{
	struct slim_device *slim_dev;
	struct slim_driver *drv = to_slim_driver(driver);

	if (dev->type == &slim_dev_type)
		slim_dev = to_slim_device(dev);
	else
		return 0;
	if (drv->id_table)
		return slim_match(drv->id_table, slim_dev) != NULL;

	if (driver->name)
		return strncmp(slim_dev->name, driver->name, SLIMBUS_NAME_SIZE)
			== 0;
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int slim_legacy_suspend(struct device *dev, pm_message_t mesg)
{
	struct slim_device *slim_dev = NULL;
	struct slim_driver *driver;
	if (dev->type == &slim_dev_type)
		slim_dev = to_slim_device(dev);

	if (!slim_dev || !dev->driver)
		return 0;

	driver = to_slim_driver(dev->driver);
	if (!driver->suspend)
		return 0;

	return driver->suspend(slim_dev, mesg);
}

static int slim_legacy_resume(struct device *dev)
{
	struct slim_device *slim_dev = NULL;
	struct slim_driver *driver;
	if (dev->type == &slim_dev_type)
		slim_dev = to_slim_device(dev);

	if (!slim_dev || !dev->driver)
		return 0;

	driver = to_slim_driver(dev->driver);
	if (!driver->resume)
		return 0;

	return driver->resume(slim_dev);
}

static int slim_pm_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_suspend(dev);
	else
		return slim_legacy_suspend(dev, PMSG_SUSPEND);
}

static int slim_pm_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_resume(dev);
	else
		return slim_legacy_resume(dev);
}

#else
#define slim_pm_suspend		NULL
#define slim_pm_resume		NULL
#endif

static const struct dev_pm_ops slimbus_pm = {
	.suspend = slim_pm_suspend,
	.resume = slim_pm_resume,
	SET_RUNTIME_PM_OPS(
		pm_generic_suspend,
		pm_generic_resume,
		pm_generic_runtime_idle
	)
};
struct bus_type slimbus_type = {
	.name		= "slimbus",
	.match		= slim_device_match,
	.pm		= &slimbus_pm,
};
EXPORT_SYMBOL_GPL(slimbus_type);

struct device slimbus_dev = {
	.init_name = "slimbus",
};

static void __exit slimbus_exit(void)
{
	device_unregister(&slimbus_dev);
	bus_unregister(&slimbus_type);
}

static int __init slimbus_init(void)
{
	int retval;

	retval = bus_register(&slimbus_type);
	if (!retval)
		retval = device_register(&slimbus_dev);

	if (retval)
		bus_unregister(&slimbus_type);

	return retval;
}
postcore_initcall(slimbus_init);
module_exit(slimbus_exit);

static int slim_drv_probe(struct device *dev)
{
	const struct slim_driver *sdrv = to_slim_driver(dev->driver);
	struct slim_device *sbdev = to_slim_device(dev);
	struct slim_controller *ctrl = sbdev->ctrl;

	if (sdrv->probe) {
		int ret;
		ret = sdrv->probe(sbdev);
		if (ret)
			return ret;
		if (sdrv->device_up)
			queue_work(ctrl->wq, &sbdev->wd);
		return 0;
	}
	return -ENODEV;
}

static int slim_drv_remove(struct device *dev)
{
	const struct slim_driver *sdrv = to_slim_driver(dev->driver);
	struct slim_device *sbdev = to_slim_device(dev);

	sbdev->notified = false;
	if (sdrv->remove)
		return sdrv->remove(to_slim_device(dev));
	return -ENODEV;
}

static void slim_drv_shutdown(struct device *dev)
{
	const struct slim_driver *sdrv = to_slim_driver(dev->driver);

	if (sdrv->shutdown)
		sdrv->shutdown(to_slim_device(dev));
}

/*
 * slim_driver_register: Client driver registration with slimbus
 * @drv: Client driver to be associated with client-device.
 * This API will register the client driver with the slimbus framework.
 * It is called from the driver's module-init function.
 */
int slim_driver_register(struct slim_driver *drv)
{
	drv->driver.bus = &slimbus_type;
	if (drv->probe)
		drv->driver.probe = slim_drv_probe;

	if (drv->remove)
		drv->driver.remove = slim_drv_remove;

	if (drv->shutdown)
		drv->driver.shutdown = slim_drv_shutdown;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(slim_driver_register);
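
/*
 * Usage sketch for slim_driver_register() (illustrative only; the driver
 * name and callbacks below are hypothetical, and matching here relies on
 * the driver name being identical to the slim_device name):
 *
 *	static int my_codec_probe(struct slim_device *sbdev)
 *	{
 *		(set up client state here)
 *		return 0;
 *	}
 *
 *	static struct slim_driver my_codec_driver = {
 *		.driver = {
 *			.name = "my-codec-slim",
 *			.owner = THIS_MODULE,
 *		},
 *		.probe = my_codec_probe,
 *	};
 *
 *	static int __init my_codec_init(void)
 *	{
 *		return slim_driver_register(&my_codec_driver);
 *	}
 *	module_init(my_codec_init);
 */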

/*
 * slim_driver_unregister: Undo effects of slim_driver_register
 * @drv: Client driver to be unregistered
 */
void slim_driver_unregister(struct slim_driver *drv)
{
	if (drv)
		driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(slim_driver_unregister);

#define slim_ctrl_attr_gr NULL

static void slim_ctrl_release(struct device *dev)
{
	struct slim_controller *ctrl = to_slim_controller(dev);

	complete(&ctrl->dev_released);
}

static struct device_type slim_ctrl_type = {
	.groups		= slim_ctrl_attr_gr,
	.release	= slim_ctrl_release,
};

static struct slim_controller *slim_ctrl_get(struct slim_controller *ctrl)
{
	if (!ctrl || !get_device(&ctrl->dev))
		return NULL;

	return ctrl;
}

static void slim_ctrl_put(struct slim_controller *ctrl)
{
	if (ctrl)
		put_device(&ctrl->dev);
}

#define slim_device_attr_gr NULL
#define slim_device_uevent NULL
static void slim_dev_release(struct device *dev)
{
	struct slim_device *sbdev = to_slim_device(dev);
	slim_ctrl_put(sbdev->ctrl);
}

static struct device_type slim_dev_type = {
	.groups		= slim_device_attr_gr,
	.uevent		= slim_device_uevent,
	.release	= slim_dev_release,
};

static void slim_report(struct work_struct *work)
{
	u8 laddr;
	int ret, i;
	struct slim_driver *sbdrv;
	struct slim_device *sbdev =
			container_of(work, struct slim_device, wd);
	struct slim_controller *ctrl = sbdev->ctrl;
	if (!sbdev->dev.driver)
		return;
	/* check if device-up or down needs to be called */
	mutex_lock(&ctrl->m_ctrl);
	/* address no longer valid, means device reported absent */
	for (i = 0; i < ctrl->num_dev; i++) {
		if (sbdev->laddr == ctrl->addrt[i].laddr &&
			ctrl->addrt[i].valid == false &&
			sbdev->notified)
			break;
	}
	mutex_unlock(&ctrl->m_ctrl);
	sbdrv = to_slim_driver(sbdev->dev.driver);
	if (i < ctrl->num_dev) {
		sbdev->notified = false;
		if (sbdrv->device_down)
			sbdrv->device_down(sbdev);
		return;
	}
	if (sbdev->notified)
		return;
	ret = slim_get_logical_addr(sbdev, sbdev->e_addr, 6, &laddr);
	if (!ret) {
		if (sbdrv)
			sbdev->notified = true;
		if (sbdrv->device_up)
			sbdrv->device_up(sbdev);
	}
}

/*
 * slim_add_device: Add a new device without registering board info.
 * @ctrl: Controller to which this device is to be added.
 * Called when device doesn't have an explicit client-driver to be probed, or
 * the client-driver is a module installed dynamically.
 */
int slim_add_device(struct slim_controller *ctrl, struct slim_device *sbdev)
{
	sbdev->dev.bus = &slimbus_type;
	sbdev->dev.parent = ctrl->dev.parent;
	sbdev->dev.type = &slim_dev_type;
	sbdev->dev.driver = NULL;
	sbdev->ctrl = ctrl;
	slim_ctrl_get(ctrl);
	dev_set_name(&sbdev->dev, "%s", sbdev->name);
	mutex_init(&sbdev->sldev_reconf);
	INIT_LIST_HEAD(&sbdev->mark_define);
	INIT_LIST_HEAD(&sbdev->mark_suspend);
	INIT_LIST_HEAD(&sbdev->mark_removal);
	INIT_WORK(&sbdev->wd, slim_report);
	mutex_lock(&ctrl->m_ctrl);
	list_add_tail(&sbdev->dev_list, &ctrl->devs);
	mutex_unlock(&ctrl->m_ctrl);
	/* probe slave on this controller */
	return device_register(&sbdev->dev);
}
EXPORT_SYMBOL_GPL(slim_add_device);
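
/*
 * Usage sketch for slim_add_device() (illustrative; the device name and the
 * 6-byte enumeration address are made up, and "ctrl" is assumed to be the
 * controller this device sits on):
 *
 *	static struct slim_device my_sbdev = {
 *		.name = "my-codec-slim",
 *		.e_addr = {0x00, 0x01, 0x60, 0x00, 0x17, 0x02},
 *	};
 *
 *	ret = slim_add_device(ctrl, &my_sbdev);
 *	if (ret)
 *		dev_err(ctrl->dev.parent, "can't add %s\n", my_sbdev.name);
 */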

struct sbi_boardinfo {
	struct list_head	list;
	struct slim_boardinfo	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(slim_ctrl_list);
static DEFINE_MUTEX(board_lock);

/* If controller is not present, only add to boards list */
static void slim_match_ctrl_to_boardinfo(struct slim_controller *ctrl,
				struct slim_boardinfo *bi)
{
	int ret;
	if (ctrl->nr != bi->bus_num)
		return;

	ret = slim_add_device(ctrl, bi->slim_slave);
	if (ret != 0)
		dev_err(ctrl->dev.parent, "can't create new device for %s\n",
			bi->slim_slave->name);
}

/*
 * slim_register_board_info: Board-initialization routine.
 * @info: List of all devices on all controllers present on the board.
 * @n: number of entries.
 * API enumerates respective devices on corresponding controller.
 * Called from board-init function.
 */
int slim_register_board_info(struct slim_boardinfo const *info, unsigned n)
{
	struct sbi_boardinfo *bi;
	int i;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct slim_controller *ctrl;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctrl, &slim_ctrl_list, list)
			slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info);
		mutex_unlock(&board_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(slim_register_board_info);
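
/*
 * Usage sketch for slim_register_board_info() (illustrative; device name,
 * bus number and enumeration address are hypothetical):
 *
 *	static struct slim_device board_codec = {
 *		.name = "my-codec-slim",
 *		.e_addr = {0x00, 0x01, 0x60, 0x00, 0x17, 0x02},
 *	};
 *	static struct slim_boardinfo board_slim_devs[] = {
 *		{
 *			.bus_num = 1,
 *			.slim_slave = &board_codec,
 *		},
 *	};
 *
 *	slim_register_board_info(board_slim_devs,
 *				 ARRAY_SIZE(board_slim_devs));
 */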

/*
 * slim_ctrl_add_boarddevs: Add devices registered by board-info
 * @ctrl: Controller to which these devices are to be added.
 * This API is called by the controller when it is up and running.
 * If devices on a controller were registered before the controller,
 * this makes sure that they get probed when the controller comes up.
 */
void slim_ctrl_add_boarddevs(struct slim_controller *ctrl)
{
	struct sbi_boardinfo *bi;
	mutex_lock(&board_lock);
	list_add_tail(&ctrl->list, &slim_ctrl_list);
	list_for_each_entry(bi, &board_list, list)
		slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info);
	mutex_unlock(&board_lock);
}
EXPORT_SYMBOL_GPL(slim_ctrl_add_boarddevs);

/*
 * slim_busnum_to_ctrl: Map bus number to controller
 * @busnum: Bus number
 * Returns controller representing this bus number
 */
struct slim_controller *slim_busnum_to_ctrl(u32 bus_num)
{
	struct slim_controller *ctrl;
	mutex_lock(&board_lock);
	list_for_each_entry(ctrl, &slim_ctrl_list, list)
		if (bus_num == ctrl->nr) {
			mutex_unlock(&board_lock);
			return ctrl;
		}
	mutex_unlock(&board_lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(slim_busnum_to_ctrl);

static int slim_register_controller(struct slim_controller *ctrl)
{
	int ret = 0;

	/* Can't register until after driver model init */
	if (WARN_ON(!slimbus_type.p)) {
		ret = -EAGAIN;
		goto out_list;
	}

	dev_set_name(&ctrl->dev, "sb-%d", ctrl->nr);
	ctrl->dev.bus = &slimbus_type;
	ctrl->dev.type = &slim_ctrl_type;
	ctrl->num_dev = 0;
	if (!ctrl->min_cg)
		ctrl->min_cg = SLIM_MIN_CLK_GEAR;
	if (!ctrl->max_cg)
		ctrl->max_cg = SLIM_MAX_CLK_GEAR;
	mutex_init(&ctrl->m_ctrl);
	mutex_init(&ctrl->sched.m_reconf);
	ret = device_register(&ctrl->dev);
	if (ret)
		goto out_list;

	dev_dbg(&ctrl->dev, "Bus [%s] registered:dev:%x\n", ctrl->name,
		(u32)&ctrl->dev);

	if (ctrl->nports) {
		ctrl->ports = kzalloc(ctrl->nports * sizeof(struct slim_port),
					GFP_KERNEL);
		if (!ctrl->ports) {
			ret = -ENOMEM;
			goto err_port_failed;
		}
	}
	if (ctrl->nchans) {
		ctrl->chans = kzalloc(ctrl->nchans * sizeof(struct slim_ich),
					GFP_KERNEL);
		if (!ctrl->chans) {
			ret = -ENOMEM;
			goto err_chan_failed;
		}

		ctrl->sched.chc1 =
			kzalloc(ctrl->nchans * sizeof(struct slim_ich *),
				GFP_KERNEL);
		if (!ctrl->sched.chc1) {
			kfree(ctrl->chans);
			ret = -ENOMEM;
			goto err_chan_failed;
		}
		ctrl->sched.chc3 =
			kzalloc(ctrl->nchans * sizeof(struct slim_ich *),
				GFP_KERNEL);
		if (!ctrl->sched.chc3) {
			kfree(ctrl->sched.chc1);
			kfree(ctrl->chans);
			ret = -ENOMEM;
			goto err_chan_failed;
		}
	}
#ifdef DEBUG
	ctrl->sched.slots = kzalloc(SLIM_SL_PER_SUPERFRAME, GFP_KERNEL);
#endif
	init_completion(&ctrl->pause_comp);

	INIT_LIST_HEAD(&ctrl->devs);
	ctrl->wq = create_singlethread_workqueue(dev_name(&ctrl->dev));
	if (!ctrl->wq)
		goto err_workq_failed;

	return 0;

err_workq_failed:
	kfree(ctrl->sched.chc3);
	kfree(ctrl->sched.chc1);
	kfree(ctrl->chans);
err_chan_failed:
	kfree(ctrl->ports);
err_port_failed:
	device_unregister(&ctrl->dev);
out_list:
	mutex_lock(&slim_lock);
	idr_remove(&ctrl_idr, ctrl->nr);
	mutex_unlock(&slim_lock);
	return ret;
}

/* slim_remove_device: Remove the effect of slim_add_device() */
void slim_remove_device(struct slim_device *sbdev)
{
	device_unregister(&sbdev->dev);
}
EXPORT_SYMBOL_GPL(slim_remove_device);

static void slim_ctrl_remove_device(struct slim_controller *ctrl,
				struct slim_boardinfo *bi)
{
	if (ctrl->nr == bi->bus_num)
		slim_remove_device(bi->slim_slave);
}

/*
 * slim_del_controller: Controller tear-down.
 * Controller added with the above API is torn down using this API.
 */
int slim_del_controller(struct slim_controller *ctrl)
{
	struct slim_controller *found;
	struct sbi_boardinfo *bi;

	/* First make sure that this bus was added */
	mutex_lock(&slim_lock);
	found = idr_find(&ctrl_idr, ctrl->nr);
	mutex_unlock(&slim_lock);
	if (found != ctrl)
		return -EINVAL;

	/* Remove all clients */
	mutex_lock(&board_lock);
	list_for_each_entry(bi, &board_list, list)
		slim_ctrl_remove_device(ctrl, &bi->board_info);
	mutex_unlock(&board_lock);

	init_completion(&ctrl->dev_released);
	device_unregister(&ctrl->dev);

	wait_for_completion(&ctrl->dev_released);
	list_del(&ctrl->list);
	destroy_workqueue(ctrl->wq);
	/* free bus id */
	mutex_lock(&slim_lock);
	idr_remove(&ctrl_idr, ctrl->nr);
	mutex_unlock(&slim_lock);

	kfree(ctrl->sched.chc1);
	kfree(ctrl->sched.chc3);
#ifdef DEBUG
	kfree(ctrl->sched.slots);
#endif
	kfree(ctrl->chans);
	kfree(ctrl->ports);

	return 0;
}
EXPORT_SYMBOL_GPL(slim_del_controller);

/*
 * slim_add_numbered_controller: Controller bring-up.
 * @ctrl: Controller to be registered.
 * A controller is registered with the framework using this API. ctrl->nr is the
 * desired number with which slimbus framework registers the controller.
 * Function will return -EBUSY if the number is in use.
 */
int slim_add_numbered_controller(struct slim_controller *ctrl)
{
	int	id;
	int	status;

	if (ctrl->nr & ~MAX_ID_MASK)
		return -EINVAL;

retry:
	if (idr_pre_get(&ctrl_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	mutex_lock(&slim_lock);
	status = idr_get_new_above(&ctrl_idr, ctrl, ctrl->nr, &id);
	if (status == 0 && id != ctrl->nr) {
		status = -EAGAIN;
		idr_remove(&ctrl_idr, id);
	}
	mutex_unlock(&slim_lock);
	if (status == -EAGAIN)
		goto retry;

	if (status == 0)
		status = slim_register_controller(ctrl);
	return status;
}
EXPORT_SYMBOL_GPL(slim_add_numbered_controller);
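
/*
 * Controller bring-up sketch (illustrative; "my_ctrl" and its callbacks are
 * hypothetical, and only the fields this file uses are shown):
 *
 *	my_ctrl->nr = 1;
 *	my_ctrl->nchans = 256;
 *	my_ctrl->nports = 8;
 *	my_ctrl->xfer_msg = my_xfer_msg;
 *	my_ctrl->set_laddr = my_set_laddr;
 *	ret = slim_add_numbered_controller(my_ctrl);
 *	if (!ret)
 *		slim_ctrl_add_boarddevs(my_ctrl);
 */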

/*
 * slim_report_absent: Controller calls this function when a device
 * reports absent, or when the device cannot be communicated with.
 * @sbdev: Device that cannot be reached, or that sent a report-absent message
 */
void slim_report_absent(struct slim_device *sbdev)
{
	struct slim_controller *ctrl;
	int i;
	if (!sbdev)
		return;
	ctrl = sbdev->ctrl;
	if (!ctrl)
		return;
	/* invalidate logical addresses */
	mutex_lock(&ctrl->m_ctrl);
	for (i = 0; i < ctrl->num_dev; i++) {
		if (sbdev->laddr == ctrl->addrt[i].laddr)
			ctrl->addrt[i].valid = false;
	}
	mutex_unlock(&ctrl->m_ctrl);
	queue_work(ctrl->wq, &sbdev->wd);
}
EXPORT_SYMBOL(slim_report_absent);

/*
 * slim_msg_response: Deliver message response received from a device to the
 * framework.
 * @ctrl: Controller handle
 * @reply: Reply received from the device
 * @len: Length of the reply
 * @tid: Transaction ID with which the framework can associate the reply.
 * Called by controller to inform framework about the response received.
 * This helps keep the API asynchronous: the controller driver doesn't need to
 * manage another table besides the one the framework maintains to map TIDs to
 * buffers.
 */
void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 len)
{
	int i;
	struct slim_msg_txn *txn;

	mutex_lock(&ctrl->m_ctrl);
	txn = ctrl->txnt[tid];
	if (txn == NULL) {
		dev_err(&ctrl->dev, "Got response to invalid TID:%d, len:%d",
				tid, len);
		mutex_unlock(&ctrl->m_ctrl);
		return;
	}
	for (i = 0; i < len; i++)
		txn->rbuf[i] = reply[i];
	if (txn->comp)
		complete(txn->comp);
	ctrl->txnt[tid] = NULL;
	mutex_unlock(&ctrl->m_ctrl);
	kfree(txn);
}
EXPORT_SYMBOL_GPL(slim_msg_response);
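
/*
 * Controller-side sketch (illustrative): when a controller's RX path sees a
 * reply message, it extracts the transaction ID and payload and hands them
 * to the framework. The buffer layout below is hypothetical and depends on
 * the controller's message format:
 *
 *	u8 tid = rxbuf[3];
 *	u8 len = rxlen - 4;
 *
 *	slim_msg_response(ctrl, &rxbuf[4], tid, len);
 */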

static int slim_processtxn(struct slim_controller *ctrl, u8 dt, u16 mc, u16 ec,
			u8 mt, u8 *rbuf, const u8 *wbuf, u8 len, u8 mlen,
			struct completion *comp, u8 la, u8 *tid)
{
	u8 i = 0;
	int ret = 0;
	struct slim_msg_txn *txn = kmalloc(sizeof(struct slim_msg_txn),
					GFP_KERNEL);
	if (!txn)
		return -ENOMEM;
	if (tid) {
		mutex_lock(&ctrl->m_ctrl);
		for (i = 0; i < ctrl->last_tid; i++) {
			if (ctrl->txnt[i] == NULL)
				break;
		}
		if (i >= ctrl->last_tid) {
			if (ctrl->last_tid == 255) {
				mutex_unlock(&ctrl->m_ctrl);
				kfree(txn);
				return -ENOMEM;
			}
			ctrl->txnt = krealloc(ctrl->txnt,
					(i + 1) * sizeof(struct slim_msg_txn *),
					GFP_KERNEL);
			if (!ctrl->txnt) {
				mutex_unlock(&ctrl->m_ctrl);
				kfree(txn);
				return -ENOMEM;
			}
			ctrl->last_tid++;
		}
		ctrl->txnt[i] = txn;
		mutex_unlock(&ctrl->m_ctrl);
		txn->tid = i;
		*tid = i;
	}
	txn->mc = mc;
	txn->mt = mt;
	txn->dt = dt;
	txn->ec = ec;
	txn->la = la;
	txn->rbuf = rbuf;
	txn->wbuf = wbuf;
	txn->rl = mlen;
	txn->len = len;
	txn->comp = comp;

	ret = ctrl->xfer_msg(ctrl, txn);
	if (!tid)
		kfree(txn);
	return ret;
}

static int ctrl_getlogical_addr(struct slim_controller *ctrl, const u8 *eaddr,
				u8 e_len, u8 *entry)
{
	u8 i;
	for (i = 0; i < ctrl->num_dev; i++) {
		if (ctrl->addrt[i].valid &&
			memcmp(ctrl->addrt[i].eaddr, eaddr, e_len) == 0) {
			*entry = i;
			return 0;
		}
	}
	return -ENXIO;
}

/*
 * slim_assign_laddr: Assign a logical address to an enumerated device.
 * @ctrl: Controller with which device is enumerated.
 * @e_addr: 6-byte elemental address of the device.
 * @e_len: buffer length for e_addr
 * @laddr: Return logical address (if valid flag is false)
 * @valid: true if laddr holds a valid address that controller wants to
 * set for this enumeration address. Otherwise framework sets index into
 * address table as logical address.
 * Called by controller in response to REPORT_PRESENT. Framework will assign
 * a logical address to this enumeration address.
 * Function returns -EXFULL to indicate that all logical addresses are already
 * taken.
 */
int slim_assign_laddr(struct slim_controller *ctrl, const u8 *e_addr,
				u8 e_len, u8 *laddr, bool valid)
{
	int ret;
	u8 i = 0;
	bool exists = false;
	struct slim_device *sbdev;
	mutex_lock(&ctrl->m_ctrl);
	/* already assigned */
	if (ctrl_getlogical_addr(ctrl, e_addr, e_len, &i) == 0) {
		*laddr = ctrl->addrt[i].laddr;
		exists = true;
	} else {
		if (ctrl->num_dev >= 254) {
			ret = -EXFULL;
			goto ret_assigned_laddr;
		}
		for (i = 0; i < ctrl->num_dev; i++) {
			if (ctrl->addrt[i].valid == false)
				break;
		}
		if (i == ctrl->num_dev) {
			ctrl->addrt = krealloc(ctrl->addrt,
					(ctrl->num_dev + 1) *
					sizeof(struct slim_addrt),
					GFP_KERNEL);
			if (!ctrl->addrt) {
				ret = -ENOMEM;
				goto ret_assigned_laddr;
			}
			ctrl->num_dev++;
		}
		memcpy(ctrl->addrt[i].eaddr, e_addr, e_len);
		ctrl->addrt[i].valid = true;
		/* Preferred address is index into table */
		if (!valid)
			*laddr = i;
	}

	ret = ctrl->set_laddr(ctrl, (const u8 *)&ctrl->addrt[i].eaddr, 6,
				*laddr);
	if (ret) {
		ctrl->addrt[i].valid = false;
		goto ret_assigned_laddr;
	}
	ctrl->addrt[i].laddr = *laddr;

	dev_dbg(&ctrl->dev, "setting slimbus l-addr:%x\n", *laddr);
ret_assigned_laddr:
	mutex_unlock(&ctrl->m_ctrl);
	if (exists || ret)
		return ret;

	pr_info("slimbus:%d laddr:0x%x, EAPC:0x%x:0x%x", ctrl->nr, *laddr,
		e_addr[1], e_addr[2]);
	mutex_lock(&ctrl->m_ctrl);
	list_for_each_entry(sbdev, &ctrl->devs, dev_list) {
		if (memcmp(sbdev->e_addr, e_addr, 6) == 0) {
			struct slim_driver *sbdrv;
			sbdev->laddr = *laddr;
			if (sbdev->dev.driver) {
				sbdrv = to_slim_driver(sbdev->dev.driver);
				if (sbdrv->device_up)
					queue_work(ctrl->wq, &sbdev->wd);
			}
			break;
		}
	}
	mutex_unlock(&ctrl->m_ctrl);
	return 0;
}
EXPORT_SYMBOL_GPL(slim_assign_laddr);

/*
 * slim_get_logical_addr: Return the logical address of a slimbus device.
 * @sb: client handle requesting the address.
 * @e_addr: Elemental address of the device.
 * @e_len: Length of e_addr
 * @laddr: output buffer to store the address
 * context: can sleep
 * -EINVAL is returned in case of invalid parameters, and -ENXIO is returned if
 * the device with this elemental address is not found.
 */
int slim_get_logical_addr(struct slim_device *sb, const u8 *e_addr,
				u8 e_len, u8 *laddr)
{
	int ret = 0;
	u8 entry;
	struct slim_controller *ctrl = sb->ctrl;
	if (!ctrl || !laddr || !e_addr || e_len != 6)
		return -EINVAL;
	mutex_lock(&ctrl->m_ctrl);
	ret = ctrl_getlogical_addr(ctrl, e_addr, e_len, &entry);
	if (!ret)
		*laddr = ctrl->addrt[entry].laddr;
	mutex_unlock(&ctrl->m_ctrl);
	if (ret == -ENXIO && ctrl->get_laddr) {
		ret = ctrl->get_laddr(ctrl, e_addr, e_len, laddr);
		if (!ret)
			ret = slim_assign_laddr(ctrl, e_addr, e_len, laddr,
						true);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(slim_get_logical_addr);
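
/*
 * Usage sketch for slim_get_logical_addr() (illustrative; "sbdev" is assumed
 * to be the client's struct slim_device): a client typically queries its
 * logical address using the enumeration address it was registered with:
 *
 *	u8 laddr;
 *	int ret = slim_get_logical_addr(sbdev, sbdev->e_addr, 6, &laddr);
 *
 *	if (!ret)
 *		dev_dbg(&sbdev->dev, "enumerated at LA 0x%x\n", laddr);
 */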

static int slim_ele_access_sanity(struct slim_ele_access *msg, int oper,
				u8 *rbuf, const u8 *wbuf, u8 len)
{
	if (!msg || msg->num_bytes > 16 || msg->start_offset + len > 0xC00)
		return -EINVAL;
	switch (oper) {
	case SLIM_MSG_MC_REQUEST_VALUE:
	case SLIM_MSG_MC_REQUEST_INFORMATION:
		if (rbuf == NULL)
			return -EINVAL;
		return 0;
	case SLIM_MSG_MC_CHANGE_VALUE:
	case SLIM_MSG_MC_CLEAR_INFORMATION:
		if (wbuf == NULL)
			return -EINVAL;
		return 0;
	case SLIM_MSG_MC_REQUEST_CHANGE_VALUE:
	case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION:
		if (rbuf == NULL || wbuf == NULL)
			return -EINVAL;
		return 0;
	default:
		return -EINVAL;
	}
}

static u16 slim_slicecodefromsize(u32 req)
{
	u8 codetosize[8] = {1, 2, 3, 4, 6, 8, 12, 16};
	if (req >= 8)
		return 0;
	else
		return codetosize[req];
}

static u16 slim_slicesize(u32 code)
{
	u8 sizetocode[16] = {0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7};
	if (code == 0)
		code = 1;
	if (code > 16)
		code = 16;
	return sizetocode[code - 1];
}


/* Unicast message APIs used by slimbus slave drivers */

/*
 * Message API access routines.
 * @sb: client handle requesting elemental message reads, writes.
 * @msg: Input structure for start-offset, number of bytes to read.
 * @rbuf: data buffer to be filled with values read.
 * @len: data buffer size
 * @wbuf: data buffer containing value/information to be written
 * context: can sleep
 * Returns:
 * -EINVAL: Invalid parameters
 * -ETIMEDOUT: If controller could not complete the request. This may happen if
 * the bus lines are not clocked, controller is not powered-on, slave with
 * given address is not enumerated/responding.
 */
int slim_request_val_element(struct slim_device *sb,
				struct slim_ele_access *msg, u8 *buf, u8 len)
{
	struct slim_controller *ctrl = sb->ctrl;
	if (!ctrl)
		return -EINVAL;
	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_VALUE, buf,
			NULL, len);
}
EXPORT_SYMBOL_GPL(slim_request_val_element);
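
/*
 * Usage sketch for the element-access APIs (illustrative; the register
 * offset is hypothetical and "sbdev" is assumed to be the client's handle):
 * read 2 value-element bytes at offset 0x80 and write them back with one
 * bit set:
 *
 *	struct slim_ele_access msg = {
 *		.start_offset = 0x80,
 *		.num_bytes = 2,
 *		.comp = NULL,	(NULL makes the read synchronous)
 *	};
 *	u8 buf[2];
 *
 *	ret = slim_request_val_element(sbdev, &msg, buf, 2);
 *	if (!ret) {
 *		buf[0] |= 0x1;
 *		ret = slim_change_val_element(sbdev, &msg, buf, 2);
 *	}
 */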

int slim_request_inf_element(struct slim_device *sb,
				struct slim_ele_access *msg, u8 *buf, u8 len)
{
	struct slim_controller *ctrl = sb->ctrl;
	if (!ctrl)
		return -EINVAL;
	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_INFORMATION,
			buf, NULL, len);
}
EXPORT_SYMBOL_GPL(slim_request_inf_element);

int slim_change_val_element(struct slim_device *sb, struct slim_ele_access *msg,
				const u8 *buf, u8 len)
{
	struct slim_controller *ctrl = sb->ctrl;
	if (!ctrl)
		return -EINVAL;
	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CHANGE_VALUE, NULL, buf,
					len);
}
EXPORT_SYMBOL_GPL(slim_change_val_element);

int slim_clear_inf_element(struct slim_device *sb, struct slim_ele_access *msg,
				u8 *buf, u8 len)
{
	struct slim_controller *ctrl = sb->ctrl;
	if (!ctrl)
		return -EINVAL;
	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CLEAR_INFORMATION, NULL,
					buf, len);
}
EXPORT_SYMBOL_GPL(slim_clear_inf_element);

int slim_request_change_val_element(struct slim_device *sb,
					struct slim_ele_access *msg, u8 *rbuf,
					const u8 *wbuf, u8 len)
{
	struct slim_controller *ctrl = sb->ctrl;
	if (!ctrl)
		return -EINVAL;
	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_CHANGE_VALUE,
					rbuf, wbuf, len);
}
EXPORT_SYMBOL_GPL(slim_request_change_val_element);

int slim_request_clear_inf_element(struct slim_device *sb,
					struct slim_ele_access *msg, u8 *rbuf,
					const u8 *wbuf, u8 len)
{
	struct slim_controller *ctrl = sb->ctrl;
	if (!ctrl)
		return -EINVAL;
	return slim_xfer_msg(ctrl, sb, msg,
					SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION,
					rbuf, wbuf, len);
}
EXPORT_SYMBOL_GPL(slim_request_clear_inf_element);

/*
 * Broadcast message API:
 * call this API directly with sbdev = NULL.
 * For broadcast reads, make sure that buffers are big enough to hold
 * replies from all logical addresses.
 * Not all controllers support broadcast.
 */
int slim_xfer_msg(struct slim_controller *ctrl, struct slim_device *sbdev,
			struct slim_ele_access *msg, u16 mc, u8 *rbuf,
			const u8 *wbuf, u8 len)
{
	DECLARE_COMPLETION_ONSTACK(complete);
	int ret;
	u16 sl, cur;
	u16 ec;
	u8 tid, mlen = 6;

	ret = slim_ele_access_sanity(msg, mc, rbuf, wbuf, len);
	if (ret)
		goto xfer_err;

	sl = slim_slicesize(len);
	dev_dbg(&ctrl->dev, "SB xfer msg:os:%x, len:%d, MC:%x, sl:%x\n",
			msg->start_offset, len, mc, sl);

	cur = slim_slicecodefromsize(sl);
	ec = ((sl | (1 << 3)) | ((msg->start_offset & 0xFFF) << 4));

	if (wbuf)
		mlen += len;
	if (rbuf) {
		mlen++;
		if (!msg->comp)
			ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR,
				mc, ec, SLIM_MSG_MT_CORE, rbuf, wbuf, len, mlen,
				&complete, sbdev->laddr, &tid);
		else
			ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR,
				mc, ec, SLIM_MSG_MT_CORE, rbuf, wbuf, len, mlen,
				msg->comp, sbdev->laddr, &tid);
		/* sync read */
		if (!ret && !msg->comp) {
			ret = wait_for_completion_timeout(&complete, HZ);
			if (!ret) {
				struct slim_msg_txn *txn;
				dev_err(&ctrl->dev, "slimbus Read timed out");
				mutex_lock(&ctrl->m_ctrl);
				txn = ctrl->txnt[tid];
				/* Invalidate the transaction */
				ctrl->txnt[tid] = NULL;
				mutex_unlock(&ctrl->m_ctrl);
				kfree(txn);
				ret = -ETIMEDOUT;
			} else
				ret = 0;
		} else if (ret < 0 && !msg->comp) {
			struct slim_msg_txn *txn;
			dev_err(&ctrl->dev, "slimbus Read error");
			mutex_lock(&ctrl->m_ctrl);
			txn = ctrl->txnt[tid];
			/* Invalidate the transaction */
			ctrl->txnt[tid] = NULL;
			mutex_unlock(&ctrl->m_ctrl);
			kfree(txn);
		}

	} else
		ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR, mc, ec,
				SLIM_MSG_MT_CORE, rbuf, wbuf, len, mlen,
				NULL, sbdev->laddr, NULL);
xfer_err:
	return ret;
}
EXPORT_SYMBOL_GPL(slim_xfer_msg);

/*
 * slim_alloc_mgrports: Allocate port on manager side.
 * @sb: device/client handle.
 * @req: Port request type.
 * @nports: Number of ports requested
 * @rh: output buffer to store the port handles
 * @hsz: size of buffer storing handles
 * context: can sleep
 * This port will typically be used by SW, e.g. a client driver wants to
 * receive some data from audio codec HW using a data channel.
 * Port allocated using this API will be used to receive the data.
 * If half-duplex ports are requested, two adjacent ports are allocated for
 * 1 half-duplex port. So the handle-buffer size should be twice the number
 * of half-duplex ports to be allocated.
 * -EDQUOT is returned if all ports are in use.
 */
int slim_alloc_mgrports(struct slim_device *sb, enum slim_port_req req,
				int nports, u32 *rh, int hsz)
{
	int i, j;
	int ret = -EINVAL;
	int nphysp = nports;
	struct slim_controller *ctrl = sb->ctrl;

	if (!rh || !ctrl)
		return -EINVAL;
	if (req == SLIM_REQ_HALF_DUP)
		nphysp *= 2;
	if (hsz/sizeof(u32) < nphysp)
		return -EINVAL;
	mutex_lock(&ctrl->m_ctrl);

	for (i = 0; i < ctrl->nports; i++) {
		bool multiok = true;
		if (ctrl->ports[i].state != SLIM_P_FREE)
			continue;
		/* Start half duplex channel at even port */
		if (req == SLIM_REQ_HALF_DUP && (i % 2))
			continue;
		/* Allocate ports contiguously for multi-ch */
		if (ctrl->nports < (i + nphysp)) {
			i = ctrl->nports;
			break;
		}
		if (req == SLIM_REQ_MULTI_CH) {
			multiok = true;
			for (j = i; j < i + nphysp; j++) {
				if (ctrl->ports[j].state != SLIM_P_FREE) {
					multiok = false;
					break;
				}
			}
			if (!multiok)
				continue;
		}
		break;
	}
	if (i >= ctrl->nports) {
		ret = -EDQUOT;
		goto alloc_err;
	}
	ret = 0;
	for (j = i; j < i + nphysp; j++) {
		ctrl->ports[j].state = SLIM_P_UNCFG;
		ctrl->ports[j].req = req;
		if (req == SLIM_REQ_HALF_DUP && (j % 2))
			ctrl->ports[j].flow = SLIM_SINK;
		else
			ctrl->ports[j].flow = SLIM_SRC;
		if (ctrl->alloc_port)
			ret = ctrl->alloc_port(ctrl, j);
		if (ret) {
			for (; j >= i; j--)
				ctrl->ports[j].state = SLIM_P_FREE;
			goto alloc_err;
		}
		*rh++ = SLIM_PORT_HDL(SLIM_LA_MANAGER, 0, j);
	}
alloc_err:
	mutex_unlock(&ctrl->m_ctrl);
	return ret;
}
EXPORT_SYMBOL_GPL(slim_alloc_mgrports);
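
/*
 * Usage sketch for slim_alloc_mgrports()/slim_dealloc_mgrports()
 * (illustrative): allocate two contiguous manager-side ports for a
 * multi-channel stream and release them when done. The handles returned in
 * prt_hdl feed slim_connect_sink()/slim_port_xfer() (not shown here):
 *
 *	u32 prt_hdl[2];
 *
 *	ret = slim_alloc_mgrports(sb, SLIM_REQ_MULTI_CH, 2,
 *				  prt_hdl, sizeof(prt_hdl));
 *	if (!ret)
 *		slim_dealloc_mgrports(sb, prt_hdl, 2);
 */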

/* Deallocate the port(s) allocated using the API above */
int slim_dealloc_mgrports(struct slim_device *sb, u32 *hdl, int nports)
{
	int i;
	struct slim_controller *ctrl = sb->ctrl;

	if (!ctrl || !hdl)
		return -EINVAL;

	mutex_lock(&ctrl->m_ctrl);

	for (i = 0; i < nports; i++) {
		u8 pn;
		pn = SLIM_HDL_TO_PORT(hdl[i]);

		if (pn >= ctrl->nports || ctrl->ports[pn].state == SLIM_P_CFG) {
			int j, ret;
			if (pn >= ctrl->nports) {
				dev_err(&ctrl->dev, "invalid port number");
				ret = -EINVAL;
			} else {
				dev_err(&ctrl->dev,
					"Can't dealloc connected port:%d", i);
				ret = -EISCONN;
			}
			for (j = i - 1; j >= 0; j--) {
				pn = SLIM_HDL_TO_PORT(hdl[j]);
				ctrl->ports[pn].state = SLIM_P_UNCFG;
			}
			mutex_unlock(&ctrl->m_ctrl);
			return ret;
		}
		if (ctrl->dealloc_port)
			ctrl->dealloc_port(ctrl, pn);
		ctrl->ports[pn].state = SLIM_P_FREE;
	}
	mutex_unlock(&ctrl->m_ctrl);
	return 0;
}
EXPORT_SYMBOL_GPL(slim_dealloc_mgrports);

/*
 * slim_get_slaveport: Get slave port handle
 * @la: slave device logical address.
 * @idx: port index at slave
 * @rh: return handle
 * @flw: Flow type (source or destination)
 * This API only returns a slave port's representation as expected by slimbus
 * driver. This port is not managed by the slimbus driver. Caller is expected
 * to have visibility of this port since it's a device-port.
 */
int slim_get_slaveport(u8 la, int idx, u32 *rh, enum slim_port_flow flw)
{
	if (rh == NULL)
		return -EINVAL;
	*rh = SLIM_PORT_HDL(la, flw, idx);
	return 0;
}
EXPORT_SYMBOL_GPL(slim_get_slaveport);

static int connect_port_ch(struct slim_controller *ctrl, u8 ch, u32 ph,
				enum slim_port_flow flow)
{
	int ret;
	u16 mc;
	u8 buf[2];
	u32 la = SLIM_HDL_TO_LA(ph);
	u8 pn = (u8)SLIM_HDL_TO_PORT(ph);

	if (flow == SLIM_SRC)
		mc = SLIM_MSG_MC_CONNECT_SOURCE;
	else
		mc = SLIM_MSG_MC_CONNECT_SINK;
	buf[0] = pn;
	buf[1] = ctrl->chans[ch].chan;
	if (la == SLIM_LA_MANAGER)
		ctrl->ports[pn].flow = flow;
	ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR, mc, 0,
				SLIM_MSG_MT_CORE, NULL, buf, 2, 6, NULL, la,
				NULL);
	if (!ret && la == SLIM_LA_MANAGER)
		ctrl->ports[pn].state = SLIM_P_CFG;
	return ret;
}

static int disconnect_port_ch(struct slim_controller *ctrl, u32 ph)
{
	int ret;
	u16 mc;
	u32 la = SLIM_HDL_TO_LA(ph);
	u8 pn = (u8)SLIM_HDL_TO_PORT(ph);

	mc = SLIM_MSG_MC_DISCONNECT_PORT;
	ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR, mc, 0,
				SLIM_MSG_MT_CORE, NULL, &pn, 1, 5,
				NULL, la, NULL);
	if (ret)
		return ret;
	if (la == SLIM_LA_MANAGER)
		ctrl->ports[pn].state = SLIM_P_UNCFG;
	return 0;
}

/*
 * slim_connect_src: Connect source port to channel.
 * @sb: client handle
 * @srch: source handle to be connected to this channel
 * @chanh: Channel with which the port needs to be associated.
 * Per slimbus specification, a channel may have 1 source port.
 * Channel specified in chanh needs to be allocated first.
 * Returns -EALREADY if source is already configured for this channel.
 * Returns -ENOTCONN if channel is not allocated
 * Returns -EINVAL if invalid direction is specified for non-manager port,
 * or if the manager side port number is out of bounds, or in incorrect state
 */
int slim_connect_src(struct slim_device *sb, u32 srch, u16 chanh)
{
	struct slim_controller *ctrl = sb->ctrl;
	int ret;
	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
	struct slim_ich *slc = &ctrl->chans[chan];
	enum slim_port_flow flow = SLIM_HDL_TO_FLOW(srch);
	u8 la = SLIM_HDL_TO_LA(srch);

	/* manager ports don't have direction when they are allocated */
	if (la != SLIM_LA_MANAGER && flow != SLIM_SRC)
		return -EINVAL;

	mutex_lock(&ctrl->sched.m_reconf);

	if (la == SLIM_LA_MANAGER) {
		u8 pn = SLIM_HDL_TO_PORT(srch);
		if (pn >= ctrl->nports ||
			ctrl->ports[pn].state != SLIM_P_UNCFG) {
			ret = -EINVAL;
			goto connect_src_err;
		}
	}

	if (slc->state == SLIM_CH_FREE) {
		ret = -ENOTCONN;
		goto connect_src_err;
	}
	/*
	 * Once channel is removed, its ports can be considered disconnected
	 * So its ports can be reassigned. Source port is zeroed
	 * when channel is deallocated.
	 */
	if (slc->srch) {
		ret = -EALREADY;
		goto connect_src_err;
	}

	ret = connect_port_ch(ctrl, chan, srch, SLIM_SRC);

	if (!ret)
		slc->srch = srch;

connect_src_err:
	mutex_unlock(&ctrl->sched.m_reconf);
	return ret;
}
EXPORT_SYMBOL_GPL(slim_connect_src);

/*
 * slim_connect_sink: Connect sink port(s) to channel.
 * @sb: client handle
 * @sinkh: sink handle(s) to be connected to this channel
 * @nsink: number of sinks
 * @chanh: Channel with which the ports need to be associated.
 * Per slimbus specification, a channel may have multiple sink-ports.
 * Channel specified in chanh needs to be allocated first.
 * Returns -EALREADY if sink is already configured for this channel.
 * Returns -ENOTCONN if channel is not allocated
 * Returns -EINVAL if invalid parameters are passed, or invalid direction is
 * specified for non-manager port, or if the manager side port number is out of
 * bounds, or in incorrect state
 */
int slim_connect_sink(struct slim_device *sb, u32 *sinkh, int nsink, u16 chanh)
{
	struct slim_controller *ctrl = sb->ctrl;
	int j;
	int ret = 0;
	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
	struct slim_ich *slc = &ctrl->chans[chan];

	if (!sinkh || !nsink)
		return -EINVAL;

	mutex_lock(&ctrl->sched.m_reconf);

	/*
	 * Once channel is removed, its ports can be considered disconnected
	 * So its ports can be reassigned. Sink ports are freed when channel
	 * is deallocated.
	 */
	if (slc->state == SLIM_CH_FREE) {
		ret = -ENOTCONN;
		goto connect_sink_err;
	}

	for (j = 0; j < nsink; j++) {
		enum slim_port_flow flow = SLIM_HDL_TO_FLOW(sinkh[j]);
		u8 la = SLIM_HDL_TO_LA(sinkh[j]);
		u8 pn = SLIM_HDL_TO_PORT(sinkh[j]);
		if (la != SLIM_LA_MANAGER && flow != SLIM_SINK)
			ret = -EINVAL;
		else if (la == SLIM_LA_MANAGER &&
			(pn >= ctrl->nports ||
			ctrl->ports[pn].state != SLIM_P_UNCFG))
			ret = -EINVAL;
		else
			ret = connect_port_ch(ctrl, chan, sinkh[j], SLIM_SINK);
		if (ret) {
			for (j = j - 1; j >= 0; j--)
				disconnect_port_ch(ctrl, sinkh[j]);
			goto connect_sink_err;
		}
	}

	slc->sinkh = krealloc(slc->sinkh, (sizeof(u32) * (slc->nsink + nsink)),
				GFP_KERNEL);
	if (!slc->sinkh) {
		ret = -ENOMEM;
		for (j = 0; j < nsink; j++)
			disconnect_port_ch(ctrl, sinkh[j]);
		goto connect_sink_err;
	}

	memcpy(slc->sinkh + slc->nsink, sinkh, (sizeof(u32) * nsink));
	slc->nsink += nsink;

connect_sink_err:
	mutex_unlock(&ctrl->sched.m_reconf);
	return ret;
}
EXPORT_SYMBOL_GPL(slim_connect_sink);
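
/*
 * Data-channel wiring sketch (illustrative; the slave port index is
 * hypothetical and error handling is omitted): stream from a slave source
 * port into a manager-side sink port. The channel still has to be defined
 * and activated through the channel-scheduling APIs before data flows.
 *
 *	u16 chanh;
 *	u32 srch, sinkh;
 *
 *	slim_alloc_ch(sb, &chanh);
 *	slim_get_slaveport(sb->laddr, 0, &srch, SLIM_SRC);
 *	slim_alloc_mgrports(sb, SLIM_REQ_MULTI_CH, 1, &sinkh, sizeof(sinkh));
 *	slim_connect_src(sb, srch, chanh);
 *	slim_connect_sink(sb, &sinkh, 1, chanh);
 */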

/*
 * slim_disconnect_ports: Disconnect port(s) from channel
 * @sb: client handle
 * @ph: ports to be disconnected
 * @nph: number of ports.
 * Disconnects ports from a channel.
 */
int slim_disconnect_ports(struct slim_device *sb, u32 *ph, int nph)
{
	struct slim_controller *ctrl = sb->ctrl;
	int i;

	mutex_lock(&ctrl->sched.m_reconf);

	for (i = 0; i < nph; i++)
		disconnect_port_ch(ctrl, ph[i]);
	mutex_unlock(&ctrl->sched.m_reconf);
	return 0;
}
EXPORT_SYMBOL_GPL(slim_disconnect_ports);

/*
 * slim_port_xfer: Schedule buffer to be transferred/received using port-handle.
 * @sb: client handle
 * @ph: port-handle
 * @iobuf: buffer to be transferred or populated
 * @len: buffer size.
 * @comp: completion signal to indicate transfer done or error.
 * context: can sleep
 * Returns number of bytes transferred/received if used synchronously.
 * Will return 0 if used asynchronously.
 * Client will call slim_port_get_xfer_status to get error and/or number of
 * bytes transferred if used asynchronously.
 */
int slim_port_xfer(struct slim_device *sb, u32 ph, u8 *iobuf, u32 len,
				struct completion *comp)
{
	struct slim_controller *ctrl = sb->ctrl;
	u8 pn = SLIM_HDL_TO_PORT(ph);
	dev_dbg(&ctrl->dev, "port xfer: num:%d", pn);
	return ctrl->port_xfer(ctrl, pn, iobuf, len, comp);
}
EXPORT_SYMBOL_GPL(slim_port_xfer);

/*
 * slim_port_get_xfer_status: Poll for port transfers, or get transfer status
 * after completion is done.
 * @sb: client handle
 * @ph: port-handle
 * @done_buf: return pointer (iobuf from slim_port_xfer) which is processed.
 * @done_len: Number of bytes transferred.
 * This can be called when the port_xfer completion is signalled.
 * The API will return port transfer error (underflow/overflow/disconnect)
 * and/or done_len will reflect number of bytes transferred. Note that
 * done_len may be valid even if port error (overflow/underflow) has happened.
 * e.g. If the transfer was scheduled with a few bytes to be transferred and
 * client has not supplied more data to be transferred, done_len will indicate
 * number of bytes transferred with underflow error. To avoid frequent underflow
 * errors, multiple transfers can be queued (e.g. ping-pong buffers) so that
 * the channel has data to be transferred even if the client is not ready to
 * transfer data all the time. done_buf will indicate address of the last
 * buffer processed from the multiple transfers.
 */
enum slim_port_err slim_port_get_xfer_status(struct slim_device *sb, u32 ph,
			u8 **done_buf, u32 *done_len)
{
	struct slim_controller *ctrl = sb->ctrl;
	u8 pn = SLIM_HDL_TO_PORT(ph);
	u32 la = SLIM_HDL_TO_LA(ph);
	enum slim_port_err err;
	dev_dbg(&ctrl->dev, "get status port num:%d", pn);
	/*
	 * Framework only has insight into ports managed by ported device
	 * used by the manager and not slave
	 */
	if (la != SLIM_LA_MANAGER) {
		if (done_buf)
			*done_buf = NULL;
		if (done_len)
			*done_len = 0;
		return SLIM_P_NOT_OWNED;
	}
	err = ctrl->port_xfer_status(ctrl, pn, done_buf, done_len);
	if (err == SLIM_P_INPROGRESS)
		err = ctrl->ports[pn].err;
	return err;
}
EXPORT_SYMBOL_GPL(slim_port_get_xfer_status);
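
/*
 * Ping-pong sketch for slim_port_xfer()/slim_port_get_xfer_status()
 * (illustrative; buffer sizes and the completion wiring are up to the
 * client): keep two buffers queued so the port always has data.
 *
 *	u8 buf0[256], buf1[256];
 *	u8 *done;
 *	u32 done_len;
 *	DECLARE_COMPLETION_ONSTACK(comp);
 *
 *	slim_port_xfer(sb, ph, buf0, sizeof(buf0), &comp);
 *	slim_port_xfer(sb, ph, buf1, sizeof(buf1), &comp);
 *	wait_for_completion(&comp);
 *	err = slim_port_get_xfer_status(sb, ph, &done, &done_len);
 *	(done points at the finished buffer; refill it and queue it again)
 */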
1491
1492static void slim_add_ch(struct slim_controller *ctrl, struct slim_ich *slc)
1493{
1494 struct slim_ich **arr;
1495 int i, j;
1496 int *len;
1497 int sl = slc->seglen << slc->rootexp;
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001498 /* Channel is already active and other end is transmitting data */
1499 if (slc->state >= SLIM_CH_ACTIVE)
1500 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001501 if (slc->coeff == SLIM_COEFF_1) {
1502 arr = ctrl->sched.chc1;
1503 len = &ctrl->sched.num_cc1;
1504 } else {
1505 arr = ctrl->sched.chc3;
1506 len = &ctrl->sched.num_cc3;
1507 sl *= 3;
1508 }
1509
1510 *len += 1;
1511
1512 /* Insert the channel based on rootexp and seglen */
1513 for (i = 0; i < *len - 1; i++) {
1514 /*
1515 * Primary key: exp low to high.
1516 * Secondary key: seglen: high to low
1517 */
1518 if ((slc->rootexp > arr[i]->rootexp) ||
1519 ((slc->rootexp == arr[i]->rootexp) &&
1520 (slc->seglen < arr[i]->seglen)))
1521 continue;
1522 else
1523 break;
1524 }
1525 for (j = *len - 1; j > i; j--)
1526 arr[j] = arr[j - 1];
1527 arr[i] = slc;
1528 ctrl->sched.usedslots += sl;
1529
1530 return;
1531}
1532
1533static int slim_remove_ch(struct slim_controller *ctrl, struct slim_ich *slc)
1534{
1535 struct slim_ich **arr;
1536 int i;
1537 u32 la, ph;
1538 int *len;
1539 if (slc->coeff == SLIM_COEFF_1) {
1540 arr = ctrl->sched.chc1;
1541 len = &ctrl->sched.num_cc1;
1542 } else {
1543 arr = ctrl->sched.chc3;
1544 len = &ctrl->sched.num_cc3;
1545 }
1546
1547 for (i = 0; i < *len; i++) {
1548 if (arr[i] == slc)
1549 break;
1550 }
1551 if (i >= *len)
1552 return -EXFULL;
1553 for (; i < *len - 1; i++)
1554 arr[i] = arr[i + 1];
1555 *len -= 1;
1556 arr[*len] = NULL;
1557
1558 slc->state = SLIM_CH_ALLOCATED;
1559 slc->newintr = 0;
1560 slc->newoff = 0;
Sagar Dharia29f35f02011-10-01 20:37:50 -06001561 for (i = 0; i < slc->nsink; i++) {
1562 ph = slc->sinkh[i];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001563 la = SLIM_HDL_TO_LA(ph);
1564 /*
1565 * For ports managed by manager's ported device, no need to send
1566 * disconnect. It is client's responsibility to call disconnect
1567 * on ports owned by the slave device
1568 */
1569 if (la == SLIM_LA_MANAGER)
1570 ctrl->ports[SLIM_HDL_TO_PORT(ph)].state = SLIM_P_UNCFG;
1571 }
1572
Sagar Dharia29f35f02011-10-01 20:37:50 -06001573 ph = slc->srch;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001574 la = SLIM_HDL_TO_LA(ph);
1575 if (la == SLIM_LA_MANAGER)
1576 ctrl->ports[SLIM_HDL_TO_PORT(ph)].state = SLIM_P_UNCFG;
1577
Sagar Dharia29f35f02011-10-01 20:37:50 -06001578 kfree(slc->sinkh);
1579 slc->sinkh = NULL;
1580 slc->srch = 0;
1581 slc->nsink = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001582 return 0;
1583}
1584
1585static u32 slim_calc_prrate(struct slim_controller *ctrl, struct slim_ch *prop)
1586{
1587 u32 rate = 0, rate4k = 0, rate11k = 0;
1588 u32 exp = 0;
1589 u32 pr = 0;
1590 bool exact = true;
1591 bool done = false;
1592 enum slim_ch_rate ratefam;
1593
1594 if (prop->prot >= SLIM_PUSH)
1595 return 0;
1596 if (prop->baser == SLIM_RATE_1HZ) {
1597 rate = prop->ratem / 4000;
1598 rate4k = rate;
1599 if (rate * 4000 == prop->ratem)
1600 ratefam = SLIM_RATE_4000HZ;
1601 else {
1602 rate = prop->ratem / 11025;
1603 rate11k = rate;
1604 if (rate * 11025 == prop->ratem)
1605 ratefam = SLIM_RATE_11025HZ;
1606 else
1607 ratefam = SLIM_RATE_1HZ;
1608 }
1609 } else {
1610 ratefam = prop->baser;
1611 rate = prop->ratem;
1612 }
1613 if (ratefam == SLIM_RATE_1HZ) {
1614 exact = false;
1615 if ((rate4k + 1) * 4000 < (rate11k + 1) * 11025) {
1616 rate = rate4k + 1;
1617 ratefam = SLIM_RATE_4000HZ;
1618 } else {
1619 rate = rate11k + 1;
1620 ratefam = SLIM_RATE_11025HZ;
1621 }
1622 }
1623 /* covert rate to coeff-exp */
1624 while (!done) {
1625 while ((rate & 0x1) != 0x1) {
1626 rate >>= 1;
1627 exp++;
1628 }
1629 if (rate > 3) {
1630 /* roundup if not exact */
1631 rate++;
1632 exact = false;
1633 } else
1634 done = true;
1635 }
1636 if (ratefam == SLIM_RATE_4000HZ) {
1637 if (rate == 1)
1638 pr = 0x10;
1639 else {
1640 pr = 0;
1641 exp++;
1642 }
1643 } else {
1644 pr = 8;
1645 exp++;
1646 }
1647 if (exp <= 7) {
1648 pr |= exp;
1649 if (exact)
1650 pr |= 0x80;
1651 } else
1652 pr = 0;
1653 return pr;
1654}
1655
1656static int slim_nextdefine_ch(struct slim_device *sb, u8 chan)
1657{
1658 struct slim_controller *ctrl = sb->ctrl;
1659 u32 chrate = 0;
1660 u32 exp = 0;
1661 u32 coeff = 0;
1662 bool exact = true;
1663 bool done = false;
1664 int ret = 0;
1665 struct slim_ich *slc = &ctrl->chans[chan];
1666 struct slim_ch *prop = &slc->prop;
1667
1668 slc->prrate = slim_calc_prrate(ctrl, prop);
1669 dev_dbg(&ctrl->dev, "ch:%d, chan PR rate:%x\n", chan, slc->prrate);
1670 if (prop->baser == SLIM_RATE_4000HZ)
1671 chrate = 4000 * prop->ratem;
1672 else if (prop->baser == SLIM_RATE_11025HZ)
1673 chrate = 11025 * prop->ratem;
1674 else
1675 chrate = prop->ratem;
1676 /* max allowed sample freq = 768 seg/frame */
1677 if (chrate > 3600000)
1678 return -EDQUOT;
1679 if (prop->baser == SLIM_RATE_4000HZ &&
1680 ctrl->a_framer->superfreq == 4000)
1681 coeff = prop->ratem;
1682 else if (prop->baser == SLIM_RATE_11025HZ &&
1683 ctrl->a_framer->superfreq == 3675)
1684 coeff = 3 * prop->ratem;
1685 else {
1686 u32 tempr = 0;
1687 tempr = chrate * SLIM_CL_PER_SUPERFRAME_DIV8;
1688 coeff = tempr / ctrl->a_framer->rootfreq;
1689 if (coeff * ctrl->a_framer->rootfreq != tempr) {
1690 coeff++;
1691 exact = false;
1692 }
1693 }
1694
1695 /* convert coeff to coeff-exponent */
1696 exp = 0;
1697 while (!done) {
1698 while ((coeff & 0x1) != 0x1) {
1699 coeff >>= 1;
1700 exp++;
1701 }
1702 if (coeff > 3) {
1703 coeff++;
1704 exact = false;
1705 } else
1706 done = true;
1707 }
1708 if (prop->prot == SLIM_HARD_ISO && !exact)
1709 return -EPROTONOSUPPORT;
1710 else if (prop->prot == SLIM_AUTO_ISO) {
1711 if (exact)
1712 prop->prot = SLIM_HARD_ISO;
1713 else {
1714 /* Push-Pull not supported for now */
1715 return -EPROTONOSUPPORT;
1716 }
1717 }
1718 slc->rootexp = exp;
1719 slc->seglen = prop->sampleszbits/SLIM_CL_PER_SL;
1720 if (prop->prot != SLIM_HARD_ISO)
1721 slc->seglen++;
1722 if (prop->prot >= SLIM_EXT_SMPLX)
1723 slc->seglen++;
1724 /* convert coeff to enum */
1725 if (coeff == 1) {
1726 if (exp > 9)
1727 ret = -EIO;
1728 coeff = SLIM_COEFF_1;
1729 } else {
1730 if (exp > 8)
1731 ret = -EIO;
1732 coeff = SLIM_COEFF_3;
1733 }
1734 slc->coeff = coeff;
1735
1736 return ret;
1737}
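
/*
 * Worked example (a sketch; assumes SLIM_CL_PER_SL is 4 cells per slot): a
 * 48 kHz, 16-bit SLIM_AUTO_ISO channel with baser = SLIM_RATE_4000HZ and
 * ratem = 12 on a framer whose superfreq is 4000 gives
 *
 *	coeff  = 12 = 3 * 2^2   -> rootexp = 2, coeff = SLIM_COEFF_3, exact
 *	prot   = SLIM_HARD_ISO  (upgraded from SLIM_AUTO_ISO since exact)
 *	seglen = 16 / SLIM_CL_PER_SL = 4 slots per segment
 */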
1738
1739/*
1740 * slim_alloc_ch: Allocate a slimbus channel and return its handle.
1741 * @sb: client handle.
1742 * @chanh: return channel handle
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001743 * Slimbus channels are limited to 256 by the specification, although a
1744 * controller may not support that many channels.
1745 * -EXFULL is returned if all channels are in use.
1747 */
1748int slim_alloc_ch(struct slim_device *sb, u16 *chanh)
1749{
1750 struct slim_controller *ctrl = sb->ctrl;
1751 u16 i;
1752
1753 if (!ctrl)
1754 return -EINVAL;
Sagar Dhariad2959352012-12-01 15:43:01 -07001755 mutex_lock(&ctrl->sched.m_reconf);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001756 for (i = 0; i < ctrl->nchans; i++) {
1757 if (ctrl->chans[i].state == SLIM_CH_FREE)
1758 break;
1759 }
1760 if (i >= ctrl->nchans) {
Sagar Dhariad2959352012-12-01 15:43:01 -07001761 mutex_unlock(&ctrl->sched.m_reconf);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001762 return -EXFULL;
1763 }
1764 *chanh = i;
1765 ctrl->chans[i].nextgrp = 0;
1766 ctrl->chans[i].state = SLIM_CH_ALLOCATED;
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001767 ctrl->chans[i].chan = (u8)(ctrl->reserved + i);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001768
Sagar Dhariad2959352012-12-01 15:43:01 -07001769 mutex_unlock(&ctrl->sched.m_reconf);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001770 return 0;
1771}
1772EXPORT_SYMBOL_GPL(slim_alloc_ch);
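
/*
 * Usage sketch (hypothetical client code, error handling trimmed; "sb" is
 * the client's struct slim_device handle, as used throughout this file):
 *
 *	u16 chanh;
 *	int ret = slim_alloc_ch(sb, &chanh);
 *
 * A return value of -EXFULL means all channels on this controller are
 * already in use.
 */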
1773
1774/*
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001775 * slim_query_ch: Get a reference-counted handle for a channel number. Every
1776 * channel is reference counted: at most one reference is the producer and
1777 * the rest are consumers.
1778 * @sb: client handle
1779 * @chan: slimbus channel number
1780 * @chanh: return channel handle
1781 * If the requested channel number is not in use, it is allocated and its
1782 * reference count is set to one. If the channel was already allocated, this
1783 * API returns a handle to that channel and increments its reference count.
1784 * -EXFULL is returned if all channels are in use.
1785 */
1786int slim_query_ch(struct slim_device *sb, u8 ch, u16 *chanh)
1787{
1788 struct slim_controller *ctrl = sb->ctrl;
1789 u16 i, j;
1790 int ret = 0;
1791 if (!ctrl || !chanh)
1792 return -EINVAL;
Sagar Dhariad2959352012-12-01 15:43:01 -07001793 mutex_lock(&ctrl->sched.m_reconf);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001794 /* start with modulo number */
1795 i = ch % ctrl->nchans;
1796
1797 for (j = 0; j < ctrl->nchans; j++) {
1798 if (ctrl->chans[i].chan == ch) {
1799 *chanh = i;
1800 ctrl->chans[i].ref++;
1801 if (ctrl->chans[i].state == SLIM_CH_FREE)
1802 ctrl->chans[i].state = SLIM_CH_ALLOCATED;
1803 goto query_out;
1804 }
1805 i = (i + 1) % ctrl->nchans;
1806 }
1807
1808 /* Channel not in table yet */
1809 ret = -EXFULL;
1810 for (j = 0; j < ctrl->nchans; j++) {
1811 if (ctrl->chans[i].state == SLIM_CH_FREE) {
1812 ctrl->chans[i].state =
1813 SLIM_CH_ALLOCATED;
1814 *chanh = i;
1815 ctrl->chans[i].ref++;
1816 ctrl->chans[i].chan = ch;
1817 ctrl->chans[i].nextgrp = 0;
1818 ret = 0;
1819 break;
1820 }
1821 i = (i + 1) % ctrl->nchans;
1822 }
1823query_out:
Sagar Dhariad2959352012-12-01 15:43:01 -07001824 mutex_unlock(&ctrl->sched.m_reconf);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001825 dev_dbg(&ctrl->dev, "query ch:%d,hdl:%d,ref:%d,ret:%d",
1826 ch, i, ctrl->chans[i].ref, ret);
1827 return ret;
1828}
1829EXPORT_SYMBOL_GPL(slim_query_ch);
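
/*
 * Usage sketch (channel number 140 is hypothetical): clients that share a
 * well-known data channel number can each query it and receive the same
 * handle, with the reference count bumped once per caller:
 *
 *	u16 chanh;
 *	int ret = slim_query_ch(sb, 140, &chanh);
 */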
1830
1831/*
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001832 * slim_dealloc_ch: Deallocate a channel allocated using the API above.
1833 * -EISCONN is returned if an attempt is made to deallocate a channel that
1834 * has not been removed first.
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001835 * -ENOTCONN is returned if deallocation is attempted on a channel that is
1836 * not allocated.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001837 */
1838int slim_dealloc_ch(struct slim_device *sb, u16 chanh)
1839{
1840 struct slim_controller *ctrl = sb->ctrl;
Sagar Dharia29f35f02011-10-01 20:37:50 -06001841 u8 chan = SLIM_HDL_TO_CHIDX(chanh);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001842 struct slim_ich *slc = &ctrl->chans[chan];
1843 if (!ctrl)
1844 return -EINVAL;
1845
Sagar Dhariad2959352012-12-01 15:43:01 -07001846 mutex_lock(&ctrl->sched.m_reconf);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001847 if (slc->state == SLIM_CH_FREE) {
Sagar Dhariad2959352012-12-01 15:43:01 -07001848 mutex_unlock(&ctrl->sched.m_reconf);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001849 return -ENOTCONN;
1850 }
1851 if (slc->ref > 1) {
1852 slc->ref--;
Sagar Dhariad2959352012-12-01 15:43:01 -07001853 mutex_unlock(&ctrl->sched.m_reconf);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001854 dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
1855 slc->chan, chanh, slc->ref);
1856 return 0;
1857 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001858 if (slc->state >= SLIM_CH_PENDING_ACTIVE) {
1859 dev_err(&ctrl->dev, "Channel:%d should be removed first", chan);
Sagar Dhariad2959352012-12-01 15:43:01 -07001860 mutex_unlock(&ctrl->sched.m_reconf);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001861 return -EISCONN;
1862 }
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001863 slc->ref--;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001864 slc->state = SLIM_CH_FREE;
Sagar Dhariad2959352012-12-01 15:43:01 -07001865 mutex_unlock(&ctrl->sched.m_reconf);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001866 dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
1867 slc->chan, chanh, slc->ref);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001868 return 0;
1869}
1870EXPORT_SYMBOL_GPL(slim_dealloc_ch);
1871
1872/*
1873 * slim_get_ch_state: Channel state.
1874 * This API returns the channel's state (active, suspended, inactive, etc.).
1875 */
1876enum slim_ch_state slim_get_ch_state(struct slim_device *sb, u16 chanh)
1877{
Sagar Dharia29f35f02011-10-01 20:37:50 -06001878 u8 chan = SLIM_HDL_TO_CHIDX(chanh);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001879 struct slim_ich *slc = &sb->ctrl->chans[chan];
1880 return slc->state;
1881}
1882EXPORT_SYMBOL_GPL(slim_get_ch_state);
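
/*
 * Example check (sketch): a client can verify that a channel is currently
 * active before asking for it to be suspended:
 *
 *	if (slim_get_ch_state(sb, chanh) == SLIM_CH_ACTIVE)
 *		ret = slim_control_ch(sb, chanh, SLIM_CH_SUSPEND, true);
 */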
1883
1884/*
1885 * slim_define_ch: Define a channel. This API defines channel parameters for a
1886 * given channel.
1887 * @sb: client handle.
1888 * @prop: slim_ch structure with channel parameters desired to be used.
1889 * @chanh: list of channels to be defined.
1890 * @nchan: number of channels in a group (1 if grp is false)
1891 * @grp: Are the channels grouped
1892 * @grph: return group handle if grouping of channels is desired.
1893 * Channels can be grouped if multiple channels use the same parameters
1894 * (e.g. 5.1 audio has 6 channels with the same parameters; they can all be
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001895 * grouped under one handle to avoid calling this API repeatedly).
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001896 * -EISCONN is returned if channel is already used with different parameters.
1897 * -ENXIO is returned if the channel is not yet allocated.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001898 */
1899int slim_define_ch(struct slim_device *sb, struct slim_ch *prop, u16 *chanh,
1900 u8 nchan, bool grp, u16 *grph)
1901{
1902 struct slim_controller *ctrl = sb->ctrl;
1903 int i, ret = 0;
1904
1905 if (!ctrl || !chanh || !prop || !nchan)
1906 return -EINVAL;
Sagar Dhariad2959352012-12-01 15:43:01 -07001907 mutex_lock(&ctrl->sched.m_reconf);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001908 for (i = 0; i < nchan; i++) {
Sagar Dharia29f35f02011-10-01 20:37:50 -06001909 u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001910 struct slim_ich *slc = &ctrl->chans[chan];
1911 dev_dbg(&ctrl->dev, "define_ch: ch:%d, state:%d", chan,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001912 (int)ctrl->chans[chan].state);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001913 if (slc->state < SLIM_CH_ALLOCATED) {
1914 ret = -ENXIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001915 goto err_define_ch;
1916 }
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001917 if (slc->state >= SLIM_CH_DEFINED && slc->ref >= 2) {
1918 if (prop->ratem != slc->prop.ratem ||
1919 prop->sampleszbits != slc->prop.sampleszbits ||
1920 prop->baser != slc->prop.baser) {
1921 ret = -EISCONN;
1922 goto err_define_ch;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001923 }
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001924 } else if (slc->state > SLIM_CH_DEFINED) {
1925 ret = -EISCONN;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001926 goto err_define_ch;
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001927 } else {
1928 ctrl->chans[chan].prop = *prop;
1929 ret = slim_nextdefine_ch(sb, chan);
1930 if (ret)
1931 goto err_define_ch;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001932 }
1933 if (i < (nchan - 1))
1934 ctrl->chans[chan].nextgrp = chanh[i + 1];
1935 if (i == 0)
1936 ctrl->chans[chan].nextgrp |= SLIM_START_GRP;
1937 if (i == (nchan - 1))
1938 ctrl->chans[chan].nextgrp |= SLIM_END_GRP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001939 }
1940
1941 if (grp)
Sagar Dhariab886e042012-10-17 22:41:57 -06001942 *grph = ((nchan << 8) | SLIM_HDL_TO_CHIDX(chanh[0]));
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001943 for (i = 0; i < nchan; i++) {
Sagar Dharia29f35f02011-10-01 20:37:50 -06001944 u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001945 struct slim_ich *slc = &ctrl->chans[chan];
1946 if (slc->state == SLIM_CH_ALLOCATED)
1947 slc->state = SLIM_CH_DEFINED;
1948 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001949err_define_ch:
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001950 dev_dbg(&ctrl->dev, "define_ch: ch:%d, ret:%d", *chanh, ret);
Sagar Dhariad2959352012-12-01 15:43:01 -07001951 mutex_unlock(&ctrl->sched.m_reconf);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001952 return ret;
1953}
1954EXPORT_SYMBOL_GPL(slim_define_ch);
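
/*
 * Usage sketch (hypothetical stereo stream; dataf/auxf and error handling
 * are omitted for brevity, and ratem = 12 corresponds to 48 kHz in the
 * 4000 Hz rate family): allocate two channels, define them with the same
 * properties, and group them under one handle:
 *
 *	struct slim_ch prop = {
 *		.prot = SLIM_AUTO_ISO,
 *		.baser = SLIM_RATE_4000HZ,
 *		.ratem = 12,
 *		.sampleszbits = 16,
 *	};
 *	u16 chans[2], grph;
 *
 *	slim_alloc_ch(sb, &chans[0]);
 *	slim_alloc_ch(sb, &chans[1]);
 *	ret = slim_define_ch(sb, &prop, chans, 2, true, &grph);
 */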
1955
1956static u32 getsubfrmcoding(u32 *ctrlw, u32 *subfrml, u32 *msgsl)
1957{
1958 u32 code = 0;
1959 if (*ctrlw == *subfrml) {
1960 *ctrlw = 8;
1961 *subfrml = 8;
1962 *msgsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME
1963 - SLIM_GDE_SLOTS_PER_SUPERFRAME;
1964 return 0;
1965 }
1966 if (*subfrml == 6) {
1967 code = 0;
1968 *msgsl = 256;
1969 } else if (*subfrml == 8) {
1970 code = 1;
1971 *msgsl = 192;
1972 } else if (*subfrml == 24) {
1973 code = 2;
1974 *msgsl = 64;
1975 } else { /* 32 */
1976 code = 3;
1977 *msgsl = 48;
1978 }
1979
1980 if (*ctrlw < 8) {
1981 if (*ctrlw >= 6) {
1982 *ctrlw = 6;
1983 code |= 0x14;
1984 } else {
1985 if (*ctrlw == 5)
1986 *ctrlw = 4;
1987 code |= (*ctrlw << 2);
1988 }
1989 } else {
1990 code -= 2;
1991 if (*ctrlw >= 24) {
1992 *ctrlw = 24;
1993 code |= 0x1e;
1994 } else if (*ctrlw >= 16) {
1995 *ctrlw = 16;
1996 code |= 0x1c;
1997 } else if (*ctrlw >= 12) {
1998 *ctrlw = 12;
1999 code |= 0x1a;
2000 } else {
2001 *ctrlw = 8;
2002 code |= 0x18;
2003 }
2004 }
2005
2006 *msgsl = (*msgsl * *ctrlw) - SLIM_FRM_SLOTS_PER_SUPERFRAME -
2007 SLIM_GDE_SLOTS_PER_SUPERFRAME;
2008 return code;
2009}
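
/*
 * Worked trace (illustrative): with *subfrml == 24 and *ctrlw == 8, the code
 * above first sets code = 2 and *msgsl = 64, then takes the ctrlw >= 8
 * branch (code -= 2, code |= 0x18), so it returns the subframe coding 0x18,
 * leaves *ctrlw at 8, and sets *msgsl to 64 * 8 minus the framing and guide
 * slots per superframe.
 */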
2010
2011static void shiftsegoffsets(struct slim_controller *ctrl, struct slim_ich **ach,
2012 int sz, u32 shft)
2013{
2014 int i;
2015 u32 oldoff;
2016 for (i = 0; i < sz; i++) {
2017 struct slim_ich *slc;
2018 if (ach[i] == NULL)
2019 continue;
2020 slc = ach[i];
2021 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2022 continue;
2023 oldoff = slc->newoff;
2024 slc->newoff += shft;
2025 /* segment offset must be less than the interval */
2026 if (slc->newoff >= slc->newintr)
2027 slc->newoff -= slc->newintr;
2028 }
2029}
2030
2031static int slim_sched_chans(struct slim_device *sb, u32 clkgear,
2032 u32 *ctrlw, u32 *subfrml)
2033{
2034 int coeff1, coeff3;
2035 enum slim_ch_coeff bias;
2036 struct slim_controller *ctrl = sb->ctrl;
2037 int last1 = ctrl->sched.num_cc1 - 1;
2038 int last3 = ctrl->sched.num_cc3 - 1;
2039
2040 /*
2041 * Find first channels with coeff 1 & 3 as starting points for
2042 * scheduling
2043 */
2044 for (coeff3 = 0; coeff3 < ctrl->sched.num_cc3; coeff3++) {
2045 struct slim_ich *slc = ctrl->sched.chc3[coeff3];
2046 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2047 continue;
2048 else
2049 break;
2050 }
2051 for (coeff1 = 0; coeff1 < ctrl->sched.num_cc1; coeff1++) {
2052 struct slim_ich *slc = ctrl->sched.chc1[coeff1];
2053 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2054 continue;
2055 else
2056 break;
2057 }
2058 if (coeff3 == ctrl->sched.num_cc3 && coeff1 == ctrl->sched.num_cc1) {
2059 *ctrlw = 8;
2060 *subfrml = 8;
2061 return 0;
2062 } else if (coeff3 == ctrl->sched.num_cc3)
2063 bias = SLIM_COEFF_1;
2064 else
2065 bias = SLIM_COEFF_3;
2066
2067 /*
2068 * Find the last channel in the coeff1 and coeff3 lists; we use it to know
2069 * when we have finished scheduling all the channels
2070 */
2071 while (last1 >= 0) {
2072 if (ctrl->sched.chc1[last1] != NULL &&
2073 (ctrl->sched.chc1[last1])->state !=
2074 SLIM_CH_PENDING_REMOVAL)
2075 break;
2076 last1--;
2077 }
2078 while (last3 >= 0) {
2079 if (ctrl->sched.chc3[last3] != NULL &&
2080 (ctrl->sched.chc3[last3])->state !=
2081 SLIM_CH_PENDING_REMOVAL)
2082 break;
2083 last3--;
2084 }
2085
2086 if (bias == SLIM_COEFF_1) {
2087 struct slim_ich *slc1 = ctrl->sched.chc1[coeff1];
2088 u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
2089 int curexp, finalexp;
2090 u32 curintr, curmaxsl;
2091 int opensl1[2];
2092 int maxctrlw1;
2093
2094 finalexp = (ctrl->sched.chc1[last1])->rootexp;
2095 curexp = (int)expshft - 1;
2096
2097 curintr = (SLIM_MAX_INTR_COEFF_1 * 2) >> (curexp + 1);
2098 curmaxsl = curintr >> 1;
2099 opensl1[0] = opensl1[1] = curmaxsl;
2100
2101 while ((coeff1 < ctrl->sched.num_cc1) || (curintr > 24)) {
2102 curintr >>= 1;
2103 curmaxsl >>= 1;
2104
2105 /* update 4K family open slot records */
2106 if (opensl1[1] < opensl1[0])
2107 opensl1[1] -= curmaxsl;
2108 else
2109 opensl1[1] = opensl1[0] - curmaxsl;
2110 opensl1[0] = curmaxsl;
2111 if (opensl1[1] < 0) {
2112 opensl1[0] += opensl1[1];
2113 opensl1[1] = 0;
2114 }
2115 if (opensl1[0] <= 0) {
2116 dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
2117 __LINE__);
2118 return -EXFULL;
2119 }
2120 curexp++;
2121 /* schedule 4k family channels */
2122
2123 while ((coeff1 < ctrl->sched.num_cc1) && (curexp ==
2124 (int)(slc1->rootexp + expshft))) {
2125 if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
2126 coeff1++;
2127 slc1 = ctrl->sched.chc1[coeff1];
2128 continue;
2129 }
2130 if (opensl1[1] >= opensl1[0] ||
2131 (finalexp == (int)slc1->rootexp &&
2132 curintr <= 24 &&
2133 opensl1[0] == curmaxsl)) {
2134 opensl1[1] -= slc1->seglen;
2135 slc1->newoff = curmaxsl + opensl1[1];
2136 if (opensl1[1] < 0 &&
2137 opensl1[0] == curmaxsl) {
2138 opensl1[0] += opensl1[1];
2139 opensl1[1] = 0;
2140 if (opensl1[0] < 0) {
2141 dev_dbg(&ctrl->dev,
2142 "reconfig failed:%d\n",
2143 __LINE__);
2144 return -EXFULL;
2145 }
2146 }
2147 } else {
2148 if (slc1->seglen > opensl1[0]) {
2149 dev_dbg(&ctrl->dev,
2150 "reconfig failed:%d\n",
2151 __LINE__);
2152 return -EXFULL;
2153 }
2154 slc1->newoff = opensl1[0] -
2155 slc1->seglen;
2156 opensl1[0] = slc1->newoff;
2157 }
2158 slc1->newintr = curintr;
2159 coeff1++;
2160 slc1 = ctrl->sched.chc1[coeff1];
2161 }
2162 }
Sagar Dhariaa00f61f2012-04-21 15:10:08 -06002163 /* Leave some slots for messaging space */
Sagar Dharia90a06cc2012-06-25 12:44:02 -06002164 if (opensl1[1] <= 0 && opensl1[0] <= 0)
Sagar Dhariaa00f61f2012-04-21 15:10:08 -06002165 return -EXFULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002166 if (opensl1[1] > opensl1[0]) {
2167 int temp = opensl1[0];
2168 opensl1[0] = opensl1[1];
2169 opensl1[1] = temp;
2170 shiftsegoffsets(ctrl, ctrl->sched.chc1,
2171 ctrl->sched.num_cc1, curmaxsl);
2172 }
2173 /* choose subframe mode to maximize bw */
2174 maxctrlw1 = opensl1[0];
2175 if (opensl1[0] == curmaxsl)
2176 maxctrlw1 += opensl1[1];
2177 if (curintr >= 24) {
2178 *subfrml = 24;
2179 *ctrlw = maxctrlw1;
2180 } else if (curintr == 12) {
2181 if (maxctrlw1 > opensl1[1] * 4) {
2182 *subfrml = 24;
2183 *ctrlw = maxctrlw1;
2184 } else {
2185 *subfrml = 6;
2186 *ctrlw = opensl1[1];
2187 }
2188 } else {
2189 *subfrml = 6;
2190 *ctrlw = maxctrlw1;
2191 }
2192 } else {
Jordan Crouse9bb8aca2011-11-23 11:41:20 -07002193 struct slim_ich *slc1 = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002194 struct slim_ich *slc3 = ctrl->sched.chc3[coeff3];
2195 u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
2196 int curexp, finalexp, exp1;
2197 u32 curintr, curmaxsl;
2198 int opensl3[2];
2199 int opensl1[6];
2200 bool opensl1valid = false;
2201 int maxctrlw1, maxctrlw3, i;
2202 finalexp = (ctrl->sched.chc3[last3])->rootexp;
2203 if (last1 >= 0) {
2204 slc1 = ctrl->sched.chc1[coeff1];
2205 exp1 = (ctrl->sched.chc1[last1])->rootexp;
2206 if (exp1 > finalexp)
2207 finalexp = exp1;
2208 }
2209 curexp = (int)expshft - 1;
2210
2211 curintr = (SLIM_MAX_INTR_COEFF_3 * 2) >> (curexp + 1);
2212 curmaxsl = curintr >> 1;
2213 opensl3[0] = opensl3[1] = curmaxsl;
2214
2215 while (coeff1 < ctrl->sched.num_cc1 ||
2216 coeff3 < ctrl->sched.num_cc3 ||
2217 curintr > 32) {
2218 curintr >>= 1;
2219 curmaxsl >>= 1;
2220
2221 /* update 12k family open slot records */
2222 if (opensl3[1] < opensl3[0])
2223 opensl3[1] -= curmaxsl;
2224 else
2225 opensl3[1] = opensl3[0] - curmaxsl;
2226 opensl3[0] = curmaxsl;
2227 if (opensl3[1] < 0) {
2228 opensl3[0] += opensl3[1];
2229 opensl3[1] = 0;
2230 }
2231 if (opensl3[0] <= 0) {
2232 dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
2233 __LINE__);
2234 return -EXFULL;
2235 }
2236 curexp++;
2237
2238 /* schedule 12k family channels */
2239 while (coeff3 < ctrl->sched.num_cc3 &&
2240 curexp == (int)slc3->rootexp + expshft) {
2241 if (slc3->state == SLIM_CH_PENDING_REMOVAL) {
2242 coeff3++;
2243 slc3 = ctrl->sched.chc3[coeff3];
2244 continue;
2245 }
2246 opensl1valid = false;
2247 if (opensl3[1] >= opensl3[0] ||
2248 (finalexp == (int)slc3->rootexp &&
2249 curintr <= 32 &&
2250 opensl3[0] == curmaxsl &&
2251 last1 < 0)) {
2252 opensl3[1] -= slc3->seglen;
2253 slc3->newoff = curmaxsl + opensl3[1];
2254 if (opensl3[1] < 0 &&
2255 opensl3[0] == curmaxsl) {
2256 opensl3[0] += opensl3[1];
2257 opensl3[1] = 0;
2258 }
2259 if (opensl3[0] < 0) {
2260 dev_dbg(&ctrl->dev,
2261 "reconfig failed:%d\n",
2262 __LINE__);
2263 return -EXFULL;
2264 }
2265 } else {
2266 if (slc3->seglen > opensl3[0]) {
2267 dev_dbg(&ctrl->dev,
2268 "reconfig failed:%d\n",
2269 __LINE__);
2270 return -EXFULL;
2271 }
2272 slc3->newoff = opensl3[0] -
2273 slc3->seglen;
2274 opensl3[0] = slc3->newoff;
2275 }
2276 slc3->newintr = curintr;
2277 coeff3++;
2278 slc3 = ctrl->sched.chc3[coeff3];
2279 }
2280 /* update 4k openslot records */
2281 if (opensl1valid == false) {
2282 for (i = 0; i < 3; i++) {
2283 opensl1[i * 2] = opensl3[0];
2284 opensl1[(i * 2) + 1] = opensl3[1];
2285 }
2286 } else {
2287 int opensl1p[6];
2288 memcpy(opensl1p, opensl1, sizeof(opensl1));
2289 for (i = 0; i < 3; i++) {
2290 if (opensl1p[i] < opensl1p[i + 3])
2291 opensl1[(i * 2) + 1] =
2292 opensl1p[i];
2293 else
2294 opensl1[(i * 2) + 1] =
2295 opensl1p[i + 3];
2296 }
2297 for (i = 0; i < 3; i++) {
2298 opensl1[(i * 2) + 1] -= curmaxsl;
2299 opensl1[i * 2] = curmaxsl;
2300 if (opensl1[(i * 2) + 1] < 0) {
2301 opensl1[i * 2] +=
2302 opensl1[(i * 2) + 1];
2303 opensl1[(i * 2) + 1] = 0;
2304 }
2305 if (opensl1[i * 2] < 0) {
2306 dev_dbg(&ctrl->dev,
2307 "reconfig failed:%d\n",
2308 __LINE__);
2309 return -EXFULL;
2310 }
2311 }
2312 }
2313 /* schedule 4k family channels */
2314 while (coeff1 < ctrl->sched.num_cc1 &&
2315 curexp == (int)slc1->rootexp + expshft) {
2316 /* searchorder effective when opensl valid */
2317 static const int srcho[] = { 5, 2, 4, 1, 3, 0 };
2318 int maxopensl = 0;
2319 int maxi = 0;
2320 if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
2321 coeff1++;
2322 slc1 = ctrl->sched.chc1[coeff1];
2323 continue;
2324 }
2325 opensl1valid = true;
2326 for (i = 0; i < 6; i++) {
2327 if (opensl1[srcho[i]] > maxopensl) {
2328 maxopensl = opensl1[srcho[i]];
2329 maxi = srcho[i];
2330 }
2331 }
2332 opensl1[maxi] -= slc1->seglen;
2333 slc1->newoff = (curmaxsl * maxi) +
2334 opensl1[maxi];
2335 if (opensl1[maxi] < 0) {
2336 if (((maxi & 1) == 1) &&
2337 (opensl1[maxi - 1] == curmaxsl)) {
2338 opensl1[maxi - 1] +=
2339 opensl1[maxi];
2340 if (opensl3[0] >
2341 opensl1[maxi - 1])
2342 opensl3[0] =
2343 opensl1[maxi - 1];
2344 opensl3[1] = 0;
2345 opensl1[maxi] = 0;
2346 if (opensl1[maxi - 1] < 0) {
2347 dev_dbg(&ctrl->dev,
2348 "reconfig failed:%d\n",
2349 __LINE__);
2350 return -EXFULL;
2351 }
2352 } else {
2353 dev_dbg(&ctrl->dev,
2354 "reconfig failed:%d\n",
2355 __LINE__);
2356 return -EXFULL;
2357 }
2358 } else {
2359 if (opensl3[maxi & 1] > opensl1[maxi])
2360 opensl3[maxi & 1] =
2361 opensl1[maxi];
2362 }
2363 slc1->newintr = curintr * 3;
2364 coeff1++;
2365 slc1 = ctrl->sched.chc1[coeff1];
2366 }
2367 }
Sagar Dhariaa00f61f2012-04-21 15:10:08 -06002368 /* Leave some slots for messaging space */
Sagar Dharia90a06cc2012-06-25 12:44:02 -06002369 if (opensl3[1] <= 0 && opensl3[0] <= 0)
Sagar Dhariaa00f61f2012-04-21 15:10:08 -06002370 return -EXFULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002371 /* swap 1st and 2nd bucket if 2nd bucket has more open slots */
2372 if (opensl3[1] > opensl3[0]) {
2373 int temp = opensl3[0];
2374 opensl3[0] = opensl3[1];
2375 opensl3[1] = temp;
2376 temp = opensl1[5];
2377 opensl1[5] = opensl1[4];
2378 opensl1[4] = opensl1[3];
2379 opensl1[3] = opensl1[2];
2380 opensl1[2] = opensl1[1];
2381 opensl1[1] = opensl1[0];
2382 opensl1[0] = temp;
2383 shiftsegoffsets(ctrl, ctrl->sched.chc1,
2384 ctrl->sched.num_cc1, curmaxsl);
2385 shiftsegoffsets(ctrl, ctrl->sched.chc3,
2386 ctrl->sched.num_cc3, curmaxsl);
2387 }
2388 /* subframe mode to maximize BW */
2389 maxctrlw3 = opensl3[0];
2390 maxctrlw1 = opensl1[0];
2391 if (opensl3[0] == curmaxsl)
2392 maxctrlw3 += opensl3[1];
2393 for (i = 0; i < 5 && opensl1[i] == curmaxsl; i++)
2394 maxctrlw1 += opensl1[i + 1];
2395 if (curintr >= 32) {
2396 *subfrml = 32;
2397 *ctrlw = maxctrlw3;
2398 } else if (curintr == 16) {
2399 if (maxctrlw3 > (opensl3[1] * 4)) {
2400 *subfrml = 32;
2401 *ctrlw = maxctrlw3;
2402 } else {
2403 *subfrml = 8;
2404 *ctrlw = opensl3[1];
2405 }
2406 } else {
2407 if ((maxctrlw1 * 8) >= (maxctrlw3 * 24)) {
2408 *subfrml = 24;
2409 *ctrlw = maxctrlw1;
2410 } else {
2411 *subfrml = 8;
2412 *ctrlw = maxctrlw3;
2413 }
2414 }
2415 }
2416 return 0;
2417}
2418
2419#ifdef DEBUG
2420static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
2421 u32 subfrml, u32 clkgear)
2422{
2423 int sl, i;
2424 int cc1 = 0;
2425 int cc3 = 0;
2426 struct slim_ich *slc = NULL;
2427 if (!ctrl->sched.slots)
2428 return 0;
2429 memset(ctrl->sched.slots, 0, SLIM_SL_PER_SUPERFRAME);
2430 dev_dbg(&ctrl->dev, "Clock gear is:%d\n", clkgear);
2431 for (sl = 0; sl < SLIM_SL_PER_SUPERFRAME; sl += subfrml) {
2432 for (i = 0; i < ctrlw; i++)
2433 ctrl->sched.slots[sl + i] = 33;
2434 }
2435 while (cc1 < ctrl->sched.num_cc1) {
2436 slc = ctrl->sched.chc1[cc1];
2437 if (slc == NULL) {
2438 dev_err(&ctrl->dev, "SLC1 null in verify: chan%d\n",
2439 cc1);
2440 return -EIO;
2441 }
2442 dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
2443 (slc - ctrl->chans), slc->newoff,
2444 slc->newintr, slc->seglen);
2445
2446 if (slc->state != SLIM_CH_PENDING_REMOVAL) {
2447 for (sl = slc->newoff;
2448 sl < SLIM_SL_PER_SUPERFRAME;
2449 sl += slc->newintr) {
2450 for (i = 0; i < slc->seglen; i++) {
2451 if (ctrl->sched.slots[sl + i])
2452 return -EXFULL;
2453 ctrl->sched.slots[sl + i] = cc1 + 1;
2454 }
2455 }
2456 }
2457 cc1++;
2458 }
2459 while (cc3 < ctrl->sched.num_cc3) {
2460 slc = ctrl->sched.chc3[cc3];
2461 if (slc == NULL) {
2462 dev_err(&ctrl->dev, "SLC3 null in verify: chan%d\n",
2463 cc3);
2464 return -EIO;
2465 }
2466 dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
2467 (slc - ctrl->chans), slc->newoff,
2468 slc->newintr, slc->seglen);
2469 if (slc->state != SLIM_CH_PENDING_REMOVAL) {
2470 for (sl = slc->newoff;
2471 sl < SLIM_SL_PER_SUPERFRAME;
2472 sl += slc->newintr) {
2473 for (i = 0; i < slc->seglen; i++) {
2474 if (ctrl->sched.slots[sl + i])
2475 return -EXFULL;
2476 ctrl->sched.slots[sl + i] = cc3 + 1;
2477 }
2478 }
2479 }
2480 cc3++;
2481 }
2482
2483 return 0;
2484}
2485#else
2486static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
2487 u32 subfrml, u32 clkgear)
2488{
2489 return 0;
2490}
2491#endif
2492
2493static void slim_sort_chan_grp(struct slim_controller *ctrl,
2494 struct slim_ich *slc)
2495{
2496 u8 last = (u8)-1;
2497 u8 second = 0;
2498
2499 for (; last > 0; last--) {
2500 struct slim_ich *slc1 = slc;
2501 struct slim_ich *slc2;
Sagar Dharia29f35f02011-10-01 20:37:50 -06002502 u8 next = SLIM_HDL_TO_CHIDX(slc1->nextgrp);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002503 slc2 = &ctrl->chans[next];
2504 for (second = 1; second <= last && slc2 &&
2505 (slc2->state == SLIM_CH_ACTIVE ||
2506 slc2->state == SLIM_CH_PENDING_ACTIVE); second++) {
2507 if (slc1->newoff > slc2->newoff) {
2508 u32 temp = slc2->newoff;
2509 slc2->newoff = slc1->newoff;
2510 slc1->newoff = temp;
2511 }
2512 if (slc2->nextgrp & SLIM_END_GRP) {
2513 last = second;
2514 break;
2515 }
2516 slc1 = slc2;
Sagar Dharia29f35f02011-10-01 20:37:50 -06002517 next = SLIM_HDL_TO_CHIDX(slc1->nextgrp);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002518 slc2 = &ctrl->chans[next];
2519 }
2520 if (slc2 == NULL)
2521 last = second - 1;
2522 }
2523}
2524
2525
2526static int slim_allocbw(struct slim_device *sb, int *subfrmc, int *clkgear)
2527{
2528 u32 msgsl = 0;
2529 u32 ctrlw = 0;
2530 u32 subfrml = 0;
2531 int ret = -EIO;
2532 struct slim_controller *ctrl = sb->ctrl;
2533 u32 usedsl = ctrl->sched.usedslots + ctrl->sched.pending_msgsl;
2534 u32 availsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME -
2535 SLIM_GDE_SLOTS_PER_SUPERFRAME;
2536 *clkgear = SLIM_MAX_CLK_GEAR;
2537
2538 dev_dbg(&ctrl->dev, "used sl:%u, available sl:%u\n", usedsl, availsl);
2539 dev_dbg(&ctrl->dev, "pending:chan sl:%u, :msg sl:%u, clkgear:%u\n",
2540 ctrl->sched.usedslots,
2541 ctrl->sched.pending_msgsl, *clkgear);
Sagar Dharia33f34442011-08-08 16:22:03 -06002542 /*
2543 * If the number of used slots is 0, the channels are inactive.
2544 * It is very likely that the manager will call clock pause very soon.
2545 * By making sure that the bus is in MAX_GEAR, the clock pause sequence will
2546 * take the minimum amount of time.
2547 */
2548 if (ctrl->sched.usedslots != 0) {
2549 while ((usedsl * 2 <= availsl) && (*clkgear > ctrl->min_cg)) {
2550 *clkgear -= 1;
2551 usedsl *= 2;
2552 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002553 }
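	/*
	 * Example (a sketch): if the used slots take up roughly 1/10th of
	 * availsl, the loop above halves the clock three times (1/10 -> 2/10
	 * -> 4/10 -> 8/10 of availsl), so *clkgear ends up at
	 * SLIM_MAX_CLK_GEAR - 3, subject to ctrl->min_cg.
	 */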
2554
2555 /*
2556 * Try scheduling data channels at the current clock gear; if not all
2557 * channels can be scheduled, or the reserved message bandwidth cannot be
2558 * satisfied, increase the clock gear and try again
2559 */
Sagar Dharia98a7ecb2011-07-25 15:25:35 -06002560 for (; *clkgear <= ctrl->max_cg; (*clkgear)++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002561 ret = slim_sched_chans(sb, *clkgear, &ctrlw, &subfrml);
2562
2563 if (ret == 0) {
2564 *subfrmc = getsubfrmcoding(&ctrlw, &subfrml, &msgsl);
Sagar Dharia98a7ecb2011-07-25 15:25:35 -06002565 if ((msgsl >> (ctrl->max_cg - *clkgear) <
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002566 ctrl->sched.pending_msgsl) &&
Sagar Dharia98a7ecb2011-07-25 15:25:35 -06002567 (*clkgear < ctrl->max_cg))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002568 continue;
2569 else
2570 break;
2571 }
2572 }
2573 if (ret == 0) {
2574 int i;
2575 /* Sort channel-groups */
2576 for (i = 0; i < ctrl->sched.num_cc1; i++) {
2577 struct slim_ich *slc = ctrl->sched.chc1[i];
2578 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2579 continue;
2580 if ((slc->nextgrp & SLIM_START_GRP) &&
2581 !(slc->nextgrp & SLIM_END_GRP)) {
2582 slim_sort_chan_grp(ctrl, slc);
2583 }
2584 }
2585 for (i = 0; i < ctrl->sched.num_cc3; i++) {
2586 struct slim_ich *slc = ctrl->sched.chc3[i];
2587 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2588 continue;
2589 if ((slc->nextgrp & SLIM_START_GRP) &&
2590 !(slc->nextgrp & SLIM_END_GRP)) {
2591 slim_sort_chan_grp(ctrl, slc);
2592 }
2593 }
2594
2595 ret = slim_verifychansched(ctrl, ctrlw, subfrml, *clkgear);
2596 }
2597
2598 return ret;
2599}
2600
Sagar Dhariaa0f6b672011-08-13 17:36:55 -06002601static void slim_change_existing_chans(struct slim_controller *ctrl, int coeff)
2602{
2603 struct slim_ich **arr;
2604 int len, i;
2605 if (coeff == SLIM_COEFF_1) {
2606 arr = ctrl->sched.chc1;
2607 len = ctrl->sched.num_cc1;
2608 } else {
2609 arr = ctrl->sched.chc3;
2610 len = ctrl->sched.num_cc3;
2611 }
2612 for (i = 0; i < len; i++) {
2613 struct slim_ich *slc = arr[i];
2614 if (slc->state == SLIM_CH_ACTIVE ||
2615 slc->state == SLIM_CH_SUSPENDED)
2616 slc->offset = slc->newoff;
2617 slc->interval = slc->newintr;
2618 }
2619}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002620static void slim_chan_changes(struct slim_device *sb, bool revert)
2621{
2622 struct slim_controller *ctrl = sb->ctrl;
2623 while (!list_empty(&sb->mark_define)) {
2624 struct slim_ich *slc;
2625 struct slim_pending_ch *pch =
2626 list_entry(sb->mark_define.next,
2627 struct slim_pending_ch, pending);
2628 slc = &ctrl->chans[pch->chan];
2629 if (revert) {
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002630 if (slc->state == SLIM_CH_PENDING_ACTIVE) {
2631 u32 sl = slc->seglen << slc->rootexp;
2632 if (slc->coeff == SLIM_COEFF_3)
2633 sl *= 3;
2634 ctrl->sched.usedslots -= sl;
2635 slim_remove_ch(ctrl, slc);
2636 slc->state = SLIM_CH_DEFINED;
2637 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002638 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002639 slc->state = SLIM_CH_ACTIVE;
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002640 slc->def++;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002641 }
2642 list_del_init(&pch->pending);
2643 kfree(pch);
2644 }
2645
2646 while (!list_empty(&sb->mark_removal)) {
2647 struct slim_pending_ch *pch =
2648 list_entry(sb->mark_removal.next,
2649 struct slim_pending_ch, pending);
2650 struct slim_ich *slc = &ctrl->chans[pch->chan];
2651 u32 sl = slc->seglen << slc->rootexp;
Sagar Dhariae8f6c9a2013-02-22 19:06:39 -07002652 if (revert || slc->def > 0) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002653 if (slc->coeff == SLIM_COEFF_3)
2654 sl *= 3;
2655 ctrl->sched.usedslots += sl;
Sagar Dhariae8f6c9a2013-02-22 19:06:39 -07002656 if (revert)
2657 slc->def++;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002658 slc->state = SLIM_CH_ACTIVE;
2659 } else
2660 slim_remove_ch(ctrl, slc);
2661 list_del_init(&pch->pending);
2662 kfree(pch);
2663 }
2664
2665 while (!list_empty(&sb->mark_suspend)) {
2666 struct slim_pending_ch *pch =
2667 list_entry(sb->mark_suspend.next,
2668 struct slim_pending_ch, pending);
2669 struct slim_ich *slc = &ctrl->chans[pch->chan];
2670 if (revert)
2671 slc->state = SLIM_CH_ACTIVE;
2672 list_del_init(&pch->pending);
2673 kfree(pch);
2674 }
Sagar Dhariaa0f6b672011-08-13 17:36:55 -06002675 /* Change already active channel if reconfig succeeded */
2676 if (!revert) {
2677 slim_change_existing_chans(ctrl, SLIM_COEFF_1);
2678 slim_change_existing_chans(ctrl, SLIM_COEFF_3);
2679 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002680}
2681
2682/*
2683 * slim_reconfigure_now: Request reconfiguration now.
2684 * @sb: client handle
2685 * This API does what the commit flag in other scheduling APIs does.
2686 * -EXFULL is returned if there is no space in TDM to reserve the
2687 * bandwidth. -EBUSY is returned if reconfiguration request is already in
2688 * progress.
2689 */
2690int slim_reconfigure_now(struct slim_device *sb)
2691{
2692 u8 i;
2693 u8 wbuf[4];
2694 u32 clkgear, subframe;
2695 u32 curexp;
2696 int ret;
2697 struct slim_controller *ctrl = sb->ctrl;
2698 u32 expshft;
2699 u32 segdist;
2700 struct slim_pending_ch *pch;
2701
Sagar Dharia80a55e12012-08-16 16:43:58 -06002702 mutex_lock(&ctrl->sched.m_reconf);
Sagar Dharia6e728bd2012-07-26 16:56:44 -06002703 /*
2704 * If there are no pending changes from this client, avoid sending
2705 * the reconfiguration sequence
2706 */
2707 if (sb->pending_msgsl == sb->cur_msgsl &&
2708 list_empty(&sb->mark_define) &&
Sagar Dharia6e728bd2012-07-26 16:56:44 -06002709 list_empty(&sb->mark_suspend)) {
Sagar Dharia80a55e12012-08-16 16:43:58 -06002710 struct list_head *pos, *next;
2711 list_for_each_safe(pos, next, &sb->mark_removal) {
2712 struct slim_ich *slc;
2713 pch = list_entry(pos, struct slim_pending_ch, pending);
2714 slc = &ctrl->chans[pch->chan];
2715 if (slc->def > 0)
2716 slc->def--;
2717 /* Disconnect source port to free it up */
2718 if (SLIM_HDL_TO_LA(slc->srch) == sb->laddr)
2719 slc->srch = 0;
Sagar Dhariae8f6c9a2013-02-22 19:06:39 -07002720 /*
2721 * If controller overrides BW allocation,
2722 * delete this in remove channel itself
2723 */
2724 if (slc->def != 0 && !ctrl->allocbw) {
Sagar Dharia80a55e12012-08-16 16:43:58 -06002725 list_del(&pch->pending);
2726 kfree(pch);
2727 }
2728 }
2729 if (list_empty(&sb->mark_removal)) {
Sagar Dharia80a55e12012-08-16 16:43:58 -06002730 mutex_unlock(&ctrl->sched.m_reconf);
2731 pr_info("SLIM_CL: skip reconfig sequence");
2732 return 0;
2733 }
Sagar Dharia6e728bd2012-07-26 16:56:44 -06002734 }
2735
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002736 ctrl->sched.pending_msgsl += sb->pending_msgsl - sb->cur_msgsl;
2737 list_for_each_entry(pch, &sb->mark_define, pending) {
2738 struct slim_ich *slc = &ctrl->chans[pch->chan];
2739 slim_add_ch(ctrl, slc);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002740 if (slc->state < SLIM_CH_ACTIVE)
2741 slc->state = SLIM_CH_PENDING_ACTIVE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002742 }
2743
2744 list_for_each_entry(pch, &sb->mark_removal, pending) {
2745 struct slim_ich *slc = &ctrl->chans[pch->chan];
2746 u32 sl = slc->seglen << slc->rootexp;
2747 if (slc->coeff == SLIM_COEFF_3)
2748 sl *= 3;
2749 ctrl->sched.usedslots -= sl;
2750 slc->state = SLIM_CH_PENDING_REMOVAL;
2751 }
2752 list_for_each_entry(pch, &sb->mark_suspend, pending) {
2753 struct slim_ich *slc = &ctrl->chans[pch->chan];
2754 slc->state = SLIM_CH_SUSPENDED;
2755 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002756
Sagar Dharia4aec9232012-07-24 23:44:26 -06002757 /*
2758 * Controller can override default channel scheduling algorithm.
2759 * (e.g. if controller needs to use fixed channel scheduling based
2760 * on number of channels)
2761 */
2762 if (ctrl->allocbw)
2763 ret = ctrl->allocbw(sb, &subframe, &clkgear);
2764 else
2765 ret = slim_allocbw(sb, &subframe, &clkgear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002766
2767 if (!ret) {
2768 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2769 SLIM_MSG_MC_BEGIN_RECONFIGURATION, 0, SLIM_MSG_MT_CORE,
2770 NULL, NULL, 0, 3, NULL, 0, NULL);
2771 dev_dbg(&ctrl->dev, "sending begin_reconfig:ret:%d\n", ret);
2772 }
2773
2774 if (!ret && subframe != ctrl->sched.subfrmcode) {
2775 wbuf[0] = (u8)(subframe & 0xFF);
2776 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2777 SLIM_MSG_MC_NEXT_SUBFRAME_MODE, 0, SLIM_MSG_MT_CORE,
2778 NULL, (u8 *)&subframe, 1, 4, NULL, 0, NULL);
2779 dev_dbg(&ctrl->dev, "sending subframe:%d,ret:%d\n",
2780 (int)wbuf[0], ret);
2781 }
2782 if (!ret && clkgear != ctrl->clkgear) {
2783 wbuf[0] = (u8)(clkgear & 0xFF);
2784 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2785 SLIM_MSG_MC_NEXT_CLOCK_GEAR, 0, SLIM_MSG_MT_CORE,
2786 NULL, wbuf, 1, 4, NULL, 0, NULL);
2787 dev_dbg(&ctrl->dev, "sending clkgear:%d,ret:%d\n",
2788 (int)wbuf[0], ret);
2789 }
2790 if (ret)
2791 goto revert_reconfig;
2792
2793 expshft = SLIM_MAX_CLK_GEAR - clkgear;
2794 /* activate/remove channel */
2795 list_for_each_entry(pch, &sb->mark_define, pending) {
2796 struct slim_ich *slc = &ctrl->chans[pch->chan];
2797 /* Define content */
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002798 wbuf[0] = slc->chan;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002799 wbuf[1] = slc->prrate;
2800 wbuf[2] = slc->prop.dataf | (slc->prop.auxf << 4);
2801 wbuf[3] = slc->prop.sampleszbits / SLIM_CL_PER_SL;
2802 dev_dbg(&ctrl->dev, "define content, activate:%x, %x, %x, %x\n",
2803 wbuf[0], wbuf[1], wbuf[2], wbuf[3]);
2804 /* Right now, channel link bit is not supported */
2805 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2806 SLIM_MSG_MC_NEXT_DEFINE_CONTENT, 0,
2807 SLIM_MSG_MT_CORE, NULL, (u8 *)&wbuf, 4, 7,
2808 NULL, 0, NULL);
2809 if (ret)
2810 goto revert_reconfig;
2811
2812 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2813 SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL, 0,
2814 SLIM_MSG_MT_CORE, NULL, (u8 *)&wbuf, 1, 4,
2815 NULL, 0, NULL);
2816 if (ret)
2817 goto revert_reconfig;
2818 }
2819
2820 list_for_each_entry(pch, &sb->mark_removal, pending) {
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002821 struct slim_ich *slc = &ctrl->chans[pch->chan];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002822 dev_dbg(&ctrl->dev, "remove chan:%x\n", pch->chan);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002823 wbuf[0] = slc->chan;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002824 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2825 SLIM_MSG_MC_NEXT_REMOVE_CHANNEL, 0,
2826 SLIM_MSG_MT_CORE, NULL, wbuf, 1, 4,
2827 NULL, 0, NULL);
2828 if (ret)
2829 goto revert_reconfig;
2830 }
2831 list_for_each_entry(pch, &sb->mark_suspend, pending) {
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002832 struct slim_ich *slc = &ctrl->chans[pch->chan];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002833 dev_dbg(&ctrl->dev, "suspend chan:%x\n", pch->chan);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002834 wbuf[0] = slc->chan;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002835 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2836 SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL, 0,
2837 SLIM_MSG_MT_CORE, NULL, wbuf, 1, 4,
2838 NULL, 0, NULL);
2839 if (ret)
2840 goto revert_reconfig;
2841 }
2842
2843 /* Define CC1 channel */
2844 for (i = 0; i < ctrl->sched.num_cc1; i++) {
2845 struct slim_ich *slc = ctrl->sched.chc1[i];
2846 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2847 continue;
2848 curexp = slc->rootexp + expshft;
2849 segdist = (slc->newoff << curexp) & 0x1FF;
2850 expshft = SLIM_MAX_CLK_GEAR - clkgear;
Sagar Dhariaa0f6b672011-08-13 17:36:55 -06002851 dev_dbg(&ctrl->dev, "new-intr:%d, old-intr:%d, dist:%d\n",
2852 slc->newintr, slc->interval, segdist);
2853 dev_dbg(&ctrl->dev, "new-off:%d, old-off:%d\n",
2854 slc->newoff, slc->offset);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002855
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002856 if (slc->state < SLIM_CH_ACTIVE || slc->def < slc->ref ||
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002857 slc->newintr != slc->interval ||
2858 slc->newoff != slc->offset) {
2859 segdist |= 0x200;
2860 segdist >>= curexp;
2861 segdist |= (slc->newoff << (curexp + 1)) & 0xC00;
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002862 wbuf[0] = slc->chan;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002863 wbuf[1] = (u8)(segdist & 0xFF);
2864 wbuf[2] = (u8)((segdist & 0xF00) >> 8) |
2865 (slc->prop.prot << 4);
2866 wbuf[3] = slc->seglen;
2867 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2868 SLIM_MSG_MC_NEXT_DEFINE_CHANNEL, 0,
2869 SLIM_MSG_MT_CORE, NULL, (u8 *)wbuf, 4,
2870 7, NULL, 0, NULL);
2871 if (ret)
2872 goto revert_reconfig;
2873 }
2874 }
2875
2876 /* Define CC3 channels */
2877 for (i = 0; i < ctrl->sched.num_cc3; i++) {
2878 struct slim_ich *slc = ctrl->sched.chc3[i];
2879 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2880 continue;
2881 curexp = slc->rootexp + expshft;
2882 segdist = (slc->newoff << curexp) & 0x1FF;
2883 expshft = SLIM_MAX_CLK_GEAR - clkgear;
Sagar Dhariaa0f6b672011-08-13 17:36:55 -06002884 dev_dbg(&ctrl->dev, "new-intr:%d, old-intr:%d, dist:%d\n",
2885 slc->newintr, slc->interval, segdist);
2886 dev_dbg(&ctrl->dev, "new-off:%d, old-off:%d\n",
2887 slc->newoff, slc->offset);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002888
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002889 if (slc->state < SLIM_CH_ACTIVE || slc->def < slc->ref ||
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002890 slc->newintr != slc->interval ||
2891 slc->newoff != slc->offset) {
2892 segdist |= 0x200;
2893 segdist >>= curexp;
2894 segdist |= 0xC00;
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002895 wbuf[0] = slc->chan;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002896 wbuf[1] = (u8)(segdist & 0xFF);
2897 wbuf[2] = (u8)((segdist & 0xF00) >> 8) |
2898 (slc->prop.prot << 4);
2899 wbuf[3] = (u8)(slc->seglen);
2900 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2901 SLIM_MSG_MC_NEXT_DEFINE_CHANNEL, 0,
2902 SLIM_MSG_MT_CORE, NULL, (u8 *)wbuf, 4,
2903 7, NULL, 0, NULL);
2904 if (ret)
2905 goto revert_reconfig;
2906 }
2907 }
2908 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2909 SLIM_MSG_MC_RECONFIGURE_NOW, 0, SLIM_MSG_MT_CORE, NULL,
2910 NULL, 0, 3, NULL, 0, NULL);
2911 dev_dbg(&ctrl->dev, "reconfig now:ret:%d\n", ret);
2912 if (!ret) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002913 ctrl->sched.subfrmcode = subframe;
2914 ctrl->clkgear = clkgear;
2915 ctrl->sched.msgsl = ctrl->sched.pending_msgsl;
2916 sb->cur_msgsl = sb->pending_msgsl;
2917 slim_chan_changes(sb, false);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002918 mutex_unlock(&ctrl->sched.m_reconf);
2919 return 0;
2920 }
2921
2922revert_reconfig:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002923 /* Revert channel changes */
2924 slim_chan_changes(sb, true);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002925 mutex_unlock(&ctrl->sched.m_reconf);
2926 return ret;
2927}
2928EXPORT_SYMBOL_GPL(slim_reconfigure_now);
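
/*
 * Usage sketch: a client can batch several scheduling changes and commit
 * them with a single reconfiguration sequence by passing commit == false to
 * the individual calls (slim_control_ch() and slim_reservemsg_bw() are
 * defined further below in this file):
 *
 *	slim_control_ch(sb, grph, SLIM_CH_ACTIVATE, false);
 *	slim_reservemsg_bw(sb, 100000, false);
 *	ret = slim_reconfigure_now(sb);
 */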
2929
2930static int add_pending_ch(struct list_head *listh, u8 chan)
2931{
2932 struct slim_pending_ch *pch;
2933 pch = kmalloc(sizeof(struct slim_pending_ch), GFP_KERNEL);
2934 if (!pch)
2935 return -ENOMEM;
2936 pch->chan = chan;
2937 list_add_tail(&pch->pending, listh);
2938 return 0;
2939}
2940
2941/*
2942 * slim_control_ch: Channel control API.
2943 * @sb: client handle
2944 * @chanh: group or channel handle to be controlled
2945 * @chctrl: Control command (activate/suspend/remove)
2946 * @commit: flag to indicate whether the control should take effect right away.
2947 * This API activates, removes or suspends a channel (or group of channels)
2948 * chanh indicates the channel or group handle (returned by the define_ch API).
2949 * Reconfiguration may be time-consuming since it can change all other active
2950 * channel allocations on the bus, the clock gear used by the slimbus,
2951 * and the control space width used for messaging.
2952 * commit makes sure that multiple channels can be activated/deactivated before
2953 * reconfiguration is started.
2954 * -EXFULL is returned if there is no space in TDM to reserve the bandwidth.
2955 * -EISCONN/-ENOTCONN is returned if the channel is already connected or not
2956 * yet defined.
Sagar Dharia2e7026a2012-02-21 17:48:14 -07002957 * -EINVAL is returned if individual control of a grouped-channel is attempted.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002958 */
2959int slim_control_ch(struct slim_device *sb, u16 chanh,
2960 enum slim_ch_control chctrl, bool commit)
2961{
2962 struct slim_controller *ctrl = sb->ctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002963 int ret = 0;
2964 /* Get rid of the group flag in MSB if any */
Sagar Dharia29f35f02011-10-01 20:37:50 -06002965 u8 chan = SLIM_HDL_TO_CHIDX(chanh);
Sagar Dhariab886e042012-10-17 22:41:57 -06002966 u8 nchan = 0;
Sagar Dharia2e7026a2012-02-21 17:48:14 -07002967 struct slim_ich *slc = &ctrl->chans[chan];
2968 if (!(slc->nextgrp & SLIM_START_GRP))
2969 return -EINVAL;
2970
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002971 mutex_lock(&sb->sldev_reconf);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002972 do {
Kiran Gunda3dad0212012-10-09 13:30:13 +05302973 struct slim_pending_ch *pch;
2974 u8 add_mark_removal = true;
2975
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002976 slc = &ctrl->chans[chan];
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002977 dev_dbg(&ctrl->dev, "chan:%d,ctrl:%d,def:%d", chan, chctrl,
2978 slc->def);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002979 if (slc->state < SLIM_CH_DEFINED) {
2980 ret = -ENOTCONN;
2981 break;
2982 }
2983 if (chctrl == SLIM_CH_SUSPEND) {
2984 ret = add_pending_ch(&sb->mark_suspend, chan);
2985 if (ret)
2986 break;
2987 } else if (chctrl == SLIM_CH_ACTIVATE) {
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002988 if (slc->state > SLIM_CH_ACTIVE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002989 ret = -EISCONN;
2990 break;
2991 }
2992 ret = add_pending_ch(&sb->mark_define, chan);
2993 if (ret)
2994 break;
2995 } else {
2996 if (slc->state < SLIM_CH_ACTIVE) {
2997 ret = -ENOTCONN;
2998 break;
2999 }
Kiran Gunda3dad0212012-10-09 13:30:13 +05303000 /* If a channel-removal request arrives while the channel is
3001 * still pending in mark_define, remove it from the define
3002 * list instead of adding it to the removal list
3003 */
3004 if (!list_empty(&sb->mark_define)) {
3005 struct list_head *pos, *next;
3006 list_for_each_safe(pos, next,
3007 &sb->mark_define) {
3008 pch = list_entry(pos,
3009 struct slim_pending_ch,
3010 pending);
3011 if (pch->chan == slc->chan) {
3012 list_del(&pch->pending);
3013 kfree(pch);
3014 add_mark_removal = false;
3015 break;
3016 }
3017 }
3018 }
3019 if (add_mark_removal == true) {
3020 ret = add_pending_ch(&sb->mark_removal, chan);
3021 if (ret)
3022 break;
3023 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003024 }
3025
Sagar Dhariab886e042012-10-17 22:41:57 -06003026 nchan++;
3027 if (nchan < SLIM_GRP_TO_NCHAN(chanh))
Sagar Dharia29f35f02011-10-01 20:37:50 -06003028 chan = SLIM_HDL_TO_CHIDX(slc->nextgrp);
Sagar Dhariab886e042012-10-17 22:41:57 -06003029 } while (nchan < SLIM_GRP_TO_NCHAN(chanh));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003030 if (!ret && commit == true)
3031 ret = slim_reconfigure_now(sb);
3032 mutex_unlock(&sb->sldev_reconf);
3033 return ret;
3034}
3035EXPORT_SYMBOL_GPL(slim_control_ch);
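
/*
 * Usage sketch: activate a previously defined group in one reconfiguration,
 * and suspend it later when the stream stops (grph is the group handle
 * returned by slim_define_ch()):
 *
 *	ret = slim_control_ch(sb, grph, SLIM_CH_ACTIVATE, true);
 *	...
 *	ret = slim_control_ch(sb, grph, SLIM_CH_SUSPEND, true);
 */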
3036
3037/*
3038 * slim_reservemsg_bw: Request to reserve bandwidth for messages.
3039 * @sb: client handle
3040 * @bw_bps: message bandwidth in bits per second to be requested
3041 * @commit: indicates whether the reconfiguration needs to be acted upon.
3042 * This API call can be grouped with a slim_control_ch API call, with only one
3043 * of the two specifying the commit flag, to avoid invoking reconfiguration too
3044 * frequently. -EXFULL is returned if there is no space in TDM to reserve the
3045 * bandwidth. -EBUSY is returned if reconfiguration is requested, but a request
3046 * is already in progress.
3047 */
3048int slim_reservemsg_bw(struct slim_device *sb, u32 bw_bps, bool commit)
3049{
3050 struct slim_controller *ctrl = sb->ctrl;
3051 int ret = 0;
3052 int sl;
3053 mutex_lock(&sb->sldev_reconf);
3054 if ((bw_bps >> 3) >= ctrl->a_framer->rootfreq)
3055 sl = SLIM_SL_PER_SUPERFRAME;
3056 else {
3057 sl = (bw_bps * (SLIM_CL_PER_SUPERFRAME_DIV8/SLIM_CL_PER_SL/2) +
3058 (ctrl->a_framer->rootfreq/2 - 1)) /
3059 (ctrl->a_framer->rootfreq/2);
3060 }
3061 dev_dbg(&ctrl->dev, "request:bw:%d, slots:%d, current:%d\n", bw_bps, sl,
3062 sb->cur_msgsl);
3063 sb->pending_msgsl = sl;
3064 if (commit == true)
3065 ret = slim_reconfigure_now(sb);
3066 mutex_unlock(&sb->sldev_reconf);
3067 return ret;
3068}
3069EXPORT_SYMBOL_GPL(slim_reservemsg_bw);
3070
Sagar Dharia33f34442011-08-08 16:22:03 -06003071/*
3072 * slim_ctrl_clk_pause: Called by the slimbus controller to request that the
3073 * clock be paused, or to wake the bus up out of clock pause
3075 * @ctrl: controller requesting bus to be paused or woken up
3076 * @wakeup: Wakeup this controller from clock pause.
3077 * @restart: Restart time value, per the spec, used for clock pause. This
3078 * value isn't used when the controller is to be woken up.
3079 * This API executes the clock pause reconfiguration sequence if wakeup is
3080 * false; the slimbus clock is then idle and can be disabled by the controller
3081 * later. If wakeup is true, the controller's wakeup callback is called instead.
3082 */
3083int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup, u8 restart)
3084{
3085 int ret = 0;
3086 int i;
3087
3088 if (wakeup == false && restart > SLIM_CLK_UNSPECIFIED)
3089 return -EINVAL;
3090 mutex_lock(&ctrl->m_ctrl);
3091 if (wakeup) {
3092 if (ctrl->clk_state == SLIM_CLK_ACTIVE) {
3093 mutex_unlock(&ctrl->m_ctrl);
3094 return 0;
3095 }
3096 wait_for_completion(&ctrl->pause_comp);
3097 /*
3098 * The slimbus framework will call the controller's wakeup callback.
3099 * The controller should make sure that it brings the active framer
3100 * out of clock pause by doing the appropriate setup
3101 */
3102 if (ctrl->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup)
3103 ret = ctrl->wakeup(ctrl);
3104 if (!ret)
3105 ctrl->clk_state = SLIM_CLK_ACTIVE;
3106 mutex_unlock(&ctrl->m_ctrl);
3107 return ret;
3108 } else {
3109 switch (ctrl->clk_state) {
3110 case SLIM_CLK_ENTERING_PAUSE:
3111 case SLIM_CLK_PAUSE_FAILED:
3112 /*
3113 * If controller is already trying to enter clock pause,
3114 * let it finish.
3115 * In case of error, retry
3116 * In both cases, previous clock pause has signalled
3117 * completion.
3118 */
3119 wait_for_completion(&ctrl->pause_comp);
3120 /* retry upon failure */
3121 if (ctrl->clk_state == SLIM_CLK_PAUSE_FAILED) {
3122 ctrl->clk_state = SLIM_CLK_ACTIVE;
3123 break;
3124 } else {
3125 mutex_unlock(&ctrl->m_ctrl);
3126 /*
3127 * Signal completion so that wakeup can wait on
3128 * it.
3129 */
3130 complete(&ctrl->pause_comp);
3131 return 0;
3132 }
3133 break;
3134 case SLIM_CLK_PAUSED:
3135 /* already paused */
3136 mutex_unlock(&ctrl->m_ctrl);
3137 return 0;
3138 case SLIM_CLK_ACTIVE:
3139 default:
3140 break;
3141 }
3142 }
3143 /* Pending response for a message */
3144 for (i = 0; i < ctrl->last_tid; i++) {
3145 if (ctrl->txnt[i]) {
3146 ret = -EBUSY;
Sagar Dharia33beca02012-10-22 16:21:46 -06003147 pr_info("slim_clk_pause: txn-rsp for %d pending", i);
Sagar Dharia33f34442011-08-08 16:22:03 -06003148 mutex_unlock(&ctrl->m_ctrl);
3149 return -EBUSY;
3150 }
3151 }
3152 ctrl->clk_state = SLIM_CLK_ENTERING_PAUSE;
3153 mutex_unlock(&ctrl->m_ctrl);
3154
3155 mutex_lock(&ctrl->sched.m_reconf);
3156 /* Data channels active */
3157 if (ctrl->sched.usedslots) {
Sagar Dharia33beca02012-10-22 16:21:46 -06003158 pr_info("slim_clk_pause: data channel active");
Sagar Dharia33f34442011-08-08 16:22:03 -06003159 ret = -EBUSY;
3160 goto clk_pause_ret;
3161 }
3162
3163 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
Sagar Dharia45ee38a2011-08-03 17:01:31 -06003164 SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_BEGIN_RECONFIGURATION,
3165 0, SLIM_MSG_MT_CORE, NULL, NULL, 0, 3, NULL, 0, NULL);
Sagar Dharia33f34442011-08-08 16:22:03 -06003166 if (ret)
3167 goto clk_pause_ret;
3168
3169 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
Sagar Dharia45ee38a2011-08-03 17:01:31 -06003170 SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_NEXT_PAUSE_CLOCK, 0,
3171 SLIM_MSG_MT_CORE, NULL, &restart, 1, 4, NULL, 0, NULL);
3172 if (ret)
3173 goto clk_pause_ret;
3174
3175 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
3176 SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_RECONFIGURE_NOW, 0,
3177 SLIM_MSG_MT_CORE, NULL, NULL, 0, 3, NULL, 0, NULL);
Sagar Dharia33f34442011-08-08 16:22:03 -06003178 if (ret)
3179 goto clk_pause_ret;
3180
3181clk_pause_ret:
3182 if (ret)
3183 ctrl->clk_state = SLIM_CLK_PAUSE_FAILED;
3184 else
3185 ctrl->clk_state = SLIM_CLK_PAUSED;
3186 complete(&ctrl->pause_comp);
3187 mutex_unlock(&ctrl->sched.m_reconf);
3188 return ret;
3189}
Sagar Dharia88821fb2012-07-24 23:04:32 -06003190EXPORT_SYMBOL_GPL(slim_ctrl_clk_pause);
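
/*
 * Controller-side sketch: request clock pause once the bus is idle, and wake
 * it back up before the next transaction (the restart value is ignored on
 * wakeup):
 *
 *	ret = slim_ctrl_clk_pause(ctrl, false, SLIM_CLK_UNSPECIFIED);
 *	...
 *	ret = slim_ctrl_clk_pause(ctrl, true, 0);
 */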
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003191
3192MODULE_LICENSE("GPL v2");
3193MODULE_VERSION("0.1");
3194MODULE_DESCRIPTION("Slimbus module");
3195MODULE_ALIAS("platform:slimbus");