/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/idr.h>
#include <linux/pm_runtime.h>
#include <linux/slimbus/slimbus.h>

#define SLIM_PORT_HDL(la, f, p) ((la)<<24 | (f) << 16 | (p))

#define SLIM_HDL_TO_LA(hdl) ((u32)((hdl) & 0xFF000000) >> 24)
#define SLIM_HDL_TO_FLOW(hdl) (((u32)(hdl) & 0xFF0000) >> 16)
#define SLIM_HDL_TO_PORT(hdl) ((u32)(hdl) & 0xFF)

#define SLIM_HDL_TO_CHIDX(hdl) ((u16)(hdl) & 0xFF)

#define SLIM_SLAVE_PORT(p, la) (((la)<<16) | (p))
#define SLIM_MGR_PORT(p) ((0xFF << 16) | (p))
#define SLIM_LA_MANAGER 0xFF

#define SLIM_START_GRP (1 << 8)
#define SLIM_END_GRP (1 << 9)

#define SLIM_MAX_INTR_COEFF_3 (SLIM_SL_PER_SUPERFRAME/3)
#define SLIM_MAX_INTR_COEFF_1 SLIM_SL_PER_SUPERFRAME

static DEFINE_MUTEX(slim_lock);
static DEFINE_IDR(ctrl_idr);
static struct device_type slim_dev_type;
static struct device_type slim_ctrl_type;

static const struct slim_device_id *slim_match(const struct slim_device_id *id,
					const struct slim_device *slim_dev)
{
	while (id->name[0]) {
		if (strncmp(slim_dev->name, id->name, SLIMBUS_NAME_SIZE) == 0)
			return id;
		id++;
	}
	return NULL;
}

static int slim_device_match(struct device *dev, struct device_driver *driver)
{
	struct slim_device *slim_dev;
	struct slim_driver *drv = to_slim_driver(driver);

	if (dev->type == &slim_dev_type)
		slim_dev = to_slim_device(dev);
	else
		return 0;
	if (drv->id_table)
		return slim_match(drv->id_table, slim_dev) != NULL;

	if (driver->name)
		return strncmp(slim_dev->name, driver->name, SLIMBUS_NAME_SIZE)
			== 0;
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int slim_legacy_suspend(struct device *dev, pm_message_t mesg)
{
	struct slim_device *slim_dev = NULL;
	struct slim_driver *driver;
	if (dev->type == &slim_dev_type)
		slim_dev = to_slim_device(dev);

	if (!slim_dev || !dev->driver)
		return 0;

	driver = to_slim_driver(dev->driver);
	if (!driver->suspend)
		return 0;

	return driver->suspend(slim_dev, mesg);
}

static int slim_legacy_resume(struct device *dev)
{
	struct slim_device *slim_dev = NULL;
	struct slim_driver *driver;
	if (dev->type == &slim_dev_type)
		slim_dev = to_slim_device(dev);

	if (!slim_dev || !dev->driver)
		return 0;

	driver = to_slim_driver(dev->driver);
	if (!driver->resume)
		return 0;

	return driver->resume(slim_dev);
}

static int slim_pm_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_suspend(dev);
	else
		return slim_legacy_suspend(dev, PMSG_SUSPEND);
}

static int slim_pm_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_resume(dev);
	else
		return slim_legacy_resume(dev);
}

#else
#define slim_pm_suspend	NULL
#define slim_pm_resume	NULL
#endif

static const struct dev_pm_ops slimbus_pm = {
	.suspend = slim_pm_suspend,
	.resume = slim_pm_resume,
	SET_RUNTIME_PM_OPS(
		pm_generic_suspend,
		pm_generic_resume,
		pm_generic_runtime_idle
	)
};
struct bus_type slimbus_type = {
	.name = "slimbus",
	.match = slim_device_match,
	.pm = &slimbus_pm,
};
EXPORT_SYMBOL_GPL(slimbus_type);

struct device slimbus_dev = {
	.init_name = "slimbus",
};

static void __exit slimbus_exit(void)
{
	device_unregister(&slimbus_dev);
	bus_unregister(&slimbus_type);
}

static int __init slimbus_init(void)
{
	int retval;

	retval = bus_register(&slimbus_type);
	if (!retval)
		retval = device_register(&slimbus_dev);

	if (retval)
		bus_unregister(&slimbus_type);

	return retval;
}
postcore_initcall(slimbus_init);
module_exit(slimbus_exit);

static int slim_drv_probe(struct device *dev)
{
	const struct slim_driver *sdrv = to_slim_driver(dev->driver);
	struct slim_device *sbdev = to_slim_device(dev);
	struct slim_controller *ctrl = sbdev->ctrl;

	if (sdrv->probe) {
		int ret;
		ret = sdrv->probe(sbdev);
		if (ret)
			return ret;
		if (sdrv->device_up)
			queue_work(ctrl->wq, &sbdev->wd);
		return 0;
	}
	return -ENODEV;
}

static int slim_drv_remove(struct device *dev)
{
	const struct slim_driver *sdrv = to_slim_driver(dev->driver);
	struct slim_device *sbdev = to_slim_device(dev);

	sbdev->notified = false;
	if (sdrv->remove)
		return sdrv->remove(to_slim_device(dev));
	return -ENODEV;
}

static void slim_drv_shutdown(struct device *dev)
{
	const struct slim_driver *sdrv = to_slim_driver(dev->driver);

	if (sdrv->shutdown)
		sdrv->shutdown(to_slim_device(dev));
}

/*
 * slim_driver_register: Client driver registration with slimbus
 * @drv: Client driver to be associated with client-device.
 * This API registers the client driver with the slimbus framework.
 * It is called from the driver's module-init function.
 */
int slim_driver_register(struct slim_driver *drv)
{
	drv->driver.bus = &slimbus_type;
	if (drv->probe)
		drv->driver.probe = slim_drv_probe;

	if (drv->remove)
		drv->driver.remove = slim_drv_remove;

	if (drv->shutdown)
		drv->driver.shutdown = slim_drv_shutdown;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(slim_driver_register);
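
/*
 * Usage sketch (illustrative only; the "example_codec" names and the probe
 * callback below are hypothetical and not part of this file):
 *
 *	static int example_codec_probe(struct slim_device *sbdev)
 *	{
 *		return 0;
 *	}
 *
 *	static struct slim_driver example_codec_driver = {
 *		.probe = example_codec_probe,
 *		.driver = {
 *			.name = "example-codec",
 *			.owner = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init example_codec_init(void)
 *	{
 *		return slim_driver_register(&example_codec_driver);
 *	}
 *	module_init(example_codec_init);
 *
 * Matching against enumerated devices then happens via the driver's id_table
 * or, as here, via the driver name (see slim_device_match() above).
 */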

#define slim_ctrl_attr_gr NULL

static void slim_ctrl_release(struct device *dev)
{
	struct slim_controller *ctrl = to_slim_controller(dev);

	complete(&ctrl->dev_released);
}

static struct device_type slim_ctrl_type = {
	.groups = slim_ctrl_attr_gr,
	.release = slim_ctrl_release,
};

static struct slim_controller *slim_ctrl_get(struct slim_controller *ctrl)
{
	if (!ctrl || !get_device(&ctrl->dev))
		return NULL;

	return ctrl;
}

static void slim_ctrl_put(struct slim_controller *ctrl)
{
	if (ctrl)
		put_device(&ctrl->dev);
}

#define slim_device_attr_gr NULL
#define slim_device_uevent NULL
static void slim_dev_release(struct device *dev)
{
	struct slim_device *sbdev = to_slim_device(dev);
	slim_ctrl_put(sbdev->ctrl);
}

static struct device_type slim_dev_type = {
	.groups = slim_device_attr_gr,
	.uevent = slim_device_uevent,
	.release = slim_dev_release,
};

static void slim_report_present(struct work_struct *work)
{
	u8 laddr;
	int ret;
	struct slim_driver *sbdrv;
	struct slim_device *sbdev =
			container_of(work, struct slim_device, wd);
	if (sbdev->notified || !sbdev->dev.driver)
		return;
	ret = slim_get_logical_addr(sbdev, sbdev->e_addr, 6, &laddr);
	sbdrv = to_slim_driver(sbdev->dev.driver);
	if (!ret && sbdrv->device_up) {
		sbdev->notified = true;
		sbdrv->device_up(sbdev);
	}
}

/*
 * slim_add_device: Add a new device without registering board info.
 * @ctrl: Controller to which this device is to be added.
 * Called when the device doesn't have an explicit client-driver to be probed,
 * or when the client-driver is a module installed dynamically.
 */
int slim_add_device(struct slim_controller *ctrl, struct slim_device *sbdev)
{
	sbdev->dev.bus = &slimbus_type;
	sbdev->dev.parent = ctrl->dev.parent;
	sbdev->dev.type = &slim_dev_type;
	sbdev->dev.driver = NULL;
	sbdev->ctrl = ctrl;
	slim_ctrl_get(ctrl);
	dev_set_name(&sbdev->dev, "%s", sbdev->name);
	mutex_init(&sbdev->sldev_reconf);
	INIT_LIST_HEAD(&sbdev->mark_define);
	INIT_LIST_HEAD(&sbdev->mark_suspend);
	INIT_LIST_HEAD(&sbdev->mark_removal);
	INIT_WORK(&sbdev->wd, slim_report_present);
	mutex_lock(&ctrl->m_ctrl);
	list_add_tail(&sbdev->dev_list, &ctrl->devs);
	mutex_unlock(&ctrl->m_ctrl);
	/* probe slave on this controller */
	return device_register(&sbdev->dev);
}
EXPORT_SYMBOL_GPL(slim_add_device);

struct sbi_boardinfo {
	struct list_head list;
	struct slim_boardinfo board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(slim_ctrl_list);
static DEFINE_MUTEX(board_lock);

/* If controller is not present, only add to boards list */
static void slim_match_ctrl_to_boardinfo(struct slim_controller *ctrl,
				struct slim_boardinfo *bi)
{
	int ret;
	if (ctrl->nr != bi->bus_num)
		return;

	ret = slim_add_device(ctrl, bi->slim_slave);
	if (ret != 0)
		dev_err(ctrl->dev.parent, "can't create new device for %s\n",
			bi->slim_slave->name);
}

/*
 * slim_register_board_info: Board-initialization routine.
 * @info: List of all devices on all controllers present on the board.
 * @n: number of entries.
 * This API enumerates the respective devices on the corresponding controllers.
 * Called from the board-init function.
 */
int slim_register_board_info(struct slim_boardinfo const *info, unsigned n)
{
	struct sbi_boardinfo *bi;
	int i;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct slim_controller *ctrl;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctrl, &slim_ctrl_list, list)
			slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info);
		mutex_unlock(&board_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(slim_register_board_info);
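
/*
 * Board-file sketch (illustrative only; the device name, enumeration address
 * and bus number are hypothetical):
 *
 *	static struct slim_device example_codec_dev = {
 *		.name = "example-codec",
 *		.e_addr = {0x00, 0x01, 0x60, 0x00, 0x17, 0x02},
 *	};
 *
 *	static struct slim_boardinfo example_slim_devs[] = {
 *		{
 *			.bus_num = 0,
 *			.slim_slave = &example_codec_dev,
 *		},
 *	};
 *
 *	slim_register_board_info(example_slim_devs,
 *				ARRAY_SIZE(example_slim_devs));
 *
 * If controller 0 is already up, the device is added immediately; otherwise it
 * is added when that controller calls slim_ctrl_add_boarddevs() below.
 */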

/*
 * slim_ctrl_add_boarddevs: Add devices registered by board-info
 * @ctrl: Controller to which these devices are to be added.
 * This API is called by the controller when it is up and running.
 * If devices on a controller were registered before the controller itself,
 * this makes sure they get probed once the controller is up.
 */
void slim_ctrl_add_boarddevs(struct slim_controller *ctrl)
{
	struct sbi_boardinfo *bi;
	mutex_lock(&board_lock);
	list_add_tail(&ctrl->list, &slim_ctrl_list);
	list_for_each_entry(bi, &board_list, list)
		slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info);
	mutex_unlock(&board_lock);
}
EXPORT_SYMBOL_GPL(slim_ctrl_add_boarddevs);

/*
 * slim_busnum_to_ctrl: Map bus number to controller
 * @busnum: Bus number
 * Returns the controller representing this bus number
 */
struct slim_controller *slim_busnum_to_ctrl(u32 bus_num)
{
	struct slim_controller *ctrl;
	mutex_lock(&board_lock);
	list_for_each_entry(ctrl, &slim_ctrl_list, list)
		if (bus_num == ctrl->nr) {
			mutex_unlock(&board_lock);
			return ctrl;
		}
	mutex_unlock(&board_lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(slim_busnum_to_ctrl);

static int slim_register_controller(struct slim_controller *ctrl)
{
	int ret = 0;

	/* Can't register until after driver model init */
	if (WARN_ON(!slimbus_type.p)) {
		ret = -EAGAIN;
		goto out_list;
	}

	dev_set_name(&ctrl->dev, "sb-%d", ctrl->nr);
	ctrl->dev.bus = &slimbus_type;
	ctrl->dev.type = &slim_ctrl_type;
	ctrl->num_dev = 0;
	if (!ctrl->min_cg)
		ctrl->min_cg = SLIM_MIN_CLK_GEAR;
	if (!ctrl->max_cg)
		ctrl->max_cg = SLIM_MAX_CLK_GEAR;
	mutex_init(&ctrl->m_ctrl);
	mutex_init(&ctrl->sched.m_reconf);
	ret = device_register(&ctrl->dev);
	if (ret)
		goto out_list;

	dev_dbg(&ctrl->dev, "Bus [%s] registered:dev:%p\n", ctrl->name,
							&ctrl->dev);

	if (ctrl->nports) {
		ctrl->ports = kzalloc(ctrl->nports * sizeof(struct slim_port),
					GFP_KERNEL);
		if (!ctrl->ports) {
			ret = -ENOMEM;
			goto err_port_failed;
		}
	}
	if (ctrl->nchans) {
		ctrl->chans = kzalloc(ctrl->nchans * sizeof(struct slim_ich),
					GFP_KERNEL);
		if (!ctrl->chans) {
			ret = -ENOMEM;
			goto err_chan_failed;
		}

		ctrl->sched.chc1 =
			kzalloc(ctrl->nchans * sizeof(struct slim_ich *),
				GFP_KERNEL);
		if (!ctrl->sched.chc1) {
			kfree(ctrl->chans);
			ret = -ENOMEM;
			goto err_chan_failed;
		}
		ctrl->sched.chc3 =
			kzalloc(ctrl->nchans * sizeof(struct slim_ich *),
				GFP_KERNEL);
		if (!ctrl->sched.chc3) {
			kfree(ctrl->sched.chc1);
			kfree(ctrl->chans);
			ret = -ENOMEM;
			goto err_chan_failed;
		}
	}
#ifdef DEBUG
	ctrl->sched.slots = kzalloc(SLIM_SL_PER_SUPERFRAME, GFP_KERNEL);
#endif
	init_completion(&ctrl->pause_comp);

	INIT_LIST_HEAD(&ctrl->devs);
	ctrl->wq = create_singlethread_workqueue(dev_name(&ctrl->dev));
	if (!ctrl->wq) {
		ret = -ENOMEM;
		goto err_workq_failed;
	}

	return 0;

err_workq_failed:
	kfree(ctrl->sched.chc3);
	kfree(ctrl->sched.chc1);
	kfree(ctrl->chans);
err_chan_failed:
	kfree(ctrl->ports);
err_port_failed:
	device_unregister(&ctrl->dev);
out_list:
	mutex_lock(&slim_lock);
	idr_remove(&ctrl_idr, ctrl->nr);
	mutex_unlock(&slim_lock);
	return ret;
}

/* slim_remove_device: Remove the effect of slim_add_device() */
void slim_remove_device(struct slim_device *sbdev)
{
	device_unregister(&sbdev->dev);
}
EXPORT_SYMBOL_GPL(slim_remove_device);

static void slim_ctrl_remove_device(struct slim_controller *ctrl,
				struct slim_boardinfo *bi)
{
	if (ctrl->nr == bi->bus_num)
		slim_remove_device(bi->slim_slave);
}

/*
 * slim_del_controller: Controller tear-down.
 * A controller added with the API below is torn down using this API.
 */
int slim_del_controller(struct slim_controller *ctrl)
{
	struct slim_controller *found;
	struct sbi_boardinfo *bi;

	/* First make sure that this bus was added */
	mutex_lock(&slim_lock);
	found = idr_find(&ctrl_idr, ctrl->nr);
	mutex_unlock(&slim_lock);
	if (found != ctrl)
		return -EINVAL;

	/* Remove all clients */
	mutex_lock(&board_lock);
	list_for_each_entry(bi, &board_list, list)
		slim_ctrl_remove_device(ctrl, &bi->board_info);
	mutex_unlock(&board_lock);

	init_completion(&ctrl->dev_released);
	device_unregister(&ctrl->dev);

	wait_for_completion(&ctrl->dev_released);
	list_del(&ctrl->list);
	destroy_workqueue(ctrl->wq);
	/* free bus id */
	mutex_lock(&slim_lock);
	idr_remove(&ctrl_idr, ctrl->nr);
	mutex_unlock(&slim_lock);

	kfree(ctrl->sched.chc1);
	kfree(ctrl->sched.chc3);
#ifdef DEBUG
	kfree(ctrl->sched.slots);
#endif
	kfree(ctrl->chans);
	kfree(ctrl->ports);

	return 0;
}
EXPORT_SYMBOL_GPL(slim_del_controller);

/*
 * slim_add_numbered_controller: Controller bring-up.
 * @ctrl: Controller to be registered.
 * A controller is registered with the framework using this API. ctrl->nr is
 * the desired number with which the slimbus framework registers the
 * controller. The function returns -EBUSY if the number is in use.
 */
int slim_add_numbered_controller(struct slim_controller *ctrl)
{
	int id;
	int status;

	if (ctrl->nr & ~MAX_ID_MASK)
		return -EINVAL;

retry:
	if (idr_pre_get(&ctrl_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	mutex_lock(&slim_lock);
	status = idr_get_new_above(&ctrl_idr, ctrl, ctrl->nr, &id);
	if (status == 0 && id != ctrl->nr) {
		status = -EAGAIN;
		idr_remove(&ctrl_idr, id);
	}
	mutex_unlock(&slim_lock);
	if (status == -EAGAIN)
		goto retry;

	if (status == 0)
		status = slim_register_controller(ctrl);
	return status;
}
EXPORT_SYMBOL_GPL(slim_add_numbered_controller);
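
/*
 * Controller bring-up sketch (illustrative only; the msm_* callback names are
 * hypothetical, and only fields referenced elsewhere in this file are shown):
 *
 *	static int msm_slim_probe(struct platform_device *pdev)
 *	{
 *		struct slim_controller *ctrl;
 *
 *		ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
 *		if (!ctrl)
 *			return -ENOMEM;
 *		ctrl->nr = 0;
 *		ctrl->nchans = 32;
 *		ctrl->nports = 8;
 *		ctrl->xfer_msg = msm_xfer_msg;
 *		ctrl->set_laddr = msm_set_laddr;
 *		ctrl->config_port = msm_config_port;
 *		ctrl->dev.parent = &pdev->dev;
 *		return slim_add_numbered_controller(ctrl);
 *	}
 *
 * Once the bus is clocked and running, the controller driver calls
 * slim_ctrl_add_boarddevs(ctrl) so that devices registered via board-info get
 * added and probed.
 */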

/*
 * slim_msg_response: Deliver a message response received from a device to the
 *	framework.
 * @ctrl: Controller handle
 * @reply: Reply received from the device
 * @len: Length of the reply
 * @tid: Transaction ID with which the framework can associate the reply.
 * Called by the controller to inform the framework about the response
 * received. This keeps the API asynchronous: the controller driver does not
 * need to manage another table mapping TIDs to buffers beyond the one managed
 * by the framework.
 */
void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 len)
{
	int i;
	struct slim_msg_txn *txn;

	mutex_lock(&ctrl->m_ctrl);
	txn = ctrl->txnt[tid];
	if (txn == NULL) {
		dev_err(&ctrl->dev, "Got response to invalid TID:%d, len:%d",
				tid, len);
		mutex_unlock(&ctrl->m_ctrl);
		return;
	}
	for (i = 0; i < len; i++)
		txn->rbuf[i] = reply[i];
	if (txn->comp)
		complete(txn->comp);
	ctrl->txnt[tid] = NULL;
	mutex_unlock(&ctrl->m_ctrl);
	kfree(txn);
}
EXPORT_SYMBOL_GPL(slim_msg_response);

static int slim_processtxn(struct slim_controller *ctrl, u8 dt, u16 mc, u16 ec,
			u8 mt, u8 *rbuf, const u8 *wbuf, u8 len, u8 mlen,
			struct completion *comp, u8 la, u8 *tid)
{
	u8 i = 0;
	int ret = 0;
	struct slim_msg_txn *txn = kmalloc(sizeof(struct slim_msg_txn),
					GFP_KERNEL);
	if (!txn)
		return -ENOMEM;
	if (tid) {
		mutex_lock(&ctrl->m_ctrl);
		for (i = 0; i < ctrl->last_tid; i++) {
			if (ctrl->txnt[i] == NULL)
				break;
		}
		if (i >= ctrl->last_tid) {
			if (ctrl->last_tid == 255) {
				mutex_unlock(&ctrl->m_ctrl);
				kfree(txn);
				return -ENOMEM;
			}
			ctrl->txnt = krealloc(ctrl->txnt,
					(i + 1) * sizeof(struct slim_msg_txn *),
					GFP_KERNEL);
			if (!ctrl->txnt) {
				mutex_unlock(&ctrl->m_ctrl);
				kfree(txn);
				return -ENOMEM;
			}
			ctrl->last_tid++;
		}
		ctrl->txnt[i] = txn;
		mutex_unlock(&ctrl->m_ctrl);
		txn->tid = i;
		*tid = i;
	}
	txn->mc = mc;
	txn->mt = mt;
	txn->dt = dt;
	txn->ec = ec;
	txn->la = la;
	txn->rbuf = rbuf;
	txn->wbuf = wbuf;
	txn->rl = mlen;
	txn->len = len;
	txn->comp = comp;

	ret = ctrl->xfer_msg(ctrl, txn);
	if (!tid)
		kfree(txn);
	return ret;
}

static int ctrl_getlogical_addr(struct slim_controller *ctrl, const u8 *eaddr,
				u8 e_len, u8 *entry)
{
	u8 i;
	for (i = 0; i < ctrl->num_dev; i++) {
		if (ctrl->addrt[i].valid &&
			memcmp(ctrl->addrt[i].eaddr, eaddr, e_len) == 0) {
			*entry = i;
			return 0;
		}
	}
	return -ENXIO;
}

/*
 * slim_assign_laddr: Assign logical address to an enumerated device.
 * @ctrl: Controller with which the device is enumerated.
 * @e_addr: 6-byte elemental address of the device.
 * @e_len: buffer length for e_addr
 * @laddr: Return logical address (if valid flag is false)
 * @valid: true if laddr holds a valid address that the controller wants to
 *	set for this enumeration address. Otherwise, the framework sets the
 *	index into the address table as the logical address.
 * Called by the controller in response to REPORT_PRESENT. The framework
 * assigns a logical address to this enumeration address.
 * Returns -EXFULL to indicate that all logical addresses are already taken.
 */
int slim_assign_laddr(struct slim_controller *ctrl, const u8 *e_addr,
				u8 e_len, u8 *laddr, bool valid)
{
	int ret;
	u8 i = 0;
	bool exists = false;
	struct slim_device *sbdev;
	mutex_lock(&ctrl->m_ctrl);
	/* already assigned */
	if (ctrl_getlogical_addr(ctrl, e_addr, e_len, &i) == 0) {
		*laddr = ctrl->addrt[i].laddr;
		exists = true;
	} else {
		if (ctrl->num_dev >= 254) {
			ret = -EXFULL;
			goto ret_assigned_laddr;
		}
		for (i = 0; i < ctrl->num_dev; i++) {
			if (ctrl->addrt[i].valid == false)
				break;
		}
		if (i == ctrl->num_dev) {
			ctrl->addrt = krealloc(ctrl->addrt,
					(ctrl->num_dev + 1) *
					sizeof(struct slim_addrt),
					GFP_KERNEL);
			if (!ctrl->addrt) {
				ret = -ENOMEM;
				goto ret_assigned_laddr;
			}
			ctrl->num_dev++;
		}
		memcpy(ctrl->addrt[i].eaddr, e_addr, e_len);
		ctrl->addrt[i].valid = true;
		/* Preferred address is index into table */
		if (!valid)
			*laddr = i;
	}

	ret = ctrl->set_laddr(ctrl, (const u8 *)&ctrl->addrt[i].eaddr, 6,
				*laddr);
	if (ret) {
		ctrl->addrt[i].valid = false;
		goto ret_assigned_laddr;
	}
	ctrl->addrt[i].laddr = *laddr;

	dev_dbg(&ctrl->dev, "setting slimbus l-addr:%x\n", *laddr);
ret_assigned_laddr:
	mutex_unlock(&ctrl->m_ctrl);
	if (exists || ret)
		return ret;

	pr_info("slimbus:%d laddr:0x%x, EAPC:0x%x:0x%x", ctrl->nr, *laddr,
				e_addr[1], e_addr[2]);
	mutex_lock(&ctrl->m_ctrl);
	list_for_each_entry(sbdev, &ctrl->devs, dev_list) {
		if (memcmp(sbdev->e_addr, e_addr, 6) == 0) {
			struct slim_driver *sbdrv;
			sbdev->laddr = *laddr;
			if (sbdev->dev.driver) {
				sbdrv = to_slim_driver(sbdev->dev.driver);
				if (sbdrv->device_up)
					queue_work(ctrl->wq, &sbdev->wd);
			}
			break;
		}
	}
	mutex_unlock(&ctrl->m_ctrl);
	return 0;
}
EXPORT_SYMBOL_GPL(slim_assign_laddr);

/*
 * slim_get_logical_addr: Return the logical address of a slimbus device.
 * @sb: client handle requesting the address.
 * @e_addr: Elemental address of the device.
 * @e_len: Length of e_addr
 * @laddr: output buffer to store the address
 * context: can sleep
 * -EINVAL is returned in case of invalid parameters, and -ENXIO is returned if
 *  the device with this elemental address is not found.
 */
int slim_get_logical_addr(struct slim_device *sb, const u8 *e_addr,
				u8 e_len, u8 *laddr)
{
	int ret = 0;
	u8 entry;
	struct slim_controller *ctrl = sb->ctrl;
	if (!ctrl || !laddr || !e_addr || e_len != 6)
		return -EINVAL;
	mutex_lock(&ctrl->m_ctrl);
	ret = ctrl_getlogical_addr(ctrl, e_addr, e_len, &entry);
	if (!ret)
		*laddr = ctrl->addrt[entry].laddr;
	mutex_unlock(&ctrl->m_ctrl);
	if (ret == -ENXIO && ctrl->get_laddr) {
		ret = ctrl->get_laddr(ctrl, e_addr, e_len, laddr);
		if (!ret)
			ret = slim_assign_laddr(ctrl, e_addr, e_len, laddr,
						true);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(slim_get_logical_addr);
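
/*
 * Typical use from a client driver's device_up() callback (illustrative only;
 * the callback name is hypothetical):
 *
 *	static void example_codec_device_up(struct slim_device *sbdev)
 *	{
 *		u8 laddr;
 *
 *		if (!slim_get_logical_addr(sbdev, sbdev->e_addr, 6, &laddr))
 *			pr_debug("codec enumerated at LA 0x%x\n", laddr);
 *	}
 */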

static int slim_ele_access_sanity(struct slim_ele_access *msg, int oper,
				u8 *rbuf, const u8 *wbuf, u8 len)
{
	if (!msg || msg->num_bytes > 16 || msg->start_offset + len > 0xC00)
		return -EINVAL;
	switch (oper) {
	case SLIM_MSG_MC_REQUEST_VALUE:
	case SLIM_MSG_MC_REQUEST_INFORMATION:
		if (rbuf == NULL)
			return -EINVAL;
		return 0;
	case SLIM_MSG_MC_CHANGE_VALUE:
	case SLIM_MSG_MC_CLEAR_INFORMATION:
		if (wbuf == NULL)
			return -EINVAL;
		return 0;
	case SLIM_MSG_MC_REQUEST_CHANGE_VALUE:
	case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION:
		if (rbuf == NULL || wbuf == NULL)
			return -EINVAL;
		return 0;
	default:
		return -EINVAL;
	}
}

static u16 slim_slicecodefromsize(u32 req)
{
	u8 codetosize[8] = {1, 2, 3, 4, 6, 8, 12, 16};
	if (req >= 8)
		return 0;
	else
		return codetosize[req];
}

static u16 slim_slicesize(u32 code)
{
	u8 sizetocode[16] = {0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7};
	if (code == 0)
		code = 1;
	if (code > 16)
		code = 16;
	return sizetocode[code - 1];
}
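
/*
 * Worked example (illustrative only): for a 4-byte value-element read,
 * slim_slicesize(4) returns slice code 3 (a 4-byte slice), and slim_xfer_msg()
 * below builds the element code as
 *	ec = (sl | (1 << 3)) | ((start_offset & 0xFFF) << 4)
 * so a read at element offset 0x400 with len = 4 uses ec = 0x400B.
 */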

/* Message APIs: unicast message APIs used by slimbus slave drivers */

/*
 * Message API access routines.
 * @sb: client handle requesting elemental message reads, writes.
 * @msg: Input structure for start-offset, number of bytes to read.
 * @rbuf: data buffer to be filled with values read.
 * @len: data buffer size
 * @wbuf: data buffer containing value/information to be written
 * context: can sleep
 * Returns:
 * -EINVAL: Invalid parameters
 * -ETIMEDOUT: If the controller could not complete the request. This may
 *  happen if the bus lines are not clocked, the controller is not powered on,
 *  or the slave with the given address is not enumerated/responding.
 */
int slim_request_val_element(struct slim_device *sb,
			struct slim_ele_access *msg, u8 *buf, u8 len)
{
	struct slim_controller *ctrl = sb->ctrl;
	if (!ctrl)
		return -EINVAL;
	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_VALUE, buf,
			NULL, len);
}
EXPORT_SYMBOL_GPL(slim_request_val_element);

int slim_request_inf_element(struct slim_device *sb,
			struct slim_ele_access *msg, u8 *buf, u8 len)
{
	struct slim_controller *ctrl = sb->ctrl;
	if (!ctrl)
		return -EINVAL;
	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_INFORMATION,
			buf, NULL, len);
}
EXPORT_SYMBOL_GPL(slim_request_inf_element);

int slim_change_val_element(struct slim_device *sb, struct slim_ele_access *msg,
				const u8 *buf, u8 len)
{
	struct slim_controller *ctrl = sb->ctrl;
	if (!ctrl)
		return -EINVAL;
	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CHANGE_VALUE, NULL, buf,
					len);
}
EXPORT_SYMBOL_GPL(slim_change_val_element);

int slim_clear_inf_element(struct slim_device *sb, struct slim_ele_access *msg,
				u8 *buf, u8 len)
{
	struct slim_controller *ctrl = sb->ctrl;
	if (!ctrl)
		return -EINVAL;
	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CLEAR_INFORMATION, NULL,
					buf, len);
}
EXPORT_SYMBOL_GPL(slim_clear_inf_element);

int slim_request_change_val_element(struct slim_device *sb,
					struct slim_ele_access *msg, u8 *rbuf,
					const u8 *wbuf, u8 len)
{
	struct slim_controller *ctrl = sb->ctrl;
	if (!ctrl)
		return -EINVAL;
	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_CHANGE_VALUE,
					rbuf, wbuf, len);
}
EXPORT_SYMBOL_GPL(slim_request_change_val_element);

int slim_request_clear_inf_element(struct slim_device *sb,
					struct slim_ele_access *msg, u8 *rbuf,
					const u8 *wbuf, u8 len)
{
	struct slim_controller *ctrl = sb->ctrl;
	if (!ctrl)
		return -EINVAL;
	return slim_xfer_msg(ctrl, sb, msg,
					SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION,
					rbuf, wbuf, len);
}
EXPORT_SYMBOL_GPL(slim_request_clear_inf_element);
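
/*
 * Element-access sketch (illustrative only; the 0x400 register offset and the
 * read-modify-write pattern are hypothetical). A NULL .comp makes the call
 * synchronous; the framework then waits on an on-stack completion:
 *
 *	struct slim_ele_access msg = {
 *		.start_offset = 0x400,
 *		.num_bytes = 1,
 *		.comp = NULL,
 *	};
 *	u8 val;
 *	int ret;
 *
 *	ret = slim_request_val_element(sbdev, &msg, &val, 1);
 *	if (!ret) {
 *		val |= 0x1;
 *		ret = slim_change_val_element(sbdev, &msg, &val, 1);
 *	}
 */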

/*
 * Broadcast message API:
 * call this API directly with sbdev = NULL.
 * For broadcast reads, make sure the buffers are big enough to hold
 * replies from all logical addresses.
 * Not all controllers support broadcast.
 */
int slim_xfer_msg(struct slim_controller *ctrl, struct slim_device *sbdev,
			struct slim_ele_access *msg, u16 mc, u8 *rbuf,
			const u8 *wbuf, u8 len)
{
	DECLARE_COMPLETION_ONSTACK(complete);
	int ret;
	u16 sl, cur;
	u16 ec;
	u8 tid, mlen = 6;

	ret = slim_ele_access_sanity(msg, mc, rbuf, wbuf, len);
	if (ret)
		goto xfer_err;

	sl = slim_slicesize(len);
	dev_dbg(&ctrl->dev, "SB xfer msg:os:%x, len:%d, MC:%x, sl:%x\n",
			msg->start_offset, len, mc, sl);

	cur = slim_slicecodefromsize(sl);
	ec = ((sl | (1 << 3)) | ((msg->start_offset & 0xFFF) << 4));

	if (wbuf)
		mlen += len;
	if (rbuf) {
		mlen++;
		if (!msg->comp)
			ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR,
				mc, ec, SLIM_MSG_MT_CORE, rbuf, wbuf, len, mlen,
				&complete, sbdev->laddr, &tid);
		else
			ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR,
				mc, ec, SLIM_MSG_MT_CORE, rbuf, wbuf, len, mlen,
				msg->comp, sbdev->laddr, &tid);
		/* sync read */
		if (!ret && !msg->comp) {
			ret = wait_for_completion_timeout(&complete, HZ);
			if (!ret) {
				struct slim_msg_txn *txn;
				dev_err(&ctrl->dev, "slimbus Read timed out");
				mutex_lock(&ctrl->m_ctrl);
				txn = ctrl->txnt[tid];
				/* Invalidate the transaction */
				ctrl->txnt[tid] = NULL;
				mutex_unlock(&ctrl->m_ctrl);
				kfree(txn);
				ret = -ETIMEDOUT;
			} else
				ret = 0;
		} else if (ret < 0 && !msg->comp) {
			struct slim_msg_txn *txn;
			dev_err(&ctrl->dev, "slimbus Read error");
			mutex_lock(&ctrl->m_ctrl);
			txn = ctrl->txnt[tid];
			/* Invalidate the transaction */
			ctrl->txnt[tid] = NULL;
			mutex_unlock(&ctrl->m_ctrl);
			kfree(txn);
		}

	} else
		ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR, mc, ec,
				SLIM_MSG_MT_CORE, rbuf, wbuf, len, mlen,
				NULL, sbdev->laddr, NULL);
xfer_err:
	return ret;
}
EXPORT_SYMBOL_GPL(slim_xfer_msg);

/*
 * slim_alloc_mgrports: Allocate port on manager side.
 * @sb: device/client handle.
 * @req: Port request type.
 * @nports: Number of ports requested
 * @rh: output buffer to store the port handles
 * @hsz: size of buffer storing handles
 * context: can sleep
 * This port is typically used by SW, e.g. when a client driver wants to
 * receive some data from audio codec HW over a data channel.
 * A port allocated using this API will be used to receive the data.
 * If half-duplex ports are requested, two adjacent ports are allocated for
 * 1 half-duplex port. So the handle-buffer size should be twice the number
 * of half-duplex ports to be allocated.
 * -EDQUOT is returned if all ports are in use.
 */
int slim_alloc_mgrports(struct slim_device *sb, enum slim_port_req req,
				int nports, u32 *rh, int hsz)
{
	int i, j;
	int ret = -EINVAL;
	int nphysp = nports;
	struct slim_controller *ctrl = sb->ctrl;

	if (!rh || !ctrl)
		return -EINVAL;
	if (req == SLIM_REQ_HALF_DUP)
		nphysp *= 2;
	if (hsz/sizeof(u32) < nphysp)
		return -EINVAL;
	mutex_lock(&ctrl->m_ctrl);

	for (i = 0; i < ctrl->nports; i++) {
		bool multiok = true;
		if (ctrl->ports[i].state != SLIM_P_FREE)
			continue;
		/* Start half duplex channel at even port */
		if (req == SLIM_REQ_HALF_DUP && (i % 2))
			continue;
		/* Allocate ports contiguously for multi-ch */
		if (ctrl->nports < (i + nphysp)) {
			i = ctrl->nports;
			break;
		}
		if (req == SLIM_REQ_MULTI_CH) {
			multiok = true;
			for (j = i; j < i + nphysp; j++) {
				if (ctrl->ports[j].state != SLIM_P_FREE) {
					multiok = false;
					break;
				}
			}
			if (!multiok)
				continue;
		}
		break;
	}
	if (i >= ctrl->nports) {
		ret = -EDQUOT;
		goto alloc_err;
	}
	for (j = i; j < i + nphysp; j++) {
		ctrl->ports[j].state = SLIM_P_UNCFG;
		ctrl->ports[j].req = req;
		if (req == SLIM_REQ_HALF_DUP && (j % 2))
			ctrl->ports[j].flow = SLIM_SINK;
		else
			ctrl->ports[j].flow = SLIM_SRC;
		ret = ctrl->config_port(ctrl, j);
		if (ret) {
			for (; j >= i; j--)
				ctrl->ports[j].state = SLIM_P_FREE;
			goto alloc_err;
		}
		*rh++ = SLIM_PORT_HDL(SLIM_LA_MANAGER, 0, j);
	}
alloc_err:
	mutex_unlock(&ctrl->m_ctrl);
	return ret;
}
EXPORT_SYMBOL_GPL(slim_alloc_mgrports);
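
/*
 * Manager-port allocation sketch (illustrative only; SLIM_REQ_DEFAULT is an
 * assumed request-type enumerator, as only SLIM_REQ_HALF_DUP and
 * SLIM_REQ_MULTI_CH appear in this file):
 *
 *	u32 port_hdl;
 *	int ret;
 *
 *	ret = slim_alloc_mgrports(sbdev, SLIM_REQ_DEFAULT, 1,
 *				&port_hdl, sizeof(port_hdl));
 *
 * The returned handle encodes the manager's logical address, flow and port
 * number (see SLIM_PORT_HDL above) and is later passed to
 * slim_connect_src()/slim_connect_sink() and slim_port_xfer().
 */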

/* Deallocate the port(s) allocated using the API above */
int slim_dealloc_mgrports(struct slim_device *sb, u32 *hdl, int nports)
{
	int i;
	struct slim_controller *ctrl = sb->ctrl;

	if (!ctrl || !hdl)
		return -EINVAL;

	mutex_lock(&ctrl->m_ctrl);

	for (i = 0; i < nports; i++) {
		u8 pn;
		pn = SLIM_HDL_TO_PORT(hdl[i]);
		if (ctrl->ports[pn].state == SLIM_P_CFG) {
			int j;
			dev_err(&ctrl->dev, "Can't dealloc connected port:%d",
					i);
			for (j = i - 1; j >= 0; j--) {
				pn = SLIM_HDL_TO_PORT(hdl[j]);
				ctrl->ports[pn].state = SLIM_P_UNCFG;
			}
			mutex_unlock(&ctrl->m_ctrl);
			return -EISCONN;
		}
		ctrl->ports[pn].state = SLIM_P_FREE;
	}
	mutex_unlock(&ctrl->m_ctrl);
	return 0;
}
EXPORT_SYMBOL_GPL(slim_dealloc_mgrports);

/*
 * slim_get_slaveport: Get slave port handle
 * @la: slave device logical address.
 * @idx: port index at slave
 * @rh: return handle
 * @flw: Flow type (source or destination)
 * This API only returns a slave port's representation as expected by the
 * slimbus driver. This port is not managed by the slimbus driver. The caller
 * is expected to have visibility of this port since it is a device-port.
 */
int slim_get_slaveport(u8 la, int idx, u32 *rh, enum slim_port_flow flw)
{
	if (rh == NULL)
		return -EINVAL;
	*rh = SLIM_PORT_HDL(la, flw, idx);
	return 0;
}
EXPORT_SYMBOL_GPL(slim_get_slaveport);

static int connect_port_ch(struct slim_controller *ctrl, u8 ch, u32 ph,
				enum slim_port_flow flow)
{
	int ret;
	u16 mc;
	u8 buf[2];
	u32 la = SLIM_HDL_TO_LA(ph);
	u8 pn = (u8)SLIM_HDL_TO_PORT(ph);

	if (flow == SLIM_SRC)
		mc = SLIM_MSG_MC_CONNECT_SOURCE;
	else
		mc = SLIM_MSG_MC_CONNECT_SINK;
	buf[0] = pn;
	buf[1] = ctrl->chans[ch].chan;
	if (la == SLIM_LA_MANAGER)
		ctrl->ports[pn].flow = flow;
	ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR, mc, 0,
				SLIM_MSG_MT_CORE, NULL, buf, 2, 6, NULL, la,
				NULL);
	if (!ret && la == SLIM_LA_MANAGER)
		ctrl->ports[pn].state = SLIM_P_CFG;
	return ret;
}

static int disconnect_port_ch(struct slim_controller *ctrl, u32 ph)
{
	int ret;
	u16 mc;
	u32 la = SLIM_HDL_TO_LA(ph);
	u8 pn = (u8)SLIM_HDL_TO_PORT(ph);

	mc = SLIM_MSG_MC_DISCONNECT_PORT;
	ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR, mc, 0,
				SLIM_MSG_MT_CORE, NULL, &pn, 1, 5,
				NULL, la, NULL);
	if (ret)
		return ret;
	if (la == SLIM_LA_MANAGER)
		ctrl->ports[pn].state = SLIM_P_UNCFG;
	return 0;
}

/*
 * slim_connect_src: Connect source port to channel.
 * @sb: client handle
 * @srch: source handle to be connected to this channel
 * @chanh: Channel with which the port needs to be associated.
 * Per slimbus specification, a channel may have 1 source port.
 * Channel specified in chanh needs to be allocated first.
 * Returns -EALREADY if a source is already configured for this channel.
 * Returns -ENOTCONN if the channel is not allocated.
 */
int slim_connect_src(struct slim_device *sb, u32 srch, u16 chanh)
{
	struct slim_controller *ctrl = sb->ctrl;
	int ret;
	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
	struct slim_ich *slc = &ctrl->chans[chan];
	enum slim_port_flow flow = SLIM_HDL_TO_FLOW(srch);

	if (flow != SLIM_SRC)
		return -EINVAL;

	mutex_lock(&ctrl->m_ctrl);

	if (slc->state == SLIM_CH_FREE) {
		ret = -ENOTCONN;
		goto connect_src_err;
	}
	/*
	 * Once the channel is removed, its ports can be considered
	 * disconnected, so its ports can be reassigned. The source port is
	 * zeroed when the channel is deallocated.
	 */
	if (slc->srch) {
		ret = -EALREADY;
		goto connect_src_err;
	}

	ret = connect_port_ch(ctrl, chan, srch, SLIM_SRC);

	if (!ret)
		slc->srch = srch;

connect_src_err:
	mutex_unlock(&ctrl->m_ctrl);
	return ret;
}
EXPORT_SYMBOL_GPL(slim_connect_src);

/*
 * slim_connect_sink: Connect sink port(s) to channel.
 * @sb: client handle
 * @sinkh: sink handle(s) to be connected to this channel
 * @nsink: number of sinks
 * @chanh: Channel with which the ports need to be associated.
 * Per slimbus specification, a channel may have multiple sink-ports.
 * Channel specified in chanh needs to be allocated first.
 * Returns -EALREADY if the sink is already configured for this channel.
 * Returns -ENOTCONN if the channel is not allocated.
 */
int slim_connect_sink(struct slim_device *sb, u32 *sinkh, int nsink, u16 chanh)
{
	struct slim_controller *ctrl = sb->ctrl;
	int j;
	int ret = 0;
	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
	struct slim_ich *slc = &ctrl->chans[chan];

	if (!sinkh || !nsink)
		return -EINVAL;

	mutex_lock(&ctrl->m_ctrl);

	/*
	 * Once the channel is removed, its ports can be considered
	 * disconnected, so its ports can be reassigned. Sink ports are freed
	 * when the channel is deallocated.
	 */
	if (slc->state == SLIM_CH_FREE) {
		ret = -ENOTCONN;
		goto connect_sink_err;
	}

	for (j = 0; j < nsink; j++) {
		enum slim_port_flow flow = SLIM_HDL_TO_FLOW(sinkh[j]);
		if (flow != SLIM_SINK)
			ret = -EINVAL;
		else
			ret = connect_port_ch(ctrl, chan, sinkh[j], SLIM_SINK);
		if (ret) {
			for (j = j - 1; j >= 0; j--)
				disconnect_port_ch(ctrl, sinkh[j]);
			goto connect_sink_err;
		}
	}

	slc->sinkh = krealloc(slc->sinkh, (sizeof(u32) * (slc->nsink + nsink)),
				GFP_KERNEL);
	if (!slc->sinkh) {
		ret = -ENOMEM;
		for (j = 0; j < nsink; j++)
			disconnect_port_ch(ctrl, sinkh[j]);
		goto connect_sink_err;
	}

	memcpy(slc->sinkh + slc->nsink, sinkh, (sizeof(u32) * nsink));
	slc->nsink += nsink;

connect_sink_err:
	mutex_unlock(&ctrl->m_ctrl);
	return ret;
}
EXPORT_SYMBOL_GPL(slim_connect_sink);
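
/*
 * Channel setup sketch (illustrative only; port index 0 on the codec is
 * hypothetical, mgr_port would come from slim_alloc_mgrports(), and error
 * handling is omitted for brevity):
 *
 *	u16 chanh;
 *	u32 codec_src, mgr_port;
 *
 *	slim_alloc_ch(sbdev, &chanh);
 *	slim_get_slaveport(sbdev->laddr, 0, &codec_src, SLIM_SRC);
 *	slim_connect_src(sbdev, codec_src, chanh);
 *	slim_connect_sink(sbdev, &mgr_port, 1, chanh);
 *
 * The channel still has to be defined and activated through the channel
 * scheduling APIs (see slim_define_ch() below) before data flows.
 */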
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001309
1310/*
1311 * slim_disconnect_ports: Disconnect port(s) from channel
1312 * @sb: client handle
1313 * @ph: ports to be disconnected
1314 * @nph: number of ports.
1315 * Disconnects ports from a channel.
1316 */
1317int slim_disconnect_ports(struct slim_device *sb, u32 *ph, int nph)
1318{
1319 struct slim_controller *ctrl = sb->ctrl;
Sagar Dharia45ee38a2011-08-03 17:01:31 -06001320 int i;
Sagar Dharia33f34442011-08-08 16:22:03 -06001321
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001322 mutex_lock(&ctrl->m_ctrl);
Sagar Dharia33f34442011-08-08 16:22:03 -06001323
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001324 for (i = 0; i < nph; i++)
1325 disconnect_port_ch(ctrl, ph[i]);
1326 mutex_unlock(&ctrl->m_ctrl);
1327 return 0;
1328}
1329EXPORT_SYMBOL_GPL(slim_disconnect_ports);
1330
1331/*
1332 * slim_port_xfer: Schedule buffer to be transferred/received using port-handle.
1333 * @sb: client handle
1334 * @ph: port-handle
1335 * @iobuf: buffer to be transferred or populated
1336 * @len: buffer size.
1337 * @comp: completion signal to indicate transfer done or error.
1338 * context: can sleep
1339 * Returns number of bytes transferred/received if used synchronously.
1340 * Will return 0 if used asynchronously.
1341 * Client will call slim_port_get_xfer_status to get error and/or number of
1342 * bytes transferred if used asynchronously.
1343 */
1344int slim_port_xfer(struct slim_device *sb, u32 ph, u8 *iobuf, u32 len,
1345 struct completion *comp)
1346{
1347 struct slim_controller *ctrl = sb->ctrl;
1348 u8 pn = SLIM_HDL_TO_PORT(ph);
1349 dev_dbg(&ctrl->dev, "port xfer: num:%d", pn);
1350 return ctrl->port_xfer(ctrl, pn, iobuf, len, comp);
1351}
1352EXPORT_SYMBOL_GPL(slim_port_xfer);
1353
1354/*
1355 * slim_port_get_xfer_status: Poll for port transfers, or get transfer status
1356 * after completion is done.
1357 * @sb: client handle
1358 * @ph: port-handle
1359 * @done_buf: return pointer (iobuf from slim_port_xfer) which is processed.
1360 * @done_len: Number of bytes transferred.
1361 * This can be called when port_xfer complition is signalled.
1362 * The API will return port transfer error (underflow/overflow/disconnect)
1363 * and/or done_len will reflect number of bytes transferred. Note that
1364 * done_len may be valid even if port error (overflow/underflow) has happened.
1365 * e.g. If the transfer was scheduled with a few bytes to be transferred and
1366 * client has not supplied more data to be transferred, done_len will indicate
1367 * number of bytes transferred with underflow error. To avoid frequent underflow
1368 * errors, multiple transfers can be queued (e.g. ping-pong buffers) so that
1369 * channel has data to be transferred even if client is not ready to transfer
1370 * data all the time. done_buf will indicate address of the last buffer
1371 * processed from the multiple transfers.
1372 */
1373enum slim_port_err slim_port_get_xfer_status(struct slim_device *sb, u32 ph,
1374 u8 **done_buf, u32 *done_len)
1375{
1376 struct slim_controller *ctrl = sb->ctrl;
1377 u8 pn = SLIM_HDL_TO_PORT(ph);
1378 u32 la = SLIM_HDL_TO_LA(ph);
1379 enum slim_port_err err;
1380 dev_dbg(&ctrl->dev, "get status port num:%d", pn);
1381 /*
1382 * Framework only has insight into ports managed by ported device
1383 * used by the manager and not slave
1384 */
1385 if (la != SLIM_LA_MANAGER) {
1386 if (done_buf)
1387 *done_buf = NULL;
1388 if (done_len)
1389 *done_len = 0;
1390 return SLIM_P_NOT_OWNED;
1391 }
1392 err = ctrl->port_xfer_status(ctrl, pn, done_buf, done_len);
1393 if (err == SLIM_P_INPROGRESS)
1394 err = ctrl->ports[pn].err;
1395 return err;
1396}
1397EXPORT_SYMBOL_GPL(slim_port_get_xfer_status);
1398
1399static void slim_add_ch(struct slim_controller *ctrl, struct slim_ich *slc)
1400{
1401 struct slim_ich **arr;
1402 int i, j;
1403 int *len;
1404 int sl = slc->seglen << slc->rootexp;
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001405 /* Channel is already active and other end is transmitting data */
1406 if (slc->state >= SLIM_CH_ACTIVE)
1407 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001408 if (slc->coeff == SLIM_COEFF_1) {
1409 arr = ctrl->sched.chc1;
1410 len = &ctrl->sched.num_cc1;
1411 } else {
1412 arr = ctrl->sched.chc3;
1413 len = &ctrl->sched.num_cc3;
1414 sl *= 3;
1415 }
1416
1417 *len += 1;
1418
1419 /* Insert the channel based on rootexp and seglen */
1420 for (i = 0; i < *len - 1; i++) {
1421 /*
1422 * Primary key: exp low to high.
1423 * Secondary key: seglen: high to low
1424 */
1425 if ((slc->rootexp > arr[i]->rootexp) ||
1426 ((slc->rootexp == arr[i]->rootexp) &&
1427 (slc->seglen < arr[i]->seglen)))
1428 continue;
1429 else
1430 break;
1431 }
1432 for (j = *len - 1; j > i; j--)
1433 arr[j] = arr[j - 1];
1434 arr[i] = slc;
1435 ctrl->sched.usedslots += sl;
1436
1437 return;
1438}
1439
1440static int slim_remove_ch(struct slim_controller *ctrl, struct slim_ich *slc)
1441{
1442 struct slim_ich **arr;
1443 int i;
1444 u32 la, ph;
1445 int *len;
1446 if (slc->coeff == SLIM_COEFF_1) {
1447 arr = ctrl->sched.chc1;
1448 len = &ctrl->sched.num_cc1;
1449 } else {
1450 arr = ctrl->sched.chc3;
1451 len = &ctrl->sched.num_cc3;
1452 }
1453
1454 for (i = 0; i < *len; i++) {
1455 if (arr[i] == slc)
1456 break;
1457 }
1458 if (i >= *len)
1459 return -EXFULL;
1460 for (; i < *len - 1; i++)
1461 arr[i] = arr[i + 1];
1462 *len -= 1;
1463 arr[*len] = NULL;
1464
1465 slc->state = SLIM_CH_ALLOCATED;
1466 slc->newintr = 0;
1467 slc->newoff = 0;
Sagar Dharia29f35f02011-10-01 20:37:50 -06001468 for (i = 0; i < slc->nsink; i++) {
1469 ph = slc->sinkh[i];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001470 la = SLIM_HDL_TO_LA(ph);
1471 /*
1472 * For ports managed by manager's ported device, no need to send
1473 * disconnect. It is client's responsibility to call disconnect
1474 * on ports owned by the slave device
1475 */
1476 if (la == SLIM_LA_MANAGER)
1477 ctrl->ports[SLIM_HDL_TO_PORT(ph)].state = SLIM_P_UNCFG;
1478 }
1479
Sagar Dharia29f35f02011-10-01 20:37:50 -06001480 ph = slc->srch;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001481 la = SLIM_HDL_TO_LA(ph);
1482 if (la == SLIM_LA_MANAGER)
1483 ctrl->ports[SLIM_HDL_TO_PORT(ph)].state = SLIM_P_UNCFG;
1484
Sagar Dharia29f35f02011-10-01 20:37:50 -06001485 kfree(slc->sinkh);
1486 slc->sinkh = NULL;
1487 slc->srch = 0;
1488 slc->nsink = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001489 return 0;
1490}
1491
1492static u32 slim_calc_prrate(struct slim_controller *ctrl, struct slim_ch *prop)
1493{
1494 u32 rate = 0, rate4k = 0, rate11k = 0;
1495 u32 exp = 0;
1496 u32 pr = 0;
1497 bool exact = true;
1498 bool done = false;
1499 enum slim_ch_rate ratefam;
1500
1501 if (prop->prot >= SLIM_PUSH)
1502 return 0;
1503 if (prop->baser == SLIM_RATE_1HZ) {
1504 rate = prop->ratem / 4000;
1505 rate4k = rate;
1506 if (rate * 4000 == prop->ratem)
1507 ratefam = SLIM_RATE_4000HZ;
1508 else {
1509 rate = prop->ratem / 11025;
1510 rate11k = rate;
1511 if (rate * 11025 == prop->ratem)
1512 ratefam = SLIM_RATE_11025HZ;
1513 else
1514 ratefam = SLIM_RATE_1HZ;
1515 }
1516 } else {
1517 ratefam = prop->baser;
1518 rate = prop->ratem;
1519 }
1520 if (ratefam == SLIM_RATE_1HZ) {
1521 exact = false;
1522 if ((rate4k + 1) * 4000 < (rate11k + 1) * 11025) {
1523 rate = rate4k + 1;
1524 ratefam = SLIM_RATE_4000HZ;
1525 } else {
1526 rate = rate11k + 1;
1527 ratefam = SLIM_RATE_11025HZ;
1528 }
1529 }
1530 /* covert rate to coeff-exp */
1531 while (!done) {
1532 while ((rate & 0x1) != 0x1) {
1533 rate >>= 1;
1534 exp++;
1535 }
1536 if (rate > 3) {
1537 /* roundup if not exact */
1538 rate++;
1539 exact = false;
1540 } else
1541 done = true;
1542 }
1543 if (ratefam == SLIM_RATE_4000HZ) {
1544 if (rate == 1)
1545 pr = 0x10;
1546 else {
1547 pr = 0;
1548 exp++;
1549 }
1550 } else {
1551 pr = 8;
1552 exp++;
1553 }
1554 if (exp <= 7) {
1555 pr |= exp;
1556 if (exact)
1557 pr |= 0x80;
1558 } else
1559 pr = 0;
1560 return pr;
1561}
1562
1563static int slim_nextdefine_ch(struct slim_device *sb, u8 chan)
1564{
1565 struct slim_controller *ctrl = sb->ctrl;
1566 u32 chrate = 0;
1567 u32 exp = 0;
1568 u32 coeff = 0;
1569 bool exact = true;
1570 bool done = false;
1571 int ret = 0;
1572 struct slim_ich *slc = &ctrl->chans[chan];
1573 struct slim_ch *prop = &slc->prop;
1574
1575 slc->prrate = slim_calc_prrate(ctrl, prop);
1576 dev_dbg(&ctrl->dev, "ch:%d, chan PR rate:%x\n", chan, slc->prrate);
1577 if (prop->baser == SLIM_RATE_4000HZ)
1578 chrate = 4000 * prop->ratem;
1579 else if (prop->baser == SLIM_RATE_11025HZ)
1580 chrate = 11025 * prop->ratem;
1581 else
1582 chrate = prop->ratem;
1583 /* max allowed sample freq = 768 seg/frame */
1584 if (chrate > 3600000)
1585 return -EDQUOT;
1586 if (prop->baser == SLIM_RATE_4000HZ &&
1587 ctrl->a_framer->superfreq == 4000)
1588 coeff = prop->ratem;
1589 else if (prop->baser == SLIM_RATE_11025HZ &&
1590 ctrl->a_framer->superfreq == 3675)
1591 coeff = 3 * prop->ratem;
1592 else {
1593 u32 tempr = 0;
1594 tempr = chrate * SLIM_CL_PER_SUPERFRAME_DIV8;
1595 coeff = tempr / ctrl->a_framer->rootfreq;
1596 if (coeff * ctrl->a_framer->rootfreq != tempr) {
1597 coeff++;
1598 exact = false;
1599 }
1600 }
1601
1602 /* convert coeff to coeff-exponent */
1603 exp = 0;
1604 while (!done) {
1605 while ((coeff & 0x1) != 0x1) {
1606 coeff >>= 1;
1607 exp++;
1608 }
1609 if (coeff > 3) {
1610 coeff++;
1611 exact = false;
1612 } else
1613 done = true;
1614 }
1615 if (prop->prot == SLIM_HARD_ISO && !exact)
1616 return -EPROTONOSUPPORT;
1617 else if (prop->prot == SLIM_AUTO_ISO) {
1618 if (exact)
1619 prop->prot = SLIM_HARD_ISO;
1620 else {
1621 /* Push-Pull not supported for now */
1622 return -EPROTONOSUPPORT;
1623 }
1624 }
1625 slc->rootexp = exp;
1626 slc->seglen = prop->sampleszbits/SLIM_CL_PER_SL;
1627 if (prop->prot != SLIM_HARD_ISO)
1628 slc->seglen++;
1629 if (prop->prot >= SLIM_EXT_SMPLX)
1630 slc->seglen++;
1631 /* convert coeff to enum */
1632 if (coeff == 1) {
1633 if (exp > 9)
1634 ret = -EIO;
1635 coeff = SLIM_COEFF_1;
1636 } else {
1637 if (exp > 8)
1638 ret = -EIO;
1639 coeff = SLIM_COEFF_3;
1640 }
1641 slc->coeff = coeff;
1642
1643 return ret;
1644}
1645
1646/*
1647 * slim_alloc_ch: Allocate a slimbus channel and return its handle.
1648 * @sb: client handle.
1649 * @chanh: return channel handle
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001650 * Slimbus channels are limited to 256 per specification.
1651 * -EXFULL is returned if all channels are in use.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001652 * Although slimbus specification supports 256 channels, a controller may not
1653 * support that many channels.
1654 */
1655int slim_alloc_ch(struct slim_device *sb, u16 *chanh)
1656{
1657 struct slim_controller *ctrl = sb->ctrl;
1658 u16 i;
1659
1660 if (!ctrl)
1661 return -EINVAL;
1662 mutex_lock(&ctrl->m_ctrl);
1663 for (i = 0; i < ctrl->nchans; i++) {
1664 if (ctrl->chans[i].state == SLIM_CH_FREE)
1665 break;
1666 }
1667 if (i >= ctrl->nchans) {
1668 mutex_unlock(&ctrl->m_ctrl);
1669 return -EXFULL;
1670 }
1671 *chanh = i;
1672 ctrl->chans[i].nextgrp = 0;
1673 ctrl->chans[i].state = SLIM_CH_ALLOCATED;
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001674 ctrl->chans[i].chan = (u8)(ctrl->reserved + i);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001675
1676 mutex_unlock(&ctrl->m_ctrl);
1677 return 0;
1678}
1679EXPORT_SYMBOL_GPL(slim_alloc_ch);
1680
1681/*
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001682 * slim_query_ch: Get reference-counted handle for a channel number. Every
1683 * channel is reference counted by upto one as producer and the others as
1684 * consumer)
1685 * @sb: client handle
1686 * @chan: slimbus channel number
1687 * @chanh: return channel handle
1688 * If request channel number is not in use, it is allocated, and reference
1689 * count is set to one. If the channel was was already allocated, this API
1690 * will return handle to that channel and reference count is incremented.
1691 * -EXFULL is returned if all channels are in use
1692 */
1693int slim_query_ch(struct slim_device *sb, u8 ch, u16 *chanh)
1694{
1695 struct slim_controller *ctrl = sb->ctrl;
1696 u16 i, j;
1697 int ret = 0;
1698 if (!ctrl || !chanh)
1699 return -EINVAL;
1700 mutex_lock(&ctrl->m_ctrl);
1701 /* start with modulo number */
1702 i = ch % ctrl->nchans;
1703
1704 for (j = 0; j < ctrl->nchans; j++) {
1705 if (ctrl->chans[i].chan == ch) {
1706 *chanh = i;
1707 ctrl->chans[i].ref++;
1708 if (ctrl->chans[i].state == SLIM_CH_FREE)
1709 ctrl->chans[i].state = SLIM_CH_ALLOCATED;
1710 goto query_out;
1711 }
1712 i = (i + 1) % ctrl->nchans;
1713 }
1714
1715 /* Channel not in table yet */
1716 ret = -EXFULL;
1717 for (j = 0; j < ctrl->nchans; j++) {
1718 if (ctrl->chans[i].state == SLIM_CH_FREE) {
1719 ctrl->chans[i].state =
1720 SLIM_CH_ALLOCATED;
1721 *chanh = i;
1722 ctrl->chans[i].ref++;
1723 ctrl->chans[i].chan = ch;
1724 ctrl->chans[i].nextgrp = 0;
1725 ret = 0;
1726 break;
1727 }
1728 i = (i + 1) % ctrl->nchans;
1729 }
1730query_out:
1731 mutex_unlock(&ctrl->m_ctrl);
1732 dev_dbg(&ctrl->dev, "query ch:%d,hdl:%d,ref:%d,ret:%d",
1733 ch, i, ctrl->chans[i].ref, ret);
1734 return ret;
1735}
1736EXPORT_SYMBOL_GPL(slim_query_ch);
1737
1738/*
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001739 * slim_dealloc_ch: Deallocate channel allocated using the API above
1740 * -EISCONN is returned if the channel is tried to be deallocated without
1741 * being removed first.
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001742 * -ENOTCONN is returned if deallocation is tried on a channel that's not
1743 * allocated.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001744 */
1745int slim_dealloc_ch(struct slim_device *sb, u16 chanh)
1746{
1747 struct slim_controller *ctrl = sb->ctrl;
Sagar Dharia29f35f02011-10-01 20:37:50 -06001748 u8 chan = SLIM_HDL_TO_CHIDX(chanh);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001749 struct slim_ich *slc = &ctrl->chans[chan];
1750 if (!ctrl)
1751 return -EINVAL;
1752
1753 mutex_lock(&ctrl->m_ctrl);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001754 if (slc->state == SLIM_CH_FREE) {
1755 mutex_unlock(&ctrl->m_ctrl);
1756 return -ENOTCONN;
1757 }
1758 if (slc->ref > 1) {
1759 slc->ref--;
1760 mutex_unlock(&ctrl->m_ctrl);
1761 dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
1762 slc->chan, chanh, slc->ref);
1763 return 0;
1764 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001765 if (slc->state >= SLIM_CH_PENDING_ACTIVE) {
1766 dev_err(&ctrl->dev, "Channel:%d should be removed first", chan);
1767 mutex_unlock(&ctrl->m_ctrl);
1768 return -EISCONN;
1769 }
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001770 slc->ref--;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001771 slc->state = SLIM_CH_FREE;
1772 mutex_unlock(&ctrl->m_ctrl);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001773 dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
1774 slc->chan, chanh, slc->ref);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001775 return 0;
1776}
1777EXPORT_SYMBOL_GPL(slim_dealloc_ch);
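/*
 * Illustrative sketch (not part of this driver): releasing the handle obtained
 * above once the channel has been removed from the bus. The error handling
 * shown is a hypothetical client's choice, not a requirement of the API:
 *
 *	int err = slim_dealloc_ch(sb, rx_chanh);
 *	if (err == -EISCONN)
 *		pr_err("channel still active; remove it via slim_control_ch\n");
 *
 * A handle holding multiple references only drops one reference per call; the
 * table entry is freed when the last reference goes away.
 */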
1778
1779/*
1780 * slim_get_ch_state: Channel state.
 1781 * This API returns the channel's current state (active, suspended, inactive, etc.)
1782 */
1783enum slim_ch_state slim_get_ch_state(struct slim_device *sb, u16 chanh)
1784{
Sagar Dharia29f35f02011-10-01 20:37:50 -06001785 u8 chan = SLIM_HDL_TO_CHIDX(chanh);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001786 struct slim_ich *slc = &sb->ctrl->chans[chan];
1787 return slc->state;
1788}
1789EXPORT_SYMBOL_GPL(slim_get_ch_state);
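/*
 * Illustrative sketch (not part of this driver): a client can poll the channel
 * state before deciding how to tear a channel down, e.g.:
 *
 *	enum slim_ch_state st = slim_get_ch_state(sb, rx_chanh);
 *	if (st == SLIM_CH_ACTIVE)
 *		pr_debug("channel still streaming; remove before dealloc\n");
 *
 * rx_chanh is the handle from the earlier slim_query_ch() sketch.
 */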
1790
1791/*
 1792 * slim_define_ch: Define a channel. This API defines channel parameters for a
 1793 * given channel.
 1794 * @sb: client handle.
 1795 * @prop: slim_ch structure with the channel parameters to be used.
 1796 * @chanh: list of channels to be defined.
 1797 * @nchan: number of channels in the group (1 if grp is false)
 1798 * @grp: true if the channels are to be grouped
 1799 * @grph: returns the group handle if grouping of channels is desired.
 1800 * Channels can be grouped if multiple channels use the same parameters (e.g.
 1801 * 5.1 audio has 6 channels with the same parameters; they are all grouped and
 1802 * given one handle to keep things simple and avoid repeated API calls).
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001803 * -EISCONN is returned if the channel is already used with different parameters.
1804 * -ENXIO is returned if the channel is not yet allocated.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001805 */
1806int slim_define_ch(struct slim_device *sb, struct slim_ch *prop, u16 *chanh,
1807 u8 nchan, bool grp, u16 *grph)
1808{
1809 struct slim_controller *ctrl = sb->ctrl;
1810 int i, ret = 0;
1811
1812 if (!ctrl || !chanh || !prop || !nchan)
1813 return -EINVAL;
1814 mutex_lock(&ctrl->m_ctrl);
1815 for (i = 0; i < nchan; i++) {
Sagar Dharia29f35f02011-10-01 20:37:50 -06001816 u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001817 struct slim_ich *slc = &ctrl->chans[chan];
1818 dev_dbg(&ctrl->dev, "define_ch: ch:%d, state:%d", chan,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001819 (int)ctrl->chans[chan].state);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001820 if (slc->state < SLIM_CH_ALLOCATED) {
1821 ret = -ENXIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001822 goto err_define_ch;
1823 }
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001824 if (slc->state >= SLIM_CH_DEFINED && slc->ref >= 2) {
1825 if (prop->ratem != slc->prop.ratem ||
1826 prop->sampleszbits != slc->prop.sampleszbits ||
1827 prop->baser != slc->prop.baser) {
1828 ret = -EISCONN;
1829 goto err_define_ch;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001830 }
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001831 } else if (slc->state > SLIM_CH_DEFINED) {
1832 ret = -EISCONN;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001833 goto err_define_ch;
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001834 } else {
1835 ctrl->chans[chan].prop = *prop;
1836 ret = slim_nextdefine_ch(sb, chan);
1837 if (ret)
1838 goto err_define_ch;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001839 }
1840 if (i < (nchan - 1))
1841 ctrl->chans[chan].nextgrp = chanh[i + 1];
1842 if (i == 0)
1843 ctrl->chans[chan].nextgrp |= SLIM_START_GRP;
1844 if (i == (nchan - 1))
1845 ctrl->chans[chan].nextgrp |= SLIM_END_GRP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001846 }
1847
1848 if (grp)
1849 *grph = chanh[0];
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001850 for (i = 0; i < nchan; i++) {
Sagar Dharia29f35f02011-10-01 20:37:50 -06001851 u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001852 struct slim_ich *slc = &ctrl->chans[chan];
1853 if (slc->state == SLIM_CH_ALLOCATED)
1854 slc->state = SLIM_CH_DEFINED;
1855 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001856err_define_ch:
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06001857 dev_dbg(&ctrl->dev, "define_ch: ch:%d, ret:%d", *chanh, ret);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001858 mutex_unlock(&ctrl->m_ctrl);
1859 return ret;
1860}
1861EXPORT_SYMBOL_GPL(slim_define_ch);
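/*
 * Illustrative sketch (not part of this driver): defining two queried channels
 * as one stereo group. The slim_ch field values below are placeholders, and
 * SLIM_RATE_4000HZ is an assumed constant name for the 4 kHz base rate from
 * the slimbus header; real clients fill dataf, auxf and prot as well,
 * according to their protocol:
 *
 *	struct slim_ch prop = {
 *		.baser = SLIM_RATE_4000HZ,
 *		.ratem = 12,			48 kHz = 4 kHz x 12
 *		.sampleszbits = 16,
 *	};
 *	u16 chans[2] = { left_chanh, right_chanh };
 *	u16 grph;
 *	int err = slim_define_ch(sb, &prop, chans, 2, true, &grph);
 *
 * On success the group handle grph can be passed to slim_control_ch() to
 * activate both channels with a single call.
 */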
1862
1863static u32 getsubfrmcoding(u32 *ctrlw, u32 *subfrml, u32 *msgsl)
1864{
1865 u32 code = 0;
1866 if (*ctrlw == *subfrml) {
1867 *ctrlw = 8;
1868 *subfrml = 8;
1869 *msgsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME
1870 - SLIM_GDE_SLOTS_PER_SUPERFRAME;
1871 return 0;
1872 }
1873 if (*subfrml == 6) {
1874 code = 0;
1875 *msgsl = 256;
1876 } else if (*subfrml == 8) {
1877 code = 1;
1878 *msgsl = 192;
1879 } else if (*subfrml == 24) {
1880 code = 2;
1881 *msgsl = 64;
1882 } else { /* 32 */
1883 code = 3;
1884 *msgsl = 48;
1885 }
1886
1887 if (*ctrlw < 8) {
1888 if (*ctrlw >= 6) {
1889 *ctrlw = 6;
1890 code |= 0x14;
1891 } else {
1892 if (*ctrlw == 5)
1893 *ctrlw = 4;
1894 code |= (*ctrlw << 2);
1895 }
1896 } else {
1897 code -= 2;
1898 if (*ctrlw >= 24) {
1899 *ctrlw = 24;
1900 code |= 0x1e;
1901 } else if (*ctrlw >= 16) {
1902 *ctrlw = 16;
1903 code |= 0x1c;
1904 } else if (*ctrlw >= 12) {
1905 *ctrlw = 12;
1906 code |= 0x1a;
1907 } else {
1908 *ctrlw = 8;
1909 code |= 0x18;
1910 }
1911 }
1912
1913 *msgsl = (*msgsl * *ctrlw) - SLIM_FRM_SLOTS_PER_SUPERFRAME -
1914 SLIM_GDE_SLOTS_PER_SUPERFRAME;
1915 return code;
1916}
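/*
 * Worked example of the coding above (derived from the code itself; the frame
 * and guide slot constants stay symbolic): with *subfrml == 24 and *ctrlw == 8,
 * the subframe part contributes code 2 and *msgsl 64; the control-width part
 * subtracts 2 and ORs in 0x18, so the function returns 0x18, leaves *ctrlw at
 * 8, and sets *msgsl to (64 * 8) - SLIM_FRM_SLOTS_PER_SUPERFRAME -
 * SLIM_GDE_SLOTS_PER_SUPERFRAME.
 */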
1917
1918static void shiftsegoffsets(struct slim_controller *ctrl, struct slim_ich **ach,
1919 int sz, u32 shft)
1920{
 1921	int i;
1923 for (i = 0; i < sz; i++) {
1924 struct slim_ich *slc;
1925 if (ach[i] == NULL)
1926 continue;
1927 slc = ach[i];
1928 if (slc->state == SLIM_CH_PENDING_REMOVAL)
1929 continue;
 1930		slc->newoff += shft;
 1931		/* wrap around: the segment offset must stay below the interval */
 1932		if (slc->newoff >= slc->newintr)
 1933			slc->newoff -= slc->newintr;
1935 }
1936}
1937
1938static int slim_sched_chans(struct slim_device *sb, u32 clkgear,
1939 u32 *ctrlw, u32 *subfrml)
1940{
1941 int coeff1, coeff3;
1942 enum slim_ch_coeff bias;
1943 struct slim_controller *ctrl = sb->ctrl;
1944 int last1 = ctrl->sched.num_cc1 - 1;
1945 int last3 = ctrl->sched.num_cc3 - 1;
1946
1947 /*
1948 * Find first channels with coeff 1 & 3 as starting points for
1949 * scheduling
1950 */
1951 for (coeff3 = 0; coeff3 < ctrl->sched.num_cc3; coeff3++) {
1952 struct slim_ich *slc = ctrl->sched.chc3[coeff3];
1953 if (slc->state == SLIM_CH_PENDING_REMOVAL)
1954 continue;
1955 else
1956 break;
1957 }
1958 for (coeff1 = 0; coeff1 < ctrl->sched.num_cc1; coeff1++) {
1959 struct slim_ich *slc = ctrl->sched.chc1[coeff1];
1960 if (slc->state == SLIM_CH_PENDING_REMOVAL)
1961 continue;
1962 else
1963 break;
1964 }
1965 if (coeff3 == ctrl->sched.num_cc3 && coeff1 == ctrl->sched.num_cc1) {
1966 *ctrlw = 8;
1967 *subfrml = 8;
1968 return 0;
1969 } else if (coeff3 == ctrl->sched.num_cc3)
1970 bias = SLIM_COEFF_1;
1971 else
1972 bias = SLIM_COEFF_3;
1973
1974 /*
1975 * Find last chan in coeff1, 3 list, we will use to know when we
1976 * have done scheduling all coeff1 channels
1977 */
1978 while (last1 >= 0) {
1979 if (ctrl->sched.chc1[last1] != NULL &&
1980 (ctrl->sched.chc1[last1])->state !=
1981 SLIM_CH_PENDING_REMOVAL)
1982 break;
1983 last1--;
1984 }
1985 while (last3 >= 0) {
1986 if (ctrl->sched.chc3[last3] != NULL &&
1987 (ctrl->sched.chc3[last3])->state !=
1988 SLIM_CH_PENDING_REMOVAL)
1989 break;
1990 last3--;
1991 }
1992
1993 if (bias == SLIM_COEFF_1) {
1994 struct slim_ich *slc1 = ctrl->sched.chc1[coeff1];
1995 u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
1996 int curexp, finalexp;
1997 u32 curintr, curmaxsl;
1998 int opensl1[2];
1999 int maxctrlw1;
2000
2001 finalexp = (ctrl->sched.chc1[last1])->rootexp;
2002 curexp = (int)expshft - 1;
2003
2004 curintr = (SLIM_MAX_INTR_COEFF_1 * 2) >> (curexp + 1);
2005 curmaxsl = curintr >> 1;
2006 opensl1[0] = opensl1[1] = curmaxsl;
2007
2008 while ((coeff1 < ctrl->sched.num_cc1) || (curintr > 24)) {
2009 curintr >>= 1;
2010 curmaxsl >>= 1;
2011
2012 /* update 4K family open slot records */
2013 if (opensl1[1] < opensl1[0])
2014 opensl1[1] -= curmaxsl;
2015 else
2016 opensl1[1] = opensl1[0] - curmaxsl;
2017 opensl1[0] = curmaxsl;
2018 if (opensl1[1] < 0) {
2019 opensl1[0] += opensl1[1];
2020 opensl1[1] = 0;
2021 }
2022 if (opensl1[0] <= 0) {
2023 dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
2024 __LINE__);
2025 return -EXFULL;
2026 }
2027 curexp++;
2028 /* schedule 4k family channels */
2029
2030 while ((coeff1 < ctrl->sched.num_cc1) && (curexp ==
2031 (int)(slc1->rootexp + expshft))) {
2032 if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
2033 coeff1++;
2034 slc1 = ctrl->sched.chc1[coeff1];
2035 continue;
2036 }
2037 if (opensl1[1] >= opensl1[0] ||
2038 (finalexp == (int)slc1->rootexp &&
2039 curintr <= 24 &&
2040 opensl1[0] == curmaxsl)) {
2041 opensl1[1] -= slc1->seglen;
2042 slc1->newoff = curmaxsl + opensl1[1];
2043 if (opensl1[1] < 0 &&
2044 opensl1[0] == curmaxsl) {
2045 opensl1[0] += opensl1[1];
2046 opensl1[1] = 0;
2047 if (opensl1[0] < 0) {
2048 dev_dbg(&ctrl->dev,
2049 "reconfig failed:%d\n",
2050 __LINE__);
2051 return -EXFULL;
2052 }
2053 }
2054 } else {
2055 if (slc1->seglen > opensl1[0]) {
2056 dev_dbg(&ctrl->dev,
2057 "reconfig failed:%d\n",
2058 __LINE__);
2059 return -EXFULL;
2060 }
2061 slc1->newoff = opensl1[0] -
2062 slc1->seglen;
2063 opensl1[0] = slc1->newoff;
2064 }
2065 slc1->newintr = curintr;
2066 coeff1++;
2067 slc1 = ctrl->sched.chc1[coeff1];
2068 }
2069 }
Sagar Dhariaa00f61f2012-04-21 15:10:08 -06002070 /* Leave some slots for messaging space */
Sagar Dharia90a06cc2012-06-25 12:44:02 -06002071 if (opensl1[1] <= 0 && opensl1[0] <= 0)
Sagar Dhariaa00f61f2012-04-21 15:10:08 -06002072 return -EXFULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002073 if (opensl1[1] > opensl1[0]) {
2074 int temp = opensl1[0];
2075 opensl1[0] = opensl1[1];
2076 opensl1[1] = temp;
2077 shiftsegoffsets(ctrl, ctrl->sched.chc1,
2078 ctrl->sched.num_cc1, curmaxsl);
2079 }
2080 /* choose subframe mode to maximize bw */
2081 maxctrlw1 = opensl1[0];
2082 if (opensl1[0] == curmaxsl)
2083 maxctrlw1 += opensl1[1];
2084 if (curintr >= 24) {
2085 *subfrml = 24;
2086 *ctrlw = maxctrlw1;
2087 } else if (curintr == 12) {
2088 if (maxctrlw1 > opensl1[1] * 4) {
2089 *subfrml = 24;
2090 *ctrlw = maxctrlw1;
2091 } else {
2092 *subfrml = 6;
2093 *ctrlw = opensl1[1];
2094 }
2095 } else {
2096 *subfrml = 6;
2097 *ctrlw = maxctrlw1;
2098 }
2099 } else {
Jordan Crouse9bb8aca2011-11-23 11:41:20 -07002100 struct slim_ich *slc1 = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002101 struct slim_ich *slc3 = ctrl->sched.chc3[coeff3];
2102 u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
2103 int curexp, finalexp, exp1;
2104 u32 curintr, curmaxsl;
2105 int opensl3[2];
2106 int opensl1[6];
2107 bool opensl1valid = false;
2108 int maxctrlw1, maxctrlw3, i;
2109 finalexp = (ctrl->sched.chc3[last3])->rootexp;
2110 if (last1 >= 0) {
2111 slc1 = ctrl->sched.chc1[coeff1];
2112 exp1 = (ctrl->sched.chc1[last1])->rootexp;
2113 if (exp1 > finalexp)
2114 finalexp = exp1;
2115 }
2116 curexp = (int)expshft - 1;
2117
2118 curintr = (SLIM_MAX_INTR_COEFF_3 * 2) >> (curexp + 1);
2119 curmaxsl = curintr >> 1;
2120 opensl3[0] = opensl3[1] = curmaxsl;
2121
2122 while (coeff1 < ctrl->sched.num_cc1 ||
2123 coeff3 < ctrl->sched.num_cc3 ||
2124 curintr > 32) {
2125 curintr >>= 1;
2126 curmaxsl >>= 1;
2127
2128 /* update 12k family open slot records */
2129 if (opensl3[1] < opensl3[0])
2130 opensl3[1] -= curmaxsl;
2131 else
2132 opensl3[1] = opensl3[0] - curmaxsl;
2133 opensl3[0] = curmaxsl;
2134 if (opensl3[1] < 0) {
2135 opensl3[0] += opensl3[1];
2136 opensl3[1] = 0;
2137 }
2138 if (opensl3[0] <= 0) {
2139 dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
2140 __LINE__);
2141 return -EXFULL;
2142 }
2143 curexp++;
2144
2145 /* schedule 12k family channels */
2146 while (coeff3 < ctrl->sched.num_cc3 &&
2147 curexp == (int)slc3->rootexp + expshft) {
2148 if (slc3->state == SLIM_CH_PENDING_REMOVAL) {
2149 coeff3++;
2150 slc3 = ctrl->sched.chc3[coeff3];
2151 continue;
2152 }
2153 opensl1valid = false;
2154 if (opensl3[1] >= opensl3[0] ||
2155 (finalexp == (int)slc3->rootexp &&
2156 curintr <= 32 &&
2157 opensl3[0] == curmaxsl &&
2158 last1 < 0)) {
2159 opensl3[1] -= slc3->seglen;
2160 slc3->newoff = curmaxsl + opensl3[1];
2161 if (opensl3[1] < 0 &&
2162 opensl3[0] == curmaxsl) {
2163 opensl3[0] += opensl3[1];
2164 opensl3[1] = 0;
2165 }
2166 if (opensl3[0] < 0) {
2167 dev_dbg(&ctrl->dev,
2168 "reconfig failed:%d\n",
2169 __LINE__);
2170 return -EXFULL;
2171 }
2172 } else {
2173 if (slc3->seglen > opensl3[0]) {
2174 dev_dbg(&ctrl->dev,
2175 "reconfig failed:%d\n",
2176 __LINE__);
2177 return -EXFULL;
2178 }
2179 slc3->newoff = opensl3[0] -
2180 slc3->seglen;
2181 opensl3[0] = slc3->newoff;
2182 }
2183 slc3->newintr = curintr;
2184 coeff3++;
2185 slc3 = ctrl->sched.chc3[coeff3];
2186 }
2187 /* update 4k openslot records */
2188 if (opensl1valid == false) {
2189 for (i = 0; i < 3; i++) {
2190 opensl1[i * 2] = opensl3[0];
2191 opensl1[(i * 2) + 1] = opensl3[1];
2192 }
2193 } else {
2194 int opensl1p[6];
2195 memcpy(opensl1p, opensl1, sizeof(opensl1));
2196 for (i = 0; i < 3; i++) {
2197 if (opensl1p[i] < opensl1p[i + 3])
2198 opensl1[(i * 2) + 1] =
2199 opensl1p[i];
2200 else
2201 opensl1[(i * 2) + 1] =
2202 opensl1p[i + 3];
2203 }
2204 for (i = 0; i < 3; i++) {
2205 opensl1[(i * 2) + 1] -= curmaxsl;
2206 opensl1[i * 2] = curmaxsl;
2207 if (opensl1[(i * 2) + 1] < 0) {
2208 opensl1[i * 2] +=
2209 opensl1[(i * 2) + 1];
2210 opensl1[(i * 2) + 1] = 0;
2211 }
2212 if (opensl1[i * 2] < 0) {
2213 dev_dbg(&ctrl->dev,
2214 "reconfig failed:%d\n",
2215 __LINE__);
2216 return -EXFULL;
2217 }
2218 }
2219 }
2220 /* schedule 4k family channels */
2221 while (coeff1 < ctrl->sched.num_cc1 &&
2222 curexp == (int)slc1->rootexp + expshft) {
2223 /* searchorder effective when opensl valid */
2224 static const int srcho[] = { 5, 2, 4, 1, 3, 0 };
2225 int maxopensl = 0;
2226 int maxi = 0;
2227 if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
2228 coeff1++;
2229 slc1 = ctrl->sched.chc1[coeff1];
2230 continue;
2231 }
2232 opensl1valid = true;
2233 for (i = 0; i < 6; i++) {
2234 if (opensl1[srcho[i]] > maxopensl) {
2235 maxopensl = opensl1[srcho[i]];
2236 maxi = srcho[i];
2237 }
2238 }
2239 opensl1[maxi] -= slc1->seglen;
2240 slc1->newoff = (curmaxsl * maxi) +
2241 opensl1[maxi];
2242 if (opensl1[maxi] < 0) {
2243 if (((maxi & 1) == 1) &&
2244 (opensl1[maxi - 1] == curmaxsl)) {
2245 opensl1[maxi - 1] +=
2246 opensl1[maxi];
2247 if (opensl3[0] >
2248 opensl1[maxi - 1])
2249 opensl3[0] =
2250 opensl1[maxi - 1];
2251 opensl3[1] = 0;
2252 opensl1[maxi] = 0;
2253 if (opensl1[maxi - 1] < 0) {
2254 dev_dbg(&ctrl->dev,
2255 "reconfig failed:%d\n",
2256 __LINE__);
2257 return -EXFULL;
2258 }
2259 } else {
2260 dev_dbg(&ctrl->dev,
2261 "reconfig failed:%d\n",
2262 __LINE__);
2263 return -EXFULL;
2264 }
2265 } else {
2266 if (opensl3[maxi & 1] > opensl1[maxi])
2267 opensl3[maxi & 1] =
2268 opensl1[maxi];
2269 }
2270 slc1->newintr = curintr * 3;
2271 coeff1++;
2272 slc1 = ctrl->sched.chc1[coeff1];
2273 }
2274 }
Sagar Dhariaa00f61f2012-04-21 15:10:08 -06002275 /* Leave some slots for messaging space */
Sagar Dharia90a06cc2012-06-25 12:44:02 -06002276 if (opensl3[1] <= 0 && opensl3[0] <= 0)
Sagar Dhariaa00f61f2012-04-21 15:10:08 -06002277 return -EXFULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002278 /* swap 1st and 2nd bucket if 2nd bucket has more open slots */
2279 if (opensl3[1] > opensl3[0]) {
2280 int temp = opensl3[0];
2281 opensl3[0] = opensl3[1];
2282 opensl3[1] = temp;
2283 temp = opensl1[5];
2284 opensl1[5] = opensl1[4];
2285 opensl1[4] = opensl1[3];
2286 opensl1[3] = opensl1[2];
2287 opensl1[2] = opensl1[1];
2288 opensl1[1] = opensl1[0];
2289 opensl1[0] = temp;
2290 shiftsegoffsets(ctrl, ctrl->sched.chc1,
2291 ctrl->sched.num_cc1, curmaxsl);
2292 shiftsegoffsets(ctrl, ctrl->sched.chc3,
2293 ctrl->sched.num_cc3, curmaxsl);
2294 }
2295 /* subframe mode to maximize BW */
2296 maxctrlw3 = opensl3[0];
2297 maxctrlw1 = opensl1[0];
2298 if (opensl3[0] == curmaxsl)
2299 maxctrlw3 += opensl3[1];
2300 for (i = 0; i < 5 && opensl1[i] == curmaxsl; i++)
2301 maxctrlw1 += opensl1[i + 1];
2302 if (curintr >= 32) {
2303 *subfrml = 32;
2304 *ctrlw = maxctrlw3;
2305 } else if (curintr == 16) {
2306 if (maxctrlw3 > (opensl3[1] * 4)) {
2307 *subfrml = 32;
2308 *ctrlw = maxctrlw3;
2309 } else {
2310 *subfrml = 8;
2311 *ctrlw = opensl3[1];
2312 }
2313 } else {
2314 if ((maxctrlw1 * 8) >= (maxctrlw3 * 24)) {
2315 *subfrml = 24;
2316 *ctrlw = maxctrlw1;
2317 } else {
2318 *subfrml = 8;
2319 *ctrlw = maxctrlw3;
2320 }
2321 }
2322 }
2323 return 0;
2324}
2325
2326#ifdef DEBUG
2327static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
2328 u32 subfrml, u32 clkgear)
2329{
2330 int sl, i;
2331 int cc1 = 0;
2332 int cc3 = 0;
2333 struct slim_ich *slc = NULL;
2334 if (!ctrl->sched.slots)
2335 return 0;
2336 memset(ctrl->sched.slots, 0, SLIM_SL_PER_SUPERFRAME);
2337 dev_dbg(&ctrl->dev, "Clock gear is:%d\n", clkgear);
2338 for (sl = 0; sl < SLIM_SL_PER_SUPERFRAME; sl += subfrml) {
2339 for (i = 0; i < ctrlw; i++)
2340 ctrl->sched.slots[sl + i] = 33;
2341 }
2342 while (cc1 < ctrl->sched.num_cc1) {
2343 slc = ctrl->sched.chc1[cc1];
2344 if (slc == NULL) {
2345 dev_err(&ctrl->dev, "SLC1 null in verify: chan%d\n",
2346 cc1);
2347 return -EIO;
2348 }
2349 dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
2350 (slc - ctrl->chans), slc->newoff,
2351 slc->newintr, slc->seglen);
2352
2353 if (slc->state != SLIM_CH_PENDING_REMOVAL) {
2354 for (sl = slc->newoff;
2355 sl < SLIM_SL_PER_SUPERFRAME;
2356 sl += slc->newintr) {
2357 for (i = 0; i < slc->seglen; i++) {
2358 if (ctrl->sched.slots[sl + i])
2359 return -EXFULL;
2360 ctrl->sched.slots[sl + i] = cc1 + 1;
2361 }
2362 }
2363 }
2364 cc1++;
2365 }
2366 while (cc3 < ctrl->sched.num_cc3) {
2367 slc = ctrl->sched.chc3[cc3];
2368 if (slc == NULL) {
2369 dev_err(&ctrl->dev, "SLC3 null in verify: chan%d\n",
2370 cc3);
2371 return -EIO;
2372 }
2373 dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
2374 (slc - ctrl->chans), slc->newoff,
2375 slc->newintr, slc->seglen);
2376 if (slc->state != SLIM_CH_PENDING_REMOVAL) {
2377 for (sl = slc->newoff;
2378 sl < SLIM_SL_PER_SUPERFRAME;
2379 sl += slc->newintr) {
2380 for (i = 0; i < slc->seglen; i++) {
2381 if (ctrl->sched.slots[sl + i])
2382 return -EXFULL;
2383 ctrl->sched.slots[sl + i] = cc3 + 1;
2384 }
2385 }
2386 }
2387 cc3++;
2388 }
2389
2390 return 0;
2391}
2392#else
2393static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
2394 u32 subfrml, u32 clkgear)
2395{
2396 return 0;
2397}
2398#endif
2399
2400static void slim_sort_chan_grp(struct slim_controller *ctrl,
2401 struct slim_ich *slc)
2402{
2403 u8 last = (u8)-1;
2404 u8 second = 0;
2405
2406 for (; last > 0; last--) {
2407 struct slim_ich *slc1 = slc;
2408 struct slim_ich *slc2;
Sagar Dharia29f35f02011-10-01 20:37:50 -06002409 u8 next = SLIM_HDL_TO_CHIDX(slc1->nextgrp);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002410 slc2 = &ctrl->chans[next];
2411 for (second = 1; second <= last && slc2 &&
2412 (slc2->state == SLIM_CH_ACTIVE ||
2413 slc2->state == SLIM_CH_PENDING_ACTIVE); second++) {
2414 if (slc1->newoff > slc2->newoff) {
2415 u32 temp = slc2->newoff;
2416 slc2->newoff = slc1->newoff;
2417 slc1->newoff = temp;
2418 }
2419 if (slc2->nextgrp & SLIM_END_GRP) {
2420 last = second;
2421 break;
2422 }
2423 slc1 = slc2;
Sagar Dharia29f35f02011-10-01 20:37:50 -06002424 next = SLIM_HDL_TO_CHIDX(slc1->nextgrp);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002425 slc2 = &ctrl->chans[next];
2426 }
2427 if (slc2 == NULL)
2428 last = second - 1;
2429 }
2430}
2431
2432
2433static int slim_allocbw(struct slim_device *sb, int *subfrmc, int *clkgear)
2434{
2435 u32 msgsl = 0;
2436 u32 ctrlw = 0;
2437 u32 subfrml = 0;
2438 int ret = -EIO;
2439 struct slim_controller *ctrl = sb->ctrl;
2440 u32 usedsl = ctrl->sched.usedslots + ctrl->sched.pending_msgsl;
2441 u32 availsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME -
2442 SLIM_GDE_SLOTS_PER_SUPERFRAME;
2443 *clkgear = SLIM_MAX_CLK_GEAR;
2444
 2445	dev_dbg(&ctrl->dev, "used sl:%u, available sl:%u\n", usedsl, availsl);
 2446	dev_dbg(&ctrl->dev, "pending: chan sl:%u, msg sl:%u, clkgear:%u\n",
2447 ctrl->sched.usedslots,
2448 ctrl->sched.pending_msgsl, *clkgear);
Sagar Dharia33f34442011-08-08 16:22:03 -06002449 /*
 2450	 * If the number of used slots is 0, all channels are inactive.
2451 * It is very likely that the manager will call clock pause very soon.
2452 * By making sure that bus is in MAX_GEAR, clk pause sequence will take
2453 * minimum amount of time.
2454 */
2455 if (ctrl->sched.usedslots != 0) {
2456 while ((usedsl * 2 <= availsl) && (*clkgear > ctrl->min_cg)) {
2457 *clkgear -= 1;
2458 usedsl *= 2;
2459 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002460 }
2461
2462 /*
 2463	 * Try scheduling data channels at the current clock gear; if not all
 2464	 * channels can be scheduled, or the reserved message bandwidth can't be
 2465	 * satisfied, increase the clock gear and try again.
2466 */
Sagar Dharia98a7ecb2011-07-25 15:25:35 -06002467 for (; *clkgear <= ctrl->max_cg; (*clkgear)++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002468 ret = slim_sched_chans(sb, *clkgear, &ctrlw, &subfrml);
2469
2470 if (ret == 0) {
2471 *subfrmc = getsubfrmcoding(&ctrlw, &subfrml, &msgsl);
Sagar Dharia98a7ecb2011-07-25 15:25:35 -06002472 if ((msgsl >> (ctrl->max_cg - *clkgear) <
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002473 ctrl->sched.pending_msgsl) &&
Sagar Dharia98a7ecb2011-07-25 15:25:35 -06002474 (*clkgear < ctrl->max_cg))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002475 continue;
2476 else
2477 break;
2478 }
2479 }
2480 if (ret == 0) {
2481 int i;
2482 /* Sort channel-groups */
2483 for (i = 0; i < ctrl->sched.num_cc1; i++) {
2484 struct slim_ich *slc = ctrl->sched.chc1[i];
2485 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2486 continue;
2487 if ((slc->nextgrp & SLIM_START_GRP) &&
2488 !(slc->nextgrp & SLIM_END_GRP)) {
2489 slim_sort_chan_grp(ctrl, slc);
2490 }
2491 }
2492 for (i = 0; i < ctrl->sched.num_cc3; i++) {
2493 struct slim_ich *slc = ctrl->sched.chc3[i];
2494 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2495 continue;
2496 if ((slc->nextgrp & SLIM_START_GRP) &&
2497 !(slc->nextgrp & SLIM_END_GRP)) {
2498 slim_sort_chan_grp(ctrl, slc);
2499 }
2500 }
2501
2502 ret = slim_verifychansched(ctrl, ctrlw, subfrml, *clkgear);
2503 }
2504
2505 return ret;
2506}
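/*
 * Illustrative walk-through of the gear selection above (numbers are made up):
 * starting at SLIM_MAX_CLK_GEAR, every iteration that still satisfies
 * usedsl * 2 <= availsl drops the gear by one and doubles usedsl, so a bus
 * using at most 1/8th of the available slots ends up at least three gears
 * below the maximum (bounded by ctrl->min_cg). The scheduling loop that
 * follows can then push the gear back up if the channels or the reserved
 * message bandwidth do not fit at that gear.
 */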
2507
Sagar Dhariaa0f6b672011-08-13 17:36:55 -06002508static void slim_change_existing_chans(struct slim_controller *ctrl, int coeff)
2509{
2510 struct slim_ich **arr;
2511 int len, i;
2512 if (coeff == SLIM_COEFF_1) {
2513 arr = ctrl->sched.chc1;
2514 len = ctrl->sched.num_cc1;
2515 } else {
2516 arr = ctrl->sched.chc3;
2517 len = ctrl->sched.num_cc3;
2518 }
2519 for (i = 0; i < len; i++) {
2520 struct slim_ich *slc = arr[i];
2521 if (slc->state == SLIM_CH_ACTIVE ||
2522 slc->state == SLIM_CH_SUSPENDED)
2523 slc->offset = slc->newoff;
2524 slc->interval = slc->newintr;
2525 }
2526}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002527static void slim_chan_changes(struct slim_device *sb, bool revert)
2528{
2529 struct slim_controller *ctrl = sb->ctrl;
2530 while (!list_empty(&sb->mark_define)) {
2531 struct slim_ich *slc;
2532 struct slim_pending_ch *pch =
2533 list_entry(sb->mark_define.next,
2534 struct slim_pending_ch, pending);
2535 slc = &ctrl->chans[pch->chan];
2536 if (revert) {
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002537 if (slc->state == SLIM_CH_PENDING_ACTIVE) {
2538 u32 sl = slc->seglen << slc->rootexp;
2539 if (slc->coeff == SLIM_COEFF_3)
2540 sl *= 3;
2541 ctrl->sched.usedslots -= sl;
2542 slim_remove_ch(ctrl, slc);
2543 slc->state = SLIM_CH_DEFINED;
2544 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002545 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002546 slc->state = SLIM_CH_ACTIVE;
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002547 slc->def++;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002548 }
2549 list_del_init(&pch->pending);
2550 kfree(pch);
2551 }
2552
2553 while (!list_empty(&sb->mark_removal)) {
2554 struct slim_pending_ch *pch =
2555 list_entry(sb->mark_removal.next,
2556 struct slim_pending_ch, pending);
2557 struct slim_ich *slc = &ctrl->chans[pch->chan];
2558 u32 sl = slc->seglen << slc->rootexp;
2559 if (revert) {
2560 if (slc->coeff == SLIM_COEFF_3)
2561 sl *= 3;
2562 ctrl->sched.usedslots += sl;
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002563 slc->def = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002564 slc->state = SLIM_CH_ACTIVE;
2565 } else
2566 slim_remove_ch(ctrl, slc);
2567 list_del_init(&pch->pending);
2568 kfree(pch);
2569 }
2570
2571 while (!list_empty(&sb->mark_suspend)) {
2572 struct slim_pending_ch *pch =
2573 list_entry(sb->mark_suspend.next,
2574 struct slim_pending_ch, pending);
2575 struct slim_ich *slc = &ctrl->chans[pch->chan];
2576 if (revert)
2577 slc->state = SLIM_CH_ACTIVE;
2578 list_del_init(&pch->pending);
2579 kfree(pch);
2580 }
Sagar Dhariaa0f6b672011-08-13 17:36:55 -06002581	/* Update already-active channels if reconfig succeeded */
2582 if (!revert) {
2583 slim_change_existing_chans(ctrl, SLIM_COEFF_1);
2584 slim_change_existing_chans(ctrl, SLIM_COEFF_3);
2585 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002586}
2587
2588/*
2589 * slim_reconfigure_now: Request reconfiguration now.
2590 * @sb: client handle
 2591 * This API does what the commit flag in other scheduling APIs does.
 2592 * -EXFULL is returned if there is no space in the TDM frame to reserve the
 2593 * bandwidth. -EBUSY is returned if a reconfiguration request is already in
 2594 * progress.
2595 */
2596int slim_reconfigure_now(struct slim_device *sb)
2597{
2598 u8 i;
2599 u8 wbuf[4];
2600 u32 clkgear, subframe;
2601 u32 curexp;
2602 int ret;
2603 struct slim_controller *ctrl = sb->ctrl;
2604 u32 expshft;
2605 u32 segdist;
2606 struct slim_pending_ch *pch;
2607
Sagar Dharia80a55e12012-08-16 16:43:58 -06002608 mutex_lock(&ctrl->sched.m_reconf);
2609 mutex_lock(&ctrl->m_ctrl);
Sagar Dharia6e728bd2012-07-26 16:56:44 -06002610 /*
2611 * If there are no pending changes from this client, avoid sending
2612 * the reconfiguration sequence
2613 */
2614 if (sb->pending_msgsl == sb->cur_msgsl &&
2615 list_empty(&sb->mark_define) &&
Sagar Dharia6e728bd2012-07-26 16:56:44 -06002616 list_empty(&sb->mark_suspend)) {
Sagar Dharia80a55e12012-08-16 16:43:58 -06002617 struct list_head *pos, *next;
2618 list_for_each_safe(pos, next, &sb->mark_removal) {
2619 struct slim_ich *slc;
2620 pch = list_entry(pos, struct slim_pending_ch, pending);
2621 slc = &ctrl->chans[pch->chan];
2622 if (slc->def > 0)
2623 slc->def--;
2624 /* Disconnect source port to free it up */
2625 if (SLIM_HDL_TO_LA(slc->srch) == sb->laddr)
2626 slc->srch = 0;
2627 if (slc->def != 0) {
2628 list_del(&pch->pending);
2629 kfree(pch);
2630 }
2631 }
2632 if (list_empty(&sb->mark_removal)) {
2633 mutex_unlock(&ctrl->m_ctrl);
2634 mutex_unlock(&ctrl->sched.m_reconf);
2635 pr_info("SLIM_CL: skip reconfig sequence");
2636 return 0;
2637 }
Sagar Dharia6e728bd2012-07-26 16:56:44 -06002638 }
2639
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002640 ctrl->sched.pending_msgsl += sb->pending_msgsl - sb->cur_msgsl;
2641 list_for_each_entry(pch, &sb->mark_define, pending) {
2642 struct slim_ich *slc = &ctrl->chans[pch->chan];
2643 slim_add_ch(ctrl, slc);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002644 if (slc->state < SLIM_CH_ACTIVE)
2645 slc->state = SLIM_CH_PENDING_ACTIVE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002646 }
2647
2648 list_for_each_entry(pch, &sb->mark_removal, pending) {
2649 struct slim_ich *slc = &ctrl->chans[pch->chan];
2650 u32 sl = slc->seglen << slc->rootexp;
2651 if (slc->coeff == SLIM_COEFF_3)
2652 sl *= 3;
2653 ctrl->sched.usedslots -= sl;
2654 slc->state = SLIM_CH_PENDING_REMOVAL;
2655 }
2656 list_for_each_entry(pch, &sb->mark_suspend, pending) {
2657 struct slim_ich *slc = &ctrl->chans[pch->chan];
2658 slc->state = SLIM_CH_SUSPENDED;
2659 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002660
Sagar Dharia4aec9232012-07-24 23:44:26 -06002661 /*
2662 * Controller can override default channel scheduling algorithm.
2663 * (e.g. if controller needs to use fixed channel scheduling based
2664 * on number of channels)
2665 */
2666 if (ctrl->allocbw)
2667 ret = ctrl->allocbw(sb, &subframe, &clkgear);
2668 else
2669 ret = slim_allocbw(sb, &subframe, &clkgear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002670
2671 if (!ret) {
2672 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2673 SLIM_MSG_MC_BEGIN_RECONFIGURATION, 0, SLIM_MSG_MT_CORE,
2674 NULL, NULL, 0, 3, NULL, 0, NULL);
2675 dev_dbg(&ctrl->dev, "sending begin_reconfig:ret:%d\n", ret);
2676 }
2677
2678 if (!ret && subframe != ctrl->sched.subfrmcode) {
2679 wbuf[0] = (u8)(subframe & 0xFF);
2680 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2681 SLIM_MSG_MC_NEXT_SUBFRAME_MODE, 0, SLIM_MSG_MT_CORE,
 2682			NULL, wbuf, 1, 4, NULL, 0, NULL);
2683 dev_dbg(&ctrl->dev, "sending subframe:%d,ret:%d\n",
2684 (int)wbuf[0], ret);
2685 }
2686 if (!ret && clkgear != ctrl->clkgear) {
2687 wbuf[0] = (u8)(clkgear & 0xFF);
2688 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2689 SLIM_MSG_MC_NEXT_CLOCK_GEAR, 0, SLIM_MSG_MT_CORE,
2690 NULL, wbuf, 1, 4, NULL, 0, NULL);
2691 dev_dbg(&ctrl->dev, "sending clkgear:%d,ret:%d\n",
2692 (int)wbuf[0], ret);
2693 }
2694 if (ret)
2695 goto revert_reconfig;
2696
2697 expshft = SLIM_MAX_CLK_GEAR - clkgear;
2698 /* activate/remove channel */
2699 list_for_each_entry(pch, &sb->mark_define, pending) {
2700 struct slim_ich *slc = &ctrl->chans[pch->chan];
2701 /* Define content */
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002702 wbuf[0] = slc->chan;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002703 wbuf[1] = slc->prrate;
2704 wbuf[2] = slc->prop.dataf | (slc->prop.auxf << 4);
2705 wbuf[3] = slc->prop.sampleszbits / SLIM_CL_PER_SL;
2706 dev_dbg(&ctrl->dev, "define content, activate:%x, %x, %x, %x\n",
2707 wbuf[0], wbuf[1], wbuf[2], wbuf[3]);
2708 /* Right now, channel link bit is not supported */
2709 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2710 SLIM_MSG_MC_NEXT_DEFINE_CONTENT, 0,
2711 SLIM_MSG_MT_CORE, NULL, (u8 *)&wbuf, 4, 7,
2712 NULL, 0, NULL);
2713 if (ret)
2714 goto revert_reconfig;
2715
2716 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2717 SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL, 0,
2718 SLIM_MSG_MT_CORE, NULL, (u8 *)&wbuf, 1, 4,
2719 NULL, 0, NULL);
2720 if (ret)
2721 goto revert_reconfig;
2722 }
2723
2724 list_for_each_entry(pch, &sb->mark_removal, pending) {
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002725 struct slim_ich *slc = &ctrl->chans[pch->chan];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002726 dev_dbg(&ctrl->dev, "remove chan:%x\n", pch->chan);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002727 wbuf[0] = slc->chan;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002728 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2729 SLIM_MSG_MC_NEXT_REMOVE_CHANNEL, 0,
2730 SLIM_MSG_MT_CORE, NULL, wbuf, 1, 4,
2731 NULL, 0, NULL);
2732 if (ret)
2733 goto revert_reconfig;
2734 }
2735 list_for_each_entry(pch, &sb->mark_suspend, pending) {
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002736 struct slim_ich *slc = &ctrl->chans[pch->chan];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002737 dev_dbg(&ctrl->dev, "suspend chan:%x\n", pch->chan);
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002738 wbuf[0] = slc->chan;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002739 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2740 SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL, 0,
2741 SLIM_MSG_MT_CORE, NULL, wbuf, 1, 4,
2742 NULL, 0, NULL);
2743 if (ret)
2744 goto revert_reconfig;
2745 }
2746
2747 /* Define CC1 channel */
2748 for (i = 0; i < ctrl->sched.num_cc1; i++) {
2749 struct slim_ich *slc = ctrl->sched.chc1[i];
2750 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2751 continue;
2752 curexp = slc->rootexp + expshft;
2753 segdist = (slc->newoff << curexp) & 0x1FF;
2754 expshft = SLIM_MAX_CLK_GEAR - clkgear;
Sagar Dhariaa0f6b672011-08-13 17:36:55 -06002755 dev_dbg(&ctrl->dev, "new-intr:%d, old-intr:%d, dist:%d\n",
2756 slc->newintr, slc->interval, segdist);
2757 dev_dbg(&ctrl->dev, "new-off:%d, old-off:%d\n",
2758 slc->newoff, slc->offset);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002759
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002760 if (slc->state < SLIM_CH_ACTIVE || slc->def < slc->ref ||
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002761 slc->newintr != slc->interval ||
2762 slc->newoff != slc->offset) {
2763 segdist |= 0x200;
2764 segdist >>= curexp;
2765 segdist |= (slc->newoff << (curexp + 1)) & 0xC00;
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002766 wbuf[0] = slc->chan;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002767 wbuf[1] = (u8)(segdist & 0xFF);
2768 wbuf[2] = (u8)((segdist & 0xF00) >> 8) |
2769 (slc->prop.prot << 4);
2770 wbuf[3] = slc->seglen;
2771 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2772 SLIM_MSG_MC_NEXT_DEFINE_CHANNEL, 0,
2773 SLIM_MSG_MT_CORE, NULL, (u8 *)wbuf, 4,
2774 7, NULL, 0, NULL);
2775 if (ret)
2776 goto revert_reconfig;
2777 }
2778 }
2779
2780 /* Define CC3 channels */
2781 for (i = 0; i < ctrl->sched.num_cc3; i++) {
2782 struct slim_ich *slc = ctrl->sched.chc3[i];
2783 if (slc->state == SLIM_CH_PENDING_REMOVAL)
2784 continue;
2785 curexp = slc->rootexp + expshft;
2786 segdist = (slc->newoff << curexp) & 0x1FF;
2787 expshft = SLIM_MAX_CLK_GEAR - clkgear;
Sagar Dhariaa0f6b672011-08-13 17:36:55 -06002788 dev_dbg(&ctrl->dev, "new-intr:%d, old-intr:%d, dist:%d\n",
2789 slc->newintr, slc->interval, segdist);
2790 dev_dbg(&ctrl->dev, "new-off:%d, old-off:%d\n",
2791 slc->newoff, slc->offset);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002792
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002793 if (slc->state < SLIM_CH_ACTIVE || slc->def < slc->ref ||
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002794 slc->newintr != slc->interval ||
2795 slc->newoff != slc->offset) {
2796 segdist |= 0x200;
2797 segdist >>= curexp;
2798 segdist |= 0xC00;
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002799 wbuf[0] = slc->chan;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002800 wbuf[1] = (u8)(segdist & 0xFF);
2801 wbuf[2] = (u8)((segdist & 0xF00) >> 8) |
2802 (slc->prop.prot << 4);
2803 wbuf[3] = (u8)(slc->seglen);
2804 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2805 SLIM_MSG_MC_NEXT_DEFINE_CHANNEL, 0,
2806 SLIM_MSG_MT_CORE, NULL, (u8 *)wbuf, 4,
2807 7, NULL, 0, NULL);
2808 if (ret)
2809 goto revert_reconfig;
2810 }
2811 }
2812 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
2813 SLIM_MSG_MC_RECONFIGURE_NOW, 0, SLIM_MSG_MT_CORE, NULL,
2814 NULL, 0, 3, NULL, 0, NULL);
2815 dev_dbg(&ctrl->dev, "reconfig now:ret:%d\n", ret);
2816 if (!ret) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002817 ctrl->sched.subfrmcode = subframe;
2818 ctrl->clkgear = clkgear;
2819 ctrl->sched.msgsl = ctrl->sched.pending_msgsl;
2820 sb->cur_msgsl = sb->pending_msgsl;
2821 slim_chan_changes(sb, false);
2822 mutex_unlock(&ctrl->m_ctrl);
2823 mutex_unlock(&ctrl->sched.m_reconf);
2824 return 0;
2825 }
2826
2827revert_reconfig:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002828 /* Revert channel changes */
2829 slim_chan_changes(sb, true);
2830 mutex_unlock(&ctrl->m_ctrl);
2831 mutex_unlock(&ctrl->sched.m_reconf);
2832 return ret;
2833}
2834EXPORT_SYMBOL_GPL(slim_reconfigure_now);
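/*
 * Illustrative sketch (not part of this driver): batching several scheduling
 * requests into a single reconfiguration sequence. The commit flag is kept
 * false on the intermediate calls, and the final slim_reconfigure_now() sends
 * one BEGIN_RECONFIGURATION ... RECONFIGURE_NOW sequence for all of them.
 * The bandwidth figure and group handle are placeholders from the earlier
 * sketches:
 *
 *	slim_reservemsg_bw(sb, 100000, false);
 *	slim_control_ch(sb, grph, SLIM_CH_ACTIVATE, false);
 *	int err = slim_reconfigure_now(sb);
 */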
2835
2836static int add_pending_ch(struct list_head *listh, u8 chan)
2837{
2838 struct slim_pending_ch *pch;
2839 pch = kmalloc(sizeof(struct slim_pending_ch), GFP_KERNEL);
2840 if (!pch)
2841 return -ENOMEM;
2842 pch->chan = chan;
2843 list_add_tail(&pch->pending, listh);
2844 return 0;
2845}
2846
2847/*
2848 * slim_control_ch: Channel control API.
2849 * @sb: client handle
2850 * @chanh: group or channel handle to be controlled
2851 * @chctrl: Control command (activate/suspend/remove)
2852 * @commit: flag to indicate whether the control should take effect right-away.
2853 * This API activates, removes or suspends a channel (or group of channels)
2854 * chanh indicates the channel or group handle (returned by the define_ch API).
 2855 * Reconfiguration may be time-consuming since it can change all other active
 2856 * channel allocations on the bus, the clock gear used by the slimbus, and the
 2857 * control space width used for messaging.
2858 * commit makes sure that multiple channels can be activated/deactivated before
2859 * reconfiguration is started.
2860 * -EXFULL is returned if there is no space in TDM to reserve the bandwidth.
2861 * -EISCONN/-ENOTCONN is returned if the channel is already connected or not
2862 * yet defined.
Sagar Dharia2e7026a2012-02-21 17:48:14 -07002863 * -EINVAL is returned if individual control of a grouped-channel is attempted.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002864 */
2865int slim_control_ch(struct slim_device *sb, u16 chanh,
2866 enum slim_ch_control chctrl, bool commit)
2867{
2868 struct slim_controller *ctrl = sb->ctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002869 int ret = 0;
2870 /* Get rid of the group flag in MSB if any */
Sagar Dharia29f35f02011-10-01 20:37:50 -06002871 u8 chan = SLIM_HDL_TO_CHIDX(chanh);
Sagar Dharia2e7026a2012-02-21 17:48:14 -07002872 struct slim_ich *slc = &ctrl->chans[chan];
2873 if (!(slc->nextgrp & SLIM_START_GRP))
2874 return -EINVAL;
2875
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002876 mutex_lock(&sb->sldev_reconf);
2877 mutex_lock(&ctrl->m_ctrl);
2878 do {
Kiran Gunda3dad0212012-10-09 13:30:13 +05302879 struct slim_pending_ch *pch;
2880 u8 add_mark_removal = true;
2881
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002882 slc = &ctrl->chans[chan];
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002883 dev_dbg(&ctrl->dev, "chan:%d,ctrl:%d,def:%d", chan, chctrl,
2884 slc->def);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002885 if (slc->state < SLIM_CH_DEFINED) {
2886 ret = -ENOTCONN;
2887 break;
2888 }
2889 if (chctrl == SLIM_CH_SUSPEND) {
2890 ret = add_pending_ch(&sb->mark_suspend, chan);
2891 if (ret)
2892 break;
2893 } else if (chctrl == SLIM_CH_ACTIVATE) {
Sagar Dharia4ec2ff42011-09-26 10:20:17 -06002894 if (slc->state > SLIM_CH_ACTIVE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002895 ret = -EISCONN;
2896 break;
2897 }
2898 ret = add_pending_ch(&sb->mark_define, chan);
2899 if (ret)
2900 break;
2901 } else {
2902 if (slc->state < SLIM_CH_ACTIVE) {
2903 ret = -ENOTCONN;
2904 break;
2905 }
Kiran Gunda3dad0212012-10-09 13:30:13 +05302906			/* If a channel-removal request arrives while the channel
 2907			 * is still pending in mark_define, drop it from the define
 2908			 * list instead of adding it to the removal list
 2909			 */
2910 if (!list_empty(&sb->mark_define)) {
2911 struct list_head *pos, *next;
2912 list_for_each_safe(pos, next,
2913 &sb->mark_define) {
2914 pch = list_entry(pos,
2915 struct slim_pending_ch,
2916 pending);
2917 if (pch->chan == slc->chan) {
2918 list_del(&pch->pending);
2919 kfree(pch);
2920 add_mark_removal = false;
2921 break;
2922 }
2923 }
2924 }
2925 if (add_mark_removal == true) {
2926 ret = add_pending_ch(&sb->mark_removal, chan);
2927 if (ret)
2928 break;
2929 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002930 }
2931
2932 if (!(slc->nextgrp & SLIM_END_GRP))
Sagar Dharia29f35f02011-10-01 20:37:50 -06002933 chan = SLIM_HDL_TO_CHIDX(slc->nextgrp);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002934 } while (!(slc->nextgrp & SLIM_END_GRP));
2935 mutex_unlock(&ctrl->m_ctrl);
2936 if (!ret && commit == true)
2937 ret = slim_reconfigure_now(sb);
2938 mutex_unlock(&sb->sldev_reconf);
2939 return ret;
2940}
2941EXPORT_SYMBOL_GPL(slim_control_ch);
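/*
 * Illustrative sketch (not part of this driver): activating and later
 * suspending the stereo group defined earlier, letting each call commit the
 * reconfiguration immediately:
 *
 *	err = slim_control_ch(sb, grph, SLIM_CH_ACTIVATE, true);
 *	...
 *	err = slim_control_ch(sb, grph, SLIM_CH_SUSPEND, true);
 *
 * Passing a handle from the middle of a group returns -EINVAL; the group
 * handle (or a standalone channel handle) must be used.
 */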
2942
2943/*
2944 * slim_reservemsg_bw: Request to reserve bandwidth for messages.
2945 * @sb: client handle
2946 * @bw_bps: message bandwidth in bits per second to be requested
2947 * @commit: indicates whether the reconfiguration needs to be acted upon.
2948 * This API call can be grouped with slim_control_ch API call with only one of
2949 * the APIs specifying the commit flag to avoid reconfiguration being called too
2950 * frequently. -EXFULL is returned if there is no space in TDM to reserve the
2951 * bandwidth. -EBUSY is returned if reconfiguration is requested, but a request
2952 * is already in progress.
2953 */
2954int slim_reservemsg_bw(struct slim_device *sb, u32 bw_bps, bool commit)
2955{
2956 struct slim_controller *ctrl = sb->ctrl;
2957 int ret = 0;
2958 int sl;
2959 mutex_lock(&sb->sldev_reconf);
2960 if ((bw_bps >> 3) >= ctrl->a_framer->rootfreq)
2961 sl = SLIM_SL_PER_SUPERFRAME;
2962 else {
2963 sl = (bw_bps * (SLIM_CL_PER_SUPERFRAME_DIV8/SLIM_CL_PER_SL/2) +
2964 (ctrl->a_framer->rootfreq/2 - 1)) /
2965 (ctrl->a_framer->rootfreq/2);
2966 }
2967 dev_dbg(&ctrl->dev, "request:bw:%d, slots:%d, current:%d\n", bw_bps, sl,
2968 sb->cur_msgsl);
2969 sb->pending_msgsl = sl;
2970 if (commit == true)
2971 ret = slim_reconfigure_now(sb);
2972 mutex_unlock(&sb->sldev_reconf);
2973 return ret;
2974}
2975EXPORT_SYMBOL_GPL(slim_reservemsg_bw);
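/*
 * Illustrative sketch (not part of this driver): a client that expects bursts
 * of messaging traffic could reserve message bandwidth up front and fold the
 * commit into its next channel operation, as the comment above suggests. The
 * 48000 bps figure is made up:
 *
 *	slim_reservemsg_bw(sb, 48000, false);
 *	slim_control_ch(sb, grph, SLIM_CH_ACTIVATE, true);
 *
 * The conversion above rounds the request up to whole slots per superframe.
 */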
2976
Sagar Dharia33f34442011-08-08 16:22:03 -06002977/*
 2978 * slim_ctrl_clk_pause: Called by the slimbus controller to request the clock
 2979 * to be paused, or to be woken up out of clock pause
 2980 * @ctrl: controller requesting the bus to be paused or woken up
 2981 * @wakeup: Wake this controller up from clock pause.
 2982 * @restart: Restart time value per the spec, used for clock pause. This value
 2983 * isn't used when the controller is to be woken up.
 2984 * This API executes the clock pause reconfiguration sequence if wakeup is
 2985 * false. If wakeup is true, the controller's wakeup callback is called.
 2986 * Once the bus is in clock pause, the Slimbus clock is idle and can be
 2987 * disabled by the controller later.
2988 */
2989int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup, u8 restart)
2990{
2991 int ret = 0;
2992 int i;
2993
2994 if (wakeup == false && restart > SLIM_CLK_UNSPECIFIED)
2995 return -EINVAL;
2996 mutex_lock(&ctrl->m_ctrl);
2997 if (wakeup) {
2998 if (ctrl->clk_state == SLIM_CLK_ACTIVE) {
2999 mutex_unlock(&ctrl->m_ctrl);
3000 return 0;
3001 }
3002 wait_for_completion(&ctrl->pause_comp);
3003 /*
3004 * Slimbus framework will call controller wakeup
3005 * Controller should make sure that it sets active framer
3006 * out of clock pause by doing appropriate setting
3007 */
3008 if (ctrl->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup)
3009 ret = ctrl->wakeup(ctrl);
3010 if (!ret)
3011 ctrl->clk_state = SLIM_CLK_ACTIVE;
3012 mutex_unlock(&ctrl->m_ctrl);
3013 return ret;
3014 } else {
3015 switch (ctrl->clk_state) {
3016 case SLIM_CLK_ENTERING_PAUSE:
3017 case SLIM_CLK_PAUSE_FAILED:
3018 /*
3019 * If controller is already trying to enter clock pause,
3020 * let it finish.
3021 * In case of error, retry
3022 * In both cases, previous clock pause has signalled
3023 * completion.
3024 */
3025 wait_for_completion(&ctrl->pause_comp);
3026 /* retry upon failure */
3027 if (ctrl->clk_state == SLIM_CLK_PAUSE_FAILED) {
3028 ctrl->clk_state = SLIM_CLK_ACTIVE;
3029 break;
3030 } else {
3031 mutex_unlock(&ctrl->m_ctrl);
3032 /*
3033 * Signal completion so that wakeup can wait on
3034 * it.
3035 */
3036 complete(&ctrl->pause_comp);
3037 return 0;
3038 }
3039 break;
3040 case SLIM_CLK_PAUSED:
3041 /* already paused */
3042 mutex_unlock(&ctrl->m_ctrl);
3043 return 0;
3044 case SLIM_CLK_ACTIVE:
3045 default:
3046 break;
3047 }
3048 }
 3049	/* Don't enter clock pause while a response to any message is pending */
 3050	for (i = 0; i < ctrl->last_tid; i++) {
 3051		if (ctrl->txnt[i]) {
 3052			mutex_unlock(&ctrl->m_ctrl);
 3053			return -EBUSY;
3055 }
3056 }
3057 ctrl->clk_state = SLIM_CLK_ENTERING_PAUSE;
3058 mutex_unlock(&ctrl->m_ctrl);
3059
3060 mutex_lock(&ctrl->sched.m_reconf);
3061 /* Data channels active */
3062 if (ctrl->sched.usedslots) {
3063 ret = -EBUSY;
3064 goto clk_pause_ret;
3065 }
3066
3067 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
Sagar Dharia45ee38a2011-08-03 17:01:31 -06003068 SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_BEGIN_RECONFIGURATION,
3069 0, SLIM_MSG_MT_CORE, NULL, NULL, 0, 3, NULL, 0, NULL);
Sagar Dharia33f34442011-08-08 16:22:03 -06003070 if (ret)
3071 goto clk_pause_ret;
3072
3073 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
Sagar Dharia45ee38a2011-08-03 17:01:31 -06003074 SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_NEXT_PAUSE_CLOCK, 0,
3075 SLIM_MSG_MT_CORE, NULL, &restart, 1, 4, NULL, 0, NULL);
3076 if (ret)
3077 goto clk_pause_ret;
3078
3079 ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
3080 SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_RECONFIGURE_NOW, 0,
3081 SLIM_MSG_MT_CORE, NULL, NULL, 0, 3, NULL, 0, NULL);
Sagar Dharia33f34442011-08-08 16:22:03 -06003082 if (ret)
3083 goto clk_pause_ret;
3084
3085clk_pause_ret:
3086 if (ret)
3087 ctrl->clk_state = SLIM_CLK_PAUSE_FAILED;
3088 else
3089 ctrl->clk_state = SLIM_CLK_PAUSED;
3090 complete(&ctrl->pause_comp);
3091 mutex_unlock(&ctrl->sched.m_reconf);
3092 return ret;
3093}
Sagar Dharia88821fb2012-07-24 23:04:32 -06003094EXPORT_SYMBOL_GPL(slim_ctrl_clk_pause);
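/*
 * Illustrative sketch (not part of this driver): a controller's runtime-PM
 * callbacks could wrap this API roughly as follows. This is hypothetical
 * controller-driver code; it assumes the controller stored itself as the
 * platform device's driver data:
 *
 *	static int ctrl_runtime_suspend(struct device *dev)
 *	{
 *		struct slim_controller *ctrl = dev_get_drvdata(dev);
 *		return slim_ctrl_clk_pause(ctrl, false, SLIM_CLK_UNSPECIFIED);
 *	}
 *
 *	static int ctrl_runtime_resume(struct device *dev)
 *	{
 *		return slim_ctrl_clk_pause(dev_get_drvdata(dev), true, 0);
 *	}
 *
 * A failed pause leaves clk_state at SLIM_CLK_PAUSE_FAILED so that the next
 * attempt retries the sequence.
 */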
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003095
3096MODULE_LICENSE("GPL v2");
3097MODULE_VERSION("0.1");
3098MODULE_DESCRIPTION("Slimbus module");
3099MODULE_ALIAS("platform:slimbus");