/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <mach/ocmem_priv.h>

/*
 * This code temporarily works around the default state of OCMEM regions
 * in Virtio. These registers will be read from DT in a subsequent patch
 * which initializes the regions to an appropriate default state.
 */

#define OCMEM_REGION_CTL_BASE 0xFDD0003C
#define OCMEM_REGION_CTL_SIZE 0xFD0
#define REGION_ENABLE 0x00003333
#define GRAPHICS_REGION_CTL (0x17F000)

struct ocmem_partition {
	const char *name;
	int id;
	unsigned long p_start;
	unsigned long p_size;
	unsigned long p_min;
	unsigned int p_tail;
};

struct ocmem_zone zones[OCMEM_CLIENT_MAX];

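/* Return the zone owned by client id, or NULL if the id is out of range */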
struct ocmem_zone *get_zone(unsigned id)
{
	if (id < OCMEM_GRAPHICS || id >= OCMEM_CLIENT_MAX)
		return NULL;
	else
		return &zones[id];
}

static struct ocmem_plat_data *ocmem_pdata;

#define CLIENT_NAME_MAX 10

/* Must be in sync with enum ocmem_client */
static const char *client_names[OCMEM_CLIENT_MAX] = {
	"graphics",
	"video",
	"camera",
	"hp_audio",
	"voice",
	"lp_audio",
	"sensors",
	"other_os",
};

struct ocmem_quota_table {
	const char *name;
	int id;
	unsigned long start;
	unsigned long size;
	unsigned long min;
	unsigned int tail;
};

/* This static table will go away with device tree support */
static struct ocmem_quota_table qt[OCMEM_CLIENT_MAX] = {
	/* name, id, start, size, min, tail */
	{ "graphics", OCMEM_GRAPHICS, 0x0, 0x100000, 0x80000, 0},
	{ "video", OCMEM_VIDEO, 0x100000, 0x80000, 0x55000, 1},
	{ "camera", OCMEM_CAMERA, 0x0, 0x0, 0x0, 0},
	{ "voice", OCMEM_VOICE, 0x0, 0x0, 0x0, 0},
	{ "hp_audio", OCMEM_HP_AUDIO, 0x0, 0x0, 0x0, 0},
	{ "lp_audio", OCMEM_LP_AUDIO, 0x80000, 0xA0000, 0xA0000, 0},
	{ "other_os", OCMEM_OTHER_OS, 0x120000, 0x20000, 0x20000, 0},
	{ "sensors", OCMEM_SENSORS, 0x140000, 0x40000, 0x40000, 0},
};

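/* Map a client name to its id; returns -EINVAL if the name is unknown */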
static inline int get_id(const char *name)
{
	int i = 0;

	for (i = 0; i < OCMEM_CLIENT_MAX; i++) {
		if (strncmp(name, client_names[i], CLIENT_NAME_MAX) == 0)
			return i;
	}
	return -EINVAL;
}

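/* Return nonzero if id refers to a valid OCMEM client */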
int check_id(int id)
{
	return (id < OCMEM_CLIENT_MAX && id >= OCMEM_GRAPHICS);
}

const char *get_name(int id)
{
	if (!check_id(id))
		return NULL;
	return client_names[id];
}

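/*
 * Convert between OCMEM physical addresses and offsets relative to the
 * OCMEM base. Out-of-range inputs yield 0.
 */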
inline unsigned long phys_to_offset(unsigned long addr)
{
	if (!ocmem_pdata)
		return 0;
	if (addr < ocmem_pdata->base ||
		addr > (ocmem_pdata->base + ocmem_pdata->size))
		return 0;
	return addr - ocmem_pdata->base;
}

inline unsigned long offset_to_phys(unsigned long offset)
{
	if (!ocmem_pdata)
		return 0;
	if (offset > ocmem_pdata->size)
		return 0;
	return offset + ocmem_pdata->base;
}

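/*
 * Build platform data from the static quota table above; used when no
 * device tree configuration is available.
 */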
static struct ocmem_plat_data *parse_static_config(struct platform_device *pdev)
{
	struct ocmem_plat_data *pdata = NULL;
	struct ocmem_partition *parts = NULL;
	struct device *dev = &pdev->dev;
	unsigned nr_parts = 0;
	int i;
	int j;

	pdata = devm_kzalloc(dev, sizeof(struct ocmem_plat_data),
			GFP_KERNEL);

	if (!pdata) {
		dev_err(dev, "Unable to allocate memory for platform data\n");
		return NULL;
	}

	for (i = 0; i < ARRAY_SIZE(qt); i++)
		if (qt[i].size != 0x0)
			nr_parts++;

	if (nr_parts == 0) {
		dev_err(dev, "No valid ocmem partitions\n");
		return NULL;
	} else
		dev_info(dev, "Total partitions = %d\n", nr_parts);

	parts = devm_kzalloc(dev, sizeof(struct ocmem_partition) * nr_parts,
			GFP_KERNEL);

	if (!parts) {
		dev_err(dev, "Unable to allocate memory for partition data\n");
		return NULL;
	}

	for (i = 0, j = 0; i < ARRAY_SIZE(qt); i++) {
		if (qt[i].size == 0x0) {
			dev_dbg(dev, "Skipping creation of pool for %s\n",
					qt[i].name);
			continue;
		}
		parts[j].id = qt[i].id;
		parts[j].p_size = qt[i].size;
		parts[j].p_start = qt[i].start;
		parts[j].p_min = qt[i].min;
		parts[j].p_tail = qt[i].tail;
		j++;
	}
	BUG_ON(j != nr_parts);
	pdata->nr_parts = nr_parts;
	pdata->parts = parts;
	pdata->base = OCMEM_PHYS_BASE;
	pdata->size = OCMEM_PHYS_SIZE;
	return pdata;
}

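/*
 * Parse the OCMEM partition child nodes from the device tree into an
 * array of partitions. Returns the number of valid partitions found.
 */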
int __devinit of_ocmem_parse_regions(struct device *dev,
			struct ocmem_partition **part)
{
	const char *name;
	struct device_node *child = NULL;
	int nr_parts = 0;
	int i = 0;
	int rc = 0;
	int id = -1;

	/* Compute total partitions */
	for_each_child_of_node(dev->of_node, child)
		nr_parts++;

	if (nr_parts == 0)
		return 0;

	*part = devm_kzalloc(dev, nr_parts * sizeof(**part),
			GFP_KERNEL);

	if (!*part)
		return -ENOMEM;

	for_each_child_of_node(dev->of_node, child) {
		const u32 *addr;
		u32 min;
		u64 size;
		u64 p_start;

		addr = of_get_address(child, 0, &size, NULL);

		if (!addr) {
			dev_err(dev, "Invalid addr for partition %d, ignored\n",
					i);
			continue;
		}

		rc = of_property_read_u32(child, "qcom,ocmem-part-min", &min);

		if (rc) {
			dev_err(dev, "No min for partition %d, ignored\n", i);
			continue;
		}

		rc = of_property_read_string(child, "qcom,ocmem-part-name",
					&name);

		if (rc) {
			dev_err(dev, "No name for partition %d, ignored\n", i);
			continue;
		}

		id = get_id(name);

		if (id < 0) {
			dev_err(dev, "Ignoring invalid partition %s\n", name);
			continue;
		}

		p_start = of_translate_address(child, addr);

		if (p_start == OF_BAD_ADDR) {
			dev_err(dev, "Invalid offset for partition %d\n", i);
			continue;
		}

		(*part)[i].p_start = p_start;
		(*part)[i].p_size = size;
		(*part)[i].id = id;
		(*part)[i].name = name;
		(*part)[i].p_min = min;
		(*part)[i].p_tail = of_property_read_bool(child, "tail");
		i++;
	}

	return i;
}

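/*
 * Build platform data from the device tree: claim and map the OCMEM
 * memory and control regions, and look up the IRQs and partition layout.
 */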
static struct ocmem_plat_data *parse_dt_config(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = pdev->dev.of_node;
	struct ocmem_plat_data *pdata = NULL;
	struct ocmem_partition *parts = NULL;
	struct resource *ocmem_irq;
	struct resource *dm_irq;
	struct resource *ocmem_mem;
	struct resource *reg_base;
	struct resource *br_base;
	struct resource *dm_base;
	struct resource *ocmem_mem_io;
	unsigned nr_parts = 0;
	unsigned nr_regions = 0;

	pdata = devm_kzalloc(dev, sizeof(struct ocmem_plat_data),
			GFP_KERNEL);

	if (!pdata) {
		dev_err(dev, "Unable to allocate memory for platform data\n");
		return NULL;
	}

	ocmem_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"ocmem_physical");
	if (!ocmem_mem) {
		dev_err(dev, "No OCMEM memory resource\n");
		return NULL;
	}

	ocmem_mem_io = request_mem_region(ocmem_mem->start,
				resource_size(ocmem_mem), pdev->name);

	if (!ocmem_mem_io) {
		dev_err(dev, "Could not claim OCMEM memory\n");
		return NULL;
	}

	pdata->base = ocmem_mem->start;
	pdata->size = resource_size(ocmem_mem);
	pdata->vbase = devm_ioremap_nocache(dev, ocmem_mem->start,
					resource_size(ocmem_mem));
	if (!pdata->vbase) {
		dev_err(dev, "Could not ioremap ocmem memory\n");
		return NULL;
	}

	reg_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"ocmem_ctrl_physical");
	if (!reg_base) {
		dev_err(dev, "No OCMEM register resource\n");
		return NULL;
	}

	pdata->reg_base = devm_ioremap_nocache(dev, reg_base->start,
					resource_size(reg_base));
	if (!pdata->reg_base) {
		dev_err(dev, "Could not ioremap register map\n");
		return NULL;
	}

	br_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"br_ctrl_physical");
	if (!br_base) {
		dev_err(dev, "No OCMEM BR resource\n");
		return NULL;
	}

	pdata->br_base = devm_ioremap_nocache(dev, br_base->start,
					resource_size(br_base));
	if (!pdata->br_base) {
		dev_err(dev, "Could not ioremap BR resource\n");
		return NULL;
	}

	dm_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"dm_ctrl_physical");
	if (!dm_base) {
		dev_err(dev, "No OCMEM DM resource\n");
		return NULL;
	}

	pdata->dm_base = devm_ioremap_nocache(dev, dm_base->start,
					resource_size(dm_base));
	if (!pdata->dm_base) {
		dev_err(dev, "Could not ioremap DM resource\n");
		return NULL;
	}

	ocmem_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
						"ocmem_irq");

	if (!ocmem_irq) {
		dev_err(dev, "No OCMEM IRQ resource\n");
		return NULL;
	}

	dm_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
						"dm_irq");

	if (!dm_irq) {
		dev_err(dev, "No DM IRQ resource\n");
		return NULL;
	}

	if (of_property_read_u32(node, "qcom,ocmem-num-regions",
					&nr_regions)) {
		dev_err(dev, "No OCMEM memory regions specified\n");
	}

	if (nr_regions == 0) {
		dev_err(dev, "No hardware memory regions found\n");
		return NULL;
	}

	/* Figure out the number of partitions */
	nr_parts = of_ocmem_parse_regions(dev, &parts);
	if (nr_parts <= 0) {
		dev_err(dev, "No valid OCMEM partitions found\n");
		goto pdata_error;
	} else
		dev_dbg(dev, "Found %d ocmem partitions\n", nr_parts);

	pdata->nr_parts = nr_parts;
	pdata->parts = parts;
	pdata->nr_regions = nr_regions;
	pdata->ocmem_irq = ocmem_irq->start;
	pdata->dm_irq = dm_irq->start;
	return pdata;
pdata_error:
	return NULL;
}

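/*
 * Create a gen_pool backed zone for every partition and hook up its
 * allocator: tail-growing for partitions marked p_tail, head-growing
 * otherwise.
 */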
static int ocmem_zone_init(struct platform_device *pdev)
{
	int ret = -1;
	int i = 0;
	unsigned active_zones = 0;

	struct ocmem_zone *zone = NULL;
	struct ocmem_zone_ops *z_ops = NULL;
	struct device *dev = &pdev->dev;
	unsigned long start;
	struct ocmem_plat_data *pdata = NULL;

	pdata = platform_get_drvdata(pdev);

	for (i = 0; i < pdata->nr_parts; i++) {
		struct ocmem_partition *part = &pdata->parts[i];
		zone = get_zone(part->id);

		dev_dbg(dev, "Partition %d, start %lx, size %lx for %s\n",
				i, part->p_start, part->p_size,
				client_names[part->id]);

		if (part->p_size > pdata->size) {
			dev_alert(dev, "Quota > ocmem_size for id:%d\n",
					part->id);
			continue;
		}

		zone->z_pool = gen_pool_create(PAGE_SHIFT, -1);

		if (!zone->z_pool) {
			dev_alert(dev, "Creating pool failed for id:%d\n",
					part->id);
			return -EBUSY;
		}

		start = part->p_start;
		ret = gen_pool_add(zone->z_pool, start,
				part->p_size, -1);

		if (ret < 0) {
			gen_pool_destroy(zone->z_pool);
			dev_alert(dev, "Unable to back pool %d with buffer:%lx\n",
					part->id, part->p_size);
			return -EBUSY;
		}

		/* Initialize zone allocators */
		z_ops = devm_kzalloc(dev, sizeof(struct ocmem_zone_ops),
				GFP_KERNEL);
		if (!z_ops) {
			pr_alert("ocmem: Unable to allocate memory for zone ops:%d\n",
					i);
			return -EBUSY;
		}

		/* Initialize zone parameters */
		zone->z_start = start;
		zone->z_head = zone->z_start;
		zone->z_end = start + part->p_size;
		zone->z_tail = zone->z_end;
		zone->z_free = part->p_size;
		zone->owner = part->id;
		zone->active_regions = 0;
		zone->max_regions = 0;
		INIT_LIST_HEAD(&zone->req_list);
		zone->z_ops = z_ops;
		if (part->p_tail) {
			z_ops->allocate = allocate_tail;
			z_ops->free = free_tail;
		} else {
			z_ops->allocate = allocate_head;
			z_ops->free = free_head;
		}
		active_zones++;

		if (active_zones == 1)
			pr_info("Physical OCMEM zone layout:\n");

		pr_info(" zone %s\t: 0x%08lx - 0x%08lx (%4ld KB)\n",
				client_names[part->id], zone->z_start,
				zone->z_end, part->p_size/SZ_1K);
	}

	dev_dbg(dev, "Total active zones = %d\n", active_zones);
	return 0;
}

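/*
 * Probe: build platform data (from DT or the static table), initialize
 * the zones, notifiers, scheduler and RDM, and apply the temporary
 * Virtio region workaround.
 */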
static int __devinit msm_ocmem_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	void *ocmem_region_vbase = NULL;

	if (!pdev->dev.of_node) {
		dev_info(dev, "Missing Configuration in Device Tree\n");
		ocmem_pdata = parse_static_config(pdev);
	} else {
		ocmem_pdata = parse_dt_config(pdev);
	}

	/* Check if we have some configuration data to start */
	if (!ocmem_pdata)
		return -ENODEV;

	/* Sanity checks */
	BUG_ON(!IS_ALIGNED(ocmem_pdata->size, PAGE_SIZE));
	BUG_ON(!IS_ALIGNED(ocmem_pdata->base, PAGE_SIZE));

	dev_info(dev, "OCMEM Virtual addr %p\n", ocmem_pdata->vbase);

	platform_set_drvdata(pdev, ocmem_pdata);

	if (ocmem_zone_init(pdev))
		return -EBUSY;

	if (ocmem_notifier_init())
		return -EBUSY;

	if (ocmem_sched_init())
		return -EBUSY;

	ocmem_region_vbase = devm_ioremap_nocache(dev, OCMEM_REGION_CTL_BASE,
						OCMEM_REGION_CTL_SIZE);
	if (!ocmem_region_vbase)
		return -EBUSY;
	/* Enable all three regions until we have support for power features */
	writel_relaxed(REGION_ENABLE, ocmem_region_vbase);
	writel_relaxed(REGION_ENABLE, ocmem_region_vbase + 4);
	writel_relaxed(REGION_ENABLE, ocmem_region_vbase + 8);
	/*
	 * Enable the OCMEM graphics MPU as a workaround in Virtio.
	 * This will be programmed by TZ after TZ support is integrated.
	 */
	writel_relaxed(GRAPHICS_REGION_CTL, ocmem_region_vbase + 0xFCC);

	if (ocmem_rdm_init(pdev))
		return -EBUSY;

	dev_dbg(dev, "initialized successfully\n");
	return 0;
}

static int __devexit msm_ocmem_remove(struct platform_device *pdev)
{
	return 0;
}

static struct of_device_id msm_ocmem_dt_match[] = {
	{	.compatible = "qcom,msm-ocmem",
	},
	{}
};

static struct platform_driver msm_ocmem_driver = {
	.probe = msm_ocmem_probe,
	.remove = __devexit_p(msm_ocmem_remove),
	.driver = {
		.name = "msm_ocmem",
		.owner = THIS_MODULE,
		.of_match_table = msm_ocmem_dt_match,
	},
};

static int __init ocmem_init(void)
{
	return platform_driver_register(&msm_ocmem_driver);
}
subsys_initcall(ocmem_init);

static void __exit ocmem_exit(void)
{
	platform_driver_unregister(&msm_ocmem_driver);
}
module_exit(ocmem_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Support for On-Chip Memory on MSM");