Naveen Ramaraje4cc4622012-10-29 17:28:57 -07001/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/slab.h>
16#include <linux/mm.h>
17#include <linux/rbtree.h>
18#include <linux/idr.h>
19#include <linux/genalloc.h>
20#include <linux/of.h>
21#include <linux/io.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/seq_file.h>
25#include <mach/ocmem_priv.h>
26
27enum request_states {
Naveen Ramaraj89738952013-02-13 15:24:57 -080028 R_FREE = 0x0, /* request is not allocated */
29 R_PENDING, /* request has a pending operation */
30 R_ALLOCATED, /* request has been allocated */
31 R_ENQUEUED, /* request has been enqueued for future retry */
32 R_MUST_GROW, /* request must grow as a part of pending operation */
33 R_MUST_SHRINK, /* request must shrink */
34 R_WF_SHRINK, /* shrink must be ack'ed by a client */
35 R_SHRUNK, /* request was shrunk */
36 R_MUST_MAP, /* request must be mapped before being used */
37 R_MUST_UNMAP, /* request must be unmapped when not being used */
38 R_MAPPED, /* request is mapped and actively used by client */
39 R_UNMAPPED, /* request is not mapped, so it's not in active use */
40 R_EVICTED, /* request is evicted and must be restored */
Naveen Ramarajb9da05782012-05-07 09:07:35 -070041};
42
43#define SET_STATE(x, val) (set_bit((val), &(x)->state))
44#define CLEAR_STATE(x, val) (clear_bit((val), &(x)->state))
45#define TEST_STATE(x, val) (test_bit((val), &(x)->state))
46
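/* Result codes returned by the internal scheduler (__sched_*) operations below */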
47enum op_res {
48 OP_COMPLETE = 0x0,
49 OP_RESCHED,
50 OP_PARTIAL,
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -070051 OP_EVICT,
Naveen Ramarajb9da05782012-05-07 09:07:35 -070052 OP_FAIL = ~0x0,
53};
54
55/* Represents various client priorities */
56/* Note: More than one client can share a priority level */
57enum client_prio {
58 MIN_PRIO = 0x0,
59 NO_PRIO = MIN_PRIO,
60 PRIO_SENSORS = 0x1,
Naveen Ramarajcc4ec152012-05-14 09:55:29 -070061 PRIO_OTHER_OS = 0x1,
Naveen Ramarajb9da05782012-05-07 09:07:35 -070062 PRIO_LP_AUDIO = 0x1,
63 PRIO_HP_AUDIO = 0x2,
64 PRIO_VOICE = 0x3,
65 PRIO_GFX_GROWTH = 0x4,
66 PRIO_VIDEO = 0x5,
67 PRIO_GFX = 0x6,
68 PRIO_OCMEM = 0x7,
69 MAX_OCMEM_PRIO = PRIO_OCMEM + 1,
70};
71
Naveen Ramaraj55ed8902012-09-26 13:18:06 -070072static void __iomem *ocmem_vaddr;
Naveen Ramarajb9da05782012-05-07 09:07:35 -070073static struct list_head sched_queue[MAX_OCMEM_PRIO];
74static struct mutex sched_queue_mutex;
75
 76/* The duration in msecs before a pending operation is scheduled.
 77 * This allows an idle window between use case boundaries where various
 78 * hardware state changes can occur. The value will be tweaked on actual
 79 * hardware.
 80 */
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -070081/* Delay in ms for switching to low power mode for OCMEM */
82#define SCHED_DELAY 5000
Naveen Ramarajb9da05782012-05-07 09:07:35 -070083
Naveen Ramarajcc4ec152012-05-14 09:55:29 -070084static struct list_head rdm_queue;
85static struct mutex rdm_mutex;
86static struct workqueue_struct *ocmem_rdm_wq;
87static struct workqueue_struct *ocmem_eviction_wq;
88
89static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX];
90
91struct ocmem_rdm_work {
92 int id;
93 struct ocmem_map_list *list;
94 struct ocmem_handle *handle;
95 int direction;
96 struct work_struct work;
97};
98
Naveen Ramarajb9da05782012-05-07 09:07:35 -070099/* OCMEM Operational modes */
100enum ocmem_client_modes {
101 OCMEM_PERFORMANCE = 1,
102 OCMEM_PASSIVE,
103 OCMEM_LOW_POWER,
104 OCMEM_MODE_MAX = OCMEM_LOW_POWER
105};
106
107/* OCMEM Addressing modes */
108enum ocmem_interconnects {
109 OCMEM_BLOCKED = 0,
110 OCMEM_PORT = 1,
111 OCMEM_OCMEMNOC = 2,
112 OCMEM_SYSNOC = 3,
113};
114
Naveen Ramaraje4cc4622012-10-29 17:28:57 -0700115enum ocmem_tz_client {
116 TZ_UNUSED = 0x0,
117 TZ_GRAPHICS,
118 TZ_VIDEO,
119 TZ_LP_AUDIO,
120 TZ_SENSORS,
121 TZ_OTHER_OS,
122 TZ_DEBUG,
123};
124
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700125/**
126 * Primary OCMEM Arbitration Table
127 **/
128struct ocmem_table {
129 int client_id;
130 int priority;
131 int mode;
132 int hw_interconnect;
Naveen Ramaraje4cc4622012-10-29 17:28:57 -0700133 int tz_id;
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700134} ocmem_client_table[OCMEM_CLIENT_MAX] = {
Naveen Ramaraje4cc4622012-10-29 17:28:57 -0700135 {OCMEM_GRAPHICS, PRIO_GFX, OCMEM_PERFORMANCE, OCMEM_PORT,
136 TZ_GRAPHICS},
137 {OCMEM_VIDEO, PRIO_VIDEO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC,
138 TZ_VIDEO},
139 {OCMEM_CAMERA, NO_PRIO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC,
140 TZ_UNUSED},
141 {OCMEM_HP_AUDIO, PRIO_HP_AUDIO, OCMEM_PASSIVE, OCMEM_BLOCKED,
142 TZ_UNUSED},
143 {OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED,
144 TZ_UNUSED},
145 {OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, OCMEM_SYSNOC,
146 TZ_LP_AUDIO},
147 {OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC,
148 TZ_SENSORS},
149 {OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC,
150 TZ_OTHER_OS},
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700151};
152
153static struct rb_root sched_tree;
154static struct mutex sched_mutex;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -0700155static struct mutex allocation_mutex;
Naveen Ramaraj89738952013-02-13 15:24:57 -0800156static struct mutex free_mutex;
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700157
 158/* A region represents a contiguous interval in OCMEM address space */
159struct ocmem_region {
160 /* Chain in Interval Tree */
161 struct rb_node region_rb;
162 /* Hash map of requests */
163 struct idr region_idr;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700164 /* Chain in eviction list */
165 struct list_head eviction_list;
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700166 unsigned long r_start;
167 unsigned long r_end;
168 unsigned long r_sz;
169 /* Highest priority of all requests served by this region */
170 int max_prio;
171};
172
 173/* Is OCMEM tightly coupled to the client? */
174static inline int is_tcm(int id)
175{
176 if (ocmem_client_table[id].hw_interconnect == OCMEM_PORT ||
177 ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC)
178 return 1;
179 else
180 return 0;
181}
182
Naveen Ramaraj716d59f2012-10-29 11:48:19 -0700183static inline int is_iface_access(int id)
184{
185 return ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC ? 1 : 0;
186}
187
188static inline int is_remapped_access(int id)
189{
190 return ocmem_client_table[id].hw_interconnect == OCMEM_SYSNOC ? 1 : 0;
191}
192
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700193static inline int is_blocked(int id)
194{
195 return ocmem_client_table[id].hw_interconnect == OCMEM_BLOCKED ? 1 : 0;
196}
197
Naveen Ramaraj3b6c5a92012-08-19 18:50:44 -0700198inline struct ocmem_buf *handle_to_buffer(struct ocmem_handle *handle)
199{
200 if (handle)
201 return &handle->buffer;
202 else
203 return NULL;
204}
205
206inline struct ocmem_handle *buffer_to_handle(struct ocmem_buf *buffer)
207{
208 if (buffer)
209 return container_of(buffer, struct ocmem_handle, buffer);
210 else
211 return NULL;
212}
213
214inline struct ocmem_req *handle_to_req(struct ocmem_handle *handle)
215{
216 if (handle)
217 return handle->req;
218 else
219 return NULL;
220}
221
222inline struct ocmem_handle *req_to_handle(struct ocmem_req *req)
223{
224 if (req && req->buffer)
225 return container_of(req->buffer, struct ocmem_handle, buffer);
226 else
227 return NULL;
228}
229
230/* Simple wrappers which will have debug features added later */
231inline int ocmem_read(void *at)
232{
233 return readl_relaxed(at);
234}
235
236inline int ocmem_write(unsigned long val, void *at)
237{
238 writel_relaxed(val, at);
239 return 0;
240}
241
Naveen Ramaraje43d9bb2012-08-20 14:41:13 -0700242inline int get_mode(int id)
243{
244 if (!check_id(id))
245 return MODE_NOT_SET;
246 else
247 return ocmem_client_table[id].mode == OCMEM_PERFORMANCE ?
248 WIDE_MODE : THIN_MODE;
249}
250
Naveen Ramaraje4cc4622012-10-29 17:28:57 -0700251inline int get_tz_id(int id)
252{
253 if (!check_id(id))
254 return TZ_UNUSED;
255 else
256 return ocmem_client_table[id].tz_id;
257}
258
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700259/* Returns the address that can be used by a device core to access OCMEM */
260static unsigned long device_address(int id, unsigned long addr)
261{
262 int hw_interconnect = ocmem_client_table[id].hw_interconnect;
263 unsigned long ret_addr = 0x0;
264
265 switch (hw_interconnect) {
266 case OCMEM_PORT:
Naveen Ramaraj716d59f2012-10-29 11:48:19 -0700267 case OCMEM_OCMEMNOC:
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700268 ret_addr = phys_to_offset(addr);
269 break;
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700270 case OCMEM_SYSNOC:
271 ret_addr = addr;
272 break;
273 case OCMEM_BLOCKED:
274 ret_addr = 0x0;
275 break;
276 }
277 return ret_addr;
278}
279
280/* Returns the address as viewed by the core */
281static unsigned long core_address(int id, unsigned long addr)
282{
283 int hw_interconnect = ocmem_client_table[id].hw_interconnect;
284 unsigned long ret_addr = 0x0;
285
286 switch (hw_interconnect) {
287 case OCMEM_PORT:
Naveen Ramaraj716d59f2012-10-29 11:48:19 -0700288 case OCMEM_OCMEMNOC:
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700289 ret_addr = offset_to_phys(addr);
290 break;
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700291 case OCMEM_SYSNOC:
292 ret_addr = addr;
293 break;
294 case OCMEM_BLOCKED:
295 ret_addr = 0x0;
296 break;
297 }
298 return ret_addr;
299}
300
Naveen Ramaraj6a92b262012-07-30 17:36:24 -0700301static inline struct ocmem_zone *zone_of(struct ocmem_req *req)
302{
303 int owner;
304 if (!req)
305 return NULL;
306 owner = req->owner;
307 return get_zone(owner);
308}
309
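/* Link a region into the scheduler interval tree (rb-tree ordered by start address) */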
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700310static int insert_region(struct ocmem_region *region)
311{
312
313 struct rb_root *root = &sched_tree;
314 struct rb_node **p = &root->rb_node;
315 struct rb_node *parent = NULL;
316 struct ocmem_region *tmp = NULL;
317 unsigned long addr = region->r_start;
318
319 while (*p) {
320 parent = *p;
321 tmp = rb_entry(parent, struct ocmem_region, region_rb);
322
323 if (tmp->r_end > addr) {
324 if (tmp->r_start <= addr)
325 break;
326 p = &(*p)->rb_left;
327 } else if (tmp->r_end <= addr)
328 p = &(*p)->rb_right;
329 }
330 rb_link_node(&region->region_rb, parent, p);
331 rb_insert_color(&region->region_rb, root);
332 return 0;
333}
334
335static int remove_region(struct ocmem_region *region)
336{
337 struct rb_root *root = &sched_tree;
338 rb_erase(&region->region_rb, root);
339 return 0;
340}
341
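/* Allocate a new request and initialize it in the R_FREE state */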
342static struct ocmem_req *ocmem_create_req(void)
343{
344 struct ocmem_req *p = NULL;
345
346 p = kzalloc(sizeof(struct ocmem_req), GFP_KERNEL);
347 if (!p)
348 return NULL;
349
350 INIT_LIST_HEAD(&p->zone_list);
351 INIT_LIST_HEAD(&p->sched_list);
352 init_rwsem(&p->rw_sem);
353 SET_STATE(p, R_FREE);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -0700354 pr_debug("request %p created\n", p);
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700355 return p;
356}
357
358static int ocmem_destroy_req(struct ocmem_req *req)
359{
360 kfree(req);
361 return 0;
362}
363
364static struct ocmem_region *create_region(void)
365{
366 struct ocmem_region *p = NULL;
367
368 p = kzalloc(sizeof(struct ocmem_region), GFP_KERNEL);
369 if (!p)
370 return NULL;
371 idr_init(&p->region_idr);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700372 INIT_LIST_HEAD(&p->eviction_list);
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700373 p->r_start = p->r_end = p->r_sz = 0x0;
374 p->max_prio = NO_PRIO;
375 return p;
376}
377
378static int destroy_region(struct ocmem_region *region)
379{
380 kfree(region);
381 return 0;
382}
383
384static int attach_req(struct ocmem_region *region, struct ocmem_req *req)
385{
386 int ret, id;
387
388 while (1) {
389 if (idr_pre_get(&region->region_idr, GFP_KERNEL) == 0)
390 return -ENOMEM;
391
392 ret = idr_get_new_above(&region->region_idr, req, 1, &id);
393
394 if (ret != -EAGAIN)
395 break;
396 }
397
398 if (!ret) {
399 req->req_id = id;
400 pr_debug("ocmem: request %p(id:%d) attached to region %p\n",
401 req, id, region);
402 return 0;
403 }
404 return -EINVAL;
405}
406
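/* Assign the request an id within the region's idr (legacy two-step idr API) */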
407static int detach_req(struct ocmem_region *region, struct ocmem_req *req)
408{
409 idr_remove(&region->region_idr, req->req_id);
410 return 0;
411}
412
413static int populate_region(struct ocmem_region *region, struct ocmem_req *req)
414{
415 region->r_start = req->req_start;
416 region->r_end = req->req_end;
417 region->r_sz = req->req_end - req->req_start + 1;
418 return 0;
419}
420
421static int region_req_count(int id, void *ptr, void *data)
422{
423 int *count = data;
424 *count = *count + 1;
425 return 0;
426}
427
428static int req_count(struct ocmem_region *region)
429{
430 int count = 0;
431 idr_for_each(&region->region_idr, region_req_count, &count);
432 return count;
433}
434
435static int compute_max_prio(int id, void *ptr, void *data)
436{
437 int *max = data;
438 struct ocmem_req *req = ptr;
439
440 if (req->prio > *max)
441 *max = req->prio;
442 return 0;
443}
444
445static int update_region_prio(struct ocmem_region *region)
446{
 447 int max_prio = NO_PRIO;
448 if (req_count(region) != 0) {
449 idr_for_each(&region->region_idr, compute_max_prio, &max_prio);
450 region->max_prio = max_prio;
451 } else {
452 region->max_prio = NO_PRIO;
453 }
454 pr_debug("ocmem: Updating prio of region %p as %d\n",
455 region, max_prio);
456
457 return 0;
458}
459
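/* Return the region containing addr, or failing that the lowest region that ends above addr */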
460static struct ocmem_region *find_region(unsigned long addr)
461{
462 struct ocmem_region *region = NULL;
463 struct rb_node *rb_node = NULL;
464
465 rb_node = sched_tree.rb_node;
466
467 while (rb_node) {
468 struct ocmem_region *tmp_region = NULL;
469 tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
470
471 if (tmp_region->r_end > addr) {
472 region = tmp_region;
473 if (tmp_region->r_start <= addr)
474 break;
475 rb_node = rb_node->rb_left;
476 } else {
477 rb_node = rb_node->rb_right;
478 }
479 }
480 return region;
481}
482
483static struct ocmem_region *find_region_intersection(unsigned long start,
484 unsigned long end)
485{
486
487 struct ocmem_region *region = NULL;
488 region = find_region(start);
489 if (region && end <= region->r_start)
490 region = NULL;
491 return region;
492}
493
494static struct ocmem_region *find_region_match(unsigned long start,
495 unsigned long end)
496{
497
498 struct ocmem_region *region = NULL;
499 region = find_region(start);
500 if (region && start == region->r_start && end == region->r_end)
501 return region;
502 return NULL;
503}
504
505static struct ocmem_req *find_req_match(int owner, struct ocmem_region *region)
506{
507 struct ocmem_req *req = NULL;
508
509 if (!region)
510 return NULL;
511
512 req = idr_find(&region->region_idr, owner);
513
514 return req;
515}
516
 517/* Must be called with req->rw_sem held */
518static inline int is_mapped(struct ocmem_req *req)
519{
520 return TEST_STATE(req, R_MAPPED);
521}
522
Naveen Ramaraj89738952013-02-13 15:24:57 -0800523static inline int is_pending_shrink(struct ocmem_req *req)
524{
525 return TEST_STATE(req, R_MUST_SHRINK) ||
526 TEST_STATE(req, R_WF_SHRINK);
527}
528
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700529/* Must be called with sched_mutex held */
530static int __sched_unmap(struct ocmem_req *req)
531{
532 struct ocmem_req *matched_req = NULL;
533 struct ocmem_region *matched_region = NULL;
534
Neeti Desaidad1d8e2013-01-09 19:42:06 -0800535 if (!TEST_STATE(req, R_MAPPED))
536 goto invalid_op_error;
537
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700538 matched_region = find_region_match(req->req_start, req->req_end);
539 matched_req = find_req_match(req->req_id, matched_region);
540
541 if (!matched_region || !matched_req) {
542 pr_err("Could not find backing region for req");
543 goto invalid_op_error;
544 }
545
546 if (matched_req != req) {
547 pr_err("Request does not match backing req");
548 goto invalid_op_error;
549 }
550
551 if (!is_mapped(req)) {
552 pr_err("Request is not currently mapped");
553 goto invalid_op_error;
554 }
555
556 /* Update the request state */
557 CLEAR_STATE(req, R_MAPPED);
558 SET_STATE(req, R_MUST_MAP);
559
560 return OP_COMPLETE;
561
562invalid_op_error:
563 return OP_FAIL;
564}
565
566/* Must be called with sched_mutex held */
567static int __sched_map(struct ocmem_req *req)
568{
569 struct ocmem_req *matched_req = NULL;
570 struct ocmem_region *matched_region = NULL;
571
572 matched_region = find_region_match(req->req_start, req->req_end);
573 matched_req = find_req_match(req->req_id, matched_region);
574
575 if (!matched_region || !matched_req) {
576 pr_err("Could not find backing region for req");
577 goto invalid_op_error;
578 }
579
580 if (matched_req != req) {
581 pr_err("Request does not match backing req");
582 goto invalid_op_error;
583 }
584
585 /* Update the request state */
586 CLEAR_STATE(req, R_MUST_MAP);
587 SET_STATE(req, R_MAPPED);
588
589 return OP_COMPLETE;
590
591invalid_op_error:
592 return OP_FAIL;
593}
594
595static int do_map(struct ocmem_req *req)
596{
597 int rc = 0;
598
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700599 down_write(&req->rw_sem);
600
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700601 mutex_lock(&sched_mutex);
602 rc = __sched_map(req);
603 mutex_unlock(&sched_mutex);
604
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700605 up_write(&req->rw_sem);
606
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700607 if (rc == OP_FAIL)
608 return -EINVAL;
609
610 return 0;
611}
612
613static int do_unmap(struct ocmem_req *req)
614{
615 int rc = 0;
616
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700617 down_write(&req->rw_sem);
618
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700619 mutex_lock(&sched_mutex);
620 rc = __sched_unmap(req);
621 mutex_unlock(&sched_mutex);
622
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700623 up_write(&req->rw_sem);
624
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700625 if (rc == OP_FAIL)
626 return -EINVAL;
627
628 return 0;
629}
630
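/* Enable the required OCMEM clocks, secure the interval via ocmem_lock() and mark the request as mapped */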
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700631static int process_map(struct ocmem_req *req, unsigned long start,
632 unsigned long end)
633{
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700634 int rc = 0;
635
636 rc = ocmem_enable_core_clock();
637
638 if (rc < 0)
639 goto core_clock_fail;
640
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700641
Naveen Ramaraj716d59f2012-10-29 11:48:19 -0700642 if (is_iface_access(req->owner)) {
643 rc = ocmem_enable_iface_clock();
Naveen Ramaraj5da54542012-08-21 13:26:17 -0700644
Naveen Ramaraj716d59f2012-10-29 11:48:19 -0700645 if (rc < 0)
646 goto iface_clock_fail;
647 }
Naveen Ramaraj5da54542012-08-21 13:26:17 -0700648
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -0700649 rc = ocmem_lock(req->owner, phys_to_offset(req->req_start), req->req_sz,
650 get_mode(req->owner));
651
652 if (rc < 0) {
653 pr_err("ocmem: Failed to secure request %p for %d\n", req,
654 req->owner);
655 goto lock_failed;
656 }
657
Naveen Ramaraj5da54542012-08-21 13:26:17 -0700658 rc = do_map(req);
659
Naveen Ramaraje43d9bb2012-08-20 14:41:13 -0700660 if (rc < 0) {
661 pr_err("ocmem: Failed to map request %p for %d\n",
662 req, req->owner);
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700663 goto process_map_fail;
664
Naveen Ramaraje43d9bb2012-08-20 14:41:13 -0700665 }
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -0700666 pr_debug("ocmem: Mapped request %p\n", req);
Naveen Ramaraje43d9bb2012-08-20 14:41:13 -0700667 return 0;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -0700668
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700669process_map_fail:
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -0700670 ocmem_unlock(req->owner, phys_to_offset(req->req_start), req->req_sz);
671lock_failed:
Naveen Ramaraj716d59f2012-10-29 11:48:19 -0700672 if (is_iface_access(req->owner))
673 ocmem_disable_iface_clock();
Naveen Ramaraj5da54542012-08-21 13:26:17 -0700674iface_clock_fail:
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700675 ocmem_disable_core_clock();
676core_clock_fail:
677 pr_err("ocmem: Failed to map ocmem request\n");
678 return rc;
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700679}
680
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700681static int process_unmap(struct ocmem_req *req, unsigned long start,
682 unsigned long end)
683{
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700684 int rc = 0;
685
686 rc = do_unmap(req);
687
688 if (rc < 0)
689 goto process_unmap_fail;
690
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -0700691 rc = ocmem_unlock(req->owner, phys_to_offset(req->req_start),
692 req->req_sz);
693
694 if (rc < 0) {
695 pr_err("ocmem: Failed to un-secure request %p for %d\n", req,
696 req->owner);
697 goto unlock_failed;
698 }
699
Naveen Ramaraj716d59f2012-10-29 11:48:19 -0700700 if (is_iface_access(req->owner))
701 ocmem_disable_iface_clock();
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700702 ocmem_disable_core_clock();
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -0700703 pr_debug("ocmem: Unmapped request %p\n", req);
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700704 return 0;
705
Naveen Ramaraje43d9bb2012-08-20 14:41:13 -0700706unlock_failed:
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700707process_unmap_fail:
708 pr_err("ocmem: Failed to unmap ocmem request\n");
709 return rc;
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700710}
711
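/* Grow an existing allocation in place towards req_max, retrying with
 * progressively smaller sizes when higher priority regions are in the way.
 * Must be called with sched_mutex held.
 */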
712static int __sched_grow(struct ocmem_req *req, bool can_block)
713{
714 unsigned long min = req->req_min;
715 unsigned long max = req->req_max;
716 unsigned long step = req->req_step;
717 int owner = req->owner;
718 unsigned long curr_sz = 0;
719 unsigned long growth_sz = 0;
720 unsigned long curr_start = 0;
721 enum client_prio prio = req->prio;
722 unsigned long alloc_addr = 0x0;
723 bool retry;
724 struct ocmem_region *spanned_r = NULL;
725 struct ocmem_region *overlap_r = NULL;
726
727 struct ocmem_req *matched_req = NULL;
728 struct ocmem_region *matched_region = NULL;
729
730 struct ocmem_zone *zone = get_zone(owner);
731 struct ocmem_region *region = NULL;
732
733 matched_region = find_region_match(req->req_start, req->req_end);
734 matched_req = find_req_match(req->req_id, matched_region);
735
736 if (!matched_region || !matched_req) {
737 pr_err("Could not find backing region for req");
738 goto invalid_op_error;
739 }
740
741 if (matched_req != req) {
742 pr_err("Request does not match backing req");
743 goto invalid_op_error;
744 }
745
746 curr_sz = matched_req->req_sz;
747 curr_start = matched_req->req_start;
748 growth_sz = matched_req->req_max - matched_req->req_sz;
749
750 pr_debug("Attempting to grow req %p from %lx to %lx\n",
751 req, matched_req->req_sz, matched_req->req_max);
752
753 retry = false;
754
755 pr_debug("ocmem: GROW: growth size %lx\n", growth_sz);
756
757retry_next_step:
758
759 spanned_r = NULL;
760 overlap_r = NULL;
761
762 spanned_r = find_region(zone->z_head);
763 overlap_r = find_region_intersection(zone->z_head,
764 zone->z_head + growth_sz);
765
766 if (overlap_r == NULL) {
767 /* no conflicting regions, schedule this region */
768 zone->z_ops->free(zone, curr_start, curr_sz);
769 alloc_addr = zone->z_ops->allocate(zone, curr_sz + growth_sz);
770
771 if (alloc_addr < 0) {
772 pr_err("ocmem: zone allocation operation failed\n");
773 goto internal_error;
774 }
775
776 curr_sz += growth_sz;
777 /* Detach the region from the interval tree */
778 /* This is to guarantee that any change in size
779 * causes the tree to be rebalanced if required */
780
781 detach_req(matched_region, req);
782 if (req_count(matched_region) == 0) {
783 remove_region(matched_region);
784 region = matched_region;
785 } else {
786 region = create_region();
787 if (!region) {
788 pr_err("ocmem: Unable to create region\n");
789 goto region_error;
790 }
791 }
792
793 /* update the request */
794 req->req_start = alloc_addr;
795 /* increment the size to reflect new length */
796 req->req_sz = curr_sz;
797 req->req_end = alloc_addr + req->req_sz - 1;
798
799 /* update request state */
800 CLEAR_STATE(req, R_MUST_GROW);
801 SET_STATE(req, R_ALLOCATED);
802 SET_STATE(req, R_MUST_MAP);
803 req->op = SCHED_MAP;
804
805 /* update the region with new req */
806 attach_req(region, req);
807 populate_region(region, req);
808 update_region_prio(region);
809
810 /* update the tree with new region */
811 if (insert_region(region)) {
812 pr_err("ocmem: Failed to insert the region\n");
813 goto region_error;
814 }
815
816 if (retry) {
817 SET_STATE(req, R_MUST_GROW);
818 SET_STATE(req, R_PENDING);
819 req->op = SCHED_GROW;
820 return OP_PARTIAL;
821 }
822 } else if (spanned_r != NULL && overlap_r != NULL) {
823 /* resolve conflicting regions based on priority */
824 if (overlap_r->max_prio < prio) {
825 /* Growth cannot be triggered unless a previous
826 * client of lower priority was evicted */
827 pr_err("ocmem: Invalid growth scheduled\n");
828 /* This is serious enough to fail */
829 BUG();
830 return OP_FAIL;
831 } else if (overlap_r->max_prio > prio) {
832 if (min == max) {
833 /* Cannot grow at this time, try later */
834 SET_STATE(req, R_PENDING);
835 SET_STATE(req, R_MUST_GROW);
836 return OP_RESCHED;
837 } else {
838 /* Try to grow in steps */
839 growth_sz -= step;
840 /* We are OOM at this point so need to retry */
841 if (growth_sz <= curr_sz) {
842 SET_STATE(req, R_PENDING);
843 SET_STATE(req, R_MUST_GROW);
844 return OP_RESCHED;
845 }
846 retry = true;
847 pr_debug("ocmem: Attempting with reduced size %lx\n",
848 growth_sz);
849 goto retry_next_step;
850 }
851 } else {
852 pr_err("ocmem: grow: New Region %p Existing %p\n",
853 matched_region, overlap_r);
854 pr_err("ocmem: Undetermined behavior\n");
855 /* This is serious enough to fail */
856 BUG();
857 }
858 } else if (spanned_r == NULL && overlap_r != NULL) {
859 goto err_not_supported;
860 }
861
862 return OP_COMPLETE;
863
864err_not_supported:
865 pr_err("ocmem: Scheduled unsupported operation\n");
866 return OP_FAIL;
867region_error:
868 zone->z_ops->free(zone, alloc_addr, curr_sz);
869 detach_req(region, req);
870 update_region_prio(region);
871 /* req is going to be destroyed by the caller anyways */
872internal_error:
873 destroy_region(region);
874invalid_op_error:
875 return OP_FAIL;
876}
877
878/* Must be called with sched_mutex held */
879static int __sched_free(struct ocmem_req *req)
880{
881 int owner = req->owner;
882 int ret = 0;
883
884 struct ocmem_req *matched_req = NULL;
885 struct ocmem_region *matched_region = NULL;
886
887 struct ocmem_zone *zone = get_zone(owner);
888
889 BUG_ON(!zone);
890
891 matched_region = find_region_match(req->req_start, req->req_end);
892 matched_req = find_req_match(req->req_id, matched_region);
893
894 if (!matched_region || !matched_req)
895 goto invalid_op_error;
896 if (matched_req != req)
897 goto invalid_op_error;
898
899 ret = zone->z_ops->free(zone,
900 matched_req->req_start, matched_req->req_sz);
901
902 if (ret < 0)
903 goto err_op_fail;
904
905 detach_req(matched_region, matched_req);
906 update_region_prio(matched_region);
907 if (req_count(matched_region) == 0) {
908 remove_region(matched_region);
909 destroy_region(matched_region);
910 }
911
912 /* Update the request */
913 req->req_start = 0x0;
914 req->req_sz = 0x0;
915 req->req_end = 0x0;
916 SET_STATE(req, R_FREE);
917 return OP_COMPLETE;
918invalid_op_error:
919 pr_err("ocmem: free: Failed to find matching region\n");
920err_op_fail:
921 pr_err("ocmem: free: Failed\n");
922 return OP_FAIL;
923}
924
925/* Must be called with sched_mutex held */
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700926static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz)
927{
928 int owner = req->owner;
929 int ret = 0;
930
931 struct ocmem_req *matched_req = NULL;
932 struct ocmem_region *matched_region = NULL;
933 struct ocmem_region *region = NULL;
934 unsigned long alloc_addr = 0x0;
935
936 struct ocmem_zone *zone = get_zone(owner);
937
938 BUG_ON(!zone);
939
940 /* The shrink should not be called for zero size */
941 BUG_ON(new_sz == 0);
942
943 matched_region = find_region_match(req->req_start, req->req_end);
944 matched_req = find_req_match(req->req_id, matched_region);
945
946 if (!matched_region || !matched_req)
947 goto invalid_op_error;
948 if (matched_req != req)
949 goto invalid_op_error;
950
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700951 ret = zone->z_ops->free(zone,
952 matched_req->req_start, matched_req->req_sz);
953
954 if (ret < 0) {
955 pr_err("Zone Allocation operation failed\n");
956 goto internal_error;
957 }
958
959 alloc_addr = zone->z_ops->allocate(zone, new_sz);
960
961 if (alloc_addr < 0) {
962 pr_err("Zone Allocation operation failed\n");
963 goto internal_error;
964 }
965
966 /* Detach the region from the interval tree */
967 /* This is to guarantee that the change in size
968 * causes the tree to be rebalanced if required */
969
970 detach_req(matched_region, req);
971 if (req_count(matched_region) == 0) {
972 remove_region(matched_region);
973 region = matched_region;
974 } else {
975 region = create_region();
976 if (!region) {
977 pr_err("ocmem: Unable to create region\n");
978 goto internal_error;
979 }
980 }
981 /* update the request */
982 req->req_start = alloc_addr;
983 req->req_sz = new_sz;
 984 req->req_end = alloc_addr + req->req_sz - 1;
985
986 if (req_count(region) == 0) {
987 remove_region(matched_region);
988 destroy_region(matched_region);
989 }
990
991 /* update request state */
992 SET_STATE(req, R_MUST_GROW);
993 SET_STATE(req, R_MUST_MAP);
994 req->op = SCHED_MAP;
995
996 /* attach the request to the region */
997 attach_req(region, req);
998 populate_region(region, req);
999 update_region_prio(region);
1000
1001 /* update the tree with new region */
1002 if (insert_region(region)) {
1003 pr_err("ocmem: Failed to insert the region\n");
1004 zone->z_ops->free(zone, alloc_addr, new_sz);
1005 detach_req(region, req);
1006 update_region_prio(region);
1007 /* req will be destroyed by the caller */
1008 goto region_error;
1009 }
1010 return OP_COMPLETE;
1011
1012region_error:
1013 destroy_region(region);
1014internal_error:
1015 pr_err("ocmem: shrink: Failed\n");
1016 return OP_FAIL;
1017invalid_op_error:
1018 pr_err("ocmem: shrink: Failed to find matching region\n");
1019 return OP_FAIL;
1020}
1021
1022/* Must be called with sched_mutex held */
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001023static int __sched_allocate(struct ocmem_req *req, bool can_block,
1024 bool can_wait)
1025{
1026 unsigned long min = req->req_min;
1027 unsigned long max = req->req_max;
1028 unsigned long step = req->req_step;
1029 int owner = req->owner;
1030 unsigned long sz = max;
1031 enum client_prio prio = req->prio;
1032 unsigned long alloc_addr = 0x0;
1033 bool retry;
1034
1035 struct ocmem_region *spanned_r = NULL;
1036 struct ocmem_region *overlap_r = NULL;
1037
1038 struct ocmem_zone *zone = get_zone(owner);
1039 struct ocmem_region *region = NULL;
1040
1041 BUG_ON(!zone);
1042
1043 if (min > (zone->z_end - zone->z_start)) {
1044 pr_err("ocmem: requested minimum size exceeds quota\n");
1045 goto invalid_op_error;
1046 }
1047
1048 if (max > (zone->z_end - zone->z_start)) {
1049 pr_err("ocmem: requested maximum size exceeds quota\n");
1050 goto invalid_op_error;
1051 }
1052
1053 if (min > zone->z_free) {
1054 pr_err("ocmem: out of memory for zone %d\n", owner);
1055 goto invalid_op_error;
1056 }
1057
1058 region = create_region();
1059
1060 if (!region) {
1061 pr_err("ocmem: Unable to create region\n");
1062 goto invalid_op_error;
1063 }
1064
1065 retry = false;
1066
Naveen Ramaraj89738952013-02-13 15:24:57 -08001067 pr_debug("ocmem: do_allocate: %s request %p size %lx\n",
1068 get_name(owner), req, sz);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001069
1070retry_next_step:
1071
1072 spanned_r = NULL;
1073 overlap_r = NULL;
1074
1075 spanned_r = find_region(zone->z_head);
1076 overlap_r = find_region_intersection(zone->z_head, zone->z_head + sz);
1077
1078 if (overlap_r == NULL) {
1079 /* no conflicting regions, schedule this region */
1080 alloc_addr = zone->z_ops->allocate(zone, sz);
1081
1082 if (alloc_addr < 0) {
1083 pr_err("Zone Allocation operation failed\n");
1084 goto internal_error;
1085 }
1086
1087 /* update the request */
1088 req->req_start = alloc_addr;
1089 req->req_end = alloc_addr + sz - 1;
1090 req->req_sz = sz;
1091 req->zone = zone;
1092
1093 /* update request state */
1094 CLEAR_STATE(req, R_FREE);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001095 CLEAR_STATE(req, R_PENDING);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001096 SET_STATE(req, R_ALLOCATED);
1097 SET_STATE(req, R_MUST_MAP);
1098 req->op = SCHED_NOP;
1099
1100 /* attach the request to the region */
1101 attach_req(region, req);
1102 populate_region(region, req);
1103 update_region_prio(region);
1104
1105 /* update the tree with new region */
1106 if (insert_region(region)) {
1107 pr_err("ocmem: Failed to insert the region\n");
1108 zone->z_ops->free(zone, alloc_addr, sz);
1109 detach_req(region, req);
1110 update_region_prio(region);
1111 /* req will be destroyed by the caller */
1112 goto internal_error;
1113 }
1114
1115 if (retry) {
1116 SET_STATE(req, R_MUST_GROW);
1117 SET_STATE(req, R_PENDING);
1118 req->op = SCHED_GROW;
1119 return OP_PARTIAL;
1120 }
1121 } else if (spanned_r != NULL && overlap_r != NULL) {
1122 /* resolve conflicting regions based on priority */
1123 if (overlap_r->max_prio < prio) {
1124 if (min == max) {
Naveen Ramaraj59907982012-10-16 17:40:38 -07001125 req->req_start = zone->z_head;
1126 req->req_end = zone->z_head + sz - 1;
1127 req->req_sz = 0x0;
1128 req->edata = NULL;
1129 goto trigger_eviction;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001130 } else {
 1131 /* Try to allocate at least 'min' immediately */
1132 sz -= step;
1133 if (sz < min)
1134 goto err_out_of_mem;
1135 retry = true;
1136 pr_debug("ocmem: Attempting with reduced size %lx\n",
1137 sz);
1138 goto retry_next_step;
1139 }
1140 } else if (overlap_r->max_prio > prio) {
1141 if (can_block == true) {
1142 SET_STATE(req, R_PENDING);
1143 SET_STATE(req, R_MUST_GROW);
1144 return OP_RESCHED;
1145 } else {
1146 if (min == max) {
1147 pr_err("Cannot allocate %lx synchronously\n",
1148 sz);
1149 goto err_out_of_mem;
1150 } else {
1151 sz -= step;
1152 if (sz < min)
1153 goto err_out_of_mem;
1154 retry = true;
1155 pr_debug("ocmem: Attempting reduced size %lx\n",
1156 sz);
1157 goto retry_next_step;
1158 }
1159 }
1160 } else {
1161 pr_err("ocmem: Undetermined behavior\n");
1162 pr_err("ocmem: New Region %p Existing %p\n", region,
1163 overlap_r);
1164 /* This is serious enough to fail */
1165 BUG();
1166 }
1167 } else if (spanned_r == NULL && overlap_r != NULL)
1168 goto err_not_supported;
1169
1170 return OP_COMPLETE;
1171
Naveen Ramaraj59907982012-10-16 17:40:38 -07001172trigger_eviction:
1173 pr_debug("Trigger eviction of region %p\n", overlap_r);
1174 destroy_region(region);
1175 return OP_EVICT;
1176
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001177err_not_supported:
1178 pr_err("ocmem: Scheduled unsupported operation\n");
1179 return OP_FAIL;
1180
1181err_out_of_mem:
1182 pr_err("ocmem: Out of memory during allocation\n");
1183internal_error:
1184 destroy_region(region);
1185invalid_op_error:
1186 return OP_FAIL;
1187}
1188
Naveen Ramaraj89738952013-02-13 15:24:57 -08001189/* Remove the request from eviction lists */
1190static void cancel_restore(struct ocmem_req *e_handle,
1191 struct ocmem_req *req)
1192{
1193 struct ocmem_eviction_data *edata = e_handle->edata;
1194
1195 if (!edata || !req)
1196 return;
1197
1198 if (list_empty(&edata->req_list))
1199 return;
1200
1201 list_del_init(&req->eviction_list);
1202 req->e_handle = NULL;
1203
1204 return;
1205}
1206
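/* Park a request on its owner's pending queue so the delayed scheduler can retry it */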
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001207static int sched_enqueue(struct ocmem_req *priv)
1208{
1209 struct ocmem_req *next = NULL;
1210 mutex_lock(&sched_queue_mutex);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001211 SET_STATE(priv, R_ENQUEUED);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001212 list_add_tail(&priv->sched_list, &sched_queue[priv->owner]);
1213 pr_debug("enqueued req %p\n", priv);
1214 list_for_each_entry(next, &sched_queue[priv->owner], sched_list) {
Naveen Ramaraj89738952013-02-13 15:24:57 -08001215 pr_debug("pending request %p for client %s\n", next,
1216 get_name(next->owner));
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001217 }
1218 mutex_unlock(&sched_queue_mutex);
1219 return 0;
1220}
1221
Naveen Ramaraj7caffea2012-10-24 17:22:58 -07001222static void sched_dequeue(struct ocmem_req *victim_req)
1223{
1224 struct ocmem_req *req = NULL;
1225 struct ocmem_req *next = NULL;
1226 int id;
1227
1228 if (!victim_req)
1229 return;
1230
1231 id = victim_req->owner;
1232
1233 mutex_lock(&sched_queue_mutex);
1234
1235 if (list_empty(&sched_queue[id]))
1236 goto dequeue_done;
1237
1238 list_for_each_entry_safe(req, next, &sched_queue[id], sched_list)
1239 {
1240 if (req == victim_req) {
Naveen Ramaraj89738952013-02-13 15:24:57 -08001241 pr_debug("ocmem: Cancelling pending request %p for %s\n",
1242 req, get_name(req->owner));
1243 list_del_init(&victim_req->sched_list);
1244 CLEAR_STATE(victim_req, R_ENQUEUED);
1245 break;
Naveen Ramaraj7caffea2012-10-24 17:22:58 -07001246 }
1247 }
Naveen Ramaraj7caffea2012-10-24 17:22:58 -07001248dequeue_done:
1249 mutex_unlock(&sched_queue_mutex);
1250 return;
1251}
1252
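/* Fetch the next pending request from the scheduler queues */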
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001253static struct ocmem_req *ocmem_fetch_req(void)
1254{
1255 int i;
1256 struct ocmem_req *req = NULL;
1257 struct ocmem_req *next = NULL;
1258
1259 mutex_lock(&sched_queue_mutex);
1260 for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
1261 if (list_empty(&sched_queue[i]))
1262 continue;
1263 list_for_each_entry_safe(req, next, &sched_queue[i], sched_list)
1264 {
1265 if (req) {
1266 pr_debug("ocmem: Fetched pending request %p\n",
1267 req);
1268 list_del(&req->sched_list);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001269 CLEAR_STATE(req, R_ENQUEUED);
1270 break;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001271 }
1272 }
1273 }
1274 mutex_unlock(&sched_queue_mutex);
1275 return req;
1276}
1277
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001278
1279unsigned long process_quota(int id)
1280{
1281 struct ocmem_zone *zone = NULL;
1282
1283 if (is_blocked(id))
1284 return 0;
1285
1286 zone = get_zone(id);
1287
1288 if (zone && zone->z_pool)
1289 return zone->z_end - zone->z_start;
1290 else
1291 return 0;
1292}
1293
1294static int do_grow(struct ocmem_req *req)
1295{
1296 struct ocmem_buf *buffer = NULL;
1297 bool can_block = true;
1298 int rc = 0;
1299
1300 down_write(&req->rw_sem);
1301 buffer = req->buffer;
1302
1303 /* Take the scheduler mutex */
1304 mutex_lock(&sched_mutex);
1305 rc = __sched_grow(req, can_block);
1306 mutex_unlock(&sched_mutex);
1307
1308 if (rc == OP_FAIL)
1309 goto err_op_fail;
1310
1311 if (rc == OP_RESCHED) {
1312 pr_debug("ocmem: Enqueue this allocation");
1313 sched_enqueue(req);
1314 }
1315
1316 else if (rc == OP_COMPLETE || rc == OP_PARTIAL) {
1317 buffer->addr = device_address(req->owner, req->req_start);
1318 buffer->len = req->req_sz;
1319 }
1320
1321 up_write(&req->rw_sem);
1322 return 0;
1323err_op_fail:
1324 up_write(&req->rw_sem);
1325 return -EINVAL;
1326}
1327
1328static int process_grow(struct ocmem_req *req)
1329{
1330 int rc = 0;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07001331 unsigned long offset = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001332
1333 /* Attempt to grow the region */
1334 rc = do_grow(req);
1335
1336 if (rc < 0)
1337 return -EINVAL;
1338
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001339 rc = process_map(req, req->req_start, req->req_end);
1340 if (rc < 0)
1341 return -EINVAL;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001342
Naveen Ramaraj99b07562012-05-28 20:57:09 -07001343 offset = phys_to_offset(req->req_start);
1344
1345 rc = ocmem_memory_on(req->owner, offset, req->req_sz);
1346
1347 if (rc < 0) {
1348 pr_err("Failed to switch ON memory macros\n");
1349 goto power_ctl_error;
1350 }
1351
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001352 /* Notify the client about the buffer growth */
1353 rc = dispatch_notification(req->owner, OCMEM_ALLOC_GROW, req->buffer);
1354 if (rc < 0) {
1355 pr_err("No notifier callback to cater for req %p event: %d\n",
1356 req, OCMEM_ALLOC_GROW);
1357 BUG();
1358 }
1359 return 0;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07001360power_ctl_error:
1361 return -EINVAL;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001362}
1363
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001364static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
1365{
1366
1367 int rc = 0;
1368 struct ocmem_buf *buffer = NULL;
1369
1370 down_write(&req->rw_sem);
1371 buffer = req->buffer;
1372
1373 /* Take the scheduler mutex */
1374 mutex_lock(&sched_mutex);
1375 rc = __sched_shrink(req, shrink_size);
1376 mutex_unlock(&sched_mutex);
1377
1378 if (rc == OP_FAIL)
1379 goto err_op_fail;
1380
1381 else if (rc == OP_COMPLETE) {
1382 buffer->addr = device_address(req->owner, req->req_start);
1383 buffer->len = req->req_sz;
1384 }
1385
1386 up_write(&req->rw_sem);
1387 return 0;
1388err_op_fail:
1389 up_write(&req->rw_sem);
1390 return -EINVAL;
1391}
1392
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001393static void ocmem_sched_wk_func(struct work_struct *work);
1394DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func);
1395
1396static int ocmem_schedule_pending(void)
1397{
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001398
1399 bool need_sched = false;
1400 int i = 0;
1401
1402 for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
1403 if (!list_empty(&sched_queue[i])) {
1404 need_sched = true;
1405 break;
1406 }
1407 }
1408
1409 if (need_sched == true) {
1410 cancel_delayed_work(&ocmem_sched_thread);
1411 schedule_delayed_work(&ocmem_sched_thread,
1412 msecs_to_jiffies(SCHED_DELAY));
1413 pr_debug("ocmem: Scheduled delayed work\n");
1414 }
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001415 return 0;
1416}
1417
1418static int do_free(struct ocmem_req *req)
1419{
1420 int rc = 0;
1421 struct ocmem_buf *buffer = req->buffer;
1422
1423 down_write(&req->rw_sem);
1424
1425 if (is_mapped(req)) {
1426 pr_err("ocmem: Buffer needs to be unmapped before free\n");
1427 goto err_free_fail;
1428 }
1429
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001430 pr_debug("ocmem: do_free: client %s req %p\n", get_name(req->owner),
1431 req);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001432 /* Grab the sched mutex */
1433 mutex_lock(&sched_mutex);
1434 rc = __sched_free(req);
1435 mutex_unlock(&sched_mutex);
1436
1437 switch (rc) {
1438
1439 case OP_COMPLETE:
1440 buffer->addr = 0x0;
1441 buffer->len = 0x0;
1442 break;
1443 case OP_FAIL:
1444 default:
1445 goto err_free_fail;
1446 break;
1447 }
1448
1449 up_write(&req->rw_sem);
1450 return 0;
1451err_free_fail:
1452 up_write(&req->rw_sem);
1453 pr_err("ocmem: freeing req %p failed\n", req);
1454 return -EINVAL;
1455}
1456
1457int process_free(int id, struct ocmem_handle *handle)
1458{
1459 struct ocmem_req *req = NULL;
1460 struct ocmem_buf *buffer = NULL;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07001461 unsigned long offset = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001462 int rc = 0;
1463
Naveen Ramaraj89738952013-02-13 15:24:57 -08001464 mutex_lock(&free_mutex);
1465
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001466 if (is_blocked(id)) {
1467 pr_err("Client %d cannot request free\n", id);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001468 goto free_invalid;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001469 }
1470
1471 req = handle_to_req(handle);
1472 buffer = handle_to_buffer(handle);
1473
Naveen Ramaraj89738952013-02-13 15:24:57 -08001474 if (!req) {
1475 pr_err("ocmem: No valid request to free\n");
1476 goto free_invalid;
1477 }
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001478
1479 if (req->req_start != core_address(id, buffer->addr)) {
1480 pr_err("Invalid buffer handle passed for free\n");
Naveen Ramaraj89738952013-02-13 15:24:57 -08001481 goto free_invalid;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001482 }
1483
Naveen Ramaraj89738952013-02-13 15:24:57 -08001484 if (req->edata != NULL) {
1485 pr_err("ocmem: Request %p(%2lx) yet to process eviction %p\n",
1486 req, req->state, req->edata);
1487 goto free_invalid;
1488 }
1489
1490 if (is_pending_shrink(req)) {
1491 pr_err("ocmem: Request %p(%2lx) yet to process eviction\n",
1492 req, req->state);
1493 goto pending_shrink;
1494 }
1495
1496 /* Remove the request from any restore lists */
1497 if (req->e_handle)
1498 cancel_restore(req->e_handle, req);
1499
 1500 /* Remove the request from any pending operations */
1501 if (TEST_STATE(req, R_ENQUEUED)) {
1502 mutex_lock(&sched_mutex);
1503 sched_dequeue(req);
1504 mutex_unlock(&sched_mutex);
1505 }
Naveen Ramaraj7caffea2012-10-24 17:22:58 -07001506
Neeti Desaidad1d8e2013-01-09 19:42:06 -08001507 if (!TEST_STATE(req, R_FREE)) {
Naveen Ramaraj89738952013-02-13 15:24:57 -08001508
Neeti Desaidad1d8e2013-01-09 19:42:06 -08001509 if (TEST_STATE(req, R_MAPPED)) {
1510 /* unmap the interval and clear the memory */
1511 rc = process_unmap(req, req->req_start, req->req_end);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001512
Neeti Desaidad1d8e2013-01-09 19:42:06 -08001513 if (rc < 0) {
1514 pr_err("ocmem: Failed to unmap %p\n", req);
1515 goto free_fail;
1516 }
1517
1518 rc = do_free(req);
1519 if (rc < 0) {
1520 pr_err("ocmem: Failed to free %p\n", req);
1521 goto free_fail;
1522 }
1523 } else
Naveen Ramaraj89738952013-02-13 15:24:57 -08001524 pr_debug("request %p was already shrunk to 0\n", req);
Neeti Desaidad1d8e2013-01-09 19:42:06 -08001525 }
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001526
Naveen Ramaraj807d7582013-02-06 14:41:12 -08001527 /* Turn off the memory */
Naveen Ramaraj99b07562012-05-28 20:57:09 -07001528 if (req->req_sz != 0) {
1529
1530 offset = phys_to_offset(req->req_start);
1531
1532 rc = ocmem_memory_off(req->owner, offset, req->req_sz);
1533
1534 if (rc < 0) {
1535 pr_err("Failed to switch OFF memory macros\n");
Naveen Ramaraj89738952013-02-13 15:24:57 -08001536 goto free_fail;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07001537 }
1538
1539 }
1540
Naveen Ramaraj807d7582013-02-06 14:41:12 -08001541 if (!TEST_STATE(req, R_FREE)) {
1542 /* free the allocation */
1543 rc = do_free(req);
1544 if (rc < 0)
1545 return -EINVAL;
1546 }
1547
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001548 inc_ocmem_stat(zone_of(req), NR_FREES);
1549
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001550 ocmem_destroy_req(req);
1551 handle->req = NULL;
1552
1553 ocmem_schedule_pending();
Naveen Ramaraj89738952013-02-13 15:24:57 -08001554 mutex_unlock(&free_mutex);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001555 return 0;
Naveen Ramaraj89738952013-02-13 15:24:57 -08001556free_fail:
1557free_invalid:
1558 mutex_unlock(&free_mutex);
1559 return -EINVAL;
1560pending_shrink:
1561 mutex_unlock(&free_mutex);
1562 return -EAGAIN;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001563}
1564
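/* Worker that runs the queued RDM transfer and notifies the client of the result */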
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001565static void ocmem_rdm_worker(struct work_struct *work)
1566{
1567 int offset = 0;
1568 int rc = 0;
1569 int event;
1570 struct ocmem_rdm_work *work_data = container_of(work,
1571 struct ocmem_rdm_work, work);
1572 int id = work_data->id;
1573 struct ocmem_map_list *list = work_data->list;
1574 int direction = work_data->direction;
1575 struct ocmem_handle *handle = work_data->handle;
1576 struct ocmem_req *req = handle_to_req(handle);
1577 struct ocmem_buf *buffer = handle_to_buffer(handle);
1578
1579 down_write(&req->rw_sem);
1580 offset = phys_to_offset(req->req_start);
1581 rc = ocmem_rdm_transfer(id, list, offset, direction);
1582 if (work_data->direction == TO_OCMEM)
1583 event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
1584 else
1585 event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001586 up_write(&req->rw_sem);
1587 kfree(work_data);
1588 dispatch_notification(id, event, buffer);
1589}
1590
1591int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle,
1592 struct ocmem_map_list *list, int direction)
1593{
1594 struct ocmem_rdm_work *work_data = NULL;
1595
1596 down_write(&req->rw_sem);
1597
1598 work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC);
1599 if (!work_data)
1600 BUG();
1601
1602 work_data->handle = handle;
1603 work_data->list = list;
1604 work_data->id = req->owner;
1605 work_data->direction = direction;
1606 INIT_WORK(&work_data->work, ocmem_rdm_worker);
1607 up_write(&req->rw_sem);
1608 queue_work(ocmem_rdm_wq, &work_data->work);
1609 return 0;
1610}
1611
Neeti Desaidad1d8e2013-01-09 19:42:06 -08001612int process_drop(int id, struct ocmem_handle *handle,
1613 struct ocmem_map_list *list)
1614{
1615 struct ocmem_req *req = NULL;
1616 struct ocmem_buf *buffer = NULL;
1617 int rc = 0;
1618
1619 if (is_blocked(id)) {
1620 pr_err("Client %d cannot request drop\n", id);
1621 return -EINVAL;
1622 }
1623
1624 if (is_tcm(id))
1625 pr_err("Client %d cannot request drop\n", id);
1626
1627 req = handle_to_req(handle);
1628 buffer = handle_to_buffer(handle);
1629
1630 if (!req)
1631 return -EINVAL;
1632
1633 if (req->req_start != core_address(id, buffer->addr)) {
1634 pr_err("Invalid buffer handle passed for drop\n");
1635 return -EINVAL;
1636 }
1637
1638 if (TEST_STATE(req, R_MAPPED)) {
1639 rc = process_unmap(req, req->req_start, req->req_end);
1640 if (rc < 0)
1641 return -EINVAL;
1642 } else
1643 return -EINVAL;
1644
1645 return 0;
1646}
1647
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001648int process_xfer_out(int id, struct ocmem_handle *handle,
1649 struct ocmem_map_list *list)
1650{
1651 struct ocmem_req *req = NULL;
1652 int rc = 0;
1653
1654 req = handle_to_req(handle);
1655
1656 if (!req)
1657 return -EINVAL;
1658
1659 if (!is_mapped(req)) {
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001660 pr_err("Buffer is not currently mapped\n");
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001661 goto transfer_out_error;
1662 }
1663
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001664 rc = queue_transfer(req, handle, list, TO_DDR);
1665
1666 if (rc < 0) {
1667 pr_err("Failed to queue rdm transfer to DDR\n");
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001668 inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001669 goto transfer_out_error;
1670 }
1671
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001672 inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_DDR);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001673 return 0;
1674
1675transfer_out_error:
1676 return -EINVAL;
1677}
1678
1679int process_xfer_in(int id, struct ocmem_handle *handle,
1680 struct ocmem_map_list *list)
1681{
1682 struct ocmem_req *req = NULL;
1683 int rc = 0;
1684
1685 req = handle_to_req(handle);
1686
1687 if (!req)
1688 return -EINVAL;
1689
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001690
1691 if (!is_mapped(req)) {
1692 pr_err("Buffer is not already mapped for transfer\n");
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001693 goto transfer_in_error;
1694 }
1695
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001696 inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_OCMEM);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001697 rc = queue_transfer(req, handle, list, TO_OCMEM);
1698
1699 if (rc < 0) {
1700 pr_err("Failed to queue rdm transfer to OCMEM\n");
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001701 inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001702 goto transfer_in_error;
1703 }
1704
1705 return 0;
1706transfer_in_error:
1707 return -EINVAL;
1708}
1709
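/* Handle a client's response to a shrink notification: release or resize the
 * allocation and signal the evictor once all victims have shrunk.
 */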
1710int process_shrink(int id, struct ocmem_handle *handle, unsigned long size)
1711{
1712 struct ocmem_req *req = NULL;
1713 struct ocmem_buf *buffer = NULL;
1714 struct ocmem_eviction_data *edata = NULL;
1715 int rc = 0;
1716
1717 if (is_blocked(id)) {
1718 pr_err("Client %d cannot request free\n", id);
1719 return -EINVAL;
1720 }
1721
1722 req = handle_to_req(handle);
1723 buffer = handle_to_buffer(handle);
1724
1725 if (!req)
1726 return -EINVAL;
1727
Naveen Ramaraj89738952013-02-13 15:24:57 -08001728 mutex_lock(&free_mutex);
1729
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001730 if (req->req_start != core_address(id, buffer->addr)) {
1731 pr_err("Invalid buffer handle passed for shrink\n");
Naveen Ramaraj89738952013-02-13 15:24:57 -08001732 goto shrink_fail;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001733 }
1734
Naveen Ramaraj89738952013-02-13 15:24:57 -08001735 if (!req->e_handle) {
1736 pr_err("Unable to find evicting request\n");
1737 goto shrink_fail;
1738 }
1739
1740 edata = req->e_handle->edata;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001741
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001742 if (!edata) {
1743 pr_err("Unable to find eviction data\n");
Naveen Ramaraj89738952013-02-13 15:24:57 -08001744 goto shrink_fail;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001745 }
1746
1747 pr_debug("Found edata %p in request %p\n", edata, req);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001748
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001749 inc_ocmem_stat(zone_of(req), NR_SHRINKS);
1750
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001751 if (size == 0) {
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001752 pr_debug("req %p being shrunk to zero\n", req);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001753 if (is_mapped(req)) {
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001754 rc = process_unmap(req, req->req_start, req->req_end);
1755 if (rc < 0)
Naveen Ramaraj89738952013-02-13 15:24:57 -08001756 goto shrink_fail;
1757 }
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001758 rc = do_free(req);
1759 if (rc < 0)
Naveen Ramaraj89738952013-02-13 15:24:57 -08001760 goto shrink_fail;
1761 SET_STATE(req, R_FREE);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001762 } else {
1763 rc = do_shrink(req, size);
1764 if (rc < 0)
Naveen Ramaraj89738952013-02-13 15:24:57 -08001765 goto shrink_fail;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001766 }
1767
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001768 CLEAR_STATE(req, R_ALLOCATED);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001769 CLEAR_STATE(req, R_WF_SHRINK);
1770 SET_STATE(req, R_SHRUNK);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001771
1772 if (atomic_dec_and_test(&edata->pending)) {
1773 pr_debug("ocmem: All conflicting allocations were shrunk\n");
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001774 complete(&edata->completion);
1775 }
1776
Naveen Ramaraj89738952013-02-13 15:24:57 -08001777 mutex_unlock(&free_mutex);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001778 return 0;
Naveen Ramaraj89738952013-02-13 15:24:57 -08001779shrink_fail:
1780 pr_err("ocmem: Failed to shrink request %p of %s\n",
1781 req, get_name(req->owner));
1782 mutex_unlock(&free_mutex);
1783 return -EINVAL;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001784}
1785
1786int process_xfer(int id, struct ocmem_handle *handle,
1787 struct ocmem_map_list *list, int direction)
1788{
1789 int rc = 0;
1790
1791 if (is_tcm(id)) {
1792 WARN(1, "Mapping operation is invalid for client\n");
1793 return -EINVAL;
1794 }
1795
1796 if (direction == TO_DDR)
1797 rc = process_xfer_out(id, handle, list);
1798 else if (direction == TO_OCMEM)
1799 rc = process_xfer_in(id, handle, list);
1800 return rc;
1801}
1802
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001803static struct ocmem_eviction_data *init_eviction(int id)
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001804{
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001805 struct ocmem_eviction_data *edata = NULL;
1806 int prio = ocmem_client_table[id].priority;
1807
1808 edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC);
1809
1810 if (!edata) {
1811 pr_err("ocmem: Could not allocate eviction data\n");
1812 return NULL;
1813 }
1814
1815 INIT_LIST_HEAD(&edata->victim_list);
1816 INIT_LIST_HEAD(&edata->req_list);
1817 edata->prio = prio;
1818 atomic_set(&edata->pending, 0);
1819 return edata;
1820}
1821
1822static void free_eviction(struct ocmem_eviction_data *edata)
1823{
1824
1825 if (!edata)
1826 return;
1827
1828 if (!list_empty(&edata->req_list))
1829 pr_err("ocmem: Eviction data %p not empty\n", edata);
1830
1831 kfree(edata);
1832 edata = NULL;
1833}
1834
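/* Check whether two requests overlap in OCMEM address space */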
1835static bool is_overlapping(struct ocmem_req *new, struct ocmem_req *old)
1836{
1837
1838 if (!new || !old)
1839 return false;
1840
1841 pr_debug("check overlap [%lx -- %lx] on [%lx -- %lx]\n",
1842 new->req_start, new->req_end,
1843 old->req_start, old->req_end);
1844
1845 if ((new->req_start < old->req_start &&
1846 new->req_end >= old->req_start) ||
1847 (new->req_start >= old->req_start &&
1848 new->req_start <= old->req_end &&
1849 new->req_end >= old->req_end)) {
1850 pr_debug("request %p overlaps with existing req %p\n",
1851 new, old);
1852 return true;
1853 }
1854 return false;
1855}
1856
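/*
 * __evict_common() - collect the victims for an eviction pass.
 *
 * Walks every region in sched_tree and, for regions whose highest
 * priority is below the evicting client's, scans the lower priorities
 * for resident requests.  A passive eviction (process_evict) takes every
 * such request; a targeted eviction (run_evict) only takes requests that
 * overlap the incoming request.  Victims are marked R_MUST_SHRINK,
 * linked back to the triggering request through e_handle and queued on
 * edata->req_list.  Returns the number of victims found.
 */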
1857static int __evict_common(struct ocmem_eviction_data *edata,
1858 struct ocmem_req *req)
1859{
1860 struct rb_node *rb_node = NULL;
1861 struct ocmem_req *e_req = NULL;
1862 bool needs_eviction = false;
1863 int j = 0;
1864
1865 for (rb_node = rb_first(&sched_tree); rb_node;
1866 rb_node = rb_next(rb_node)) {
1867
1868 struct ocmem_region *tmp_region = NULL;
1869
1870 tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
1871
1872 if (tmp_region->max_prio < edata->prio) {
1873 for (j = edata->prio - 1; j > NO_PRIO; j--) {
1874 needs_eviction = false;
1875 e_req = find_req_match(j, tmp_region);
1876 if (!e_req)
1877 continue;
1878 if (edata->passive) {
1879 needs_eviction = true;
1880 } else {
1881 needs_eviction = is_overlapping(req,
1882 e_req);
1883 }
1884
1885 if (needs_eviction) {
1886 pr_debug("adding %p in region %p to eviction list\n",
1887 e_req, tmp_region);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001888 SET_STATE(e_req, R_MUST_SHRINK);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001889 list_add_tail(
1890 &e_req->eviction_list,
1891 &edata->req_list);
1892 atomic_inc(&edata->pending);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001893 e_req->e_handle = req;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001894 }
1895 }
1896 } else {
1897 pr_debug("Skipped region %p\n", tmp_region);
1898 }
1899 }
1900
1901 pr_debug("%d requests will be evicted\n", atomic_read(&edata->pending));
1902
Naveen Ramaraj89738952013-02-13 15:24:57 -08001903 return atomic_read(&edata->pending);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001904}
1905
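/*
 * trigger_eviction() - ask each victim's owner to give up its allocation.
 *
 * Every request gathered by __evict_common() is sent an OCMEM_ALLOC_SHRINK
 * notification whose buffer length is zero ("shrink to nothing") and is
 * moved from R_MUST_SHRINK to R_WF_SHRINK while the driver waits for the
 * client to acknowledge.  The acknowledgement path (the shrink handling
 * earlier in this file) clears R_WF_SHRINK, drops edata->pending and
 * completes edata->completion once the last victim has shrunk.
 *
 * A minimal sketch of a client-side notifier, assuming the driver's public
 * ocmem_shrink(client_id, buffer, new_len) entry point and the
 * OCMEM_GRAPHICS client id (both belong to the client API, not this file);
 * the client is expected to quiesce its OCMEM traffic before acknowledging:
 *
 *	static int gfx_ocmem_notify(struct notifier_block *nb,
 *				    unsigned long action, void *data)
 *	{
 *		struct ocmem_buf *buf = data;
 *
 *		if (action == OCMEM_ALLOC_SHRINK)
 *			ocmem_shrink(OCMEM_GRAPHICS, buf, buf->len);
 *		return NOTIFY_OK;
 *	}
 */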
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001906static void trigger_eviction(struct ocmem_eviction_data *edata)
1907{
1908 struct ocmem_req *req = NULL;
1909 struct ocmem_req *next = NULL;
1910 struct ocmem_buf buffer;
1911
1912 if (!edata)
1913 return;
1914
1915 BUG_ON(atomic_read(&edata->pending) == 0);
1916
1917 init_completion(&edata->completion);
1918
1919 list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
1920 {
1921 if (req) {
1922 pr_debug("ocmem: Evicting request %p\n", req);
1923 buffer.addr = req->req_start;
1924 buffer.len = 0x0;
Naveen Ramaraj89738952013-02-13 15:24:57 -08001925 CLEAR_STATE(req, R_MUST_SHRINK);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001926 dispatch_notification(req->owner, OCMEM_ALLOC_SHRINK,
1927 &buffer);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001928 SET_STATE(req, R_WF_SHRINK);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001929 }
1930 }
1931 return;
1932}
1933
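/*
 * process_evict() - passive eviction on behalf of client @id.
 *
 * Every lower-priority allocation is asked to shrink to zero, regardless
 * of whether it conflicts with a specific range.  The eviction data is
 * parked in evictions[id] so a later process_restore() for the same
 * client can bring the victims back.  Blocks until all victims have
 * acknowledged the shrink.
 */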
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001934int process_evict(int id)
1935{
1936 struct ocmem_eviction_data *edata = NULL;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001937 int rc = 0;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001938
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001939 edata = init_eviction(id);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001940
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001941 if (!edata)
1942 return -EINVAL;
1943
1944 edata->passive = true;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001945
1946 mutex_lock(&sched_mutex);
1947
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001948 rc = __evict_common(edata, NULL);
1949
Naveen Ramaraj89738952013-02-13 15:24:57 -08001950 if (rc == 0)
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001951 goto skip_eviction;
1952
1953 trigger_eviction(edata);
1954
1955 evictions[id] = edata;
1956
1957 mutex_unlock(&sched_mutex);
1958
1959 wait_for_completion(&edata->completion);
1960
1961 return 0;
1962
1963skip_eviction:
1964 evictions[id] = NULL;
1965 mutex_unlock(&sched_mutex);
1966 return 0;
1967}
1968
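/*
 * run_evict() - targeted eviction for a single allocation request.
 *
 * Unlike process_evict(), only requests that overlap @req are victimised,
 * and the eviction data is attached to the request itself (req->edata) so
 * that sched_restore() can requeue the victims once @req has been placed.
 * Blocks until every victim has acknowledged its shrink.
 */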
1969static int run_evict(struct ocmem_req *req)
1970{
1971 struct ocmem_eviction_data *edata = NULL;
1972 int rc = 0;
1973
1974 if (!req)
1975 return -EINVAL;
1976
1977 edata = init_eviction(req->owner);
1978
1979 if (!edata)
1980 return -EINVAL;
1981
1982 edata->passive = false;
1983
Naveen Ramaraj89738952013-02-13 15:24:57 -08001984 mutex_lock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001985 rc = __evict_common(edata, req);
1986
Naveen Ramaraj89738952013-02-13 15:24:57 -08001987 if (rc == 0)
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001988 goto skip_eviction;
1989
1990 trigger_eviction(edata);
1991
1992 pr_debug("ocmem: attaching eviction %p to request %p\n", edata, req);
1993 req->edata = edata;
1994
Naveen Ramaraj89738952013-02-13 15:24:57 -08001995 mutex_unlock(&free_mutex);
1996
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001997 wait_for_completion(&edata->completion);
1998
1999 pr_debug("ocmem: eviction completed successfully\n");
2000 return 0;
2001
2002skip_eviction:
2003 pr_err("ocmem: Unable to run eviction\n");
2004 free_eviction(edata);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002005 req->edata = NULL;
2006 mutex_unlock(&free_mutex);
2007 return 0;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002008}
2009
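/*
 * __restore_common() - requeue every victim of a completed eviction.
 *
 * Each evicted request is detached from the eviction list, reset to a
 * plain SCHED_ALLOCATE operation and handed back to sched_enqueue().
 * The actual re-allocation happens later from the scheduler workqueue,
 * so the victims regain OCMEM space asynchronously.
 */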
2010static int __restore_common(struct ocmem_eviction_data *edata)
2011{
2012
2013 struct ocmem_req *req = NULL;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002014
2015 if (!edata)
2016 return -EINVAL;
2017
Naveen Ramaraj89738952013-02-13 15:24:57 -08002018 while (!list_empty(&edata->req_list)) {
2019 req = list_first_entry(&edata->req_list, struct ocmem_req,
2020 eviction_list);
2021 list_del_init(&req->eviction_list);
2022 pr_debug("ocmem: restoring evicted request %p\n",
2023 req);
2024 req->edata = NULL;
2025 req->e_handle = NULL;
2026 req->op = SCHED_ALLOCATE;
2027 inc_ocmem_stat(zone_of(req), NR_RESTORES);
2028 sched_enqueue(req);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07002029 }
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002030
2031 pr_debug("Scheduled all evicted regions\n");
2032
2033 return 0;
2034}
2035
2036static int sched_restore(struct ocmem_req *req)
2037{
2038
2039 int rc = 0;
2040
2041 if (!req)
2042 return -EINVAL;
2043
2044 if (!req->edata)
2045 return 0;
2046
Naveen Ramaraj89738952013-02-13 15:24:57 -08002047 mutex_lock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002048 rc = __restore_common(req->edata);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002049 mutex_unlock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002050
2051 if (rc < 0)
2052 return -EINVAL;
2053
2054 free_eviction(req->edata);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002055 req->edata = NULL;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002056 return 0;
2057}
2058
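/*
 * process_restore() - undo a passive eviction started by process_evict().
 *
 * Requeues the victims recorded in evictions[@id], frees the eviction
 * data and kicks the scheduler so the pending re-allocations are picked
 * up promptly.
 */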
2059int process_restore(int id)
2060{
2061 struct ocmem_eviction_data *edata = evictions[id];
2062 int rc = 0;
2063
2064 if (!edata)
2065 return -EINVAL;
2066
Naveen Ramaraj89738952013-02-13 15:24:57 -08002067 mutex_lock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002068 rc = __restore_common(edata);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002069 mutex_unlock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002070
2071 if (rc < 0) {
2072 pr_err("Failed to restore evicted requests\n");
2073 return -EINVAL;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07002074 }
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002075
2076 free_eviction(edata);
2077 evictions[id] = NULL;
2078 ocmem_schedule_pending();
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07002079 return 0;
2080}
2081
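/*
 * do_allocate() - core allocation path for a request.
 *
 * __sched_allocate() runs under sched_mutex and reports how the request
 * fared: OP_EVICT means lower-priority allocations stand in the way, so
 * a targeted eviction is run, the shrunk victims are queued for
 * asynchronous restore and the allocation is retried from scratch;
 * OP_RESCHED and OP_PARTIAL leave the request queued for asynchronous
 * growth; OP_COMPLETE fills in the client-visible buffer.  The
 * allocation_mutex serialises the whole evict-and-retry sequence so
 * competing allocations cannot interleave with the eviction.
 */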
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002082static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait)
2083{
2084 int rc = 0;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002085 int ret = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002086 struct ocmem_buf *buffer = req->buffer;
2087
2088 down_write(&req->rw_sem);
2089
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002090 mutex_lock(&allocation_mutex);
2091retry_allocate:
2092
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002093 /* Take the scheduler mutex */
2094 mutex_lock(&sched_mutex);
2095 rc = __sched_allocate(req, can_block, can_wait);
2096 mutex_unlock(&sched_mutex);
2097
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002098 if (rc == OP_EVICT) {
2099
2100 ret = run_evict(req);
2101
2102 if (ret == 0) {
2103 rc = sched_restore(req);
2104 if (rc < 0) {
2105 pr_err("Failed to restore for req %p\n", req);
2106 goto err_allocate_fail;
2107 }
2108 req->edata = NULL;
2109
2110 pr_debug("Attempting to re-allocate req %p\n", req);
2111 req->req_start = 0x0;
2112 req->req_end = 0x0;
2113 goto retry_allocate;
2114 } else {
2115 goto err_allocate_fail;
2116 }
2117 }
2118
2119 mutex_unlock(&allocation_mutex);
2120
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002121 if (rc == OP_FAIL) {
2122 inc_ocmem_stat(zone_of(req), NR_ALLOCATION_FAILS);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002123 goto err_allocate_fail;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002124 }
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002125
2126 if (rc == OP_RESCHED) {
2127 buffer->addr = 0x0;
2128 buffer->len = 0x0;
2129 pr_debug("ocmem: Enqueuing req %p\n", req);
2130 sched_enqueue(req);
2131 } else if (rc == OP_PARTIAL) {
2132 buffer->addr = device_address(req->owner, req->req_start);
2133 buffer->len = req->req_sz;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002134 inc_ocmem_stat(zone_of(req), NR_RANGE_ALLOCATIONS);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002135 pr_debug("ocmem: Enqueuing req %p\n", req);
2136 sched_enqueue(req);
2137 } else if (rc == OP_COMPLETE) {
2138 buffer->addr = device_address(req->owner, req->req_start);
2139 buffer->len = req->req_sz;
2140 }
2141
2142 up_write(&req->rw_sem);
2143 return 0;
2144err_allocate_fail:
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002145 mutex_unlock(&allocation_mutex);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002146 up_write(&req->rw_sem);
2147 return -EINVAL;
2148}
2149
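/*
 * do_dump() - copy a client's OCMEM contents out to DDR at @addr.
 *
 * Bails out if no virtual mapping of OCMEM is available.  The region is
 * opened up for the dump with ocmem_enable_dump() before the memcpy()
 * and re-secured with ocmem_disable_dump() afterwards.  The caller must
 * hold sched_mutex and the destination must have room for req->req_sz
 * bytes.
 */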
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002150static int do_dump(struct ocmem_req *req, unsigned long addr)
2151{
2152
2153 void __iomem *req_vaddr;
2154 unsigned long offset = 0x0;
Naveen Ramaraje4cc4622012-10-29 17:28:57 -07002155 int rc = 0;
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002156
2157 down_write(&req->rw_sem);
2158
2159 offset = phys_to_offset(req->req_start);
2160
2161 req_vaddr = ocmem_vaddr + offset;
2162
2163 if (!ocmem_vaddr)
2164 goto err_do_dump;
2165
Naveen Ramaraje4cc4622012-10-29 17:28:57 -07002166 rc = ocmem_enable_dump(req->owner, offset, req->req_sz);
2167
2168 if (rc < 0)
2169 goto err_do_dump;
2170
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002171 pr_debug("Dumping client %s buffer ocmem p: %lx (v: %p) to ddr %lx\n",
2172 get_name(req->owner), req->req_start,
2173 req_vaddr, addr);
2174
2175 memcpy((void *)addr, req_vaddr, req->req_sz);
2176
Naveen Ramaraje4cc4622012-10-29 17:28:57 -07002177 rc = ocmem_disable_dump(req->owner, offset, req->req_sz);
2178
2179 if (rc < 0)
2180 pr_err("Failed to secure request %p of %s after dump\n",
2181 req, get_name(req->owner));
2182
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002183 up_write(&req->rw_sem);
2184 return 0;
2185err_do_dump:
2186 up_write(&req->rw_sem);
2187 return -EINVAL;
2188}
2189
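/*
 * process_allocate() - synchronous allocation entry point for client @id.
 *
 * Builds an ocmem_req from the handle and the min/max/step growth
 * parameters, runs do_allocate() and, if any memory was actually placed
 * (req_sz != 0), maps the region and switches on the memory macros that
 * back it.  On failure the partially constructed request is unwound in
 * reverse order.
 */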
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002190int process_allocate(int id, struct ocmem_handle *handle,
2191 unsigned long min, unsigned long max,
2192 unsigned long step, bool can_block, bool can_wait)
2193{
2194
2195 struct ocmem_req *req = NULL;
2196 struct ocmem_buf *buffer = NULL;
2197 int rc = 0;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002198 unsigned long offset = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002199
2200 /* sanity checks */
2201 if (is_blocked(id)) {
2202 pr_err("Client %d cannot request allocation\n", id);
2203 return -EINVAL;
2204 }
2205
2206 if (handle->req != NULL) {
2207 pr_err("Invalid handle passed in\n");
2208 return -EINVAL;
2209 }
2210
2211 buffer = handle_to_buffer(handle);
2212 BUG_ON(buffer == NULL);
2213
2214 /* prepare a request structure to represent this transaction */
2215 req = ocmem_create_req();
2216 if (!req)
2217 return -ENOMEM;
2218
2219 req->owner = id;
2220 req->req_min = min;
2221 req->req_max = max;
2222 req->req_step = step;
2223 req->prio = ocmem_client_table[id].priority;
2224 req->op = SCHED_ALLOCATE;
2225 req->buffer = buffer;
2226
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002227 inc_ocmem_stat(zone_of(req), NR_REQUESTS);
2228
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002229 rc = do_allocate(req, can_block, can_wait);
2230
2231 if (rc < 0)
2232 goto do_allocate_error;
2233
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002234 inc_ocmem_stat(zone_of(req), NR_SYNC_ALLOCATIONS);
2235
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002236 handle->req = req;
2237
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002238 if (req->req_sz != 0) {
2239
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002240 rc = process_map(req, req->req_start, req->req_end);
2241 if (rc < 0)
2242 goto map_error;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002243
2244 offset = phys_to_offset(req->req_start);
2245
2246 rc = ocmem_memory_on(req->owner, offset, req->req_sz);
2247
2248 if (rc < 0) {
2249 pr_err("Failed to switch ON memory macros\n");
2250 goto power_ctl_error;
2251 }
2252 }
2253
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002254 return 0;
2255
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002256power_ctl_error:
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002257 process_unmap(req, req->req_start, req->req_end);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002258map_error:
2259 handle->req = NULL;
2260 do_free(req);
2261do_allocate_error:
2262 ocmem_destroy_req(req);
2263 return -EINVAL;
2264}
2265
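/*
 * process_delayed_allocate() - retry a queued allocation from the
 * scheduler workqueue.
 *
 * Mirrors process_allocate() for deferred requests.  A request may
 * legitimately remain R_PENDING if space is still unavailable; otherwise
 * the region is mapped, powered on and the owner is told about the
 * growth via an OCMEM_ALLOC_GROW notification.  A client without a
 * notifier callback at this point is a fatal driver bug (BUG()).
 */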
2266int process_delayed_allocate(struct ocmem_req *req)
2267{
2268
2269 struct ocmem_handle *handle = NULL;
2270 int rc = 0;
2271 int id = req->owner;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002272 unsigned long offset = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002273
2274 handle = req_to_handle(req);
2275 BUG_ON(handle == NULL);
2276
2277 rc = do_allocate(req, true, false);
2278
2279 if (rc < 0)
2280 goto do_allocate_error;
2281
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002282 /* The request can still be pending */
2283 if (TEST_STATE(req, R_PENDING))
2284 return 0;
2285
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002286 inc_ocmem_stat(zone_of(req), NR_ASYNC_ALLOCATIONS);
2287
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002288 if (req->req_sz != 0) {
2289
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002290 rc = process_map(req, req->req_start, req->req_end);
2291 if (rc < 0)
2292 goto map_error;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002293
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002294
2295 offset = phys_to_offset(req->req_start);
2296
2297 rc = ocmem_memory_on(req->owner, offset, req->req_sz);
2298
2299 if (rc < 0) {
2300 pr_err("Failed to switch ON memory macros\n");
2301 goto power_ctl_error;
2302 }
2303 }
2304
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002305 /* Notify the client about the buffer growth */
2306 rc = dispatch_notification(id, OCMEM_ALLOC_GROW, req->buffer);
2307 if (rc < 0) {
2308 pr_err("No notifier callback to cater for req %p event: %d\n",
2309 req, OCMEM_ALLOC_GROW);
2310 BUG();
2311 }
2312 return 0;
2313
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002314power_ctl_error:
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002315 process_unmap(req, req->req_start, req->req_end);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002316map_error:
2317 handle->req = NULL;
2318 do_free(req);
2319do_allocate_error:
2320 ocmem_destroy_req(req);
2321 return -EINVAL;
2322}
2323
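/*
 * process_dump() - validate and account a dump request before do_dump().
 *
 * The buffer must currently be mapped; the copy itself runs under
 * sched_mutex so the request cannot be shrunk or freed mid-dump.
 */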
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002324int process_dump(int id, struct ocmem_handle *handle, unsigned long addr)
2325{
2326 struct ocmem_req *req = NULL;
2327 int rc = 0;
2328
2329 req = handle_to_req(handle);
2330
2331 if (!req)
2332 return -EINVAL;
2333
2334 if (!is_mapped(req)) {
2335 pr_err("Buffer is not mapped\n");
2336 goto dump_error;
2337 }
2338
2339 inc_ocmem_stat(zone_of(req), NR_DUMP_REQUESTS);
2340
2341 mutex_lock(&sched_mutex);
2342 rc = do_dump(req, addr);
2343 mutex_unlock(&sched_mutex);
2344
2345 if (rc < 0)
2346 goto dump_error;
2347
2348 inc_ocmem_stat(zone_of(req), NR_DUMP_COMPLETE);
2349 return 0;
2350
2351dump_error:
2352 pr_err("Dumping OCMEM memory failed for client %d\n", id);
2353 return -EINVAL;
2354}
2355
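/*
 * ocmem_sched_wk_func() - scheduler workqueue handler.
 *
 * Fetches one pending request (ocmem_fetch_req()) and services it:
 * queued growth requests go through process_grow(), deferred allocations
 * through process_delayed_allocate().  A pending request with SCHED_NOP
 * indicates scheduler state corruption and triggers BUG_ON.
 */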
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002356static void ocmem_sched_wk_func(struct work_struct *work)
2357{
2358
2359 struct ocmem_buf *buffer = NULL;
2360 struct ocmem_handle *handle = NULL;
2361 struct ocmem_req *req = ocmem_fetch_req();
2362
2363 if (!req) {
2364 pr_debug("No Pending Requests found\n");
2365 return;
2366 }
2367
2368 pr_debug("ocmem: sched_wk pending req %p\n", req);
2369 handle = req_to_handle(req);
2370 buffer = handle_to_buffer(handle);
2371 BUG_ON(req->op == SCHED_NOP);
2372
2373 switch (req->op) {
2374 case SCHED_GROW:
2375 process_grow(req);
2376 break;
2377 case SCHED_ALLOCATE:
2378 process_delayed_allocate(req);
2379 break;
2380 default:
2381 pr_err("ocmem: Unknown operation encountered\n");
2382 break;
2383 }
2384 return;
2385}
2386
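/*
 * Debugfs support: the "allocations" node walks sched_tree under
 * sched_mutex and prints one line per resident request with its owner,
 * address range, size and state bitmask.  The file is registered below
 * in ocmem_sched_init() under the driver's debugfs directory.
 */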
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002387static int ocmem_allocations_show(struct seq_file *f, void *dummy)
2388{
2389 struct rb_node *rb_node = NULL;
2390 struct ocmem_req *req = NULL;
2391 unsigned j;
2392 mutex_lock(&sched_mutex);
2393 for (rb_node = rb_first(&sched_tree); rb_node;
2394 rb_node = rb_next(rb_node)) {
2395 struct ocmem_region *tmp_region = NULL;
2396 tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
2397 for (j = MAX_OCMEM_PRIO - 1; j > NO_PRIO; j--) {
2398 req = find_req_match(j, tmp_region);
2399 if (req) {
2400 seq_printf(f,
2401 "owner: %s 0x%lx -- 0x%lx size 0x%lx [state: %2lx]\n",
2402 get_name(req->owner),
2403 req->req_start, req->req_end,
2404 req->req_sz, req->state);
2405 }
2406 }
2407 }
2408 mutex_unlock(&sched_mutex);
2409 return 0;
2410}
2411
2412static int ocmem_allocations_open(struct inode *inode, struct file *file)
2413{
2414 return single_open(file, ocmem_allocations_show, inode->i_private);
2415}
2416
2417static const struct file_operations allocations_show_fops = {
2418 .open = ocmem_allocations_open,
2419 .read = seq_read,
2420 .llseek = seq_lseek,
2421 .release = seq_release,
2422};
2423
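/*
 * ocmem_sched_init() - one-time scheduler initialisation at probe.
 *
 * Sets up the scheduler locks, the per-priority pending queues and the
 * RDM/eviction workqueues, caches the OCMEM virtual base used by
 * do_dump() and creates the "allocations" debugfs node.  Returns -ENOMEM
 * or -EBUSY on failure so the caller can abort the probe.
 */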
2424int ocmem_sched_init(struct platform_device *pdev)
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002425{
2426 int i = 0;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002427 struct ocmem_plat_data *pdata = NULL;
2428 struct device *dev = &pdev->dev;
2429
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002430 sched_tree = RB_ROOT;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002431 pdata = platform_get_drvdata(pdev);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002432 mutex_init(&allocation_mutex);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002433 mutex_init(&free_mutex);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002434 mutex_init(&sched_mutex);
2435 mutex_init(&sched_queue_mutex);
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002436 ocmem_vaddr = pdata->vbase;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002437 for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++)
2438 INIT_LIST_HEAD(&sched_queue[i]);
2439
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07002440 mutex_init(&rdm_mutex);
2441 INIT_LIST_HEAD(&rdm_queue);
2442 ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0);
2443 if (!ocmem_rdm_wq)
2444 return -ENOMEM;
2445 ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0);
2446 if (!ocmem_eviction_wq)
2447 return -ENOMEM;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002448
2449 if (!debugfs_create_file("allocations", S_IRUGO, pdata->debug_node,
2450 NULL, &allocations_show_fops)) {
2451 dev_err(dev, "Unable to create debugfs node for scheduler\n");
2452 return -EBUSY;
2453 }
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002454 return 0;
2455}