/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <mach/ocmem_priv.h>

enum request_states {
	R_FREE = 0x0,	/* request is not allocated */
	R_PENDING,	/* request has a pending operation */
	R_ALLOCATED,	/* request has been allocated */
	R_ENQUEUED,	/* request has been enqueued for future retry */
	R_MUST_GROW,	/* request must grow as a part of pending operation */
	R_MUST_SHRINK,	/* request must shrink */
	R_WF_SHRINK,	/* shrink must be ack'ed by a client */
	R_SHRUNK,	/* request was shrunk */
	R_MUST_MAP,	/* request must be mapped before being used */
	R_MUST_UNMAP,	/* request must be unmapped when not being used */
	R_MAPPED,	/* request is mapped and actively used by client */
	R_UNMAPPED,	/* request is not mapped, so it's not in active use */
	R_EVICTED,	/* request is evicted and must be restored */
};

#define SET_STATE(x, val) (set_bit((val), &(x)->state))
#define CLEAR_STATE(x, val) (clear_bit((val), &(x)->state))
#define TEST_STATE(x, val) (test_bit((val), &(x)->state))
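
/*
 * Note: req->state is a bitmask, not a single value. The R_* values above
 * are bit positions manipulated atomically through set_bit()/clear_bit()/
 * test_bit(), so a request commonly carries several states at once; for
 * example, a freshly allocated request is marked R_ALLOCATED | R_MUST_MAP
 * before the scheduler maps it.
 */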

enum op_res {
	OP_COMPLETE = 0x0,
	OP_RESCHED,
	OP_PARTIAL,
	OP_EVICT,
	OP_FAIL = ~0x0,
};

/* Represents various client priorities */
/* Note: More than one client can share a priority level */
enum client_prio {
	MIN_PRIO = 0x0,
	NO_PRIO = MIN_PRIO,
	PRIO_SENSORS = 0x1,
	PRIO_OTHER_OS = 0x1,
	PRIO_LP_AUDIO = 0x1,
	PRIO_HP_AUDIO = 0x2,
	PRIO_VOICE = 0x3,
	PRIO_GFX_GROWTH = 0x4,
	PRIO_VIDEO = 0x5,
	PRIO_GFX = 0x6,
	PRIO_OCMEM = 0x7,
	MAX_OCMEM_PRIO = PRIO_OCMEM + 1,
};

static void __iomem *ocmem_vaddr;
static struct list_head sched_queue[MAX_OCMEM_PRIO];
static struct mutex sched_queue_mutex;

/* The duration in msecs before a pending operation is scheduled
 * This allows an idle window between use case boundaries where various
 * hardware state changes can occur. The value will be tweaked on actual
 * hardware.
*/
/* Delay in ms for switching to low power mode for OCMEM */
#define SCHED_DELAY 5000

static struct list_head rdm_queue;
static struct mutex rdm_mutex;
static struct workqueue_struct *ocmem_rdm_wq;
static struct workqueue_struct *ocmem_eviction_wq;

static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX];

struct ocmem_rdm_work {
	int id;
	struct ocmem_map_list *list;
	struct ocmem_handle *handle;
	int direction;
	struct work_struct work;
};

/* OCMEM Operational modes */
enum ocmem_client_modes {
	OCMEM_PERFORMANCE = 1,
	OCMEM_PASSIVE,
	OCMEM_LOW_POWER,
	OCMEM_MODE_MAX = OCMEM_LOW_POWER
};

/* OCMEM Addressing modes */
enum ocmem_interconnects {
	OCMEM_BLOCKED = 0,
	OCMEM_PORT = 1,
	OCMEM_OCMEMNOC = 2,
	OCMEM_SYSNOC = 3,
};

enum ocmem_tz_client {
	TZ_UNUSED = 0x0,
	TZ_GRAPHICS,
	TZ_VIDEO,
	TZ_LP_AUDIO,
	TZ_SENSORS,
	TZ_OTHER_OS,
	TZ_DEBUG,
};

/**
 * Primary OCMEM Arbitration Table
 **/
struct ocmem_table {
	int client_id;
	int priority;
	int mode;
	int hw_interconnect;
	int tz_id;
} ocmem_client_table[OCMEM_CLIENT_MAX] = {
	{OCMEM_GRAPHICS, PRIO_GFX, OCMEM_PERFORMANCE, OCMEM_PORT,
		TZ_GRAPHICS},
	{OCMEM_VIDEO, PRIO_VIDEO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC,
		TZ_VIDEO},
	{OCMEM_CAMERA, NO_PRIO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC,
		TZ_UNUSED},
	{OCMEM_HP_AUDIO, PRIO_HP_AUDIO, OCMEM_PASSIVE, OCMEM_BLOCKED,
		TZ_UNUSED},
	{OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED,
		TZ_UNUSED},
	{OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, OCMEM_SYSNOC,
		TZ_LP_AUDIO},
	{OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC,
		TZ_SENSORS},
	{OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC,
		TZ_OTHER_OS},
};
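
/*
 * Each row above fixes a client's arbitration priority, its power mode and
 * the interconnect it reaches OCMEM through; the tz_id column is what
 * get_tz_id() reports for the client. The interconnect column also drives
 * the access-type helpers below (is_tcm(), is_iface_access(), is_blocked(),
 * etc.), so e.g. HP audio and voice are OCMEM_BLOCKED and are rejected by
 * the allocation and free paths.
 */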

static struct rb_root sched_tree;
static struct mutex sched_mutex;
static struct mutex allocation_mutex;
static struct mutex free_mutex;

/* A region represents a continuous interval in OCMEM address space */
struct ocmem_region {
	/* Chain in Interval Tree */
	struct rb_node region_rb;
	/* Hash map of requests */
	struct idr region_idr;
	/* Chain in eviction list */
	struct list_head eviction_list;
	unsigned long r_start;
	unsigned long r_end;
	unsigned long r_sz;
	/* Highest priority of all requests served by this region */
	int max_prio;
};

/* Is OCMEM tightly coupled to the client? */
static inline int is_tcm(int id)
{
	if (ocmem_client_table[id].hw_interconnect == OCMEM_PORT ||
		ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC)
		return 1;
	else
		return 0;
}

static inline int is_iface_access(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC ? 1 : 0;
}

static inline int is_remapped_access(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_SYSNOC ? 1 : 0;
}

static inline int is_blocked(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_BLOCKED ? 1 : 0;
}

inline struct ocmem_buf *handle_to_buffer(struct ocmem_handle *handle)
{
	if (handle)
		return &handle->buffer;
	else
		return NULL;
}

inline struct ocmem_handle *buffer_to_handle(struct ocmem_buf *buffer)
{
	if (buffer)
		return container_of(buffer, struct ocmem_handle, buffer);
	else
		return NULL;
}

inline struct ocmem_req *handle_to_req(struct ocmem_handle *handle)
{
	if (handle)
		return handle->req;
	else
		return NULL;
}

inline struct ocmem_handle *req_to_handle(struct ocmem_req *req)
{
	if (req && req->buffer)
		return container_of(req->buffer, struct ocmem_handle, buffer);
	else
		return NULL;
}
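
/*
 * The conversions above rely on struct ocmem_handle (defined in
 * mach/ocmem_priv.h, not in this file) embedding the client-visible
 * struct ocmem_buf and carrying a pointer to the backing ocmem_req, so
 * container_of() on the buffer recovers the handle and handle->req
 * recovers the request. This is an assumed layout based on how the
 * helpers are written here:
 *
 *	&handle->buffer  <->  buffer_to_handle(buf)
 *	handle->req      <->  handle_to_req(handle)
 */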

/* Simple wrappers which will have debug features added later */
inline int ocmem_read(void *at)
{
	return readl_relaxed(at);
}

inline int ocmem_write(unsigned long val, void *at)
{
	writel_relaxed(val, at);
	return 0;
}

inline int get_mode(int id)
{
	if (!check_id(id))
		return MODE_NOT_SET;
	else
		return ocmem_client_table[id].mode == OCMEM_PERFORMANCE ?
						WIDE_MODE : THIN_MODE;
}

inline int get_tz_id(int id)
{
	if (!check_id(id))
		return TZ_UNUSED;
	else
		return ocmem_client_table[id].tz_id;
}

/* Returns the address that can be used by a device core to access OCMEM */
static unsigned long device_address(int id, unsigned long addr)
{
	int hw_interconnect = ocmem_client_table[id].hw_interconnect;
	unsigned long ret_addr = 0x0;

	switch (hw_interconnect) {
	case OCMEM_PORT:
	case OCMEM_OCMEMNOC:
		ret_addr = phys_to_offset(addr);
		break;
	case OCMEM_SYSNOC:
		ret_addr = addr;
		break;
	case OCMEM_BLOCKED:
		ret_addr = 0x0;
		break;
	}
	return ret_addr;
}
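
/*
 * Address translation summary: clients that reach OCMEM through the
 * dedicated port or the OCMEM NOC are handed offsets within the OCMEM
 * aperture (phys_to_offset()), clients on the system NOC use the physical
 * address unchanged, and blocked clients get 0. core_address() below is the
 * inverse mapping and is used when validating buffers handed back by a
 * client (see process_free()/process_shrink()).
 */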

/* Returns the address as viewed by the core */
static unsigned long core_address(int id, unsigned long addr)
{
	int hw_interconnect = ocmem_client_table[id].hw_interconnect;
	unsigned long ret_addr = 0x0;

	switch (hw_interconnect) {
	case OCMEM_PORT:
	case OCMEM_OCMEMNOC:
		ret_addr = offset_to_phys(addr);
		break;
	case OCMEM_SYSNOC:
		ret_addr = addr;
		break;
	case OCMEM_BLOCKED:
		ret_addr = 0x0;
		break;
	}
	return ret_addr;
}

static inline struct ocmem_zone *zone_of(struct ocmem_req *req)
{
	int owner;
	if (!req)
		return NULL;
	owner = req->owner;
	return get_zone(owner);
}

static int insert_region(struct ocmem_region *region)
{
	struct rb_root *root = &sched_tree;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ocmem_region *tmp = NULL;
	unsigned long addr = region->r_start;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct ocmem_region, region_rb);

		if (tmp->r_end > addr) {
			if (tmp->r_start <= addr)
				break;
			p = &(*p)->rb_left;
		} else if (tmp->r_end <= addr)
			p = &(*p)->rb_right;
	}
	rb_link_node(&region->region_rb, parent, p);
	rb_insert_color(&region->region_rb, root);
	return 0;
}
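
/*
 * The scheduler tracks regions in an rb-tree ordered by start address: the
 * walk above descends left while the new start lies below an existing
 * region's end and right otherwise, which is the same invariant that
 * find_region() uses later to look an address up.
 */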

static int remove_region(struct ocmem_region *region)
{
	struct rb_root *root = &sched_tree;
	rb_erase(&region->region_rb, root);
	return 0;
}

static struct ocmem_req *ocmem_create_req(void)
{
	struct ocmem_req *p = NULL;

	p = kzalloc(sizeof(struct ocmem_req), GFP_KERNEL);
	if (!p)
		return NULL;

	INIT_LIST_HEAD(&p->zone_list);
	INIT_LIST_HEAD(&p->sched_list);
	init_rwsem(&p->rw_sem);
	SET_STATE(p, R_FREE);
	pr_debug("request %p created\n", p);
	return p;
}

static int ocmem_destroy_req(struct ocmem_req *req)
{
	kfree(req);
	return 0;
}

static struct ocmem_region *create_region(void)
{
	struct ocmem_region *p = NULL;

	p = kzalloc(sizeof(struct ocmem_region), GFP_KERNEL);
	if (!p)
		return NULL;
	idr_init(&p->region_idr);
	INIT_LIST_HEAD(&p->eviction_list);
	p->r_start = p->r_end = p->r_sz = 0x0;
	p->max_prio = NO_PRIO;
	return p;
}

static int destroy_region(struct ocmem_region *region)
{
	idr_destroy(&region->region_idr);
	kfree(region);
	return 0;
}

static int attach_req(struct ocmem_region *region, struct ocmem_req *req)
{
	int ret, id;

	while (1) {
		if (idr_pre_get(&region->region_idr, GFP_KERNEL) == 0)
			return -ENOMEM;

		ret = idr_get_new_above(&region->region_idr, req, 1, &id);

		if (ret != -EAGAIN)
			break;
	}

	if (!ret) {
		req->req_id = id;
		pr_debug("ocmem: request %p(id:%d) attached to region %p\n",
				req, id, region);
		return 0;
	}
	return -EINVAL;
}

static int detach_req(struct ocmem_region *region, struct ocmem_req *req)
{
	idr_remove(&region->region_idr, req->req_id);
	return 0;
}

static int populate_region(struct ocmem_region *region, struct ocmem_req *req)
{
	region->r_start = req->req_start;
	region->r_end = req->req_end;
	region->r_sz = req->req_end - req->req_start + 1;
	return 0;
}

static int region_req_count(int id, void *ptr, void *data)
{
	int *count = data;
	*count = *count + 1;
	return 0;
}

static int req_count(struct ocmem_region *region)
{
	int count = 0;
	idr_for_each(&region->region_idr, region_req_count, &count);
	return count;
}

static int compute_max_prio(int id, void *ptr, void *data)
{
	int *max = data;
	struct ocmem_req *req = ptr;

	if (req->prio > *max)
		*max = req->prio;
	return 0;
}

static int update_region_prio(struct ocmem_region *region)
{
	/* Start from the lowest priority so compute_max_prio() has a
	 * defined baseline to compare against */
	int max_prio = NO_PRIO;

	if (req_count(region) != 0) {
		idr_for_each(&region->region_idr, compute_max_prio, &max_prio);
		region->max_prio = max_prio;
	} else {
		region->max_prio = NO_PRIO;
	}
	pr_debug("ocmem: Updating prio of region %p as %d\n",
			region, region->max_prio);

	return 0;
}

static struct ocmem_region *find_region(unsigned long addr)
{
	struct ocmem_region *region = NULL;
	struct rb_node *rb_node = NULL;

	rb_node = sched_tree.rb_node;

	while (rb_node) {
		struct ocmem_region *tmp_region = NULL;
		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);

		if (tmp_region->r_end > addr) {
			region = tmp_region;
			if (tmp_region->r_start <= addr)
				break;
			rb_node = rb_node->rb_left;
		} else {
			rb_node = rb_node->rb_right;
		}
	}
	return region;
}

static struct ocmem_region *find_region_intersection(unsigned long start,
				unsigned long end)
{
	struct ocmem_region *region = NULL;
	region = find_region(start);
	if (region && end <= region->r_start)
		region = NULL;
	return region;
}

static struct ocmem_region *find_region_match(unsigned long start,
				unsigned long end)
{
	struct ocmem_region *region = NULL;
	region = find_region(start);
	if (region && start == region->r_start && end == region->r_end)
		return region;
	return NULL;
}

static struct ocmem_req *find_req_match(int owner, struct ocmem_region *region)
{
	struct ocmem_req *req = NULL;

	if (!region)
		return NULL;

	req = idr_find(&region->region_idr, owner);

	return req;
}

/* Must be called with req->sem held */
static inline int is_mapped(struct ocmem_req *req)
{
	return TEST_STATE(req, R_MAPPED);
}

static inline int is_pending_shrink(struct ocmem_req *req)
{
	return TEST_STATE(req, R_MUST_SHRINK) ||
		TEST_STATE(req, R_WF_SHRINK);
}

/* Must be called with sched_mutex held */
static int __sched_unmap(struct ocmem_req *req)
{
	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	if (!TEST_STATE(req, R_MAPPED))
		goto invalid_op_error;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	if (!is_mapped(req)) {
		pr_err("Request is not currently mapped");
		goto invalid_op_error;
	}

	/* Update the request state */
	CLEAR_STATE(req, R_MAPPED);
	SET_STATE(req, R_MUST_MAP);

	return OP_COMPLETE;

invalid_op_error:
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_map(struct ocmem_req *req)
{
	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	/* Update the request state */
	CLEAR_STATE(req, R_MUST_MAP);
	SET_STATE(req, R_MAPPED);

	return OP_COMPLETE;

invalid_op_error:
	return OP_FAIL;
}

static int do_map(struct ocmem_req *req)
{
	int rc = 0;

	down_write(&req->rw_sem);

	mutex_lock(&sched_mutex);
	rc = __sched_map(req);
	mutex_unlock(&sched_mutex);

	up_write(&req->rw_sem);

	if (rc == OP_FAIL)
		return -EINVAL;

	return 0;
}

static int do_unmap(struct ocmem_req *req)
{
	int rc = 0;

	down_write(&req->rw_sem);

	mutex_lock(&sched_mutex);
	rc = __sched_unmap(req);
	mutex_unlock(&sched_mutex);

	up_write(&req->rw_sem);

	if (rc == OP_FAIL)
		return -EINVAL;

	return 0;
}
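
/*
 * Locking order in the map/unmap helpers above: the request's rw_sem is
 * taken first (write side) and sched_mutex is taken inside it, so the
 * request state update and the interval-tree lookup happen together.
 * Callers such as process_map()/process_unmap() wrap these helpers with
 * the clock enables and the ocmem_lock()/ocmem_unlock() calls.
 */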

static int process_map(struct ocmem_req *req, unsigned long start,
				unsigned long end)
{
	int rc = 0;

	rc = ocmem_enable_core_clock();

	if (rc < 0)
		goto core_clock_fail;

	if (is_iface_access(req->owner)) {
		rc = ocmem_enable_iface_clock();

		if (rc < 0)
			goto iface_clock_fail;
	}

	rc = ocmem_lock(req->owner, phys_to_offset(req->req_start), req->req_sz,
							get_mode(req->owner));

	if (rc < 0) {
		pr_err("ocmem: Failed to secure request %p for %d\n", req,
				req->owner);
		goto lock_failed;
	}

	rc = do_map(req);

	if (rc < 0) {
		pr_err("ocmem: Failed to map request %p for %d\n",
				req, req->owner);
		goto process_map_fail;
	}
	pr_debug("ocmem: Mapped request %p\n", req);
	return 0;

process_map_fail:
	ocmem_unlock(req->owner, phys_to_offset(req->req_start), req->req_sz);
lock_failed:
	if (is_iface_access(req->owner))
		ocmem_disable_iface_clock();
iface_clock_fail:
	ocmem_disable_core_clock();
core_clock_fail:
	pr_err("ocmem: Failed to map ocmem request\n");
	return rc;
}
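
/*
 * The error labels above unwind in reverse order of setup: a failed map
 * releases the ocmem_lock, a failed lock drops the interface clock (only
 * when the client needed it), and every failure finally drops the core
 * clock. process_unmap() below performs the same teardown on its success
 * path.
 */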

static int process_unmap(struct ocmem_req *req, unsigned long start,
				unsigned long end)
{
	int rc = 0;

	rc = do_unmap(req);

	if (rc < 0)
		goto process_unmap_fail;

	rc = ocmem_unlock(req->owner, phys_to_offset(req->req_start),
				req->req_sz);

	if (rc < 0) {
		pr_err("ocmem: Failed to un-secure request %p for %d\n", req,
				req->owner);
		goto unlock_failed;
	}

	if (is_iface_access(req->owner))
		ocmem_disable_iface_clock();
	ocmem_disable_core_clock();
	pr_debug("ocmem: Unmapped request %p\n", req);
	return 0;

unlock_failed:
process_unmap_fail:
	pr_err("ocmem: Failed to unmap ocmem request\n");
	return rc;
}

static int __sched_grow(struct ocmem_req *req, bool can_block)
{
	unsigned long min = req->req_min;
	unsigned long max = req->req_max;
	unsigned long step = req->req_step;
	int owner = req->owner;
	unsigned long curr_sz = 0;
	unsigned long growth_sz = 0;
	unsigned long curr_start = 0;
	enum client_prio prio = req->prio;
	unsigned long alloc_addr = 0x0;
	bool retry;
	struct ocmem_region *spanned_r = NULL;
	struct ocmem_region *overlap_r = NULL;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	struct ocmem_zone *zone = get_zone(owner);
	struct ocmem_region *region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	curr_sz = matched_req->req_sz;
	curr_start = matched_req->req_start;
	growth_sz = matched_req->req_max - matched_req->req_sz;

	pr_debug("Attempting to grow req %p from %lx to %lx\n",
			req, matched_req->req_sz, matched_req->req_max);

	retry = false;

	pr_debug("ocmem: GROW: growth size %lx\n", growth_sz);

retry_next_step:

	spanned_r = NULL;
	overlap_r = NULL;

	spanned_r = find_region(zone->z_head);
	overlap_r = find_region_intersection(zone->z_head,
				zone->z_head + growth_sz);

	if (overlap_r == NULL) {
		/* no conflicting regions, schedule this region */
		zone->z_ops->free(zone, curr_start, curr_sz);
		alloc_addr = zone->z_ops->allocate(zone, curr_sz + growth_sz);

		if (alloc_addr < 0) {
			pr_err("ocmem: zone allocation operation failed\n");
			goto internal_error;
		}

		curr_sz += growth_sz;
		/* Detach the region from the interval tree */
		/* This is to guarantee that any change in size
		 * causes the tree to be rebalanced if required */

		detach_req(matched_region, req);
		if (req_count(matched_region) == 0) {
			remove_region(matched_region);
			region = matched_region;
		} else {
			region = create_region();
			if (!region) {
				pr_err("ocmem: Unable to create region\n");
				goto region_error;
			}
		}

		/* update the request */
		req->req_start = alloc_addr;
		/* increment the size to reflect new length */
		req->req_sz = curr_sz;
		req->req_end = alloc_addr + req->req_sz - 1;

		/* update request state */
		CLEAR_STATE(req, R_MUST_GROW);
		SET_STATE(req, R_ALLOCATED);
		SET_STATE(req, R_MUST_MAP);
		req->op = SCHED_MAP;

		/* update the region with new req */
		attach_req(region, req);
		populate_region(region, req);
		update_region_prio(region);

		/* update the tree with new region */
		if (insert_region(region)) {
			pr_err("ocmem: Failed to insert the region\n");
			goto region_error;
		}

		if (retry) {
			SET_STATE(req, R_MUST_GROW);
			SET_STATE(req, R_PENDING);
			req->op = SCHED_GROW;
			return OP_PARTIAL;
		}
	} else if (spanned_r != NULL && overlap_r != NULL) {
		/* resolve conflicting regions based on priority */
		if (overlap_r->max_prio < prio) {
			/* Growth cannot be triggered unless a previous
			 * client of lower priority was evicted */
			pr_err("ocmem: Invalid growth scheduled\n");
			/* This is serious enough to fail */
			BUG();
			return OP_FAIL;
		} else if (overlap_r->max_prio > prio) {
			if (min == max) {
				/* Cannot grow at this time, try later */
				SET_STATE(req, R_PENDING);
				SET_STATE(req, R_MUST_GROW);
				return OP_RESCHED;
			} else {
				/* Try to grow in steps */
				growth_sz -= step;
				/* We are OOM at this point so need to retry */
				if (growth_sz <= curr_sz) {
					SET_STATE(req, R_PENDING);
					SET_STATE(req, R_MUST_GROW);
					return OP_RESCHED;
				}
				retry = true;
				pr_debug("ocmem: Attempting with reduced size %lx\n",
						growth_sz);
				goto retry_next_step;
			}
		} else {
			pr_err("ocmem: grow: New Region %p Existing %p\n",
					matched_region, overlap_r);
			pr_err("ocmem: Undetermined behavior\n");
			/* This is serious enough to fail */
			BUG();
		}
	} else if (spanned_r == NULL && overlap_r != NULL) {
		goto err_not_supported;
	}

	return OP_COMPLETE;

err_not_supported:
	pr_err("ocmem: Scheduled unsupported operation\n");
	return OP_FAIL;
region_error:
	zone->z_ops->free(zone, alloc_addr, curr_sz);
	detach_req(region, req);
	update_region_prio(region);
	/* req is going to be destroyed by the caller anyways */
internal_error:
	destroy_region(region);
invalid_op_error:
	return OP_FAIL;
}
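
/*
 * Growth works by releasing the current zone allocation and immediately
 * re-allocating a larger block (current size + growth) at the zone head,
 * then re-inserting the region. When a higher-priority region overlaps the
 * grown range, the growth size is walked down in req_step decrements and
 * the request either completes partially (OP_PARTIAL, with R_MUST_GROW
 * left set so a later pass can finish the job) or is rescheduled
 * (OP_RESCHED).
 */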

/* Must be called with sched_mutex held */
static int __sched_free(struct ocmem_req *req)
{
	int owner = req->owner;
	int ret = 0;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	struct ocmem_zone *zone = get_zone(owner);

	BUG_ON(!zone);

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req)
		goto invalid_op_error;
	if (matched_req != req)
		goto invalid_op_error;

	ret = zone->z_ops->free(zone,
		matched_req->req_start, matched_req->req_sz);

	if (ret < 0)
		goto err_op_fail;

	detach_req(matched_region, matched_req);
	update_region_prio(matched_region);
	if (req_count(matched_region) == 0) {
		remove_region(matched_region);
		destroy_region(matched_region);
	}

	/* Update the request */
	req->req_start = 0x0;
	req->req_sz = 0x0;
	req->req_end = 0x0;
	SET_STATE(req, R_FREE);
	return OP_COMPLETE;
invalid_op_error:
	pr_err("ocmem: free: Failed to find matching region\n");
err_op_fail:
	pr_err("ocmem: free: Failed\n");
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz)
{
	int owner = req->owner;
	int ret = 0;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;
	struct ocmem_region *region = NULL;
	unsigned long alloc_addr = 0x0;

	struct ocmem_zone *zone = get_zone(owner);

	BUG_ON(!zone);

	/* The shrink should not be called for zero size */
	BUG_ON(new_sz == 0);

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req)
		goto invalid_op_error;
	if (matched_req != req)
		goto invalid_op_error;

	ret = zone->z_ops->free(zone,
		matched_req->req_start, matched_req->req_sz);

	if (ret < 0) {
		pr_err("Zone Allocation operation failed\n");
		goto internal_error;
	}

	alloc_addr = zone->z_ops->allocate(zone, new_sz);

	if (alloc_addr < 0) {
		pr_err("Zone Allocation operation failed\n");
		goto internal_error;
	}

	/* Detach the region from the interval tree */
	/* This is to guarantee that the change in size
	 * causes the tree to be rebalanced if required */

	detach_req(matched_region, req);
	if (req_count(matched_region) == 0) {
		remove_region(matched_region);
		region = matched_region;
	} else {
		region = create_region();
		if (!region) {
			pr_err("ocmem: Unable to create region\n");
			goto internal_error;
		}
	}
	/* update the request */
	req->req_start = alloc_addr;
	req->req_sz = new_sz;
	req->req_end = alloc_addr + req->req_sz;

	if (req_count(region) == 0) {
		remove_region(matched_region);
		destroy_region(matched_region);
	}

	/* update request state */
	SET_STATE(req, R_MUST_GROW);
	SET_STATE(req, R_MUST_MAP);
	req->op = SCHED_MAP;

	/* attach the request to the region */
	attach_req(region, req);
	populate_region(region, req);
	update_region_prio(region);

	/* update the tree with new region */
	if (insert_region(region)) {
		pr_err("ocmem: Failed to insert the region\n");
		zone->z_ops->free(zone, alloc_addr, new_sz);
		detach_req(region, req);
		update_region_prio(region);
		/* req will be destroyed by the caller */
		goto region_error;
	}
	return OP_COMPLETE;

region_error:
	destroy_region(region);
internal_error:
	pr_err("ocmem: shrink: Failed\n");
	return OP_FAIL;
invalid_op_error:
	pr_err("ocmem: shrink: Failed to find matching region\n");
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_allocate(struct ocmem_req *req, bool can_block,
				bool can_wait)
{
	unsigned long min = req->req_min;
	unsigned long max = req->req_max;
	unsigned long step = req->req_step;
	int owner = req->owner;
	unsigned long sz = max;
	enum client_prio prio = req->prio;
	unsigned long alloc_addr = 0x0;
	bool retry;

	struct ocmem_region *spanned_r = NULL;
	struct ocmem_region *overlap_r = NULL;

	struct ocmem_zone *zone = get_zone(owner);
	struct ocmem_region *region = NULL;

	BUG_ON(!zone);

	if (min > (zone->z_end - zone->z_start)) {
		pr_err("ocmem: requested minimum size exceeds quota\n");
		goto invalid_op_error;
	}

	if (max > (zone->z_end - zone->z_start)) {
		pr_err("ocmem: requested maximum size exceeds quota\n");
		goto invalid_op_error;
	}

	if (min > zone->z_free) {
		pr_err("ocmem: out of memory for zone %d\n", owner);
		goto invalid_op_error;
	}

	region = create_region();

	if (!region) {
		pr_err("ocmem: Unable to create region\n");
		goto invalid_op_error;
	}

	retry = false;

	pr_debug("ocmem: do_allocate: %s request %p size %lx\n",
						get_name(owner), req, sz);

retry_next_step:

	spanned_r = NULL;
	overlap_r = NULL;

	spanned_r = find_region(zone->z_head);
	overlap_r = find_region_intersection(zone->z_head, zone->z_head + sz);

	if (overlap_r == NULL) {
		/* no conflicting regions, schedule this region */
		alloc_addr = zone->z_ops->allocate(zone, sz);

		if (alloc_addr < 0) {
			pr_err("Zone Allocation operation failed\n");
			goto internal_error;
		}

		/* update the request */
		req->req_start = alloc_addr;
		req->req_end = alloc_addr + sz - 1;
		req->req_sz = sz;
		req->zone = zone;

		/* update request state */
		CLEAR_STATE(req, R_FREE);
		CLEAR_STATE(req, R_PENDING);
		SET_STATE(req, R_ALLOCATED);
		SET_STATE(req, R_MUST_MAP);
		req->op = SCHED_NOP;

		/* attach the request to the region */
		attach_req(region, req);
		populate_region(region, req);
		update_region_prio(region);

		/* update the tree with new region */
		if (insert_region(region)) {
			pr_err("ocmem: Failed to insert the region\n");
			zone->z_ops->free(zone, alloc_addr, sz);
			detach_req(region, req);
			update_region_prio(region);
			/* req will be destroyed by the caller */
			goto internal_error;
		}

		if (retry) {
			SET_STATE(req, R_MUST_GROW);
			SET_STATE(req, R_PENDING);
			req->op = SCHED_GROW;
			return OP_PARTIAL;
		}
	} else if (spanned_r != NULL && overlap_r != NULL) {
		/* resolve conflicting regions based on priority */
		if (overlap_r->max_prio < prio) {
			if (min == max) {
				req->req_start = zone->z_head;
				req->req_end = zone->z_head + sz - 1;
				req->req_sz = 0x0;
				req->edata = NULL;
				goto trigger_eviction;
			} else {
				/* Try to allocate at least 'min' immediately */
				sz -= step;
				if (sz < min)
					goto err_out_of_mem;
				retry = true;
				pr_debug("ocmem: Attempting with reduced size %lx\n",
						sz);
				goto retry_next_step;
			}
		} else if (overlap_r->max_prio > prio) {
			if (can_block == true) {
				SET_STATE(req, R_PENDING);
				SET_STATE(req, R_MUST_GROW);
				return OP_RESCHED;
			} else {
				if (min == max) {
					pr_err("Cannot allocate %lx synchronously\n",
							sz);
					goto err_out_of_mem;
				} else {
					sz -= step;
					if (sz < min)
						goto err_out_of_mem;
					retry = true;
					pr_debug("ocmem: Attempting reduced size %lx\n",
							sz);
					goto retry_next_step;
				}
			}
		} else {
			pr_err("ocmem: Undetermined behavior\n");
			pr_err("ocmem: New Region %p Existing %p\n", region,
					overlap_r);
			/* This is serious enough to fail */
			BUG();
		}
	} else if (spanned_r == NULL && overlap_r != NULL)
		goto err_not_supported;

	return OP_COMPLETE;

trigger_eviction:
	pr_debug("Trigger eviction of region %p\n", overlap_r);
	destroy_region(region);
	return OP_EVICT;

err_not_supported:
	pr_err("ocmem: Scheduled unsupported operation\n");
	return OP_FAIL;

err_out_of_mem:
	pr_err("ocmem: Out of memory during allocation\n");
internal_error:
	destroy_region(region);
invalid_op_error:
	return OP_FAIL;
}
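
/*
 * __sched_allocate() outcome summary for callers: OP_COMPLETE/OP_PARTIAL
 * mean the request now owns a range (R_ALLOCATED | R_MUST_MAP, with
 * OP_PARTIAL additionally leaving R_MUST_GROW and SCHED_GROW pending),
 * OP_RESCHED means the request was left R_PENDING for a later retry,
 * OP_EVICT asks the caller to first evict the conflicting lower-priority
 * region recorded in req->req_start/req_end, and OP_FAIL aborts the
 * allocation.
 */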

/* Remove the request from eviction lists */
static void cancel_restore(struct ocmem_req *e_handle,
				struct ocmem_req *req)
{
	struct ocmem_eviction_data *edata = e_handle->edata;

	if (!edata || !req)
		return;

	if (list_empty(&edata->req_list))
		return;

	list_del_init(&req->eviction_list);
	req->e_handle = NULL;

	return;
}

static int sched_enqueue(struct ocmem_req *priv)
{
	struct ocmem_req *next = NULL;
	mutex_lock(&sched_queue_mutex);
	SET_STATE(priv, R_ENQUEUED);
	list_add_tail(&priv->sched_list, &sched_queue[priv->owner]);
	pr_debug("enqueued req %p\n", priv);
	list_for_each_entry(next, &sched_queue[priv->owner], sched_list) {
		pr_debug("pending request %p for client %s\n", next,
				get_name(next->owner));
	}
	mutex_unlock(&sched_queue_mutex);
	return 0;
}

static void sched_dequeue(struct ocmem_req *victim_req)
{
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;
	int id;

	if (!victim_req)
		return;

	id = victim_req->owner;

	mutex_lock(&sched_queue_mutex);

	if (list_empty(&sched_queue[id]))
		goto dequeue_done;

	list_for_each_entry_safe(req, next, &sched_queue[id], sched_list)
	{
		if (req == victim_req) {
			pr_debug("ocmem: Cancelling pending request %p for %s\n",
					req, get_name(req->owner));
			list_del_init(&victim_req->sched_list);
			CLEAR_STATE(victim_req, R_ENQUEUED);
			break;
		}
	}
dequeue_done:
	mutex_unlock(&sched_queue_mutex);
	return;
}

static struct ocmem_req *ocmem_fetch_req(void)
{
	int i;
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;

	mutex_lock(&sched_queue_mutex);
	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
		if (list_empty(&sched_queue[i]))
			continue;
		list_for_each_entry_safe(req, next, &sched_queue[i], sched_list)
		{
			if (req) {
				pr_debug("ocmem: Fetched pending request %p\n",
						req);
				list_del(&req->sched_list);
				CLEAR_STATE(req, R_ENQUEUED);
				break;
			}
		}
	}
	mutex_unlock(&sched_queue_mutex);
	return req;
}

unsigned long process_quota(int id)
{
	struct ocmem_zone *zone = NULL;

	if (is_blocked(id))
		return 0;

	zone = get_zone(id);

	if (zone && zone->z_pool)
		return zone->z_end - zone->z_start;
	else
		return 0;
}

static int do_grow(struct ocmem_req *req)
{
	struct ocmem_buf *buffer = NULL;
	bool can_block = true;
	int rc = 0;

	down_write(&req->rw_sem);
	buffer = req->buffer;

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_grow(req, can_block);
	mutex_unlock(&sched_mutex);

	if (rc == OP_FAIL)
		goto err_op_fail;

	if (rc == OP_RESCHED) {
		pr_debug("ocmem: Enqueue this allocation");
		sched_enqueue(req);
	}

	else if (rc == OP_COMPLETE || rc == OP_PARTIAL) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_op_fail:
	up_write(&req->rw_sem);
	return -EINVAL;
}

static int process_grow(struct ocmem_req *req)
{
	int rc = 0;
	unsigned long offset = 0;

	/* Attempt to grow the region */
	rc = do_grow(req);

	if (rc < 0)
		return -EINVAL;

	rc = process_map(req, req->req_start, req->req_end);
	if (rc < 0)
		return -EINVAL;

	offset = phys_to_offset(req->req_start);

	rc = ocmem_memory_on(req->owner, offset, req->req_sz);

	if (rc < 0) {
		pr_err("Failed to switch ON memory macros\n");
		goto power_ctl_error;
	}

	/* Notify the client about the buffer growth */
	rc = dispatch_notification(req->owner, OCMEM_ALLOC_GROW, req->buffer);
	if (rc < 0) {
		pr_err("No notifier callback to cater for req %p event: %d\n",
				req, OCMEM_ALLOC_GROW);
		BUG();
	}
	return 0;
power_ctl_error:
	return -EINVAL;
}

static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
{
	int rc = 0;
	struct ocmem_buf *buffer = NULL;

	down_write(&req->rw_sem);
	buffer = req->buffer;

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_shrink(req, shrink_size);
	mutex_unlock(&sched_mutex);

	if (rc == OP_FAIL)
		goto err_op_fail;

	else if (rc == OP_COMPLETE) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_op_fail:
	up_write(&req->rw_sem);
	return -EINVAL;
}

static void ocmem_sched_wk_func(struct work_struct *work);
DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func);

static int ocmem_schedule_pending(void)
{
	bool need_sched = false;
	int i = 0;

	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
		if (!list_empty(&sched_queue[i])) {
			need_sched = true;
			break;
		}
	}

	if (need_sched == true) {
		cancel_delayed_work(&ocmem_sched_thread);
		schedule_delayed_work(&ocmem_sched_thread,
					msecs_to_jiffies(SCHED_DELAY));
		pr_debug("ocmem: Scheduled delayed work\n");
	}
	return 0;
}

static int do_free(struct ocmem_req *req)
{
	int rc = 0;
	struct ocmem_buf *buffer = req->buffer;

	down_write(&req->rw_sem);

	if (is_mapped(req)) {
		pr_err("ocmem: Buffer needs to be unmapped before free\n");
		goto err_free_fail;
	}

	pr_debug("ocmem: do_free: client %s req %p\n", get_name(req->owner),
				req);
	/* Grab the sched mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_free(req);
	mutex_unlock(&sched_mutex);

	switch (rc) {

	case OP_COMPLETE:
		buffer->addr = 0x0;
		buffer->len = 0x0;
		break;
	case OP_FAIL:
	default:
		goto err_free_fail;
		break;
	}

	up_write(&req->rw_sem);
	return 0;
err_free_fail:
	up_write(&req->rw_sem);
	pr_err("ocmem: freeing req %p failed\n", req);
	return -EINVAL;
}

int process_free(int id, struct ocmem_handle *handle)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	unsigned long offset = 0;
	int rc = 0;

	mutex_lock(&free_mutex);

	if (is_blocked(id)) {
		pr_err("Client %d cannot request free\n", id);
		goto free_invalid;
	}

	req = handle_to_req(handle);
	buffer = handle_to_buffer(handle);

	if (!req) {
		pr_err("ocmem: No valid request to free\n");
		goto free_invalid;
	}

	if (req->req_start != core_address(id, buffer->addr)) {
		pr_err("Invalid buffer handle passed for free\n");
		goto free_invalid;
	}

	if (req->edata != NULL) {
		pr_err("ocmem: Request %p(%2lx) yet to process eviction %p\n",
				req, req->state, req->edata);
		goto free_invalid;
	}

	if (is_pending_shrink(req)) {
		pr_err("ocmem: Request %p(%2lx) yet to process eviction\n",
				req, req->state);
		goto pending_shrink;
	}

	/* Remove the request from any restore lists */
	if (req->e_handle)
		cancel_restore(req->e_handle, req);

	/* Remove the request from any pending operations */
	if (TEST_STATE(req, R_ENQUEUED)) {
		mutex_lock(&sched_mutex);
		sched_dequeue(req);
		mutex_unlock(&sched_mutex);
	}

	if (!TEST_STATE(req, R_FREE)) {

		if (TEST_STATE(req, R_MAPPED)) {
			/* unmap the interval and clear the memory */
			rc = process_unmap(req, req->req_start, req->req_end);

			if (rc < 0) {
				pr_err("ocmem: Failed to unmap %p\n", req);
				goto free_fail;
			}
			/* Turn off the memory */
			if (req->req_sz != 0) {

				offset = phys_to_offset(req->req_start);
				rc = ocmem_memory_off(req->owner, offset,
							req->req_sz);

				if (rc < 0) {
					pr_err("Failed to switch OFF memory macros\n");
					goto free_fail;
				}
			}

			rc = do_free(req);
			if (rc < 0) {
				pr_err("ocmem: Failed to free %p\n", req);
				goto free_fail;
			}
		} else
			pr_debug("request %p was already shrunk to 0\n", req);
	}

	if (!TEST_STATE(req, R_FREE)) {
		/* Turn off the memory */
		if (req->req_sz != 0) {

			offset = phys_to_offset(req->req_start);
			rc = ocmem_memory_off(req->owner, offset, req->req_sz);

			if (rc < 0) {
				pr_err("Failed to switch OFF memory macros\n");
				goto free_fail;
			}
		}

		/* free the allocation */
		rc = do_free(req);
		if (rc < 0)
			return -EINVAL;
	}

	inc_ocmem_stat(zone_of(req), NR_FREES);

	ocmem_destroy_req(req);
	handle->req = NULL;

	ocmem_schedule_pending();
	mutex_unlock(&free_mutex);
	return 0;
free_fail:
free_invalid:
	mutex_unlock(&free_mutex);
	return -EINVAL;
pending_shrink:
	mutex_unlock(&free_mutex);
	return -EAGAIN;
}
1575
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001576static void ocmem_rdm_worker(struct work_struct *work)
1577{
1578 int offset = 0;
1579 int rc = 0;
1580 int event;
1581 struct ocmem_rdm_work *work_data = container_of(work,
1582 struct ocmem_rdm_work, work);
1583 int id = work_data->id;
1584 struct ocmem_map_list *list = work_data->list;
1585 int direction = work_data->direction;
1586 struct ocmem_handle *handle = work_data->handle;
1587 struct ocmem_req *req = handle_to_req(handle);
1588 struct ocmem_buf *buffer = handle_to_buffer(handle);
1589
1590 down_write(&req->rw_sem);
1591 offset = phys_to_offset(req->req_start);
1592 rc = ocmem_rdm_transfer(id, list, offset, direction);
1593 if (work_data->direction == TO_OCMEM)
1594 event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
1595 else
1596 event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001597 up_write(&req->rw_sem);
1598 kfree(work_data);
1599 dispatch_notification(id, event, buffer);
1600}
1601
1602int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle,
1603 struct ocmem_map_list *list, int direction)
1604{
1605 struct ocmem_rdm_work *work_data = NULL;
1606
1607 down_write(&req->rw_sem);
1608
1609 work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC);
1610 if (!work_data)
1611 BUG();
1612
1613 work_data->handle = handle;
1614 work_data->list = list;
1615 work_data->id = req->owner;
1616 work_data->direction = direction;
1617 INIT_WORK(&work_data->work, ocmem_rdm_worker);
1618 up_write(&req->rw_sem);
1619 queue_work(ocmem_rdm_wq, &work_data->work);
1620 return 0;
1621}
1622
Neeti Desaidad1d8e2013-01-09 19:42:06 -08001623int process_drop(int id, struct ocmem_handle *handle,
1624 struct ocmem_map_list *list)
1625{
1626 struct ocmem_req *req = NULL;
1627 struct ocmem_buf *buffer = NULL;
1628 int rc = 0;
1629
1630 if (is_blocked(id)) {
1631 pr_err("Client %d cannot request drop\n", id);
1632 return -EINVAL;
1633 }
1634
1635 if (is_tcm(id))
1636 pr_err("Client %d cannot request drop\n", id);
1637
1638 req = handle_to_req(handle);
1639 buffer = handle_to_buffer(handle);
1640
1641 if (!req)
1642 return -EINVAL;
1643
1644 if (req->req_start != core_address(id, buffer->addr)) {
1645 pr_err("Invalid buffer handle passed for drop\n");
1646 return -EINVAL;
1647 }
1648
1649 if (TEST_STATE(req, R_MAPPED)) {
1650 rc = process_unmap(req, req->req_start, req->req_end);
1651 if (rc < 0)
1652 return -EINVAL;
1653 } else
1654 return -EINVAL;
1655
1656 return 0;
1657}
1658
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001659int process_xfer_out(int id, struct ocmem_handle *handle,
1660 struct ocmem_map_list *list)
1661{
1662 struct ocmem_req *req = NULL;
1663 int rc = 0;
1664
1665 req = handle_to_req(handle);
1666
1667 if (!req)
1668 return -EINVAL;
1669
1670 if (!is_mapped(req)) {
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001671 pr_err("Buffer is not currently mapped\n");
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001672 goto transfer_out_error;
1673 }
1674
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001675 rc = queue_transfer(req, handle, list, TO_DDR);
1676
1677 if (rc < 0) {
1678 pr_err("Failed to queue rdm transfer to DDR\n");
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001679 inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001680 goto transfer_out_error;
1681 }
1682
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001683 inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_DDR);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001684 return 0;
1685
1686transfer_out_error:
1687 return -EINVAL;
1688}
1689
1690int process_xfer_in(int id, struct ocmem_handle *handle,
1691 struct ocmem_map_list *list)
1692{
1693 struct ocmem_req *req = NULL;
1694 int rc = 0;
1695
1696 req = handle_to_req(handle);
1697
1698 if (!req)
1699 return -EINVAL;
1700
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001701
1702 if (!is_mapped(req)) {
1703 pr_err("Buffer is not already mapped for transfer\n");
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001704 goto transfer_in_error;
1705 }
1706
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001707 inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_OCMEM);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001708 rc = queue_transfer(req, handle, list, TO_OCMEM);
1709
1710 if (rc < 0) {
1711 pr_err("Failed to queue rdm transfer to OCMEM\n");
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001712 inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001713 goto transfer_in_error;
1714 }
1715
1716 return 0;
1717transfer_in_error:
1718 return -EINVAL;
1719}
1720
1721int process_shrink(int id, struct ocmem_handle *handle, unsigned long size)
1722{
1723 struct ocmem_req *req = NULL;
1724 struct ocmem_buf *buffer = NULL;
1725 struct ocmem_eviction_data *edata = NULL;
1726 int rc = 0;
1727
1728 if (is_blocked(id)) {
1729 pr_err("Client %d cannot request free\n", id);
1730 return -EINVAL;
1731 }
1732
1733 req = handle_to_req(handle);
1734 buffer = handle_to_buffer(handle);
1735
1736 if (!req)
1737 return -EINVAL;
1738
Naveen Ramaraj89738952013-02-13 15:24:57 -08001739 mutex_lock(&free_mutex);
1740
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001741 if (req->req_start != core_address(id, buffer->addr)) {
1742 pr_err("Invalid buffer handle passed for shrink\n");
Naveen Ramaraj89738952013-02-13 15:24:57 -08001743 goto shrink_fail;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001744 }
1745
Naveen Ramaraj89738952013-02-13 15:24:57 -08001746 if (!req->e_handle) {
1747 pr_err("Unable to find evicting request\n");
1748 goto shrink_fail;
1749 }
1750
1751 edata = req->e_handle->edata;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001752
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001753 if (!edata) {
1754 pr_err("Unable to find eviction data\n");
Naveen Ramaraj89738952013-02-13 15:24:57 -08001755 goto shrink_fail;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001756 }
1757
1758 pr_debug("Found edata %p in request %p\n", edata, req);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001759
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001760 inc_ocmem_stat(zone_of(req), NR_SHRINKS);
1761
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001762 if (size == 0) {
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001763 pr_debug("req %p being shrunk to zero\n", req);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001764 if (is_mapped(req)) {
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001765 rc = process_unmap(req, req->req_start, req->req_end);
1766 if (rc < 0)
Naveen Ramaraj89738952013-02-13 15:24:57 -08001767 goto shrink_fail;
1768 }
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001769 rc = do_free(req);
1770 if (rc < 0)
Naveen Ramaraj89738952013-02-13 15:24:57 -08001771 goto shrink_fail;
1772 SET_STATE(req, R_FREE);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001773 } else {
1774 rc = do_shrink(req, size);
1775 if (rc < 0)
Naveen Ramaraj89738952013-02-13 15:24:57 -08001776 goto shrink_fail;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001777 }
1778
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001779 CLEAR_STATE(req, R_ALLOCATED);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001780 CLEAR_STATE(req, R_WF_SHRINK);
1781 SET_STATE(req, R_SHRUNK);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001782
1783 if (atomic_dec_and_test(&edata->pending)) {
1784 pr_debug("ocmem: All conflicting allocations were shrunk\n");
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001785 complete(&edata->completion);
1786 }
1787
Naveen Ramaraj89738952013-02-13 15:24:57 -08001788 mutex_unlock(&free_mutex);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001789 return 0;
Naveen Ramaraj89738952013-02-13 15:24:57 -08001790shrink_fail:
1791 pr_err("ocmem: Failed to shrink request %p of %s\n",
1792 req, get_name(req->owner));
1793 mutex_unlock(&free_mutex);
1794 return -EINVAL;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001795}
1796
1797int process_xfer(int id, struct ocmem_handle *handle,
1798 struct ocmem_map_list *list, int direction)
1799{
1800 int rc = 0;
1801
1802 if (is_tcm(id)) {
1803 WARN(1, "Mapping operation is invalid for client\n");
1804 return -EINVAL;
1805 }
1806
1807 if (direction == TO_DDR)
1808 rc = process_xfer_out(id, handle, list);
1809 else if (direction == TO_OCMEM)
1810 rc = process_xfer_in(id, handle, list);
1811 return rc;
1812}
1813
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001814static struct ocmem_eviction_data *init_eviction(int id)
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001815{
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001816 struct ocmem_eviction_data *edata = NULL;
1817 int prio = ocmem_client_table[id].priority;
1818
1819 edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC);
1820
1821 if (!edata) {
1822 pr_err("ocmem: Could not allocate eviction data\n");
1823 return NULL;
1824 }
1825
1826 INIT_LIST_HEAD(&edata->victim_list);
1827 INIT_LIST_HEAD(&edata->req_list);
1828 edata->prio = prio;
1829 atomic_set(&edata->pending, 0);
1830 return edata;
1831}
1832
static void free_eviction(struct ocmem_eviction_data *edata)
{
	if (!edata)
		return;

	if (!list_empty(&edata->req_list))
		pr_err("ocmem: Eviction data %p not empty\n", edata);

	kfree(edata);
	edata = NULL;
}

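/* Return true if the new request's address range overlaps the old one's */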
static bool is_overlapping(struct ocmem_req *new, struct ocmem_req *old)
{
	if (!new || !old)
		return false;

	pr_debug("check overlap [%lx -- %lx] on [%lx -- %lx]\n",
			new->req_start, new->req_end,
			old->req_start, old->req_end);

	if ((new->req_start < old->req_start &&
		new->req_end >= old->req_start) ||
		(new->req_start >= old->req_start &&
		new->req_start <= old->req_end &&
		new->req_end >= old->req_end)) {
		pr_debug("request %p overlaps with existing req %p\n",
				new, old);
		return true;
	}
	return false;
}

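/*
 * Walk the scheduler tree and mark every lower-priority request that must
 * shrink to satisfy the eviction described by edata. In passive mode every
 * lower-priority request in a candidate region is a victim; otherwise only
 * requests overlapping 'req' are selected. Returns the number of victims.
 */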
static int __evict_common(struct ocmem_eviction_data *edata,
						struct ocmem_req *req)
{
	struct rb_node *rb_node = NULL;
	struct ocmem_req *e_req = NULL;
	bool needs_eviction = false;
	int j = 0;

	for (rb_node = rb_first(&sched_tree); rb_node;
				rb_node = rb_next(rb_node)) {

		struct ocmem_region *tmp_region = NULL;

		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);

		if (tmp_region->max_prio < edata->prio) {
			for (j = edata->prio - 1; j > NO_PRIO; j--) {
				needs_eviction = false;
				e_req = find_req_match(j, tmp_region);
				if (!e_req)
					continue;
				if (edata->passive == true) {
					needs_eviction = true;
				} else {
					needs_eviction = is_overlapping(req,
								e_req);
				}

				if (needs_eviction) {
					pr_debug("adding %p in region %p to eviction list\n",
							e_req, tmp_region);
					SET_STATE(e_req, R_MUST_SHRINK);
					list_add_tail(
						&e_req->eviction_list,
						&edata->req_list);
					atomic_inc(&edata->pending);
					e_req->e_handle = req;
				}
			}
		} else {
			pr_debug("Skipped region %p\n", tmp_region);
		}
	}

	pr_debug("%d requests will be evicted\n", atomic_read(&edata->pending));

	return atomic_read(&edata->pending);
}

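/*
 * Ask each victim client to shrink its allocation to zero by dispatching an
 * OCMEM_ALLOC_SHRINK notification. The caller waits on edata->completion,
 * which is completed once all victims have acknowledged the shrink.
 */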
static void trigger_eviction(struct ocmem_eviction_data *edata)
{
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;
	struct ocmem_buf buffer;

	if (!edata)
		return;

	BUG_ON(atomic_read(&edata->pending) == 0);

	init_completion(&edata->completion);

	list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
	{
		if (req) {
			pr_debug("ocmem: Evicting request %p\n", req);
			buffer.addr = req->req_start;
			buffer.len = 0x0;
			CLEAR_STATE(req, R_MUST_SHRINK);
			dispatch_notification(req->owner, OCMEM_ALLOC_SHRINK,
								&buffer);
			SET_STATE(req, R_WF_SHRINK);
		}
	}
	return;
}

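/* Passive eviction: evict all lower-priority allocations on behalf of client 'id' */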
int process_evict(int id)
{
	struct ocmem_eviction_data *edata = NULL;
	int rc = 0;

	edata = init_eviction(id);

	if (!edata)
		return -EINVAL;

	edata->passive = true;

	mutex_lock(&sched_mutex);

	rc = __evict_common(edata, NULL);

	if (rc == 0)
		goto skip_eviction;

	trigger_eviction(edata);

	evictions[id] = edata;

	mutex_unlock(&sched_mutex);

	wait_for_completion(&edata->completion);

	return 0;

skip_eviction:
	/* Nothing needed eviction; release the unused eviction data */
	free_eviction(edata);
	evictions[id] = NULL;
	mutex_unlock(&sched_mutex);
	return 0;
}

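/*
 * Targeted eviction: shrink only the lower-priority allocations that
 * conflict with 'req', then wait for the victims to acknowledge.
 */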
static int run_evict(struct ocmem_req *req)
{
	struct ocmem_eviction_data *edata = NULL;
	int rc = 0;

	if (!req)
		return -EINVAL;

	edata = init_eviction(req->owner);

	if (!edata)
		return -EINVAL;

	edata->passive = false;

	mutex_lock(&free_mutex);
	rc = __evict_common(edata, req);

	if (rc == 0)
		goto skip_eviction;

	trigger_eviction(edata);

	pr_debug("ocmem: attaching eviction %p to request %p\n", edata, req);
	req->edata = edata;

	mutex_unlock(&free_mutex);

	wait_for_completion(&edata->completion);

	pr_debug("ocmem: eviction completed successfully\n");
	return 0;

skip_eviction:
	pr_err("ocmem: Unable to run eviction\n");
	free_eviction(edata);
	req->edata = NULL;
	mutex_unlock(&free_mutex);
	return 0;
}

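/* Re-queue every evicted request so the scheduler can re-allocate it later */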
static int __restore_common(struct ocmem_eviction_data *edata)
{
	struct ocmem_req *req = NULL;

	if (!edata)
		return -EINVAL;

	while (!list_empty(&edata->req_list)) {
		req = list_first_entry(&edata->req_list, struct ocmem_req,
						eviction_list);
		list_del_init(&req->eviction_list);
		pr_debug("ocmem: restoring evicted request %p\n", req);
		req->edata = NULL;
		req->e_handle = NULL;
		req->op = SCHED_ALLOCATE;
		inc_ocmem_stat(zone_of(req), NR_RESTORES);
		sched_enqueue(req);
	}

	pr_debug("Scheduled all evicted regions\n");

	return 0;
}

static int sched_restore(struct ocmem_req *req)
{
	int rc = 0;

	if (!req)
		return -EINVAL;

	if (!req->edata)
		return 0;

	mutex_lock(&free_mutex);
	rc = __restore_common(req->edata);
	mutex_unlock(&free_mutex);

	if (rc < 0)
		return -EINVAL;

	free_eviction(req->edata);
	req->edata = NULL;
	return 0;
}

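/* Restore the allocations evicted on behalf of client 'id' and kick the scheduler */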
int process_restore(int id)
{
	struct ocmem_eviction_data *edata = evictions[id];
	int rc = 0;

	if (!edata)
		return -EINVAL;

	mutex_lock(&free_mutex);
	rc = __restore_common(edata);
	mutex_unlock(&free_mutex);

	if (rc < 0) {
		pr_err("Failed to restore evicted requests\n");
		return -EINVAL;
	}

	free_eviction(edata);
	evictions[id] = NULL;
	ocmem_schedule_pending();
	return 0;
}

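/*
 * Core allocation path. If the scheduler asks for an eviction, run it,
 * re-queue the victims for later restoration, and retry the allocation
 * with the allocation mutex still held.
 */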
static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait)
{
	int rc = 0;
	int ret = 0;
	struct ocmem_buf *buffer = req->buffer;

	down_write(&req->rw_sem);

	mutex_lock(&allocation_mutex);
retry_allocate:

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_allocate(req, can_block, can_wait);
	mutex_unlock(&sched_mutex);

	if (rc == OP_EVICT) {

		ret = run_evict(req);

		if (ret == 0) {
			rc = sched_restore(req);
			if (rc < 0) {
				pr_err("Failed to restore for req %p\n", req);
				goto err_allocate_fail;
			}
			req->edata = NULL;

			pr_debug("Attempting to re-allocate req %p\n", req);
			req->req_start = 0x0;
			req->req_end = 0x0;
			goto retry_allocate;
		} else {
			goto err_allocate_fail;
		}
	}

	mutex_unlock(&allocation_mutex);

	if (rc == OP_FAIL) {
		inc_ocmem_stat(zone_of(req), NR_ALLOCATION_FAILS);
		goto err_allocate_fail;
	}

	if (rc == OP_RESCHED) {
		buffer->addr = 0x0;
		buffer->len = 0x0;
		pr_debug("ocmem: Enqueuing req %p\n", req);
		sched_enqueue(req);
	} else if (rc == OP_PARTIAL) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
		inc_ocmem_stat(zone_of(req), NR_RANGE_ALLOCATIONS);
		pr_debug("ocmem: Enqueuing req %p\n", req);
		sched_enqueue(req);
	} else if (rc == OP_COMPLETE) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_allocate_fail:
	mutex_unlock(&allocation_mutex);
	up_write(&req->rw_sem);
	return -EINVAL;
}

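/*
 * Copy the contents of a request's OCMEM region to the DDR buffer at 'addr'.
 * The region is presumably unsecured for the copy via ocmem_enable_dump()
 * and re-secured afterwards with ocmem_disable_dump().
 */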
static int do_dump(struct ocmem_req *req, unsigned long addr)
{
	void __iomem *req_vaddr;
	unsigned long offset = 0x0;
	int rc = 0;

	down_write(&req->rw_sem);

	offset = phys_to_offset(req->req_start);

	req_vaddr = ocmem_vaddr + offset;

	if (!req_vaddr)
		goto err_do_dump;

	rc = ocmem_enable_dump(req->owner, offset, req->req_sz);

	if (rc < 0)
		goto err_do_dump;

	pr_debug("Dumping client %s buffer ocmem p: %lx (v: %p) to ddr %lx\n",
			get_name(req->owner), req->req_start,
			req_vaddr, addr);

	memcpy((void *)addr, req_vaddr, req->req_sz);

	rc = ocmem_disable_dump(req->owner, offset, req->req_sz);

	if (rc < 0)
		pr_err("Failed to secure request %p of %s after dump\n",
				req, get_name(req->owner));

	up_write(&req->rw_sem);
	return 0;
err_do_dump:
	up_write(&req->rw_sem);
	return -EINVAL;
}

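/*
 * Synchronous allocation entry point: build a request from the client's
 * parameters, allocate it, and map and power on the assigned region when
 * a non-zero size was granted.
 */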
int process_allocate(int id, struct ocmem_handle *handle,
			unsigned long min, unsigned long max,
			unsigned long step, bool can_block, bool can_wait)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	int rc = 0;
	unsigned long offset = 0;

	/* sanity checks */
	if (is_blocked(id)) {
		pr_err("Client %d cannot request allocation\n", id);
		return -EINVAL;
	}

	if (handle->req != NULL) {
		pr_err("Invalid handle passed in\n");
		return -EINVAL;
	}

	buffer = handle_to_buffer(handle);
	BUG_ON(buffer == NULL);

	/* prepare a request structure to represent this transaction */
	req = ocmem_create_req();
	if (!req)
		return -ENOMEM;

	req->owner = id;
	req->req_min = min;
	req->req_max = max;
	req->req_step = step;
	req->prio = ocmem_client_table[id].priority;
	req->op = SCHED_ALLOCATE;
	req->buffer = buffer;

	inc_ocmem_stat(zone_of(req), NR_REQUESTS);

	rc = do_allocate(req, can_block, can_wait);

	if (rc < 0)
		goto do_allocate_error;

	inc_ocmem_stat(zone_of(req), NR_SYNC_ALLOCATIONS);

	handle->req = req;

	if (req->req_sz != 0) {

		rc = process_map(req, req->req_start, req->req_end);
		if (rc < 0)
			goto map_error;

		offset = phys_to_offset(req->req_start);

		rc = ocmem_memory_on(req->owner, offset, req->req_sz);

		if (rc < 0) {
			pr_err("Failed to switch ON memory macros\n");
			goto power_ctl_error;
		}
	}

	return 0;

power_ctl_error:
	process_unmap(req, req->req_start, req->req_end);
map_error:
	handle->req = NULL;
	do_free(req);
do_allocate_error:
	ocmem_destroy_req(req);
	return -EINVAL;
}

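/*
 * Asynchronous allocation retry, run from the scheduler work queue for
 * previously enqueued requests. On success the client is notified of the
 * buffer growth via OCMEM_ALLOC_GROW.
 */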
int process_delayed_allocate(struct ocmem_req *req)
{
	struct ocmem_handle *handle = NULL;
	int rc = 0;
	int id = req->owner;
	unsigned long offset = 0;

	handle = req_to_handle(req);
	BUG_ON(handle == NULL);

	rc = do_allocate(req, true, false);

	if (rc < 0)
		goto do_allocate_error;

	/* The request can still be pending */
	if (TEST_STATE(req, R_PENDING))
		return 0;

	inc_ocmem_stat(zone_of(req), NR_ASYNC_ALLOCATIONS);

	if (req->req_sz != 0) {

		rc = process_map(req, req->req_start, req->req_end);
		if (rc < 0)
			goto map_error;

		offset = phys_to_offset(req->req_start);

		rc = ocmem_memory_on(req->owner, offset, req->req_sz);

		if (rc < 0) {
			pr_err("Failed to switch ON memory macros\n");
			goto power_ctl_error;
		}
	}

	/* Notify the client about the buffer growth */
	rc = dispatch_notification(id, OCMEM_ALLOC_GROW, req->buffer);
	if (rc < 0) {
		pr_err("No notifier callback to cater for req %p event: %d\n",
				req, OCMEM_ALLOC_GROW);
		BUG();
	}
	return 0;

power_ctl_error:
	process_unmap(req, req->req_start, req->req_end);
map_error:
	handle->req = NULL;
	do_free(req);
do_allocate_error:
	ocmem_destroy_req(req);
	return -EINVAL;
}

int process_dump(int id, struct ocmem_handle *handle, unsigned long addr)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;

	if (!is_mapped(req)) {
		pr_err("Buffer is not mapped\n");
		goto dump_error;
	}

	inc_ocmem_stat(zone_of(req), NR_DUMP_REQUESTS);

	mutex_lock(&sched_mutex);
	rc = do_dump(req, addr);
	mutex_unlock(&sched_mutex);

	if (rc < 0)
		goto dump_error;

	inc_ocmem_stat(zone_of(req), NR_DUMP_COMPLETE);
	return 0;

dump_error:
	pr_err("Dumping OCMEM memory failed for client %d\n", id);
	return -EINVAL;
}

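/* Scheduler work item: fetch one pending request and dispatch it by operation */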
static void ocmem_sched_wk_func(struct work_struct *work)
{
	struct ocmem_buf *buffer = NULL;
	struct ocmem_handle *handle = NULL;
	struct ocmem_req *req = ocmem_fetch_req();

	if (!req) {
		pr_debug("No Pending Requests found\n");
		return;
	}

	pr_debug("ocmem: sched_wk pending req %p\n", req);
	handle = req_to_handle(req);
	buffer = handle_to_buffer(handle);
	BUG_ON(req->op == SCHED_NOP);

	switch (req->op) {
	case SCHED_GROW:
		process_grow(req);
		break;
	case SCHED_ALLOCATE:
		process_delayed_allocate(req);
		break;
	default:
		pr_err("ocmem: Unknown operation encountered\n");
		break;
	}
	return;
}

static int ocmem_allocations_show(struct seq_file *f, void *dummy)
{
	struct rb_node *rb_node = NULL;
	struct ocmem_req *req = NULL;
	unsigned j;

	mutex_lock(&sched_mutex);
	for (rb_node = rb_first(&sched_tree); rb_node;
			rb_node = rb_next(rb_node)) {
		struct ocmem_region *tmp_region = NULL;

		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
		for (j = MAX_OCMEM_PRIO - 1; j > NO_PRIO; j--) {
			req = find_req_match(j, tmp_region);
			if (req) {
				seq_printf(f,
					"owner: %s 0x%lx -- 0x%lx size 0x%lx [state: %2lx]\n",
					get_name(req->owner),
					req->req_start, req->req_end,
					req->req_sz, req->state);
			}
		}
	}
	mutex_unlock(&sched_mutex);
	return 0;
}

static int ocmem_allocations_open(struct inode *inode, struct file *file)
{
	return single_open(file, ocmem_allocations_show, inode->i_private);
}

static const struct file_operations allocations_show_fops = {
	.open = ocmem_allocations_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

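/* One-time scheduler setup: locks, queues, work queues and the debugfs node */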
int ocmem_sched_init(struct platform_device *pdev)
{
	int i = 0;
	struct ocmem_plat_data *pdata = NULL;
	struct device *dev = &pdev->dev;

	sched_tree = RB_ROOT;
	pdata = platform_get_drvdata(pdev);
	mutex_init(&allocation_mutex);
	mutex_init(&free_mutex);
	mutex_init(&sched_mutex);
	mutex_init(&sched_queue_mutex);
	ocmem_vaddr = pdata->vbase;
	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++)
		INIT_LIST_HEAD(&sched_queue[i]);

	mutex_init(&rdm_mutex);
	INIT_LIST_HEAD(&rdm_queue);
	ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0);
	if (!ocmem_rdm_wq)
		return -ENOMEM;
	ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0);
	if (!ocmem_eviction_wq)
		return -ENOMEM;

	if (!debugfs_create_file("allocations", S_IRUGO, pdata->debug_node,
					NULL, &allocations_show_fops)) {
		dev_err(dev, "Unable to create debugfs node for scheduler\n");
		return -EBUSY;
	}
	return 0;
}