/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <mach/ocmem_priv.h>

enum request_states {
	R_FREE = 0x0,		/* request is not allocated */
	R_PENDING,		/* request has a pending operation */
	R_ALLOCATED,		/* request has been allocated */
	R_ENQUEUED,		/* request has been enqueued for future retry */
	R_MUST_GROW,		/* request must grow as a part of pending operation */
	R_MUST_SHRINK,		/* request must shrink */
	R_WF_SHRINK,		/* shrink must be ack'ed by a client */
	R_SHRUNK,		/* request was shrunk */
	R_MUST_MAP,		/* request must be mapped before being used */
	R_MUST_UNMAP,		/* request must be unmapped when not being used */
	R_MAPPED,		/* request is mapped and actively used by client */
	R_UNMAPPED,		/* request is not mapped, so it's not in active use */
	R_EVICTED,		/* request is evicted and must be restored */
};

#define SET_STATE(x, val) (set_bit((val), &(x)->state))
#define CLEAR_STATE(x, val) (clear_bit((val), &(x)->state))
#define TEST_STATE(x, val) (test_bit((val), &(x)->state))
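
/*
 * Request state lives in the req->state bitmask, so several of the flags
 * above can be set at the same time. A typical lifecycle under the macros
 * above: __sched_allocate() sets R_ALLOCATED and R_MUST_MAP, and do_map()
 * later clears R_MUST_MAP and sets R_MAPPED.
 */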

enum op_res {
	OP_COMPLETE = 0x0,
	OP_RESCHED,
	OP_PARTIAL,
	OP_EVICT,
	OP_FAIL = ~0x0,
};

/* Represents various client priorities */
/* Note: More than one client can share a priority level */
enum client_prio {
	MIN_PRIO = 0x0,
	NO_PRIO = MIN_PRIO,
	PRIO_SENSORS = 0x1,
	PRIO_OTHER_OS = 0x1,
	PRIO_LP_AUDIO = 0x1,
	PRIO_HP_AUDIO = 0x2,
	PRIO_VOICE = 0x3,
	PRIO_GFX_GROWTH = 0x4,
	PRIO_VIDEO = 0x5,
	PRIO_GFX = 0x6,
	PRIO_OCMEM = 0x7,
	MAX_OCMEM_PRIO = PRIO_OCMEM + 1,
};

static void __iomem *ocmem_vaddr;
static struct list_head sched_queue[MAX_OCMEM_PRIO];
static struct mutex sched_queue_mutex;

/* The duration in msecs before a pending operation is scheduled.
 * This allows an idle window between use case boundaries where various
 * hardware state changes can occur. The value will be tweaked on actual
 * hardware.
 */
/* Delay in ms for switching to low power mode for OCMEM */
#define SCHED_DELAY 5000

static struct list_head rdm_queue;
static struct mutex rdm_mutex;
static struct workqueue_struct *ocmem_rdm_wq;
static struct workqueue_struct *ocmem_eviction_wq;

static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX];

struct ocmem_rdm_work {
	int id;
	struct ocmem_map_list *list;
	struct ocmem_handle *handle;
	int direction;
	struct work_struct work;
};

/* OCMEM Operational modes */
enum ocmem_client_modes {
	OCMEM_PERFORMANCE = 1,
	OCMEM_PASSIVE,
	OCMEM_LOW_POWER,
	OCMEM_MODE_MAX = OCMEM_LOW_POWER
};

/* OCMEM Addressing modes */
enum ocmem_interconnects {
	OCMEM_BLOCKED = 0,
	OCMEM_PORT = 1,
	OCMEM_OCMEMNOC = 2,
	OCMEM_SYSNOC = 3,
};

enum ocmem_tz_client {
	TZ_UNUSED = 0x0,
	TZ_GRAPHICS,
	TZ_VIDEO,
	TZ_LP_AUDIO,
	TZ_SENSORS,
	TZ_OTHER_OS,
	TZ_DEBUG,
};

/**
 * Primary OCMEM Arbitration Table
 **/
struct ocmem_table {
	int client_id;
	int priority;
	int mode;
	int hw_interconnect;
	int tz_id;
} ocmem_client_table[OCMEM_CLIENT_MAX] = {
	{OCMEM_GRAPHICS, PRIO_GFX, OCMEM_PERFORMANCE, OCMEM_PORT,
							TZ_GRAPHICS},
	{OCMEM_VIDEO, PRIO_VIDEO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC,
							TZ_VIDEO},
	{OCMEM_CAMERA, NO_PRIO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC,
							TZ_UNUSED},
	{OCMEM_HP_AUDIO, PRIO_HP_AUDIO, OCMEM_PASSIVE, OCMEM_BLOCKED,
							TZ_UNUSED},
	{OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED,
							TZ_UNUSED},
	{OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, OCMEM_SYSNOC,
							TZ_LP_AUDIO},
	{OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC,
							TZ_SENSORS},
	{OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC,
							TZ_OTHER_OS},
};

static struct rb_root sched_tree;
static struct mutex sched_mutex;
static struct mutex allocation_mutex;
static struct mutex free_mutex;

/* A region represents a contiguous interval in OCMEM address space */
struct ocmem_region {
	/* Chain in Interval Tree */
	struct rb_node region_rb;
	/* Hash map of requests */
	struct idr region_idr;
	/* Chain in eviction list */
	struct list_head eviction_list;
	unsigned long r_start;
	unsigned long r_end;
	unsigned long r_sz;
	/* Highest priority of all requests served by this region */
	int max_prio;
};

/* Is OCMEM tightly coupled to the client? */
static inline int is_tcm(int id)
{
	if (ocmem_client_table[id].hw_interconnect == OCMEM_PORT ||
		ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC)
		return 1;
	else
		return 0;
}

static inline int is_iface_access(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC ? 1 : 0;
}

static inline int is_remapped_access(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_SYSNOC ? 1 : 0;
}

static inline int is_blocked(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_BLOCKED ? 1 : 0;
}

inline struct ocmem_buf *handle_to_buffer(struct ocmem_handle *handle)
{
	if (handle)
		return &handle->buffer;
	else
		return NULL;
}

inline struct ocmem_handle *buffer_to_handle(struct ocmem_buf *buffer)
{
	if (buffer)
		return container_of(buffer, struct ocmem_handle, buffer);
	else
		return NULL;
}

inline struct ocmem_req *handle_to_req(struct ocmem_handle *handle)
{
	if (handle)
		return handle->req;
	else
		return NULL;
}

inline struct ocmem_handle *req_to_handle(struct ocmem_req *req)
{
	if (req && req->buffer)
		return container_of(req->buffer, struct ocmem_handle, buffer);
	else
		return NULL;
}

/* Simple wrappers which will have debug features added later */
inline int ocmem_read(void *at)
{
	return readl_relaxed(at);
}

inline int ocmem_write(unsigned long val, void *at)
{
	writel_relaxed(val, at);
	return 0;
}

inline int get_mode(int id)
{
	if (!check_id(id))
		return MODE_NOT_SET;
	else
		return ocmem_client_table[id].mode == OCMEM_PERFORMANCE ?
							WIDE_MODE : THIN_MODE;
}

inline int get_tz_id(int id)
{
	if (!check_id(id))
		return TZ_UNUSED;
	else
		return ocmem_client_table[id].tz_id;
}

/* Returns the address that can be used by a device core to access OCMEM */
static unsigned long device_address(int id, unsigned long addr)
{
	int hw_interconnect = ocmem_client_table[id].hw_interconnect;
	unsigned long ret_addr = 0x0;

	switch (hw_interconnect) {
	case OCMEM_PORT:
	case OCMEM_OCMEMNOC:
		ret_addr = phys_to_offset(addr);
		break;
	case OCMEM_SYSNOC:
		ret_addr = addr;
		break;
	case OCMEM_BLOCKED:
		ret_addr = 0x0;
		break;
	}
	return ret_addr;
}

/* Returns the address as viewed by the core */
static unsigned long core_address(int id, unsigned long addr)
{
	int hw_interconnect = ocmem_client_table[id].hw_interconnect;
	unsigned long ret_addr = 0x0;

	switch (hw_interconnect) {
	case OCMEM_PORT:
	case OCMEM_OCMEMNOC:
		ret_addr = offset_to_phys(addr);
		break;
	case OCMEM_SYSNOC:
		ret_addr = addr;
		break;
	case OCMEM_BLOCKED:
		ret_addr = 0x0;
		break;
	}
	return ret_addr;
}

static inline struct ocmem_zone *zone_of(struct ocmem_req *req)
{
	int owner;
	if (!req)
		return NULL;
	owner = req->owner;
	return get_zone(owner);
}

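/*
 * The scheduler tracks backing regions in 'sched_tree', an rbtree of
 * non-overlapping address intervals ordered by their start address.
 * insert_region()/remove_region() maintain the tree, and find_region()
 * walks it to locate the region containing an address, or the nearest
 * region ending above it when none contains it.
 */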
static int insert_region(struct ocmem_region *region)
{
	struct rb_root *root = &sched_tree;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ocmem_region *tmp = NULL;
	unsigned long addr = region->r_start;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct ocmem_region, region_rb);

		if (tmp->r_end > addr) {
			if (tmp->r_start <= addr)
				break;
			p = &(*p)->rb_left;
		} else if (tmp->r_end <= addr)
			p = &(*p)->rb_right;
	}
	rb_link_node(&region->region_rb, parent, p);
	rb_insert_color(&region->region_rb, root);
	return 0;
}

static int remove_region(struct ocmem_region *region)
{
	struct rb_root *root = &sched_tree;
	rb_erase(&region->region_rb, root);
	return 0;
}

static struct ocmem_req *ocmem_create_req(void)
{
	struct ocmem_req *p = NULL;

	p = kzalloc(sizeof(struct ocmem_req), GFP_KERNEL);
	if (!p)
		return NULL;

	INIT_LIST_HEAD(&p->zone_list);
	INIT_LIST_HEAD(&p->sched_list);
	init_rwsem(&p->rw_sem);
	SET_STATE(p, R_FREE);
	pr_debug("request %p created\n", p);
	return p;
}

static int ocmem_destroy_req(struct ocmem_req *req)
{
	kfree(req);
	return 0;
}

static struct ocmem_region *create_region(void)
{
	struct ocmem_region *p = NULL;

	p = kzalloc(sizeof(struct ocmem_region), GFP_KERNEL);
	if (!p)
		return NULL;
	idr_init(&p->region_idr);
	INIT_LIST_HEAD(&p->eviction_list);
	p->r_start = p->r_end = p->r_sz = 0x0;
	p->max_prio = NO_PRIO;
	return p;
}

static int destroy_region(struct ocmem_region *region)
{
	idr_destroy(&region->region_idr);
	kfree(region);
	return 0;
}

static int attach_req(struct ocmem_region *region, struct ocmem_req *req)
{
	int ret, id;

	while (1) {
		if (idr_pre_get(&region->region_idr, GFP_KERNEL) == 0)
			return -ENOMEM;

		ret = idr_get_new_above(&region->region_idr, req, 1, &id);

		if (ret != -EAGAIN)
			break;
	}

	if (!ret) {
		req->req_id = id;
		pr_debug("ocmem: request %p(id:%d) attached to region %p\n",
				req, id, region);
		return 0;
	}
	return -EINVAL;
}

static int detach_req(struct ocmem_region *region, struct ocmem_req *req)
{
	idr_remove(&region->region_idr, req->req_id);
	return 0;
}

static int populate_region(struct ocmem_region *region, struct ocmem_req *req)
{
	region->r_start = req->req_start;
	region->r_end = req->req_end;
	region->r_sz = req->req_end - req->req_start + 1;
	return 0;
}

static int region_req_count(int id, void *ptr, void *data)
{
	int *count = data;
	*count = *count + 1;
	return 0;
}

static int req_count(struct ocmem_region *region)
{
	int count = 0;
	idr_for_each(&region->region_idr, region_req_count, &count);
	return count;
}

static int compute_max_prio(int id, void *ptr, void *data)
{
	int *max = data;
	struct ocmem_req *req = ptr;

	if (req->prio > *max)
		*max = req->prio;
	return 0;
}

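/*
 * A region's max_prio caches the highest priority among the requests it
 * currently serves; the eviction path compares this value against the
 * incoming client's priority when selecting victims.
 */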
static int update_region_prio(struct ocmem_region *region)
{
	int max_prio = NO_PRIO;
	if (req_count(region) != 0) {
		idr_for_each(&region->region_idr, compute_max_prio, &max_prio);
		region->max_prio = max_prio;
	} else {
		region->max_prio = NO_PRIO;
	}
	pr_debug("ocmem: Updating prio of region %p as %d\n",
			region, max_prio);

	return 0;
}

static struct ocmem_region *find_region(unsigned long addr)
{
	struct ocmem_region *region = NULL;
	struct rb_node *rb_node = NULL;

	rb_node = sched_tree.rb_node;

	while (rb_node) {
		struct ocmem_region *tmp_region = NULL;
		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);

		if (tmp_region->r_end > addr) {
			region = tmp_region;
			if (tmp_region->r_start <= addr)
				break;
			rb_node = rb_node->rb_left;
		} else {
			rb_node = rb_node->rb_right;
		}
	}
	return region;
}

static struct ocmem_region *find_region_intersection(unsigned long start,
					unsigned long end)
{
	struct ocmem_region *region = NULL;
	region = find_region(start);
	if (region && end <= region->r_start)
		region = NULL;
	return region;
}

static struct ocmem_region *find_region_match(unsigned long start,
					unsigned long end)
{
	struct ocmem_region *region = NULL;
	region = find_region(start);
	if (region && start == region->r_start && end == region->r_end)
		return region;
	return NULL;
}

static struct ocmem_req *find_req_match(int owner, struct ocmem_region *region)
{
	struct ocmem_req *req = NULL;

	if (!region)
		return NULL;

	req = idr_find(&region->region_idr, owner);

	return req;
}

/* Must be called with req->rw_sem held */
static inline int is_mapped(struct ocmem_req *req)
{
	return TEST_STATE(req, R_MAPPED);
}

static inline int is_pending_shrink(struct ocmem_req *req)
{
	return TEST_STATE(req, R_MUST_SHRINK) ||
		TEST_STATE(req, R_WF_SHRINK);
}

/* Must be called with sched_mutex held */
static int __sched_unmap(struct ocmem_req *req)
{
	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	if (!TEST_STATE(req, R_MAPPED))
		goto invalid_op_error;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	if (!is_mapped(req)) {
		pr_err("Request is not currently mapped");
		goto invalid_op_error;
	}

	/* Update the request state */
	CLEAR_STATE(req, R_MAPPED);
	SET_STATE(req, R_MUST_MAP);

	return OP_COMPLETE;

invalid_op_error:
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_map(struct ocmem_req *req)
{
	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	/* Update the request state */
	CLEAR_STATE(req, R_MUST_MAP);
	SET_STATE(req, R_MAPPED);

	return OP_COMPLETE;

invalid_op_error:
	return OP_FAIL;
}

static int do_map(struct ocmem_req *req)
{
	int rc = 0;

	down_write(&req->rw_sem);

	mutex_lock(&sched_mutex);
	rc = __sched_map(req);
	mutex_unlock(&sched_mutex);

	up_write(&req->rw_sem);

	if (rc == OP_FAIL)
		return -EINVAL;

	return 0;
}

static int do_unmap(struct ocmem_req *req)
{
	int rc = 0;

	down_write(&req->rw_sem);

	mutex_lock(&sched_mutex);
	rc = __sched_unmap(req);
	mutex_unlock(&sched_mutex);

	up_write(&req->rw_sem);

	if (rc == OP_FAIL)
		return -EINVAL;

	return 0;
}

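/*
 * process_map()/process_unmap() bracket every transition into and out of
 * OCMEM: the core clock (and the interface clock for OCMEM-NOC clients)
 * is enabled and the interval is secured with ocmem_lock() before the
 * mapping is recorded; the unmap path undoes the same steps in reverse.
 */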
static int process_map(struct ocmem_req *req, unsigned long start,
				unsigned long end)
{
	int rc = 0;

	rc = ocmem_enable_core_clock();

	if (rc < 0)
		goto core_clock_fail;

	if (is_iface_access(req->owner)) {
		rc = ocmem_enable_iface_clock();

		if (rc < 0)
			goto iface_clock_fail;
	}

	rc = ocmem_lock(req->owner, phys_to_offset(req->req_start), req->req_sz,
							get_mode(req->owner));

	if (rc < 0) {
		pr_err("ocmem: Failed to secure request %p for %d\n", req,
				req->owner);
		goto lock_failed;
	}

	rc = do_map(req);

	if (rc < 0) {
		pr_err("ocmem: Failed to map request %p for %d\n",
							req, req->owner);
		goto process_map_fail;
	}
	pr_debug("ocmem: Mapped request %p\n", req);
	return 0;

process_map_fail:
	ocmem_unlock(req->owner, phys_to_offset(req->req_start), req->req_sz);
lock_failed:
	if (is_iface_access(req->owner))
		ocmem_disable_iface_clock();
iface_clock_fail:
	ocmem_disable_core_clock();
core_clock_fail:
	pr_err("ocmem: Failed to map ocmem request\n");
	return rc;
}

static int process_unmap(struct ocmem_req *req, unsigned long start,
				unsigned long end)
{
	int rc = 0;

	rc = do_unmap(req);

	if (rc < 0)
		goto process_unmap_fail;

	rc = ocmem_unlock(req->owner, phys_to_offset(req->req_start),
				req->req_sz);

	if (rc < 0) {
		pr_err("ocmem: Failed to un-secure request %p for %d\n", req,
				req->owner);
		goto unlock_failed;
	}

	if (is_iface_access(req->owner))
		ocmem_disable_iface_clock();
	ocmem_disable_core_clock();
	pr_debug("ocmem: Unmapped request %p\n", req);
	return 0;

unlock_failed:
process_unmap_fail:
	pr_err("ocmem: Failed to unmap ocmem request\n");
	return rc;
}

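/*
 * Growing a request frees its current interval and allocates a larger one
 * from the owner's zone when no higher-priority region conflicts; on a
 * conflict the growth is retried in 'req_step' decrements or rescheduled.
 * The backing region is detached and re-inserted so the interval tree is
 * rebalanced for the new size.
 */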
static int __sched_grow(struct ocmem_req *req, bool can_block)
{
	unsigned long min = req->req_min;
	unsigned long max = req->req_max;
	unsigned long step = req->req_step;
	int owner = req->owner;
	unsigned long curr_sz = 0;
	unsigned long growth_sz = 0;
	unsigned long curr_start = 0;
	enum client_prio prio = req->prio;
	unsigned long alloc_addr = 0x0;
	bool retry;
	struct ocmem_region *spanned_r = NULL;
	struct ocmem_region *overlap_r = NULL;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	struct ocmem_zone *zone = get_zone(owner);
	struct ocmem_region *region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	curr_sz = matched_req->req_sz;
	curr_start = matched_req->req_start;
	growth_sz = matched_req->req_max - matched_req->req_sz;

	pr_debug("Attempting to grow req %p from %lx to %lx\n",
			req, matched_req->req_sz, matched_req->req_max);

	retry = false;

	pr_debug("ocmem: GROW: growth size %lx\n", growth_sz);

retry_next_step:

	spanned_r = NULL;
	overlap_r = NULL;

	spanned_r = find_region(zone->z_head);
	overlap_r = find_region_intersection(zone->z_head,
				zone->z_head + growth_sz);

	if (overlap_r == NULL) {
		/* no conflicting regions, schedule this region */
		zone->z_ops->free(zone, curr_start, curr_sz);
		alloc_addr = zone->z_ops->allocate(zone, curr_sz + growth_sz);

		if (alloc_addr < 0) {
			pr_err("ocmem: zone allocation operation failed\n");
			goto internal_error;
		}

		curr_sz += growth_sz;
		/* Detach the region from the interval tree */
		/* This is to guarantee that any change in size
		 * causes the tree to be rebalanced if required */

		detach_req(matched_region, req);
		if (req_count(matched_region) == 0) {
			remove_region(matched_region);
			region = matched_region;
		} else {
			region = create_region();
			if (!region) {
				pr_err("ocmem: Unable to create region\n");
				goto region_error;
			}
		}

		/* update the request */
		req->req_start = alloc_addr;
		/* increment the size to reflect new length */
		req->req_sz = curr_sz;
		req->req_end = alloc_addr + req->req_sz - 1;

		/* update request state */
		CLEAR_STATE(req, R_MUST_GROW);
		SET_STATE(req, R_ALLOCATED);
		SET_STATE(req, R_MUST_MAP);
		req->op = SCHED_MAP;

		/* update the region with new req */
		attach_req(region, req);
		populate_region(region, req);
		update_region_prio(region);

		/* update the tree with new region */
		if (insert_region(region)) {
			pr_err("ocmem: Failed to insert the region\n");
			goto region_error;
		}

		if (retry) {
			SET_STATE(req, R_MUST_GROW);
			SET_STATE(req, R_PENDING);
			req->op = SCHED_GROW;
			return OP_PARTIAL;
		}
	} else if (spanned_r != NULL && overlap_r != NULL) {
		/* resolve conflicting regions based on priority */
		if (overlap_r->max_prio < prio) {
			/* Growth cannot be triggered unless a previous
			 * client of lower priority was evicted */
			pr_err("ocmem: Invalid growth scheduled\n");
			/* This is serious enough to fail */
			BUG();
			return OP_FAIL;
		} else if (overlap_r->max_prio > prio) {
			if (min == max) {
				/* Cannot grow at this time, try later */
				SET_STATE(req, R_PENDING);
				SET_STATE(req, R_MUST_GROW);
				return OP_RESCHED;
			} else {
				/* Try to grow in steps */
				growth_sz -= step;
				/* We are OOM at this point so need to retry */
				if (growth_sz <= curr_sz) {
					SET_STATE(req, R_PENDING);
					SET_STATE(req, R_MUST_GROW);
					return OP_RESCHED;
				}
				retry = true;
				pr_debug("ocmem: Attempting with reduced size %lx\n",
						growth_sz);
				goto retry_next_step;
			}
		} else {
			pr_err("ocmem: grow: New Region %p Existing %p\n",
					matched_region, overlap_r);
			pr_err("ocmem: Undetermined behavior\n");
			/* This is serious enough to fail */
			BUG();
		}
	} else if (spanned_r == NULL && overlap_r != NULL) {
		goto err_not_supported;
	}

	return OP_COMPLETE;

err_not_supported:
	pr_err("ocmem: Scheduled unsupported operation\n");
	return OP_FAIL;
region_error:
	zone->z_ops->free(zone, alloc_addr, curr_sz);
	detach_req(region, req);
	update_region_prio(region);
	/* req is going to be destroyed by the caller anyways */
internal_error:
	destroy_region(region);
invalid_op_error:
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_free(struct ocmem_req *req)
{
	int owner = req->owner;
	int ret = 0;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	struct ocmem_zone *zone = get_zone(owner);

	BUG_ON(!zone);

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req)
		goto invalid_op_error;
	if (matched_req != req)
		goto invalid_op_error;

	ret = zone->z_ops->free(zone,
		matched_req->req_start, matched_req->req_sz);

	if (ret < 0)
		goto err_op_fail;

	detach_req(matched_region, matched_req);
	update_region_prio(matched_region);
	if (req_count(matched_region) == 0) {
		remove_region(matched_region);
		destroy_region(matched_region);
	}

	/* Update the request */
	req->req_start = 0x0;
	req->req_sz = 0x0;
	req->req_end = 0x0;
	SET_STATE(req, R_FREE);
	return OP_COMPLETE;
invalid_op_error:
	pr_err("ocmem: free: Failed to find matching region\n");
err_op_fail:
	pr_err("ocmem: free: Failed\n");
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz)
{
	int owner = req->owner;
	int ret = 0;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;
	struct ocmem_region *region = NULL;
	unsigned long alloc_addr = 0x0;

	struct ocmem_zone *zone = get_zone(owner);

	BUG_ON(!zone);

	/* The shrink should not be called for zero size */
	BUG_ON(new_sz == 0);

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req)
		goto invalid_op_error;
	if (matched_req != req)
		goto invalid_op_error;

	ret = zone->z_ops->free(zone,
		matched_req->req_start, matched_req->req_sz);

	if (ret < 0) {
		pr_err("Zone free operation failed\n");
		goto internal_error;
	}

	alloc_addr = zone->z_ops->allocate(zone, new_sz);

	if (alloc_addr < 0) {
		pr_err("Zone Allocation operation failed\n");
		goto internal_error;
	}

	/* Detach the region from the interval tree */
	/* This is to guarantee that the change in size
	 * causes the tree to be rebalanced if required */

	detach_req(matched_region, req);
	if (req_count(matched_region) == 0) {
		remove_region(matched_region);
		region = matched_region;
	} else {
		region = create_region();
		if (!region) {
			pr_err("ocmem: Unable to create region\n");
			goto internal_error;
		}
	}
	/* update the request */
	req->req_start = alloc_addr;
	req->req_sz = new_sz;
	req->req_end = alloc_addr + req->req_sz;

	if (req_count(region) == 0) {
		remove_region(matched_region);
		destroy_region(matched_region);
	}

	/* update request state */
	SET_STATE(req, R_MUST_GROW);
	SET_STATE(req, R_MUST_MAP);
	req->op = SCHED_MAP;

	/* attach the request to the region */
	attach_req(region, req);
	populate_region(region, req);
	update_region_prio(region);

	/* update the tree with new region */
	if (insert_region(region)) {
		pr_err("ocmem: Failed to insert the region\n");
		zone->z_ops->free(zone, alloc_addr, new_sz);
		detach_req(region, req);
		update_region_prio(region);
		/* req will be destroyed by the caller */
		goto region_error;
	}
	return OP_COMPLETE;

region_error:
	destroy_region(region);
internal_error:
	pr_err("ocmem: shrink: Failed\n");
	return OP_FAIL;
invalid_op_error:
	pr_err("ocmem: shrink: Failed to find matching region\n");
	return OP_FAIL;
}

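/*
 * Allocation starts at the client's maximum size and, when the zone head
 * conflicts with an existing region, either backs off in 'req_step'
 * decrements towards the minimum, reschedules the request, or returns
 * OP_EVICT so the caller can evict lower-priority regions in the way.
 */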
/* Must be called with sched_mutex held */
static int __sched_allocate(struct ocmem_req *req, bool can_block,
				bool can_wait)
{
	unsigned long min = req->req_min;
	unsigned long max = req->req_max;
	unsigned long step = req->req_step;
	int owner = req->owner;
	unsigned long sz = max;
	enum client_prio prio = req->prio;
	unsigned long alloc_addr = 0x0;
	bool retry;

	struct ocmem_region *spanned_r = NULL;
	struct ocmem_region *overlap_r = NULL;

	struct ocmem_zone *zone = get_zone(owner);
	struct ocmem_region *region = NULL;

	BUG_ON(!zone);

	if (min > (zone->z_end - zone->z_start)) {
		pr_err("ocmem: requested minimum size exceeds quota\n");
		goto invalid_op_error;
	}

	if (max > (zone->z_end - zone->z_start)) {
		pr_err("ocmem: requested maximum size exceeds quota\n");
		goto invalid_op_error;
	}

	if (min > zone->z_free) {
		pr_err("ocmem: out of memory for zone %d\n", owner);
		goto invalid_op_error;
	}

	region = create_region();

	if (!region) {
		pr_err("ocmem: Unable to create region\n");
		goto invalid_op_error;
	}

	retry = false;

	pr_debug("ocmem: do_allocate: %s request %p size %lx\n",
						get_name(owner), req, sz);

retry_next_step:

	spanned_r = NULL;
	overlap_r = NULL;

	spanned_r = find_region(zone->z_head);
	overlap_r = find_region_intersection(zone->z_head, zone->z_head + sz);

	if (overlap_r == NULL) {
		/* no conflicting regions, schedule this region */
		alloc_addr = zone->z_ops->allocate(zone, sz);

		if (alloc_addr < 0) {
			pr_err("Zone Allocation operation failed\n");
			goto internal_error;
		}

		/* update the request */
		req->req_start = alloc_addr;
		req->req_end = alloc_addr + sz - 1;
		req->req_sz = sz;
		req->zone = zone;

		/* update request state */
		CLEAR_STATE(req, R_FREE);
		CLEAR_STATE(req, R_PENDING);
		SET_STATE(req, R_ALLOCATED);
		SET_STATE(req, R_MUST_MAP);
		req->op = SCHED_NOP;

		/* attach the request to the region */
		attach_req(region, req);
		populate_region(region, req);
		update_region_prio(region);

		/* update the tree with new region */
		if (insert_region(region)) {
			pr_err("ocmem: Failed to insert the region\n");
			zone->z_ops->free(zone, alloc_addr, sz);
			detach_req(region, req);
			update_region_prio(region);
			/* req will be destroyed by the caller */
			goto internal_error;
		}

		if (retry) {
			SET_STATE(req, R_MUST_GROW);
			SET_STATE(req, R_PENDING);
			req->op = SCHED_GROW;
			return OP_PARTIAL;
		}
	} else if (spanned_r != NULL && overlap_r != NULL) {
		/* resolve conflicting regions based on priority */
		if (overlap_r->max_prio < prio) {
			if (min == max) {
				req->req_start = zone->z_head;
				req->req_end = zone->z_head + sz - 1;
				req->req_sz = 0x0;
				req->edata = NULL;
				goto trigger_eviction;
			} else {
				/* Try to allocate at least 'min' immediately */
				sz -= step;
				if (sz < min)
					goto err_out_of_mem;
				retry = true;
				pr_debug("ocmem: Attempting with reduced size %lx\n",
									sz);
				goto retry_next_step;
			}
		} else if (overlap_r->max_prio > prio) {
			if (can_block == true) {
				SET_STATE(req, R_PENDING);
				SET_STATE(req, R_MUST_GROW);
				return OP_RESCHED;
			} else {
				if (min == max) {
					pr_err("Cannot allocate %lx synchronously\n",
							sz);
					goto err_out_of_mem;
				} else {
					sz -= step;
					if (sz < min)
						goto err_out_of_mem;
					retry = true;
					pr_debug("ocmem: Attempting reduced size %lx\n",
								sz);
					goto retry_next_step;
				}
			}
		} else {
			pr_err("ocmem: Undetermined behavior\n");
			pr_err("ocmem: New Region %p Existing %p\n", region,
						overlap_r);
			/* This is serious enough to fail */
			BUG();
		}
	} else if (spanned_r == NULL && overlap_r != NULL)
		goto err_not_supported;

	return OP_COMPLETE;

trigger_eviction:
	pr_debug("Trigger eviction of region %p\n", overlap_r);
	destroy_region(region);
	return OP_EVICT;

err_not_supported:
	pr_err("ocmem: Scheduled unsupported operation\n");
	return OP_FAIL;

err_out_of_mem:
	pr_err("ocmem: Out of memory during allocation\n");
internal_error:
	destroy_region(region);
invalid_op_error:
	return OP_FAIL;
}

/* Remove the request from eviction lists */
static void cancel_restore(struct ocmem_req *e_handle,
				struct ocmem_req *req)
{
	struct ocmem_eviction_data *edata = e_handle->edata;

	if (!edata || !req)
		return;

	if (list_empty(&edata->req_list))
		return;

	list_del_init(&req->eviction_list);
	req->e_handle = NULL;

	return;
}

static int sched_enqueue(struct ocmem_req *priv)
{
	struct ocmem_req *next = NULL;
	mutex_lock(&sched_queue_mutex);
	SET_STATE(priv, R_ENQUEUED);
	list_add_tail(&priv->sched_list, &sched_queue[priv->owner]);
	pr_debug("enqueued req %p\n", priv);
	list_for_each_entry(next, &sched_queue[priv->owner], sched_list) {
		pr_debug("pending request %p for client %s\n", next,
				get_name(next->owner));
	}
	mutex_unlock(&sched_queue_mutex);
	return 0;
}

static void sched_dequeue(struct ocmem_req *victim_req)
{
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;
	int id;

	if (!victim_req)
		return;

	id = victim_req->owner;

	mutex_lock(&sched_queue_mutex);

	if (list_empty(&sched_queue[id]))
		goto dequeue_done;

	list_for_each_entry_safe(req, next, &sched_queue[id], sched_list)
	{
		if (req == victim_req) {
			pr_debug("ocmem: Cancelling pending request %p for %s\n",
					req, get_name(req->owner));
			list_del_init(&victim_req->sched_list);
			CLEAR_STATE(victim_req, R_ENQUEUED);
			break;
		}
	}
dequeue_done:
	mutex_unlock(&sched_queue_mutex);
	return;
}

static struct ocmem_req *ocmem_fetch_req(void)
{
	int i;
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;

	mutex_lock(&sched_queue_mutex);
	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
		if (list_empty(&sched_queue[i]))
			continue;
		list_for_each_entry_safe(req, next, &sched_queue[i], sched_list)
		{
			if (req) {
				pr_debug("ocmem: Fetched pending request %p\n",
									req);
				list_del(&req->sched_list);
				CLEAR_STATE(req, R_ENQUEUED);
				break;
			}
		}
	}
	mutex_unlock(&sched_queue_mutex);
	return req;
}

unsigned long process_quota(int id)
{
	struct ocmem_zone *zone = NULL;

	if (is_blocked(id))
		return 0;

	zone = get_zone(id);

	if (zone && zone->z_pool)
		return zone->z_end - zone->z_start;
	else
		return 0;
}

static int do_grow(struct ocmem_req *req)
{
	struct ocmem_buf *buffer = NULL;
	bool can_block = true;
	int rc = 0;

	down_write(&req->rw_sem);
	buffer = req->buffer;

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_grow(req, can_block);
	mutex_unlock(&sched_mutex);

	if (rc == OP_FAIL)
		goto err_op_fail;

	if (rc == OP_RESCHED) {
		pr_debug("ocmem: Enqueue this allocation");
		sched_enqueue(req);
	} else if (rc == OP_COMPLETE || rc == OP_PARTIAL) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_op_fail:
	up_write(&req->rw_sem);
	return -EINVAL;
}

static int process_grow(struct ocmem_req *req)
{
	int rc = 0;
	unsigned long offset = 0;

	/* Attempt to grow the region */
	rc = do_grow(req);

	if (rc < 0)
		return -EINVAL;

	rc = process_map(req, req->req_start, req->req_end);
	if (rc < 0)
		return -EINVAL;

	offset = phys_to_offset(req->req_start);

	rc = ocmem_memory_on(req->owner, offset, req->req_sz);

	if (rc < 0) {
		pr_err("Failed to switch ON memory macros\n");
		goto power_ctl_error;
	}

	/* Notify the client about the buffer growth */
	rc = dispatch_notification(req->owner, OCMEM_ALLOC_GROW, req->buffer);
	if (rc < 0) {
		pr_err("No notifier callback to cater for req %p event: %d\n",
				req, OCMEM_ALLOC_GROW);
		BUG();
	}
	return 0;
power_ctl_error:
	return -EINVAL;
}

static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
{
	int rc = 0;
	struct ocmem_buf *buffer = NULL;

	down_write(&req->rw_sem);
	buffer = req->buffer;

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_shrink(req, shrink_size);
	mutex_unlock(&sched_mutex);

	if (rc == OP_FAIL)
		goto err_op_fail;
	else if (rc == OP_COMPLETE) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_op_fail:
	up_write(&req->rw_sem);
	return -EINVAL;
}

static void ocmem_sched_wk_func(struct work_struct *work);
DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func);

static int ocmem_schedule_pending(void)
{
	bool need_sched = false;
	int i = 0;

	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
		if (!list_empty(&sched_queue[i])) {
			need_sched = true;
			break;
		}
	}

	if (need_sched == true) {
		cancel_delayed_work(&ocmem_sched_thread);
		schedule_delayed_work(&ocmem_sched_thread,
					msecs_to_jiffies(SCHED_DELAY));
		pr_debug("ocmem: Scheduled delayed work\n");
	}
	return 0;
}

static int do_free(struct ocmem_req *req)
{
	int rc = 0;
	struct ocmem_buf *buffer = req->buffer;

	down_write(&req->rw_sem);

	if (is_mapped(req)) {
		pr_err("ocmem: Buffer needs to be unmapped before free\n");
		goto err_free_fail;
	}

	pr_debug("ocmem: do_free: client %s req %p\n", get_name(req->owner),
			req);
	/* Grab the sched mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_free(req);
	mutex_unlock(&sched_mutex);

	switch (rc) {

	case OP_COMPLETE:
		buffer->addr = 0x0;
		buffer->len = 0x0;
		break;
	case OP_FAIL:
	default:
		goto err_free_fail;
		break;
	}

	up_write(&req->rw_sem);
	return 0;
err_free_fail:
	up_write(&req->rw_sem);
	pr_err("ocmem: freeing req %p failed\n", req);
	return -EINVAL;
}

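/*
 * process_free() is the teardown path for a client allocation: pending
 * restores and queued operations are cancelled, a mapped interval is
 * unmapped and returned to its zone, the memory macros are powered off
 * where needed, and the request is finally destroyed.
 */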
int process_free(int id, struct ocmem_handle *handle)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	unsigned long offset = 0;
	int rc = 0;

	mutex_lock(&free_mutex);

	if (is_blocked(id)) {
		pr_err("Client %d cannot request free\n", id);
		goto free_invalid;
	}

	req = handle_to_req(handle);
	buffer = handle_to_buffer(handle);

	if (!req) {
		pr_err("ocmem: No valid request to free\n");
		goto free_invalid;
	}

	if (req->req_start != core_address(id, buffer->addr)) {
		pr_err("Invalid buffer handle passed for free\n");
		goto free_invalid;
	}

	if (req->edata != NULL) {
		pr_err("ocmem: Request %p(%2lx) yet to process eviction %p\n",
					req, req->state, req->edata);
		goto free_invalid;
	}

	if (is_pending_shrink(req)) {
		pr_err("ocmem: Request %p(%2lx) yet to process eviction\n",
					req, req->state);
		goto pending_shrink;
	}

	/* Remove the request from any restore lists */
	if (req->e_handle)
		cancel_restore(req->e_handle, req);

	/* Remove the request from any pending operations */
	if (TEST_STATE(req, R_ENQUEUED)) {
		mutex_lock(&sched_mutex);
		sched_dequeue(req);
		mutex_unlock(&sched_mutex);
	}

	if (!TEST_STATE(req, R_FREE)) {

		if (TEST_STATE(req, R_MAPPED)) {
			/* unmap the interval and clear the memory */
			rc = process_unmap(req, req->req_start, req->req_end);

			if (rc < 0) {
				pr_err("ocmem: Failed to unmap %p\n", req);
				goto free_fail;
			}

			rc = do_free(req);
			if (rc < 0) {
				pr_err("ocmem: Failed to free %p\n", req);
				goto free_fail;
			}
		} else
			pr_debug("request %p was already shrunk to 0\n", req);
	}

	/* Turn off the memory */
	if (req->req_sz != 0) {

		offset = phys_to_offset(req->req_start);

		rc = ocmem_memory_off(req->owner, offset, req->req_sz);

		if (rc < 0) {
			pr_err("Failed to switch OFF memory macros\n");
			goto free_fail;
		}

	}

	if (!TEST_STATE(req, R_FREE)) {
		/* free the allocation */
		rc = do_free(req);
		if (rc < 0)
			goto free_fail;
	}

	inc_ocmem_stat(zone_of(req), NR_FREES);

	ocmem_destroy_req(req);
	handle->req = NULL;

	ocmem_schedule_pending();
	mutex_unlock(&free_mutex);
	return 0;
free_fail:
free_invalid:
	mutex_unlock(&free_mutex);
	return -EINVAL;
pending_shrink:
	mutex_unlock(&free_mutex);
	return -EAGAIN;
}

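/*
 * DDR<->OCMEM data moves are handled asynchronously: queue_transfer()
 * packages a request into an ocmem_rdm_work item on ocmem_rdm_wq, and
 * ocmem_rdm_worker() performs the RDM transfer and then notifies the
 * client with a MAP/UNMAP done or fail event.
 */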
static void ocmem_rdm_worker(struct work_struct *work)
{
	int offset = 0;
	int rc = 0;
	int event;
	struct ocmem_rdm_work *work_data = container_of(work,
				struct ocmem_rdm_work, work);
	int id = work_data->id;
	struct ocmem_map_list *list = work_data->list;
	int direction = work_data->direction;
	struct ocmem_handle *handle = work_data->handle;
	struct ocmem_req *req = handle_to_req(handle);
	struct ocmem_buf *buffer = handle_to_buffer(handle);

	down_write(&req->rw_sem);
	offset = phys_to_offset(req->req_start);
	rc = ocmem_rdm_transfer(id, list, offset, direction);
	if (work_data->direction == TO_OCMEM)
		event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
	else
		event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;
	up_write(&req->rw_sem);
	kfree(work_data);
	dispatch_notification(id, event, buffer);
}

int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle,
			struct ocmem_map_list *list, int direction)
{
	struct ocmem_rdm_work *work_data = NULL;

	down_write(&req->rw_sem);

	work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC);
	if (!work_data)
		BUG();

	work_data->handle = handle;
	work_data->list = list;
	work_data->id = req->owner;
	work_data->direction = direction;
	INIT_WORK(&work_data->work, ocmem_rdm_worker);
	up_write(&req->rw_sem);
	queue_work(ocmem_rdm_wq, &work_data->work);
	return 0;
}

int process_drop(int id, struct ocmem_handle *handle,
			struct ocmem_map_list *list)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	int rc = 0;

	if (is_blocked(id)) {
		pr_err("Client %d cannot request drop\n", id);
		return -EINVAL;
	}

	if (is_tcm(id))
		pr_err("Client %d cannot request drop\n", id);

	req = handle_to_req(handle);
	buffer = handle_to_buffer(handle);

	if (!req)
		return -EINVAL;

	if (req->req_start != core_address(id, buffer->addr)) {
		pr_err("Invalid buffer handle passed for drop\n");
		return -EINVAL;
	}

	if (TEST_STATE(req, R_MAPPED)) {
		rc = process_unmap(req, req->req_start, req->req_end);
		if (rc < 0)
			return -EINVAL;
	} else
		return -EINVAL;

	return 0;
}

int process_xfer_out(int id, struct ocmem_handle *handle,
			struct ocmem_map_list *list)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;

	if (!is_mapped(req)) {
		pr_err("Buffer is not currently mapped\n");
		goto transfer_out_error;
	}

	rc = queue_transfer(req, handle, list, TO_DDR);

	if (rc < 0) {
		pr_err("Failed to queue rdm transfer to DDR\n");
		inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
		goto transfer_out_error;
	}

	inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_DDR);
	return 0;

transfer_out_error:
	return -EINVAL;
}

int process_xfer_in(int id, struct ocmem_handle *handle,
			struct ocmem_map_list *list)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;

	if (!is_mapped(req)) {
		pr_err("Buffer is not already mapped for transfer\n");
		goto transfer_in_error;
	}

	inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_OCMEM);
	rc = queue_transfer(req, handle, list, TO_OCMEM);

	if (rc < 0) {
		pr_err("Failed to queue rdm transfer to OCMEM\n");
		inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
		goto transfer_in_error;
	}

	return 0;
transfer_in_error:
	return -EINVAL;
}

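/*
 * process_shrink() is the client's acknowledgement of an eviction notice:
 * a size of zero releases the whole interval, otherwise the request is
 * shrunk in place; the waiting evictor is completed once every victim has
 * acknowledged (edata->pending reaches zero).
 */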
1711int process_shrink(int id, struct ocmem_handle *handle, unsigned long size)
1712{
1713 struct ocmem_req *req = NULL;
1714 struct ocmem_buf *buffer = NULL;
1715 struct ocmem_eviction_data *edata = NULL;
1716 int rc = 0;
1717
1718 if (is_blocked(id)) {
1719 pr_err("Client %d cannot request free\n", id);
1720 return -EINVAL;
1721 }
1722
1723 req = handle_to_req(handle);
1724 buffer = handle_to_buffer(handle);
1725
1726 if (!req)
1727 return -EINVAL;
1728
Naveen Ramaraj89738952013-02-13 15:24:57 -08001729 mutex_lock(&free_mutex);
1730
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001731 if (req->req_start != core_address(id, buffer->addr)) {
1732 pr_err("Invalid buffer handle passed for shrink\n");
Naveen Ramaraj89738952013-02-13 15:24:57 -08001733 goto shrink_fail;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001734 }
1735
Naveen Ramaraj89738952013-02-13 15:24:57 -08001736 if (!req->e_handle) {
1737 pr_err("Unable to find evicting request\n");
1738 goto shrink_fail;
1739 }
1740
1741 edata = req->e_handle->edata;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001742
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001743 if (!edata) {
1744 pr_err("Unable to find eviction data\n");
Naveen Ramaraj89738952013-02-13 15:24:57 -08001745 goto shrink_fail;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001746 }
1747
1748 pr_debug("Found edata %p in request %p\n", edata, req);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001749
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001750 inc_ocmem_stat(zone_of(req), NR_SHRINKS);
1751
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001752 if (size == 0) {
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001753 pr_debug("req %p being shrunk to zero\n", req);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001754 if (is_mapped(req)) {
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001755 rc = process_unmap(req, req->req_start, req->req_end);
1756 if (rc < 0)
Naveen Ramaraj89738952013-02-13 15:24:57 -08001757 goto shrink_fail;
1758 }
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001759 rc = do_free(req);
1760 if (rc < 0)
Naveen Ramaraj89738952013-02-13 15:24:57 -08001761 goto shrink_fail;
1762 SET_STATE(req, R_FREE);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001763 } else {
1764 rc = do_shrink(req, size);
1765 if (rc < 0)
Naveen Ramaraj89738952013-02-13 15:24:57 -08001766 goto shrink_fail;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001767 }
1768
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001769 CLEAR_STATE(req, R_ALLOCATED);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001770 CLEAR_STATE(req, R_WF_SHRINK);
1771 SET_STATE(req, R_SHRUNK);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001772
1773 if (atomic_dec_and_test(&edata->pending)) {
1774 pr_debug("ocmem: All conflicting allocations were shrunk\n");
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001775 complete(&edata->completion);
1776 }
1777
Naveen Ramaraj89738952013-02-13 15:24:57 -08001778 mutex_unlock(&free_mutex);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001779 return 0;
Naveen Ramaraj89738952013-02-13 15:24:57 -08001780shrink_fail:
1781 pr_err("ocmem: Failed to shrink request %p of %s\n",
1782 req, get_name(req->owner));
1783 mutex_unlock(&free_mutex);
1784 return -EINVAL;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001785}
1786
1787int process_xfer(int id, struct ocmem_handle *handle,
1788 struct ocmem_map_list *list, int direction)
1789{
1790 int rc = 0;
1791
1792 if (is_tcm(id)) {
1793 WARN(1, "Mapping operation is invalid for client\n");
1794 return -EINVAL;
1795 }
1796
1797 if (direction == TO_DDR)
1798 rc = process_xfer_out(id, handle, list);
1799 else if (direction == TO_OCMEM)
1800 rc = process_xfer_in(id, handle, list);
1801 return rc;
1802}
1803
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001804static struct ocmem_eviction_data *init_eviction(int id)
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001805{
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001806 struct ocmem_eviction_data *edata = NULL;
1807 int prio = ocmem_client_table[id].priority;
1808
1809 edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC);
1810
1811 if (!edata) {
1812 pr_err("ocmem: Could not allocate eviction data\n");
1813 return NULL;
1814 }
1815
1816 INIT_LIST_HEAD(&edata->victim_list);
1817 INIT_LIST_HEAD(&edata->req_list);
1818 edata->prio = prio;
1819 atomic_set(&edata->pending, 0);
1820 return edata;
1821}
1822
1823static void free_eviction(struct ocmem_eviction_data *edata)
1824{
1825
1826 if (!edata)
1827 return;
1828
1829 if (!list_empty(&edata->req_list))
1830 pr_err("ocmem: Eviction data %p not empty\n", edata);
1831
1832 kfree(edata);
1833 edata = NULL;
1834}
1835
1836static bool is_overlapping(struct ocmem_req *new, struct ocmem_req *old)
1837{
1838
1839 if (!new || !old)
1840 return false;
1841
1842 pr_debug("check overlap [%lx -- %lx] on [%lx -- %lx]\n",
1843 new->req_start, new->req_end,
1844 old->req_start, old->req_end);
1845
1846 if ((new->req_start < old->req_start &&
1847 new->req_end >= old->req_start) ||
1848 (new->req_start >= old->req_start &&
1849 new->req_start <= old->req_end &&
1850 new->req_end >= old->req_end)) {
1851 pr_debug("request %p overlaps with existing req %p\n",
1852 new, old);
1853 return true;
1854 }
1855 return false;
1856}
1857
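/*
 * __evict_common(): scan the scheduler tree for eviction victims. Only
 * regions whose highest priority is below the evicting client's priority
 * are considered; within such a region, requests at each lower priority
 * level become victims either unconditionally (passive eviction) or only
 * when they overlap the incoming @req (targeted eviction). Victims are
 * marked R_MUST_SHRINK, queued on the eviction list and tied back to the
 * triggering request through e_handle. Returns the number of victims
 * queued.
 */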
1858static int __evict_common(struct ocmem_eviction_data *edata,
1859 struct ocmem_req *req)
1860{
1861 struct rb_node *rb_node = NULL;
1862 struct ocmem_req *e_req = NULL;
1863 bool needs_eviction = false;
1864 int j = 0;
1865
1866 for (rb_node = rb_first(&sched_tree); rb_node;
1867 rb_node = rb_next(rb_node)) {
1868
1869 struct ocmem_region *tmp_region = NULL;
1870
1871 tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
1872
1873 if (tmp_region->max_prio < edata->prio) {
1874 for (j = edata->prio - 1; j > NO_PRIO; j--) {
1875 needs_eviction = false;
1876 e_req = find_req_match(j, tmp_region);
1877 if (!e_req)
1878 continue;
1879 if (edata->passive == true) {
1880 needs_eviction = true;
1881 } else {
1882 needs_eviction = is_overlapping(req,
1883 e_req);
1884 }
1885
1886 if (needs_eviction) {
1887 pr_debug("adding %p in region %p to eviction list\n",
1888 e_req, tmp_region);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001889 SET_STATE(e_req, R_MUST_SHRINK);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001890 list_add_tail(
1891 &e_req->eviction_list,
1892 &edata->req_list);
1893 atomic_inc(&edata->pending);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001894 e_req->e_handle = req;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001895 }
1896 }
1897 } else {
1898 pr_debug("Skipped region %p\n", tmp_region);
1899 }
1900 }
1901
1902 pr_debug("%d requests will be evicted\n", atomic_read(&edata->pending));
1903
Naveen Ramaraj89738952013-02-13 15:24:57 -08001904 return atomic_read(&edata->pending);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001905}
1906
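/*
 * trigger_eviction(): ask each queued victim's owner to shrink. Every
 * victim is sent an OCMEM_ALLOC_SHRINK notification with a zero-length
 * buffer (shrink to zero) and moves from R_MUST_SHRINK to R_WF_SHRINK.
 * The callers then wait on edata->completion, which the shrink handler
 * above signals once the last victim has been shrunk or freed.
 */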
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001907static void trigger_eviction(struct ocmem_eviction_data *edata)
1908{
1909 struct ocmem_req *req = NULL;
1910 struct ocmem_req *next = NULL;
1911 struct ocmem_buf buffer;
1912
1913 if (!edata)
1914 return;
1915
1916 BUG_ON(atomic_read(&edata->pending) == 0);
1917
1918 init_completion(&edata->completion);
1919
1920 list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
1921 {
1922 if (req) {
1923 pr_debug("ocmem: Evicting request %p\n", req);
1924 buffer.addr = req->req_start;
1925 buffer.len = 0x0;
Naveen Ramaraj89738952013-02-13 15:24:57 -08001926 CLEAR_STATE(req, R_MUST_SHRINK);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001927 dispatch_notification(req->owner, OCMEM_ALLOC_SHRINK,
1928 &buffer);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001929 SET_STATE(req, R_WF_SHRINK);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001930 }
1931 }
1932 return;
1933}
1934
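/*
 * process_evict(): passive eviction on behalf of client @id. Every lower
 * priority request is asked to shrink regardless of overlap, the eviction
 * data is stashed in evictions[id] so process_restore() can bring the
 * victims back later, and the call blocks until all victims have
 * acknowledged. When nothing needs evicting the entry is simply cleared
 * (note that, unlike run_evict() below, this path does not free the
 * eviction data).
 */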
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001935int process_evict(int id)
1936{
1937 struct ocmem_eviction_data *edata = NULL;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001938 int rc = 0;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001939
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001940 edata = init_eviction(id);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001941
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001942 if (!edata)
1943 return -EINVAL;
1944
1945 edata->passive = true;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001946
1947 mutex_lock(&sched_mutex);
1948
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001949 rc = __evict_common(edata, NULL);
1950
Naveen Ramaraj89738952013-02-13 15:24:57 -08001951 if (rc == 0)
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001952 goto skip_eviction;
1953
1954 trigger_eviction(edata);
1955
1956 evictions[id] = edata;
1957
1958 mutex_unlock(&sched_mutex);
1959
1960 wait_for_completion(&edata->completion);
1961
1962 return 0;
1963
1964skip_eviction:
1965 evictions[id] = NULL;
1966 mutex_unlock(&sched_mutex);
1967 return 0;
1968}
1969
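/*
 * run_evict(): targeted eviction used by do_allocate() when the scheduler
 * answers OP_EVICT. Only requests overlapping the incoming @req are asked
 * to shrink; on success the eviction data is attached to req->edata so
 * sched_restore() can re-queue the victims after the new allocation has
 * been placed. If no victim is found the eviction data is freed and the
 * function still returns 0, letting the caller retry.
 */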
1970static int run_evict(struct ocmem_req *req)
1971{
1972 struct ocmem_eviction_data *edata = NULL;
1973 int rc = 0;
1974
1975 if (!req)
1976 return -EINVAL;
1977
1978 edata = init_eviction(req->owner);
1979
1980 if (!edata)
1981 return -EINVAL;
1982
1983 edata->passive = false;
1984
Naveen Ramaraj89738952013-02-13 15:24:57 -08001985 mutex_lock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001986 rc = __evict_common(edata, req);
1987
Naveen Ramaraj89738952013-02-13 15:24:57 -08001988 if (rc == 0)
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001989 goto skip_eviction;
1990
1991 trigger_eviction(edata);
1992
1993 pr_debug("ocmem: attaching eviction %p to request %p", edata, req);
1994 req->edata = edata;
1995
Naveen Ramaraj89738952013-02-13 15:24:57 -08001996 mutex_unlock(&free_mutex);
1997
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001998 wait_for_completion(&edata->completion);
1999
2000 pr_debug("ocmem: eviction completed successfully\n");
2001 return 0;
2002
2003skip_eviction:
2004 pr_err("ocmem: Unable to run eviction\n");
2005 free_eviction(edata);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002006 req->edata = NULL;
2007 mutex_unlock(&free_mutex);
2008 return 0;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002009}
2010
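/*
 * __restore_common(): undo an eviction. Each victim is unlinked from the
 * eviction list, detached from its eviction bookkeeping and re-queued as
 * a fresh SCHED_ALLOCATE operation so the scheduler worker can grow it
 * back when space allows.
 */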
2011static int __restore_common(struct ocmem_eviction_data *edata)
2012{
2013
2014 struct ocmem_req *req = NULL;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002015
2016 if (!edata)
2017 return -EINVAL;
2018
Naveen Ramaraj89738952013-02-13 15:24:57 -08002019 while (!list_empty(&edata->req_list)) {
2020 req = list_first_entry(&edata->req_list, struct ocmem_req,
2021 eviction_list);
2022 list_del_init(&req->eviction_list);
2023 pr_debug("ocmem: restoring evicted request %p\n",
2024 req);
2025 req->edata = NULL;
2026 req->e_handle = NULL;
2027 req->op = SCHED_ALLOCATE;
2028 inc_ocmem_stat(zone_of(req), NR_RESTORES);
2029 sched_enqueue(req);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07002030 }
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002031
2032 pr_debug("Scheduled all evicted regions\n");
2033
2034 return 0;
2035}
2036
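/*
 * sched_restore(): restore helper for a targeted eviction. It re-queues
 * the victims recorded on req->edata (a no-op when no eviction was
 * attached) and then releases the eviction data.
 */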
2037static int sched_restore(struct ocmem_req *req)
2038{
2039
2040 int rc = 0;
2041
2042 if (!req)
2043 return -EINVAL;
2044
2045 if (!req->edata)
2046 return 0;
2047
Naveen Ramaraj89738952013-02-13 15:24:57 -08002048 mutex_lock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002049 rc = __restore_common(req->edata);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002050 mutex_unlock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002051
2052 if (rc < 0)
2053 return -EINVAL;
2054
2055 free_eviction(req->edata);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002056 req->edata = NULL;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002057 return 0;
2058}
2059
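/*
 * process_restore(): counterpart of process_evict(). The victims recorded
 * for client @id are re-queued, the eviction data is released and the
 * pending-request scheduler is kicked so restored requests get another
 * chance to run.
 */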
2060int process_restore(int id)
2061{
2062 struct ocmem_eviction_data *edata = evictions[id];
2063 int rc = 0;
2064
2065 if (!edata)
2066 return -EINVAL;
2067
Naveen Ramaraj89738952013-02-13 15:24:57 -08002068 mutex_lock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002069 rc = __restore_common(edata);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002070 mutex_unlock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002071
2072 if (rc < 0) {
2073 pr_err("Failed to restore evicted requests\n");
2074 return -EINVAL;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07002075 }
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002076
2077 free_eviction(edata);
2078 evictions[id] = NULL;
2079 ocmem_schedule_pending();
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07002080 return 0;
2081}
2082
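/*
 * do_allocate(): core allocation path. The scheduler is consulted under
 * sched_mutex; if it answers OP_EVICT, a targeted eviction is run and the
 * allocation retried from scratch, with the whole retry loop serialised
 * by allocation_mutex. OP_RESCHED and OP_PARTIAL leave the request queued
 * for asynchronous growth, and OP_PARTIAL and OP_COMPLETE also publish
 * the placed range through the client's buffer.
 */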
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002083static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait)
2084{
2085 int rc = 0;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002086 int ret = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002087 struct ocmem_buf *buffer = req->buffer;
2088
2089 down_write(&req->rw_sem);
2090
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002091 mutex_lock(&allocation_mutex);
2092retry_allocate:
2093
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002094 /* Take the scheduler mutex */
2095 mutex_lock(&sched_mutex);
2096 rc = __sched_allocate(req, can_block, can_wait);
2097 mutex_unlock(&sched_mutex);
2098
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002099 if (rc == OP_EVICT) {
2100
2101 ret = run_evict(req);
2102
2103 if (ret == 0) {
2104 rc = sched_restore(req);
2105 if (rc < 0) {
2106 pr_err("Failed to restore for req %p\n", req);
2107 goto err_allocate_fail;
2108 }
2109 req->edata = NULL;
2110
2111 pr_debug("Attempting to re-allocate req %p\n", req);
2112 req->req_start = 0x0;
2113 req->req_end = 0x0;
2114 goto retry_allocate;
2115 } else {
2116 goto err_allocate_fail;
2117 }
2118 }
2119
2120 mutex_unlock(&allocation_mutex);
2121
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002122 if (rc == OP_FAIL) {
2123 inc_ocmem_stat(zone_of(req), NR_ALLOCATION_FAILS);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002124 goto err_allocate_fail;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002125 }
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002126
2127 if (rc == OP_RESCHED) {
2128 buffer->addr = 0x0;
2129 buffer->len = 0x0;
2130 pr_debug("ocmem: Enqueuing req %p\n", req);
2131 sched_enqueue(req);
2132 } else if (rc == OP_PARTIAL) {
2133 buffer->addr = device_address(req->owner, req->req_start);
2134 buffer->len = req->req_sz;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002135 inc_ocmem_stat(zone_of(req), NR_RANGE_ALLOCATIONS);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002136 pr_debug("ocmem: Enqueuing req %p\n", req);
2137 sched_enqueue(req);
2138 } else if (rc == OP_COMPLETE) {
2139 buffer->addr = device_address(req->owner, req->req_start);
2140 buffer->len = req->req_sz;
2141 }
2142
2143 up_write(&req->rw_sem);
2144 return 0;
2145err_allocate_fail:
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002146 mutex_unlock(&allocation_mutex);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002147 up_write(&req->rw_sem);
2148 return -EINVAL;
2149}
2150
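/*
 * do_dump(): copy the contents of a placed request out to DDR. The
 * request's physical start is converted to an offset into the OCMEM
 * virtual base, the region is un-secured with ocmem_enable_dump(), the
 * data is copied to the caller-supplied address and the region is then
 * re-secured with ocmem_disable_dump(). The copy uses a plain memcpy()
 * on an __iomem pointer, as in the original source; memcpy_fromio()
 * would be the stricter choice but is not what the code does.
 */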
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002151static int do_dump(struct ocmem_req *req, unsigned long addr)
2152{
2153
2154 void __iomem *req_vaddr;
2155 unsigned long offset = 0x0;
Naveen Ramaraje4cc4622012-10-29 17:28:57 -07002156 int rc = 0;
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002157
2158 down_write(&req->rw_sem);
2159
2160 offset = phys_to_offset(req->req_start);
2161
2162 req_vaddr = ocmem_vaddr + offset;
2163
2164 if (!req_vaddr)
2165 goto err_do_dump;
2166
Naveen Ramaraje4cc4622012-10-29 17:28:57 -07002167 rc = ocmem_enable_dump(req->owner, offset, req->req_sz);
2168
2169 if (rc < 0)
2170 goto err_do_dump;
2171
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002172 pr_debug("Dumping client %s buffer ocmem p: %lx (v: %p) to ddr %lx\n",
2173 get_name(req->owner), req->req_start,
2174 req_vaddr, addr);
2175
2176 memcpy((void *)addr, req_vaddr, req->req_sz);
2177
Naveen Ramaraje4cc4622012-10-29 17:28:57 -07002178 rc = ocmem_disable_dump(req->owner, offset, req->req_sz);
2179
2180 if (rc < 0)
2181 pr_err("Failed to secure request %p of %s after dump\n",
2182 req, get_name(req->owner));
2183
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002184 up_write(&req->rw_sem);
2185 return 0;
2186err_do_dump:
2187 up_write(&req->rw_sem);
2188 return -EINVAL;
2189}
2190
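/*
 * process_allocate(): synchronous allocation entry point. After the
 * sanity checks a request is built from the client's min/max/step
 * parameters and handed to do_allocate(); when a range was actually
 * placed it is mapped and the memory macros are powered on before the
 * handle is returned to the caller.
 *
 * A minimal caller sketch (illustrative only; client_id and the size
 * constants are assumptions, not part of this file):
 *
 *	struct ocmem_buf *buf = handle_to_buffer(handle);
 *
 *	rc = process_allocate(client_id, handle, SZ_64K, SZ_1M, SZ_64K,
 *			true, false);
 *	if (rc == 0 && buf->len != 0)
 *		pr_info("ocmem: placed at 0x%lx, len 0x%lx\n",
 *			buf->addr, buf->len);
 */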
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002191int process_allocate(int id, struct ocmem_handle *handle,
2192 unsigned long min, unsigned long max,
2193 unsigned long step, bool can_block, bool can_wait)
2194{
2195
2196 struct ocmem_req *req = NULL;
2197 struct ocmem_buf *buffer = NULL;
2198 int rc = 0;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002199 unsigned long offset = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002200
2201 /* sanity checks */
2202 if (is_blocked(id)) {
2203 pr_err("Client %d cannot request allocation\n", id);
2204 return -EINVAL;
2205 }
2206
2207 if (handle->req != NULL) {
2208 pr_err("Invalid handle passed in\n");
2209 return -EINVAL;
2210 }
2211
2212 buffer = handle_to_buffer(handle);
2213 BUG_ON(buffer == NULL);
2214
2215 /* prepare a request structure to represent this transaction */
2216 req = ocmem_create_req();
2217 if (!req)
2218 return -ENOMEM;
2219
2220 req->owner = id;
2221 req->req_min = min;
2222 req->req_max = max;
2223 req->req_step = step;
2224 req->prio = ocmem_client_table[id].priority;
2225 req->op = SCHED_ALLOCATE;
2226 req->buffer = buffer;
2227
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002228 inc_ocmem_stat(zone_of(req), NR_REQUESTS);
2229
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002230 rc = do_allocate(req, can_block, can_wait);
2231
2232 if (rc < 0)
2233 goto do_allocate_error;
2234
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002235 inc_ocmem_stat(zone_of(req), NR_SYNC_ALLOCATIONS);
2236
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002237 handle->req = req;
2238
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002239 if (req->req_sz != 0) {
2240
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002241 rc = process_map(req, req->req_start, req->req_end);
2242 if (rc < 0)
2243 goto map_error;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002244
2245 offset = phys_to_offset(req->req_start);
2246
2247 rc = ocmem_memory_on(req->owner, offset, req->req_sz);
2248
2249 if (rc < 0) {
2250 pr_err("Failed to switch ON memory macros\n");
2251 goto power_ctl_error;
2252 }
2253 }
2254
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002255 return 0;
2256
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002257power_ctl_error:
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002258 process_unmap(req, req->req_start, req->req_end);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002259map_error:
2260 handle->req = NULL;
2261 do_free(req);
2262do_allocate_error:
2263 ocmem_destroy_req(req);
2264 return -EINVAL;
2265}
2266
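/*
 * process_delayed_allocate(): asynchronous variant driven from the
 * scheduler worker. do_allocate() is re-run with can_block set and
 * can_wait cleared; if the request is still pending the function returns
 * quietly, otherwise the placed range is mapped, the memory macros are
 * powered on and the client is notified of the growth with
 * OCMEM_ALLOC_GROW.
 */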
2267int process_delayed_allocate(struct ocmem_req *req)
2268{
2269
2270 struct ocmem_handle *handle = NULL;
2271 int rc = 0;
2272 int id = req->owner;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002273 unsigned long offset = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002274
2275 handle = req_to_handle(req);
2276 BUG_ON(handle == NULL);
2277
2278 rc = do_allocate(req, true, false);
2279
2280 if (rc < 0)
2281 goto do_allocate_error;
2282
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002283 /* The request can still be pending */
2284 if (TEST_STATE(req, R_PENDING))
2285 return 0;
2286
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002287 inc_ocmem_stat(zone_of(req), NR_ASYNC_ALLOCATIONS);
2288
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002289 if (req->req_sz != 0) {
2290
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002291 rc = process_map(req, req->req_start, req->req_end);
2292 if (rc < 0)
2293 goto map_error;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002294
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002295
2296 offset = phys_to_offset(req->req_start);
2297
2298 rc = ocmem_memory_on(req->owner, offset, req->req_sz);
2299
2300 if (rc < 0) {
2301 pr_err("Failed to switch ON memory macros\n");
2302 goto power_ctl_error;
2303 }
2304 }
2305
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002306 /* Notify the client about the buffer growth */
2307 rc = dispatch_notification(id, OCMEM_ALLOC_GROW, req->buffer);
2308 if (rc < 0) {
2309 pr_err("No notifier callback to cater for req %p event: %d\n",
2310 req, OCMEM_ALLOC_GROW);
2311 BUG();
2312 }
2313 return 0;
2314
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002315power_ctl_error:
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002316 process_unmap(req, req->req_start, req->req_end);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002317map_error:
2318 handle->req = NULL;
2319 do_free(req);
2320do_allocate_error:
2321 ocmem_destroy_req(req);
2322 return -EINVAL;
2323}
2324
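/*
 * process_dump(): client-facing wrapper around do_dump(). The request
 * must already be mapped; the copy itself runs under sched_mutex and the
 * dump statistics are bumped on entry and again on successful completion.
 */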
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002325int process_dump(int id, struct ocmem_handle *handle, unsigned long addr)
2326{
2327 struct ocmem_req *req = NULL;
2328 int rc = 0;
2329
2330 req = handle_to_req(handle);
2331
2332 if (!req)
2333 return -EINVAL;
2334
2335 if (!is_mapped(req)) {
2336 pr_err("Buffer is not mapped\n");
2337 goto dump_error;
2338 }
2339
2340 inc_ocmem_stat(zone_of(req), NR_DUMP_REQUESTS);
2341
2342 mutex_lock(&sched_mutex);
2343 rc = do_dump(req, addr);
2344 mutex_unlock(&sched_mutex);
2345
2346 if (rc < 0)
2347 goto dump_error;
2348
2349 inc_ocmem_stat(zone_of(req), NR_DUMP_COMPLETE);
2350 return 0;
2351
2352dump_error:
2353 pr_err("Dumping OCMEM memory failed for client %d\n", id);
2354 return -EINVAL;
2355}
2356
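/*
 * ocmem_sched_wk_func(): scheduler work item. One pending request is
 * pulled off the queue and serviced: SCHED_GROW requests go through
 * process_grow() and delayed SCHED_ALLOCATE requests through
 * process_delayed_allocate().
 */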
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002357static void ocmem_sched_wk_func(struct work_struct *work)
2358{
2359
2360 struct ocmem_buf *buffer = NULL;
2361 struct ocmem_handle *handle = NULL;
2362 struct ocmem_req *req = ocmem_fetch_req();
2363
2364 if (!req) {
2365 pr_debug("No Pending Requests found\n");
2366 return;
2367 }
2368
2369 pr_debug("ocmem: sched_wk pending req %p\n", req);
2370 handle = req_to_handle(req);
2371 buffer = handle_to_buffer(handle);
2372 BUG_ON(req->op == SCHED_NOP);
2373
2374 switch (req->op) {
2375 case SCHED_GROW:
2376 process_grow(req);
2377 break;
2378 case SCHED_ALLOCATE:
2379 process_delayed_allocate(req);
2380 break;
2381 default:
2382 pr_err("ocmem: Unknown operation encountered\n");
2383 break;
2384 }
2385 return;
2386}
2387
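/*
 * Debugfs support: the "allocations" node walks the scheduler tree under
 * sched_mutex and prints one line per request (owner, placed range, size
 * and state bits) so the current OCMEM layout can be inspected at
 * runtime.
 */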
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002388static int ocmem_allocations_show(struct seq_file *f, void *dummy)
2389{
2390 struct rb_node *rb_node = NULL;
2391 struct ocmem_req *req = NULL;
2392 unsigned j;
2393 mutex_lock(&sched_mutex);
2394 for (rb_node = rb_first(&sched_tree); rb_node;
2395 rb_node = rb_next(rb_node)) {
2396 struct ocmem_region *tmp_region = NULL;
2397 tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
2398 for (j = MAX_OCMEM_PRIO - 1; j > NO_PRIO; j--) {
2399 req = find_req_match(j, tmp_region);
2400 if (req) {
2401 seq_printf(f,
2402 "owner: %s 0x%lx -- 0x%lx size 0x%lx [state: %2lx]\n",
2403 get_name(req->owner),
2404 req->req_start, req->req_end,
2405 req->req_sz, req->state);
2406 }
2407 }
2408 }
2409 mutex_unlock(&sched_mutex);
2410 return 0;
2411}
2412
2413static int ocmem_allocations_open(struct inode *inode, struct file *file)
2414{
2415 return single_open(file, ocmem_allocations_show, inode->i_private);
2416}
2417
2418static const struct file_operations allocations_show_fops = {
2419 .open = ocmem_allocations_open,
2420 .read = seq_read,
2421 .llseek = seq_lseek,
2422 .release = seq_release,
2423};
2424
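/*
 * ocmem_sched_init(): one-time setup called at probe. It initialises the
 * scheduler locks and priority queues, records the OCMEM virtual base
 * from the platform data, creates the RDM and eviction workqueues and
 * registers the "allocations" debugfs node.
 */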
2425int ocmem_sched_init(struct platform_device *pdev)
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002426{
2427 int i = 0;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002428 struct ocmem_plat_data *pdata = NULL;
2429 struct device *dev = &pdev->dev;
2430
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002431 sched_tree = RB_ROOT;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002432 pdata = platform_get_drvdata(pdev);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002433 mutex_init(&allocation_mutex);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002434 mutex_init(&free_mutex);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002435 mutex_init(&sched_mutex);
2436 mutex_init(&sched_queue_mutex);
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002437 ocmem_vaddr = pdata->vbase;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002438 for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++)
2439 INIT_LIST_HEAD(&sched_queue[i]);
2440
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07002441 mutex_init(&rdm_mutex);
2442 INIT_LIST_HEAD(&rdm_queue);
2443 ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0);
2444 if (!ocmem_rdm_wq)
2445 return -ENOMEM;
2446 ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0);
2447 if (!ocmem_eviction_wq)
2448 return -ENOMEM;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002449
2450 if (!debugfs_create_file("allocations", S_IRUGO, pdata->debug_node,
2451 NULL, &allocations_show_fops)) {
2452 dev_err(dev, "Unable to create debugfs node for scheduler\n");
2453 return -EBUSY;
2454 }
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002455 return 0;
2456}