/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/slab.h>
16#include <linux/mm.h>
17#include <linux/rbtree.h>
18#include <linux/idr.h>
19#include <linux/genalloc.h>
20#include <linux/of.h>
21#include <linux/io.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/seq_file.h>
25#include <mach/ocmem_priv.h>
26
27enum request_states {
Naveen Ramaraj89738952013-02-13 15:24:57 -080028 R_FREE = 0x0, /* request is not allocated */
29 R_PENDING, /* request has a pending operation */
30 R_ALLOCATED, /* request has been allocated */
31 R_ENQUEUED, /* request has been enqueued for future retry */
32 R_MUST_GROW, /* request must grow as a part of pending operation */
33 R_MUST_SHRINK, /* request must shrink */
34 R_WF_SHRINK, /* shrink must be ack'ed by a client */
35 R_SHRUNK, /* request was shrunk */
36 R_MUST_MAP, /* request must be mapped before being used */
37 R_MUST_UNMAP, /* request must be unmapped when not being used */
38 R_MAPPED, /* request is mapped and actively used by client */
39 R_UNMAPPED, /* request is not mapped, so it's not in active use */
40 R_EVICTED, /* request is evicted and must be restored */
Naveen Ramarajb9da05782012-05-07 09:07:35 -070041};
42
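/* Request states are bit positions in req->state; a request can hold
 * several states at once (e.g. R_ALLOCATED together with R_MUST_MAP).
 */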
43#define SET_STATE(x, val) (set_bit((val), &(x)->state))
44#define CLEAR_STATE(x, val) (clear_bit((val), &(x)->state))
45#define TEST_STATE(x, val) (test_bit((val), &(x)->state))
46
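/* Result codes returned by the scheduler operations below */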
47enum op_res {
48 OP_COMPLETE = 0x0,
49 OP_RESCHED,
50 OP_PARTIAL,
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -070051 OP_EVICT,
Naveen Ramarajb9da05782012-05-07 09:07:35 -070052 OP_FAIL = ~0x0,
53};
54
55/* Represents various client priorities */
56/* Note: More than one client can share a priority level */
57enum client_prio {
58 MIN_PRIO = 0x0,
59 NO_PRIO = MIN_PRIO,
60 PRIO_SENSORS = 0x1,
Naveen Ramarajcc4ec152012-05-14 09:55:29 -070061 PRIO_OTHER_OS = 0x1,
Naveen Ramarajb9da05782012-05-07 09:07:35 -070062 PRIO_LP_AUDIO = 0x1,
63 PRIO_HP_AUDIO = 0x2,
64 PRIO_VOICE = 0x3,
65 PRIO_GFX_GROWTH = 0x4,
66 PRIO_VIDEO = 0x5,
67 PRIO_GFX = 0x6,
68 PRIO_OCMEM = 0x7,
69 MAX_OCMEM_PRIO = PRIO_OCMEM + 1,
70};
71
Naveen Ramaraj55ed8902012-09-26 13:18:06 -070072static void __iomem *ocmem_vaddr;
Naveen Ramarajb9da05782012-05-07 09:07:35 -070073static struct list_head sched_queue[MAX_OCMEM_PRIO];
74static struct mutex sched_queue_mutex;
75
/* Delay in msecs before a pending operation is scheduled.
 * This allows an idle window between use case boundaries where various
 * hardware state changes, such as switching OCMEM to low power mode,
 * can occur. The value will be tweaked on actual hardware.
 */
#define SCHED_DELAY 5000
Naveen Ramarajb9da05782012-05-07 09:07:35 -070083
Naveen Ramarajcc4ec152012-05-14 09:55:29 -070084static struct list_head rdm_queue;
85static struct mutex rdm_mutex;
86static struct workqueue_struct *ocmem_rdm_wq;
87static struct workqueue_struct *ocmem_eviction_wq;
88
89static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX];
90
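/* Describes a single queued RDM transfer between DDR and OCMEM */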
91struct ocmem_rdm_work {
92 int id;
93 struct ocmem_map_list *list;
94 struct ocmem_handle *handle;
95 int direction;
96 struct work_struct work;
97};
98
Naveen Ramarajb9da05782012-05-07 09:07:35 -070099/* OCMEM Operational modes */
100enum ocmem_client_modes {
101 OCMEM_PERFORMANCE = 1,
102 OCMEM_PASSIVE,
103 OCMEM_LOW_POWER,
104 OCMEM_MODE_MAX = OCMEM_LOW_POWER
105};
106
107/* OCMEM Addressing modes */
108enum ocmem_interconnects {
109 OCMEM_BLOCKED = 0,
110 OCMEM_PORT = 1,
111 OCMEM_OCMEMNOC = 2,
112 OCMEM_SYSNOC = 3,
113};
114
Naveen Ramaraje4cc4622012-10-29 17:28:57 -0700115enum ocmem_tz_client {
116 TZ_UNUSED = 0x0,
117 TZ_GRAPHICS,
118 TZ_VIDEO,
119 TZ_LP_AUDIO,
120 TZ_SENSORS,
121 TZ_OTHER_OS,
122 TZ_DEBUG,
123};
124
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700125/**
126 * Primary OCMEM Arbitration Table
127 **/
128struct ocmem_table {
129 int client_id;
130 int priority;
131 int mode;
132 int hw_interconnect;
Naveen Ramaraje4cc4622012-10-29 17:28:57 -0700133 int tz_id;
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700134} ocmem_client_table[OCMEM_CLIENT_MAX] = {
Naveen Ramaraje4cc4622012-10-29 17:28:57 -0700135 {OCMEM_GRAPHICS, PRIO_GFX, OCMEM_PERFORMANCE, OCMEM_PORT,
136 TZ_GRAPHICS},
137 {OCMEM_VIDEO, PRIO_VIDEO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC,
138 TZ_VIDEO},
139 {OCMEM_CAMERA, NO_PRIO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC,
140 TZ_UNUSED},
141 {OCMEM_HP_AUDIO, PRIO_HP_AUDIO, OCMEM_PASSIVE, OCMEM_BLOCKED,
142 TZ_UNUSED},
143 {OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED,
144 TZ_UNUSED},
145 {OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, OCMEM_SYSNOC,
146 TZ_LP_AUDIO},
147 {OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC,
148 TZ_SENSORS},
149 {OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC,
150 TZ_OTHER_OS},
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700151};
152
153static struct rb_root sched_tree;
154static struct mutex sched_mutex;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -0700155static struct mutex allocation_mutex;
Naveen Ramaraj89738952013-02-13 15:24:57 -0800156static struct mutex free_mutex;
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700157
/* A region represents a contiguous interval in OCMEM address space */
159struct ocmem_region {
160 /* Chain in Interval Tree */
161 struct rb_node region_rb;
162 /* Hash map of requests */
163 struct idr region_idr;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700164 /* Chain in eviction list */
165 struct list_head eviction_list;
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700166 unsigned long r_start;
167 unsigned long r_end;
168 unsigned long r_sz;
169 /* Highest priority of all requests served by this region */
170 int max_prio;
171};
172
/* Is OCMEM tightly coupled to the client? */
174static inline int is_tcm(int id)
175{
176 if (ocmem_client_table[id].hw_interconnect == OCMEM_PORT ||
177 ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC)
178 return 1;
179 else
180 return 0;
181}
182
Naveen Ramaraj716d59f2012-10-29 11:48:19 -0700183static inline int is_iface_access(int id)
184{
185 return ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC ? 1 : 0;
186}
187
188static inline int is_remapped_access(int id)
189{
190 return ocmem_client_table[id].hw_interconnect == OCMEM_SYSNOC ? 1 : 0;
191}
192
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700193static inline int is_blocked(int id)
194{
195 return ocmem_client_table[id].hw_interconnect == OCMEM_BLOCKED ? 1 : 0;
196}
197
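/* Helpers to convert between handles, buffers and requests */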
Naveen Ramaraj3b6c5a92012-08-19 18:50:44 -0700198inline struct ocmem_buf *handle_to_buffer(struct ocmem_handle *handle)
199{
200 if (handle)
201 return &handle->buffer;
202 else
203 return NULL;
204}
205
206inline struct ocmem_handle *buffer_to_handle(struct ocmem_buf *buffer)
207{
208 if (buffer)
209 return container_of(buffer, struct ocmem_handle, buffer);
210 else
211 return NULL;
212}
213
214inline struct ocmem_req *handle_to_req(struct ocmem_handle *handle)
215{
216 if (handle)
217 return handle->req;
218 else
219 return NULL;
220}
221
222inline struct ocmem_handle *req_to_handle(struct ocmem_req *req)
223{
224 if (req && req->buffer)
225 return container_of(req->buffer, struct ocmem_handle, buffer);
226 else
227 return NULL;
228}
229
230/* Simple wrappers which will have debug features added later */
231inline int ocmem_read(void *at)
232{
233 return readl_relaxed(at);
234}
235
236inline int ocmem_write(unsigned long val, void *at)
237{
238 writel_relaxed(val, at);
239 return 0;
240}
241
Naveen Ramaraje43d9bb2012-08-20 14:41:13 -0700242inline int get_mode(int id)
243{
244 if (!check_id(id))
245 return MODE_NOT_SET;
246 else
247 return ocmem_client_table[id].mode == OCMEM_PERFORMANCE ?
248 WIDE_MODE : THIN_MODE;
249}
250
Naveen Ramaraje4cc4622012-10-29 17:28:57 -0700251inline int get_tz_id(int id)
252{
253 if (!check_id(id))
254 return TZ_UNUSED;
255 else
256 return ocmem_client_table[id].tz_id;
257}
258
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700259/* Returns the address that can be used by a device core to access OCMEM */
260static unsigned long device_address(int id, unsigned long addr)
261{
262 int hw_interconnect = ocmem_client_table[id].hw_interconnect;
263 unsigned long ret_addr = 0x0;
264
265 switch (hw_interconnect) {
266 case OCMEM_PORT:
Naveen Ramaraj716d59f2012-10-29 11:48:19 -0700267 case OCMEM_OCMEMNOC:
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700268 ret_addr = phys_to_offset(addr);
269 break;
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700270 case OCMEM_SYSNOC:
271 ret_addr = addr;
272 break;
273 case OCMEM_BLOCKED:
274 ret_addr = 0x0;
275 break;
276 }
277 return ret_addr;
278}
279
280/* Returns the address as viewed by the core */
281static unsigned long core_address(int id, unsigned long addr)
282{
283 int hw_interconnect = ocmem_client_table[id].hw_interconnect;
284 unsigned long ret_addr = 0x0;
285
286 switch (hw_interconnect) {
287 case OCMEM_PORT:
Naveen Ramaraj716d59f2012-10-29 11:48:19 -0700288 case OCMEM_OCMEMNOC:
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700289 ret_addr = offset_to_phys(addr);
290 break;
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700291 case OCMEM_SYSNOC:
292 ret_addr = addr;
293 break;
294 case OCMEM_BLOCKED:
295 ret_addr = 0x0;
296 break;
297 }
298 return ret_addr;
299}
300
Naveen Ramaraj6a92b262012-07-30 17:36:24 -0700301static inline struct ocmem_zone *zone_of(struct ocmem_req *req)
302{
303 int owner;
304 if (!req)
305 return NULL;
306 owner = req->owner;
307 return get_zone(owner);
308}
309
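/* Link a region into the scheduler interval tree, keyed by its start address */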
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700310static int insert_region(struct ocmem_region *region)
311{
312
313 struct rb_root *root = &sched_tree;
314 struct rb_node **p = &root->rb_node;
315 struct rb_node *parent = NULL;
316 struct ocmem_region *tmp = NULL;
317 unsigned long addr = region->r_start;
318
319 while (*p) {
320 parent = *p;
321 tmp = rb_entry(parent, struct ocmem_region, region_rb);
322
323 if (tmp->r_end > addr) {
324 if (tmp->r_start <= addr)
325 break;
326 p = &(*p)->rb_left;
327 } else if (tmp->r_end <= addr)
328 p = &(*p)->rb_right;
329 }
330 rb_link_node(&region->region_rb, parent, p);
331 rb_insert_color(&region->region_rb, root);
332 return 0;
333}
334
335static int remove_region(struct ocmem_region *region)
336{
337 struct rb_root *root = &sched_tree;
338 rb_erase(&region->region_rb, root);
339 return 0;
340}
341
342static struct ocmem_req *ocmem_create_req(void)
343{
344 struct ocmem_req *p = NULL;
345
346 p = kzalloc(sizeof(struct ocmem_req), GFP_KERNEL);
347 if (!p)
348 return NULL;
349
350 INIT_LIST_HEAD(&p->zone_list);
351 INIT_LIST_HEAD(&p->sched_list);
352 init_rwsem(&p->rw_sem);
353 SET_STATE(p, R_FREE);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -0700354 pr_debug("request %p created\n", p);
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700355 return p;
356}
357
358static int ocmem_destroy_req(struct ocmem_req *req)
359{
360 kfree(req);
361 return 0;
362}
363
364static struct ocmem_region *create_region(void)
365{
366 struct ocmem_region *p = NULL;
367
368 p = kzalloc(sizeof(struct ocmem_region), GFP_KERNEL);
369 if (!p)
370 return NULL;
371 idr_init(&p->region_idr);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700372 INIT_LIST_HEAD(&p->eviction_list);
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700373 p->r_start = p->r_end = p->r_sz = 0x0;
374 p->max_prio = NO_PRIO;
375 return p;
376}
377
378static int destroy_region(struct ocmem_region *region)
379{
Naveen Ramaraj5ec6b332013-03-27 15:24:22 -0700380 idr_destroy(&region->region_idr);
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700381 kfree(region);
382 return 0;
383}
384
385static int attach_req(struct ocmem_region *region, struct ocmem_req *req)
386{
387 int ret, id;
388
389 while (1) {
390 if (idr_pre_get(&region->region_idr, GFP_KERNEL) == 0)
391 return -ENOMEM;
392
393 ret = idr_get_new_above(&region->region_idr, req, 1, &id);
394
395 if (ret != -EAGAIN)
396 break;
397 }
398
399 if (!ret) {
400 req->req_id = id;
401 pr_debug("ocmem: request %p(id:%d) attached to region %p\n",
402 req, id, region);
403 return 0;
404 }
405 return -EINVAL;
406}
407
408static int detach_req(struct ocmem_region *region, struct ocmem_req *req)
409{
410 idr_remove(&region->region_idr, req->req_id);
411 return 0;
412}
413
414static int populate_region(struct ocmem_region *region, struct ocmem_req *req)
415{
416 region->r_start = req->req_start;
417 region->r_end = req->req_end;
418 region->r_sz = req->req_end - req->req_start + 1;
419 return 0;
420}
421
422static int region_req_count(int id, void *ptr, void *data)
423{
424 int *count = data;
425 *count = *count + 1;
426 return 0;
427}
428
429static int req_count(struct ocmem_region *region)
430{
431 int count = 0;
432 idr_for_each(&region->region_idr, region_req_count, &count);
433 return count;
434}
435
436static int compute_max_prio(int id, void *ptr, void *data)
437{
438 int *max = data;
439 struct ocmem_req *req = ptr;
440
441 if (req->prio > *max)
442 *max = req->prio;
443 return 0;
444}
445
446static int update_region_prio(struct ocmem_region *region)
447{
	int max_prio = NO_PRIO;
449 if (req_count(region) != 0) {
450 idr_for_each(&region->region_idr, compute_max_prio, &max_prio);
451 region->max_prio = max_prio;
452 } else {
453 region->max_prio = NO_PRIO;
454 }
455 pr_debug("ocmem: Updating prio of region %p as %d\n",
456 region, max_prio);
457
458 return 0;
459}
460
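/* Return the region containing addr, or the lowest region ending above it */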
461static struct ocmem_region *find_region(unsigned long addr)
462{
463 struct ocmem_region *region = NULL;
464 struct rb_node *rb_node = NULL;
465
466 rb_node = sched_tree.rb_node;
467
468 while (rb_node) {
469 struct ocmem_region *tmp_region = NULL;
470 tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
471
472 if (tmp_region->r_end > addr) {
473 region = tmp_region;
474 if (tmp_region->r_start <= addr)
475 break;
476 rb_node = rb_node->rb_left;
477 } else {
478 rb_node = rb_node->rb_right;
479 }
480 }
481 return region;
482}
483
484static struct ocmem_region *find_region_intersection(unsigned long start,
485 unsigned long end)
486{
487
488 struct ocmem_region *region = NULL;
489 region = find_region(start);
490 if (region && end <= region->r_start)
491 region = NULL;
492 return region;
493}
494
495static struct ocmem_region *find_region_match(unsigned long start,
496 unsigned long end)
497{
498
499 struct ocmem_region *region = NULL;
500 region = find_region(start);
501 if (region && start == region->r_start && end == region->r_end)
502 return region;
503 return NULL;
504}
505
506static struct ocmem_req *find_req_match(int owner, struct ocmem_region *region)
507{
508 struct ocmem_req *req = NULL;
509
510 if (!region)
511 return NULL;
512
513 req = idr_find(&region->region_idr, owner);
514
515 return req;
516}
517
/* Must be called with req->rw_sem held */
519static inline int is_mapped(struct ocmem_req *req)
520{
521 return TEST_STATE(req, R_MAPPED);
522}
523
Naveen Ramaraj89738952013-02-13 15:24:57 -0800524static inline int is_pending_shrink(struct ocmem_req *req)
525{
526 return TEST_STATE(req, R_MUST_SHRINK) ||
527 TEST_STATE(req, R_WF_SHRINK);
528}
529
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700530/* Must be called with sched_mutex held */
531static int __sched_unmap(struct ocmem_req *req)
532{
533 struct ocmem_req *matched_req = NULL;
534 struct ocmem_region *matched_region = NULL;
535
Neeti Desaidad1d8e2013-01-09 19:42:06 -0800536 if (!TEST_STATE(req, R_MAPPED))
537 goto invalid_op_error;
538
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700539 matched_region = find_region_match(req->req_start, req->req_end);
540 matched_req = find_req_match(req->req_id, matched_region);
541
542 if (!matched_region || !matched_req) {
543 pr_err("Could not find backing region for req");
544 goto invalid_op_error;
545 }
546
547 if (matched_req != req) {
548 pr_err("Request does not match backing req");
549 goto invalid_op_error;
550 }
551
552 if (!is_mapped(req)) {
553 pr_err("Request is not currently mapped");
554 goto invalid_op_error;
555 }
556
557 /* Update the request state */
558 CLEAR_STATE(req, R_MAPPED);
559 SET_STATE(req, R_MUST_MAP);
560
561 return OP_COMPLETE;
562
563invalid_op_error:
564 return OP_FAIL;
565}
566
567/* Must be called with sched_mutex held */
568static int __sched_map(struct ocmem_req *req)
569{
570 struct ocmem_req *matched_req = NULL;
571 struct ocmem_region *matched_region = NULL;
572
573 matched_region = find_region_match(req->req_start, req->req_end);
574 matched_req = find_req_match(req->req_id, matched_region);
575
576 if (!matched_region || !matched_req) {
577 pr_err("Could not find backing region for req");
578 goto invalid_op_error;
579 }
580
581 if (matched_req != req) {
582 pr_err("Request does not match backing req");
583 goto invalid_op_error;
584 }
585
586 /* Update the request state */
587 CLEAR_STATE(req, R_MUST_MAP);
588 SET_STATE(req, R_MAPPED);
589
590 return OP_COMPLETE;
591
592invalid_op_error:
593 return OP_FAIL;
594}
595
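/* Perform the map state transition while holding the request and scheduler locks */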
596static int do_map(struct ocmem_req *req)
597{
598 int rc = 0;
599
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700600 down_write(&req->rw_sem);
601
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700602 mutex_lock(&sched_mutex);
603 rc = __sched_map(req);
604 mutex_unlock(&sched_mutex);
605
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700606 up_write(&req->rw_sem);
607
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700608 if (rc == OP_FAIL)
609 return -EINVAL;
610
611 return 0;
612}
613
614static int do_unmap(struct ocmem_req *req)
615{
616 int rc = 0;
617
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700618 down_write(&req->rw_sem);
619
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700620 mutex_lock(&sched_mutex);
621 rc = __sched_unmap(req);
622 mutex_unlock(&sched_mutex);
623
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700624 up_write(&req->rw_sem);
625
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700626 if (rc == OP_FAIL)
627 return -EINVAL;
628
629 return 0;
630}
631
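/* Enable the OCMEM clocks, secure the interval and mark the request mapped */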
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700632static int process_map(struct ocmem_req *req, unsigned long start,
633 unsigned long end)
634{
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700635 int rc = 0;
636
637 rc = ocmem_enable_core_clock();
638
639 if (rc < 0)
640 goto core_clock_fail;
641
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700642
Naveen Ramaraj716d59f2012-10-29 11:48:19 -0700643 if (is_iface_access(req->owner)) {
644 rc = ocmem_enable_iface_clock();
Naveen Ramaraj5da54542012-08-21 13:26:17 -0700645
Naveen Ramaraj716d59f2012-10-29 11:48:19 -0700646 if (rc < 0)
647 goto iface_clock_fail;
648 }
Naveen Ramaraj5da54542012-08-21 13:26:17 -0700649
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -0700650 rc = ocmem_lock(req->owner, phys_to_offset(req->req_start), req->req_sz,
651 get_mode(req->owner));
652
653 if (rc < 0) {
654 pr_err("ocmem: Failed to secure request %p for %d\n", req,
655 req->owner);
656 goto lock_failed;
657 }
658
Naveen Ramaraj5da54542012-08-21 13:26:17 -0700659 rc = do_map(req);
660
Naveen Ramaraje43d9bb2012-08-20 14:41:13 -0700661 if (rc < 0) {
662 pr_err("ocmem: Failed to map request %p for %d\n",
663 req, req->owner);
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700664 goto process_map_fail;
665
Naveen Ramaraje43d9bb2012-08-20 14:41:13 -0700666 }
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -0700667 pr_debug("ocmem: Mapped request %p\n", req);
Naveen Ramaraje43d9bb2012-08-20 14:41:13 -0700668 return 0;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -0700669
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700670process_map_fail:
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -0700671 ocmem_unlock(req->owner, phys_to_offset(req->req_start), req->req_sz);
672lock_failed:
Naveen Ramaraj716d59f2012-10-29 11:48:19 -0700673 if (is_iface_access(req->owner))
674 ocmem_disable_iface_clock();
Naveen Ramaraj5da54542012-08-21 13:26:17 -0700675iface_clock_fail:
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700676 ocmem_disable_core_clock();
677core_clock_fail:
678 pr_err("ocmem: Failed to map ocmem request\n");
679 return rc;
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700680}
681
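/* Unmap the request, un-secure its interval and release the OCMEM clocks */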
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700682static int process_unmap(struct ocmem_req *req, unsigned long start,
683 unsigned long end)
684{
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700685 int rc = 0;
686
687 rc = do_unmap(req);
688
689 if (rc < 0)
690 goto process_unmap_fail;
691
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -0700692 rc = ocmem_unlock(req->owner, phys_to_offset(req->req_start),
693 req->req_sz);
694
695 if (rc < 0) {
696 pr_err("ocmem: Failed to un-secure request %p for %d\n", req,
697 req->owner);
698 goto unlock_failed;
699 }
700
Naveen Ramaraj716d59f2012-10-29 11:48:19 -0700701 if (is_iface_access(req->owner))
702 ocmem_disable_iface_clock();
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700703 ocmem_disable_core_clock();
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -0700704 pr_debug("ocmem: Unmapped request %p\n", req);
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700705 return 0;
706
Naveen Ramaraje43d9bb2012-08-20 14:41:13 -0700707unlock_failed:
Naveen Ramarajbea2d5d2012-08-15 17:26:43 -0700708process_unmap_fail:
709 pr_err("ocmem: Failed to unmap ocmem request\n");
710 return rc;
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700711}
712
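/* Grow an existing allocation towards req_max.
 * Must be called with sched_mutex held.
 */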
713static int __sched_grow(struct ocmem_req *req, bool can_block)
714{
715 unsigned long min = req->req_min;
716 unsigned long max = req->req_max;
717 unsigned long step = req->req_step;
718 int owner = req->owner;
719 unsigned long curr_sz = 0;
720 unsigned long growth_sz = 0;
721 unsigned long curr_start = 0;
722 enum client_prio prio = req->prio;
723 unsigned long alloc_addr = 0x0;
724 bool retry;
725 struct ocmem_region *spanned_r = NULL;
726 struct ocmem_region *overlap_r = NULL;
Neeti Desai64ea8f42013-05-17 15:05:29 -0700727 int rc = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700728
729 struct ocmem_req *matched_req = NULL;
730 struct ocmem_region *matched_region = NULL;
731
732 struct ocmem_zone *zone = get_zone(owner);
733 struct ocmem_region *region = NULL;
734
735 matched_region = find_region_match(req->req_start, req->req_end);
736 matched_req = find_req_match(req->req_id, matched_region);
737
738 if (!matched_region || !matched_req) {
739 pr_err("Could not find backing region for req");
740 goto invalid_op_error;
741 }
742
743 if (matched_req != req) {
744 pr_err("Request does not match backing req");
745 goto invalid_op_error;
746 }
747
748 curr_sz = matched_req->req_sz;
749 curr_start = matched_req->req_start;
750 growth_sz = matched_req->req_max - matched_req->req_sz;
751
752 pr_debug("Attempting to grow req %p from %lx to %lx\n",
753 req, matched_req->req_sz, matched_req->req_max);
754
755 retry = false;
756
757 pr_debug("ocmem: GROW: growth size %lx\n", growth_sz);
758
759retry_next_step:
760
761 spanned_r = NULL;
762 overlap_r = NULL;
763
764 spanned_r = find_region(zone->z_head);
765 overlap_r = find_region_intersection(zone->z_head,
766 zone->z_head + growth_sz);
767
768 if (overlap_r == NULL) {
769 /* no conflicting regions, schedule this region */
770 zone->z_ops->free(zone, curr_start, curr_sz);
Neeti Desai64ea8f42013-05-17 15:05:29 -0700771 rc = zone->z_ops->allocate(zone, curr_sz + growth_sz,
772 &alloc_addr);
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700773
Neeti Desai64ea8f42013-05-17 15:05:29 -0700774 if (rc) {
Naveen Ramarajb9da05782012-05-07 09:07:35 -0700775 pr_err("ocmem: zone allocation operation failed\n");
776 goto internal_error;
777 }
778
779 curr_sz += growth_sz;
780 /* Detach the region from the interval tree */
781 /* This is to guarantee that any change in size
782 * causes the tree to be rebalanced if required */
783
784 detach_req(matched_region, req);
785 if (req_count(matched_region) == 0) {
786 remove_region(matched_region);
787 region = matched_region;
788 } else {
789 region = create_region();
790 if (!region) {
791 pr_err("ocmem: Unable to create region\n");
792 goto region_error;
793 }
794 }
795
796 /* update the request */
797 req->req_start = alloc_addr;
798 /* increment the size to reflect new length */
799 req->req_sz = curr_sz;
800 req->req_end = alloc_addr + req->req_sz - 1;
801
802 /* update request state */
803 CLEAR_STATE(req, R_MUST_GROW);
804 SET_STATE(req, R_ALLOCATED);
805 SET_STATE(req, R_MUST_MAP);
806 req->op = SCHED_MAP;
807
808 /* update the region with new req */
809 attach_req(region, req);
810 populate_region(region, req);
811 update_region_prio(region);
812
813 /* update the tree with new region */
814 if (insert_region(region)) {
815 pr_err("ocmem: Failed to insert the region\n");
816 goto region_error;
817 }
818
819 if (retry) {
820 SET_STATE(req, R_MUST_GROW);
821 SET_STATE(req, R_PENDING);
822 req->op = SCHED_GROW;
823 return OP_PARTIAL;
824 }
825 } else if (spanned_r != NULL && overlap_r != NULL) {
826 /* resolve conflicting regions based on priority */
827 if (overlap_r->max_prio < prio) {
828 /* Growth cannot be triggered unless a previous
829 * client of lower priority was evicted */
830 pr_err("ocmem: Invalid growth scheduled\n");
831 /* This is serious enough to fail */
832 BUG();
833 return OP_FAIL;
834 } else if (overlap_r->max_prio > prio) {
835 if (min == max) {
836 /* Cannot grow at this time, try later */
837 SET_STATE(req, R_PENDING);
838 SET_STATE(req, R_MUST_GROW);
839 return OP_RESCHED;
840 } else {
841 /* Try to grow in steps */
842 growth_sz -= step;
843 /* We are OOM at this point so need to retry */
844 if (growth_sz <= curr_sz) {
845 SET_STATE(req, R_PENDING);
846 SET_STATE(req, R_MUST_GROW);
847 return OP_RESCHED;
848 }
849 retry = true;
850 pr_debug("ocmem: Attempting with reduced size %lx\n",
851 growth_sz);
852 goto retry_next_step;
853 }
854 } else {
855 pr_err("ocmem: grow: New Region %p Existing %p\n",
856 matched_region, overlap_r);
857 pr_err("ocmem: Undetermined behavior\n");
858 /* This is serious enough to fail */
859 BUG();
860 }
861 } else if (spanned_r == NULL && overlap_r != NULL) {
862 goto err_not_supported;
863 }
864
865 return OP_COMPLETE;
866
867err_not_supported:
868 pr_err("ocmem: Scheduled unsupported operation\n");
869 return OP_FAIL;
870region_error:
871 zone->z_ops->free(zone, alloc_addr, curr_sz);
872 detach_req(region, req);
873 update_region_prio(region);
874 /* req is going to be destroyed by the caller anyways */
875internal_error:
876 destroy_region(region);
877invalid_op_error:
878 return OP_FAIL;
879}
880
881/* Must be called with sched_mutex held */
882static int __sched_free(struct ocmem_req *req)
883{
884 int owner = req->owner;
885 int ret = 0;
886
887 struct ocmem_req *matched_req = NULL;
888 struct ocmem_region *matched_region = NULL;
889
890 struct ocmem_zone *zone = get_zone(owner);
891
892 BUG_ON(!zone);
893
894 matched_region = find_region_match(req->req_start, req->req_end);
895 matched_req = find_req_match(req->req_id, matched_region);
896
897 if (!matched_region || !matched_req)
898 goto invalid_op_error;
899 if (matched_req != req)
900 goto invalid_op_error;
901
902 ret = zone->z_ops->free(zone,
903 matched_req->req_start, matched_req->req_sz);
904
905 if (ret < 0)
906 goto err_op_fail;
907
908 detach_req(matched_region, matched_req);
909 update_region_prio(matched_region);
910 if (req_count(matched_region) == 0) {
911 remove_region(matched_region);
912 destroy_region(matched_region);
913 }
914
915 /* Update the request */
916 req->req_start = 0x0;
917 req->req_sz = 0x0;
918 req->req_end = 0x0;
919 SET_STATE(req, R_FREE);
920 return OP_COMPLETE;
921invalid_op_error:
922 pr_err("ocmem: free: Failed to find matching region\n");
923err_op_fail:
924 pr_err("ocmem: free: Failed\n");
925 return OP_FAIL;
926}
927
928/* Must be called with sched_mutex held */
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700929static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz)
930{
931 int owner = req->owner;
932 int ret = 0;
933
934 struct ocmem_req *matched_req = NULL;
935 struct ocmem_region *matched_region = NULL;
936 struct ocmem_region *region = NULL;
937 unsigned long alloc_addr = 0x0;
Neeti Desai64ea8f42013-05-17 15:05:29 -0700938 int rc = 0;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700939
940 struct ocmem_zone *zone = get_zone(owner);
941
942 BUG_ON(!zone);
943
944 /* The shrink should not be called for zero size */
945 BUG_ON(new_sz == 0);
946
947 matched_region = find_region_match(req->req_start, req->req_end);
948 matched_req = find_req_match(req->req_id, matched_region);
949
950 if (!matched_region || !matched_req)
951 goto invalid_op_error;
952 if (matched_req != req)
953 goto invalid_op_error;
954
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700955 ret = zone->z_ops->free(zone,
956 matched_req->req_start, matched_req->req_sz);
957
958 if (ret < 0) {
959 pr_err("Zone Allocation operation failed\n");
960 goto internal_error;
961 }
962
Neeti Desai64ea8f42013-05-17 15:05:29 -0700963 rc = zone->z_ops->allocate(zone, new_sz, &alloc_addr);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700964
Neeti Desai64ea8f42013-05-17 15:05:29 -0700965 if (rc) {
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700966 pr_err("Zone Allocation operation failed\n");
967 goto internal_error;
968 }
969
970 /* Detach the region from the interval tree */
971 /* This is to guarantee that the change in size
972 * causes the tree to be rebalanced if required */
973
974 detach_req(matched_region, req);
975 if (req_count(matched_region) == 0) {
976 remove_region(matched_region);
977 region = matched_region;
978 } else {
979 region = create_region();
980 if (!region) {
981 pr_err("ocmem: Unable to create region\n");
982 goto internal_error;
983 }
984 }
985 /* update the request */
986 req->req_start = alloc_addr;
987 req->req_sz = new_sz;
	req->req_end = alloc_addr + req->req_sz - 1;
989
990 if (req_count(region) == 0) {
991 remove_region(matched_region);
992 destroy_region(matched_region);
993 }
994
995 /* update request state */
996 SET_STATE(req, R_MUST_GROW);
997 SET_STATE(req, R_MUST_MAP);
998 req->op = SCHED_MAP;
999
1000 /* attach the request to the region */
1001 attach_req(region, req);
1002 populate_region(region, req);
1003 update_region_prio(region);
1004
1005 /* update the tree with new region */
1006 if (insert_region(region)) {
1007 pr_err("ocmem: Failed to insert the region\n");
1008 zone->z_ops->free(zone, alloc_addr, new_sz);
1009 detach_req(region, req);
1010 update_region_prio(region);
1011 /* req will be destroyed by the caller */
1012 goto region_error;
1013 }
1014 return OP_COMPLETE;
1015
1016region_error:
1017 destroy_region(region);
1018internal_error:
1019 pr_err("ocmem: shrink: Failed\n");
1020 return OP_FAIL;
1021invalid_op_error:
1022 pr_err("ocmem: shrink: Failed to find matching region\n");
1023 return OP_FAIL;
1024}
1025
1026/* Must be called with sched_mutex held */
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001027static int __sched_allocate(struct ocmem_req *req, bool can_block,
1028 bool can_wait)
1029{
1030 unsigned long min = req->req_min;
1031 unsigned long max = req->req_max;
1032 unsigned long step = req->req_step;
1033 int owner = req->owner;
1034 unsigned long sz = max;
1035 enum client_prio prio = req->prio;
1036 unsigned long alloc_addr = 0x0;
1037 bool retry;
Neeti Desai64ea8f42013-05-17 15:05:29 -07001038 int rc = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001039
1040 struct ocmem_region *spanned_r = NULL;
1041 struct ocmem_region *overlap_r = NULL;
1042
1043 struct ocmem_zone *zone = get_zone(owner);
1044 struct ocmem_region *region = NULL;
1045
1046 BUG_ON(!zone);
1047
1048 if (min > (zone->z_end - zone->z_start)) {
1049 pr_err("ocmem: requested minimum size exceeds quota\n");
1050 goto invalid_op_error;
1051 }
1052
1053 if (max > (zone->z_end - zone->z_start)) {
1054 pr_err("ocmem: requested maximum size exceeds quota\n");
1055 goto invalid_op_error;
1056 }
1057
1058 if (min > zone->z_free) {
1059 pr_err("ocmem: out of memory for zone %d\n", owner);
1060 goto invalid_op_error;
1061 }
1062
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001063 retry = false;
1064
Naveen Ramaraj89738952013-02-13 15:24:57 -08001065 pr_debug("ocmem: do_allocate: %s request %p size %lx\n",
1066 get_name(owner), req, sz);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001067
1068retry_next_step:
1069
1070 spanned_r = NULL;
1071 overlap_r = NULL;
1072
1073 spanned_r = find_region(zone->z_head);
1074 overlap_r = find_region_intersection(zone->z_head, zone->z_head + sz);
1075
1076 if (overlap_r == NULL) {
Neeti Desai0e044c62013-05-28 17:37:15 -07001077
1078 region = create_region();
1079
1080 if (!region) {
1081 pr_err("ocmem: Unable to create region\n");
1082 goto invalid_op_error;
1083 }
1084
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001085 /* no conflicting regions, schedule this region */
Neeti Desai64ea8f42013-05-17 15:05:29 -07001086 rc = zone->z_ops->allocate(zone, sz, &alloc_addr);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001087
Neeti Desai64ea8f42013-05-17 15:05:29 -07001088 if (rc) {
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001089 pr_err("Zone Allocation operation failed\n");
1090 goto internal_error;
1091 }
1092
1093 /* update the request */
1094 req->req_start = alloc_addr;
1095 req->req_end = alloc_addr + sz - 1;
1096 req->req_sz = sz;
1097 req->zone = zone;
1098
1099 /* update request state */
1100 CLEAR_STATE(req, R_FREE);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001101 CLEAR_STATE(req, R_PENDING);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001102 SET_STATE(req, R_ALLOCATED);
1103 SET_STATE(req, R_MUST_MAP);
1104 req->op = SCHED_NOP;
1105
1106 /* attach the request to the region */
1107 attach_req(region, req);
1108 populate_region(region, req);
1109 update_region_prio(region);
1110
1111 /* update the tree with new region */
1112 if (insert_region(region)) {
1113 pr_err("ocmem: Failed to insert the region\n");
1114 zone->z_ops->free(zone, alloc_addr, sz);
1115 detach_req(region, req);
1116 update_region_prio(region);
1117 /* req will be destroyed by the caller */
1118 goto internal_error;
1119 }
1120
1121 if (retry) {
1122 SET_STATE(req, R_MUST_GROW);
1123 SET_STATE(req, R_PENDING);
1124 req->op = SCHED_GROW;
1125 return OP_PARTIAL;
1126 }
1127 } else if (spanned_r != NULL && overlap_r != NULL) {
1128 /* resolve conflicting regions based on priority */
1129 if (overlap_r->max_prio < prio) {
1130 if (min == max) {
Naveen Ramaraj59907982012-10-16 17:40:38 -07001131 req->req_start = zone->z_head;
1132 req->req_end = zone->z_head + sz - 1;
1133 req->req_sz = 0x0;
1134 req->edata = NULL;
1135 goto trigger_eviction;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001136 } else {
				/* Try to allocate at least 'min' immediately */
1138 sz -= step;
1139 if (sz < min)
1140 goto err_out_of_mem;
1141 retry = true;
1142 pr_debug("ocmem: Attempting with reduced size %lx\n",
1143 sz);
1144 goto retry_next_step;
1145 }
1146 } else if (overlap_r->max_prio > prio) {
1147 if (can_block == true) {
1148 SET_STATE(req, R_PENDING);
1149 SET_STATE(req, R_MUST_GROW);
1150 return OP_RESCHED;
1151 } else {
1152 if (min == max) {
1153 pr_err("Cannot allocate %lx synchronously\n",
1154 sz);
1155 goto err_out_of_mem;
1156 } else {
1157 sz -= step;
1158 if (sz < min)
1159 goto err_out_of_mem;
1160 retry = true;
1161 pr_debug("ocmem: Attempting reduced size %lx\n",
1162 sz);
1163 goto retry_next_step;
1164 }
1165 }
1166 } else {
1167 pr_err("ocmem: Undetermined behavior\n");
1168 pr_err("ocmem: New Region %p Existing %p\n", region,
1169 overlap_r);
1170 /* This is serious enough to fail */
1171 BUG();
1172 }
1173 } else if (spanned_r == NULL && overlap_r != NULL)
1174 goto err_not_supported;
1175
1176 return OP_COMPLETE;
1177
Naveen Ramaraj59907982012-10-16 17:40:38 -07001178trigger_eviction:
1179 pr_debug("Trigger eviction of region %p\n", overlap_r);
Naveen Ramaraj59907982012-10-16 17:40:38 -07001180 return OP_EVICT;
1181
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001182err_not_supported:
1183 pr_err("ocmem: Scheduled unsupported operation\n");
1184 return OP_FAIL;
1185
1186err_out_of_mem:
1187 pr_err("ocmem: Out of memory during allocation\n");
1188internal_error:
1189 destroy_region(region);
1190invalid_op_error:
1191 return OP_FAIL;
1192}
1193
Naveen Ramaraj89738952013-02-13 15:24:57 -08001194/* Remove the request from eviction lists */
1195static void cancel_restore(struct ocmem_req *e_handle,
1196 struct ocmem_req *req)
1197{
1198 struct ocmem_eviction_data *edata = e_handle->edata;
1199
1200 if (!edata || !req)
1201 return;
1202
1203 if (list_empty(&edata->req_list))
1204 return;
1205
1206 list_del_init(&req->eviction_list);
1207 req->e_handle = NULL;
1208
1209 return;
1210}
1211
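/* Queue a request for a deferred retry on its owner's scheduler queue */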
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001212static int sched_enqueue(struct ocmem_req *priv)
1213{
1214 struct ocmem_req *next = NULL;
1215 mutex_lock(&sched_queue_mutex);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001216 SET_STATE(priv, R_ENQUEUED);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001217 list_add_tail(&priv->sched_list, &sched_queue[priv->owner]);
1218 pr_debug("enqueued req %p\n", priv);
1219 list_for_each_entry(next, &sched_queue[priv->owner], sched_list) {
Naveen Ramaraj89738952013-02-13 15:24:57 -08001220 pr_debug("pending request %p for client %s\n", next,
1221 get_name(next->owner));
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001222 }
1223 mutex_unlock(&sched_queue_mutex);
1224 return 0;
1225}
1226
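/* Remove a victim request from its owner's scheduler queue, if present */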
Naveen Ramaraj7caffea2012-10-24 17:22:58 -07001227static void sched_dequeue(struct ocmem_req *victim_req)
1228{
1229 struct ocmem_req *req = NULL;
1230 struct ocmem_req *next = NULL;
1231 int id;
1232
1233 if (!victim_req)
1234 return;
1235
1236 id = victim_req->owner;
1237
1238 mutex_lock(&sched_queue_mutex);
1239
1240 if (list_empty(&sched_queue[id]))
1241 goto dequeue_done;
1242
1243 list_for_each_entry_safe(req, next, &sched_queue[id], sched_list)
1244 {
1245 if (req == victim_req) {
Naveen Ramaraj89738952013-02-13 15:24:57 -08001246 pr_debug("ocmem: Cancelling pending request %p for %s\n",
1247 req, get_name(req->owner));
1248 list_del_init(&victim_req->sched_list);
1249 CLEAR_STATE(victim_req, R_ENQUEUED);
1250 break;
Naveen Ramaraj7caffea2012-10-24 17:22:58 -07001251 }
1252 }
Naveen Ramaraj7caffea2012-10-24 17:22:58 -07001253dequeue_done:
1254 mutex_unlock(&sched_queue_mutex);
1255 return;
1256}
1257
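/* Fetch a pending request from the scheduler queues */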
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001258static struct ocmem_req *ocmem_fetch_req(void)
1259{
1260 int i;
1261 struct ocmem_req *req = NULL;
1262 struct ocmem_req *next = NULL;
1263
1264 mutex_lock(&sched_queue_mutex);
1265 for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
1266 if (list_empty(&sched_queue[i]))
1267 continue;
1268 list_for_each_entry_safe(req, next, &sched_queue[i], sched_list)
1269 {
1270 if (req) {
1271 pr_debug("ocmem: Fetched pending request %p\n",
1272 req);
1273 list_del(&req->sched_list);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001274 CLEAR_STATE(req, R_ENQUEUED);
1275 break;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001276 }
1277 }
1278 }
1279 mutex_unlock(&sched_queue_mutex);
1280 return req;
1281}
1282
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001283
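/* Return the total OCMEM quota (zone size) available to a client */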
1284unsigned long process_quota(int id)
1285{
1286 struct ocmem_zone *zone = NULL;
1287
1288 if (is_blocked(id))
1289 return 0;
1290
1291 zone = get_zone(id);
1292
1293 if (zone && zone->z_pool)
1294 return zone->z_end - zone->z_start;
1295 else
1296 return 0;
1297}
1298
1299static int do_grow(struct ocmem_req *req)
1300{
1301 struct ocmem_buf *buffer = NULL;
1302 bool can_block = true;
1303 int rc = 0;
1304
1305 down_write(&req->rw_sem);
1306 buffer = req->buffer;
1307
1308 /* Take the scheduler mutex */
1309 mutex_lock(&sched_mutex);
1310 rc = __sched_grow(req, can_block);
1311 mutex_unlock(&sched_mutex);
1312
1313 if (rc == OP_FAIL)
1314 goto err_op_fail;
1315
1316 if (rc == OP_RESCHED) {
1317 pr_debug("ocmem: Enqueue this allocation");
1318 sched_enqueue(req);
1319 }
1320
1321 else if (rc == OP_COMPLETE || rc == OP_PARTIAL) {
1322 buffer->addr = device_address(req->owner, req->req_start);
1323 buffer->len = req->req_sz;
1324 }
1325
1326 up_write(&req->rw_sem);
1327 return 0;
1328err_op_fail:
1329 up_write(&req->rw_sem);
1330 return -EINVAL;
1331}
1332
1333static int process_grow(struct ocmem_req *req)
1334{
1335 int rc = 0;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07001336 unsigned long offset = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001337
1338 /* Attempt to grow the region */
1339 rc = do_grow(req);
1340
1341 if (rc < 0)
1342 return -EINVAL;
1343
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001344 rc = process_map(req, req->req_start, req->req_end);
1345 if (rc < 0)
1346 return -EINVAL;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001347
Naveen Ramaraj99b07562012-05-28 20:57:09 -07001348 offset = phys_to_offset(req->req_start);
1349
1350 rc = ocmem_memory_on(req->owner, offset, req->req_sz);
1351
1352 if (rc < 0) {
1353 pr_err("Failed to switch ON memory macros\n");
1354 goto power_ctl_error;
1355 }
1356
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001357 /* Notify the client about the buffer growth */
1358 rc = dispatch_notification(req->owner, OCMEM_ALLOC_GROW, req->buffer);
1359 if (rc < 0) {
1360 pr_err("No notifier callback to cater for req %p event: %d\n",
1361 req, OCMEM_ALLOC_GROW);
1362 BUG();
1363 }
1364 return 0;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07001365power_ctl_error:
1366 return -EINVAL;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001367}
1368
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001369static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
1370{
1371
1372 int rc = 0;
1373 struct ocmem_buf *buffer = NULL;
1374
1375 down_write(&req->rw_sem);
1376 buffer = req->buffer;
1377
1378 /* Take the scheduler mutex */
1379 mutex_lock(&sched_mutex);
1380 rc = __sched_shrink(req, shrink_size);
1381 mutex_unlock(&sched_mutex);
1382
1383 if (rc == OP_FAIL)
1384 goto err_op_fail;
1385
1386 else if (rc == OP_COMPLETE) {
1387 buffer->addr = device_address(req->owner, req->req_start);
1388 buffer->len = req->req_sz;
1389 }
1390
1391 up_write(&req->rw_sem);
1392 return 0;
1393err_op_fail:
1394 up_write(&req->rw_sem);
1395 return -EINVAL;
1396}
1397
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001398static void ocmem_sched_wk_func(struct work_struct *work);
1399DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func);
1400
1401static int ocmem_schedule_pending(void)
1402{
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001403
1404 bool need_sched = false;
1405 int i = 0;
1406
1407 for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
1408 if (!list_empty(&sched_queue[i])) {
1409 need_sched = true;
1410 break;
1411 }
1412 }
1413
1414 if (need_sched == true) {
1415 cancel_delayed_work(&ocmem_sched_thread);
1416 schedule_delayed_work(&ocmem_sched_thread,
1417 msecs_to_jiffies(SCHED_DELAY));
1418 pr_debug("ocmem: Scheduled delayed work\n");
1419 }
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001420 return 0;
1421}
1422
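/* Release the zone memory backing a request under the scheduler lock */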
1423static int do_free(struct ocmem_req *req)
1424{
1425 int rc = 0;
1426 struct ocmem_buf *buffer = req->buffer;
1427
1428 down_write(&req->rw_sem);
1429
1430 if (is_mapped(req)) {
1431 pr_err("ocmem: Buffer needs to be unmapped before free\n");
1432 goto err_free_fail;
1433 }
1434
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001435 pr_debug("ocmem: do_free: client %s req %p\n", get_name(req->owner),
1436 req);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001437 /* Grab the sched mutex */
1438 mutex_lock(&sched_mutex);
1439 rc = __sched_free(req);
1440 mutex_unlock(&sched_mutex);
1441
1442 switch (rc) {
1443
1444 case OP_COMPLETE:
1445 buffer->addr = 0x0;
1446 buffer->len = 0x0;
1447 break;
1448 case OP_FAIL:
1449 default:
1450 goto err_free_fail;
1451 break;
1452 }
1453
1454 up_write(&req->rw_sem);
1455 return 0;
1456err_free_fail:
1457 up_write(&req->rw_sem);
1458 pr_err("ocmem: freeing req %p failed\n", req);
1459 return -EINVAL;
1460}
1461
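/* Tear down a client allocation: unmap it, switch off the memory macros
 * and release the backing zone memory.
 */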
1462int process_free(int id, struct ocmem_handle *handle)
1463{
1464 struct ocmem_req *req = NULL;
1465 struct ocmem_buf *buffer = NULL;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07001466 unsigned long offset = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001467 int rc = 0;
1468
Naveen Ramaraj89738952013-02-13 15:24:57 -08001469 mutex_lock(&free_mutex);
1470
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001471 if (is_blocked(id)) {
1472 pr_err("Client %d cannot request free\n", id);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001473 goto free_invalid;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001474 }
1475
1476 req = handle_to_req(handle);
1477 buffer = handle_to_buffer(handle);
1478
Naveen Ramaraj89738952013-02-13 15:24:57 -08001479 if (!req) {
1480 pr_err("ocmem: No valid request to free\n");
1481 goto free_invalid;
1482 }
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001483
1484 if (req->req_start != core_address(id, buffer->addr)) {
1485 pr_err("Invalid buffer handle passed for free\n");
Naveen Ramaraj89738952013-02-13 15:24:57 -08001486 goto free_invalid;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001487 }
1488
Naveen Ramaraj89738952013-02-13 15:24:57 -08001489 if (req->edata != NULL) {
1490 pr_err("ocmem: Request %p(%2lx) yet to process eviction %p\n",
1491 req, req->state, req->edata);
1492 goto free_invalid;
1493 }
1494
1495 if (is_pending_shrink(req)) {
1496 pr_err("ocmem: Request %p(%2lx) yet to process eviction\n",
1497 req, req->state);
1498 goto pending_shrink;
1499 }
1500
1501 /* Remove the request from any restore lists */
1502 if (req->e_handle)
1503 cancel_restore(req->e_handle, req);
1504
	/* Remove the request from any pending operations */
1506 if (TEST_STATE(req, R_ENQUEUED)) {
1507 mutex_lock(&sched_mutex);
1508 sched_dequeue(req);
1509 mutex_unlock(&sched_mutex);
1510 }
Naveen Ramaraj7caffea2012-10-24 17:22:58 -07001511
Neeti Desaidad1d8e2013-01-09 19:42:06 -08001512 if (!TEST_STATE(req, R_FREE)) {
Naveen Ramaraj89738952013-02-13 15:24:57 -08001513
Neeti Desaidad1d8e2013-01-09 19:42:06 -08001514 if (TEST_STATE(req, R_MAPPED)) {
1515 /* unmap the interval and clear the memory */
1516 rc = process_unmap(req, req->req_start, req->req_end);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001517
Neeti Desaidad1d8e2013-01-09 19:42:06 -08001518 if (rc < 0) {
1519 pr_err("ocmem: Failed to unmap %p\n", req);
1520 goto free_fail;
1521 }
Neeti Desai1d657f52013-04-26 10:24:26 -07001522 /* Turn off the memory */
1523 if (req->req_sz != 0) {
1524
1525 offset = phys_to_offset(req->req_start);
1526 rc = ocmem_memory_off(req->owner, offset,
1527 req->req_sz);
1528
1529 if (rc < 0) {
1530 pr_err("Failed to switch OFF memory macros\n");
1531 goto free_fail;
1532 }
1533 }
Neeti Desaidad1d8e2013-01-09 19:42:06 -08001534
1535 rc = do_free(req);
1536 if (rc < 0) {
1537 pr_err("ocmem: Failed to free %p\n", req);
1538 goto free_fail;
1539 }
1540 } else
Naveen Ramaraj89738952013-02-13 15:24:57 -08001541 pr_debug("request %p was already shrunk to 0\n", req);
Neeti Desaidad1d8e2013-01-09 19:42:06 -08001542 }
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001543
Neeti Desai1d657f52013-04-26 10:24:26 -07001544 if (!TEST_STATE(req, R_FREE)) {
1545 /* Turn off the memory */
1546 if (req->req_sz != 0) {
Naveen Ramaraj99b07562012-05-28 20:57:09 -07001547
Neeti Desai1d657f52013-04-26 10:24:26 -07001548 offset = phys_to_offset(req->req_start);
1549 rc = ocmem_memory_off(req->owner, offset, req->req_sz);
Naveen Ramaraj99b07562012-05-28 20:57:09 -07001550
Neeti Desai1d657f52013-04-26 10:24:26 -07001551 if (rc < 0) {
1552 pr_err("Failed to switch OFF memory macros\n");
1553 goto free_fail;
1554 }
Naveen Ramaraj99b07562012-05-28 20:57:09 -07001555 }
1556
Naveen Ramaraj807d7582013-02-06 14:41:12 -08001557 /* free the allocation */
1558 rc = do_free(req);
1559 if (rc < 0)
1560 return -EINVAL;
1561 }
1562
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001563 inc_ocmem_stat(zone_of(req), NR_FREES);
1564
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001565 ocmem_destroy_req(req);
1566 handle->req = NULL;
1567
1568 ocmem_schedule_pending();
Naveen Ramaraj89738952013-02-13 15:24:57 -08001569 mutex_unlock(&free_mutex);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001570 return 0;
Naveen Ramaraj89738952013-02-13 15:24:57 -08001571free_fail:
1572free_invalid:
1573 mutex_unlock(&free_mutex);
1574 return -EINVAL;
1575pending_shrink:
1576 mutex_unlock(&free_mutex);
1577 return -EAGAIN;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001578}
1579
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001580static void ocmem_rdm_worker(struct work_struct *work)
1581{
1582 int offset = 0;
1583 int rc = 0;
1584 int event;
1585 struct ocmem_rdm_work *work_data = container_of(work,
1586 struct ocmem_rdm_work, work);
1587 int id = work_data->id;
1588 struct ocmem_map_list *list = work_data->list;
1589 int direction = work_data->direction;
1590 struct ocmem_handle *handle = work_data->handle;
1591 struct ocmem_req *req = handle_to_req(handle);
1592 struct ocmem_buf *buffer = handle_to_buffer(handle);
1593
1594 down_write(&req->rw_sem);
1595 offset = phys_to_offset(req->req_start);
1596 rc = ocmem_rdm_transfer(id, list, offset, direction);
1597 if (work_data->direction == TO_OCMEM)
1598 event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
1599 else
1600 event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001601 up_write(&req->rw_sem);
1602 kfree(work_data);
1603 dispatch_notification(id, event, buffer);
1604}
1605
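/* Queue an RDM transfer between DDR and OCMEM on the dedicated workqueue */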
1606int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle,
1607 struct ocmem_map_list *list, int direction)
1608{
1609 struct ocmem_rdm_work *work_data = NULL;
1610
1611 down_write(&req->rw_sem);
1612
1613 work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC);
1614 if (!work_data)
1615 BUG();
1616
1617 work_data->handle = handle;
1618 work_data->list = list;
1619 work_data->id = req->owner;
1620 work_data->direction = direction;
1621 INIT_WORK(&work_data->work, ocmem_rdm_worker);
1622 up_write(&req->rw_sem);
1623 queue_work(ocmem_rdm_wq, &work_data->work);
1624 return 0;
1625}
1626
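/* Unmap a client buffer without releasing its OCMEM allocation */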
Neeti Desaidad1d8e2013-01-09 19:42:06 -08001627int process_drop(int id, struct ocmem_handle *handle,
1628 struct ocmem_map_list *list)
1629{
1630 struct ocmem_req *req = NULL;
1631 struct ocmem_buf *buffer = NULL;
1632 int rc = 0;
1633
1634 if (is_blocked(id)) {
1635 pr_err("Client %d cannot request drop\n", id);
1636 return -EINVAL;
1637 }
1638
1639 if (is_tcm(id))
1640 pr_err("Client %d cannot request drop\n", id);
1641
1642 req = handle_to_req(handle);
1643 buffer = handle_to_buffer(handle);
1644
1645 if (!req)
1646 return -EINVAL;
1647
1648 if (req->req_start != core_address(id, buffer->addr)) {
1649 pr_err("Invalid buffer handle passed for drop\n");
1650 return -EINVAL;
1651 }
1652
1653 if (TEST_STATE(req, R_MAPPED)) {
1654 rc = process_unmap(req, req->req_start, req->req_end);
1655 if (rc < 0)
1656 return -EINVAL;
1657 } else
1658 return -EINVAL;
1659
1660 return 0;
1661}
1662
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001663int process_xfer_out(int id, struct ocmem_handle *handle,
1664 struct ocmem_map_list *list)
1665{
1666 struct ocmem_req *req = NULL;
1667 int rc = 0;
1668
1669 req = handle_to_req(handle);
1670
1671 if (!req)
1672 return -EINVAL;
1673
1674 if (!is_mapped(req)) {
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001675 pr_err("Buffer is not currently mapped\n");
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001676 goto transfer_out_error;
1677 }
1678
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001679 rc = queue_transfer(req, handle, list, TO_DDR);
1680
1681 if (rc < 0) {
1682 pr_err("Failed to queue rdm transfer to DDR\n");
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001683 inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001684 goto transfer_out_error;
1685 }
1686
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001687 inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_DDR);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001688 return 0;
1689
1690transfer_out_error:
1691 return -EINVAL;
1692}
1693
1694int process_xfer_in(int id, struct ocmem_handle *handle,
1695 struct ocmem_map_list *list)
1696{
1697 struct ocmem_req *req = NULL;
1698 int rc = 0;
1699
1700 req = handle_to_req(handle);
1701
1702 if (!req)
1703 return -EINVAL;
1704
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001705
1706 if (!is_mapped(req)) {
1707 pr_err("Buffer is not already mapped for transfer\n");
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001708 goto transfer_in_error;
1709 }
1710
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001711 inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_OCMEM);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001712 rc = queue_transfer(req, handle, list, TO_OCMEM);
1713
1714 if (rc < 0) {
1715 pr_err("Failed to queue rdm transfer to OCMEM\n");
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001716 inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001717 goto transfer_in_error;
1718 }
1719
1720 return 0;
1721transfer_in_error:
1722 return -EINVAL;
1723}
1724
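/* Shrink (or free) a request in response to an eviction and ack the evictor */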
1725int process_shrink(int id, struct ocmem_handle *handle, unsigned long size)
1726{
1727 struct ocmem_req *req = NULL;
1728 struct ocmem_buf *buffer = NULL;
1729 struct ocmem_eviction_data *edata = NULL;
1730 int rc = 0;
1731
1732 if (is_blocked(id)) {
1733 pr_err("Client %d cannot request free\n", id);
1734 return -EINVAL;
1735 }
1736
1737 req = handle_to_req(handle);
1738 buffer = handle_to_buffer(handle);
1739
1740 if (!req)
1741 return -EINVAL;
1742
Naveen Ramaraj89738952013-02-13 15:24:57 -08001743 mutex_lock(&free_mutex);
1744
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001745 if (req->req_start != core_address(id, buffer->addr)) {
1746 pr_err("Invalid buffer handle passed for shrink\n");
Naveen Ramaraj89738952013-02-13 15:24:57 -08001747 goto shrink_fail;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001748 }
1749
Naveen Ramaraj89738952013-02-13 15:24:57 -08001750 if (!req->e_handle) {
1751 pr_err("Unable to find evicting request\n");
1752 goto shrink_fail;
1753 }
1754
1755 edata = req->e_handle->edata;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001756
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001757 if (!edata) {
1758 pr_err("Unable to find eviction data\n");
Naveen Ramaraj89738952013-02-13 15:24:57 -08001759 goto shrink_fail;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001760 }
1761
1762 pr_debug("Found edata %p in request %p\n", edata, req);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001763
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001764 inc_ocmem_stat(zone_of(req), NR_SHRINKS);
1765
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001766 if (size == 0) {
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001767 pr_debug("req %p being shrunk to zero\n", req);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001768 if (is_mapped(req)) {
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001769 rc = process_unmap(req, req->req_start, req->req_end);
1770 if (rc < 0)
Naveen Ramaraj89738952013-02-13 15:24:57 -08001771 goto shrink_fail;
1772 }
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001773 rc = do_free(req);
1774 if (rc < 0)
Naveen Ramaraj89738952013-02-13 15:24:57 -08001775 goto shrink_fail;
1776 SET_STATE(req, R_FREE);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001777 } else {
1778 rc = do_shrink(req, size);
1779 if (rc < 0)
Naveen Ramaraj89738952013-02-13 15:24:57 -08001780 goto shrink_fail;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001781 }
1782
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001783 CLEAR_STATE(req, R_ALLOCATED);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001784 CLEAR_STATE(req, R_WF_SHRINK);
1785 SET_STATE(req, R_SHRUNK);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001786
1787 if (atomic_dec_and_test(&edata->pending)) {
1788 pr_debug("ocmem: All conflicting allocations were shrunk\n");
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001789 complete(&edata->completion);
1790 }
1791
Naveen Ramaraj89738952013-02-13 15:24:57 -08001792 mutex_unlock(&free_mutex);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001793 return 0;
Naveen Ramaraj89738952013-02-13 15:24:57 -08001794shrink_fail:
1795 pr_err("ocmem: Failed to shrink request %p of %s\n",
1796 req, get_name(req->owner));
1797 mutex_unlock(&free_mutex);
1798 return -EINVAL;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001799}
1800
1801int process_xfer(int id, struct ocmem_handle *handle,
1802 struct ocmem_map_list *list, int direction)
1803{
1804 int rc = 0;
1805
1806 if (is_tcm(id)) {
1807		WARN(1, "Transfer operation is invalid for TCM client\n");
1808 return -EINVAL;
1809 }
1810
1811 if (direction == TO_DDR)
1812 rc = process_xfer_out(id, handle, list);
1813 else if (direction == TO_OCMEM)
1814 rc = process_xfer_in(id, handle, list);
1815 return rc;
1816}
1817
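/*
 * init_eviction() - allocate eviction bookkeeping for client @id.
 *
 * The eviction inherits the client's scheduling priority; its victim and
 * request lists start empty and the pending count starts at zero.  The
 * caller releases it with free_eviction().
 */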
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001818static struct ocmem_eviction_data *init_eviction(int id)
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001819{
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001820 struct ocmem_eviction_data *edata = NULL;
1821 int prio = ocmem_client_table[id].priority;
1822
1823 edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC);
1824
1825 if (!edata) {
1826 pr_err("ocmem: Could not allocate eviction data\n");
1827 return NULL;
1828 }
1829
1830 INIT_LIST_HEAD(&edata->victim_list);
1831 INIT_LIST_HEAD(&edata->req_list);
1832 edata->prio = prio;
1833 atomic_set(&edata->pending, 0);
1834 return edata;
1835}
1836
1837static void free_eviction(struct ocmem_eviction_data *edata)
1838{
1839
1840 if (!edata)
1841 return;
1842
1843 if (!list_empty(&edata->req_list))
1844 pr_err("ocmem: Eviction data %p not empty\n", edata);
1845
1846 kfree(edata);
1847 edata = NULL;
1848}
1849
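/*
 * is_overlapping() - check whether @new intersects the range of @old.
 *
 * Two cases are treated as an overlap: @new starts below @old and reaches
 * into it, or @new starts inside @old and extends to or past @old's end.
 * Note that a @new range lying strictly inside @old matches neither arm of
 * the test below.
 */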
1850static bool is_overlapping(struct ocmem_req *new, struct ocmem_req *old)
1851{
1852
1853 if (!new || !old)
1854 return false;
1855
1856 pr_debug("check overlap [%lx -- %lx] on [%lx -- %lx]\n",
1857 new->req_start, new->req_end,
1858 old->req_start, old->req_end);
1859
1860 if ((new->req_start < old->req_start &&
1861 new->req_end >= old->req_start) ||
1862 (new->req_start >= old->req_start &&
1863 new->req_start <= old->req_end &&
1864 new->req_end >= old->req_end)) {
1865 pr_debug("request %p overlaps with existing req %p\n",
1866 new, old);
1867 return true;
1868 }
1869 return false;
1870}
1871
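/*
 * __evict_common() - collect the victims for an eviction.
 *
 * Walks the allocation tree and, for every region whose highest resident
 * priority is below the evicting priority, scans each lower priority for a
 * resident request.  A request becomes a victim either unconditionally
 * (passive eviction, see process_evict()) or only if it overlaps @req
 * (targeted eviction, see run_evict()).  Victims are marked R_MUST_SHRINK,
 * linked onto edata->req_list and counted in edata->pending; the victim
 * count is returned.
 */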
1872static int __evict_common(struct ocmem_eviction_data *edata,
1873 struct ocmem_req *req)
1874{
1875 struct rb_node *rb_node = NULL;
1876 struct ocmem_req *e_req = NULL;
1877 bool needs_eviction = false;
1878 int j = 0;
1879
1880 for (rb_node = rb_first(&sched_tree); rb_node;
1881 rb_node = rb_next(rb_node)) {
1882
1883 struct ocmem_region *tmp_region = NULL;
1884
1885 tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
1886
1887 if (tmp_region->max_prio < edata->prio) {
1888 for (j = edata->prio - 1; j > NO_PRIO; j--) {
1889 needs_eviction = false;
1890 e_req = find_req_match(j, tmp_region);
1891 if (!e_req)
1892 continue;
1893 if (edata->passive == true) {
1894 needs_eviction = true;
1895 } else {
1896 needs_eviction = is_overlapping(req,
1897 e_req);
1898 }
1899
1900 if (needs_eviction) {
1901 pr_debug("adding %p in region %p to eviction list\n",
1902 e_req, tmp_region);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001903 SET_STATE(e_req, R_MUST_SHRINK);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001904 list_add_tail(
1905 &e_req->eviction_list,
1906 &edata->req_list);
1907 atomic_inc(&edata->pending);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001908 e_req->e_handle = req;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001909 }
1910 }
1911 } else {
1912 pr_debug("Skipped region %p\n", tmp_region);
1913 }
1914 }
1915
1916 pr_debug("%d requests will be evicted\n", atomic_read(&edata->pending));
1917
Naveen Ramaraj89738952013-02-13 15:24:57 -08001918 return atomic_read(&edata->pending);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001919}
1920
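/*
 * trigger_eviction() - ask every victim to give up its allocation.
 *
 * Each victim's owner is sent an OCMEM_ALLOC_SHRINK notification carrying
 * a zero-length buffer at the victim's current start address, and the
 * victim moves from R_MUST_SHRINK to R_WF_SHRINK.  The acknowledgements
 * arrive asynchronously through process_shrink().
 */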
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001921static void trigger_eviction(struct ocmem_eviction_data *edata)
1922{
1923 struct ocmem_req *req = NULL;
1924 struct ocmem_req *next = NULL;
1925 struct ocmem_buf buffer;
1926
1927 if (!edata)
1928 return;
1929
1930 BUG_ON(atomic_read(&edata->pending) == 0);
1931
1932 init_completion(&edata->completion);
1933
1934 list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
1935 {
1936 if (req) {
1937 pr_debug("ocmem: Evicting request %p\n", req);
1938 buffer.addr = req->req_start;
1939 buffer.len = 0x0;
Naveen Ramaraj89738952013-02-13 15:24:57 -08001940 CLEAR_STATE(req, R_MUST_SHRINK);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001941 dispatch_notification(req->owner, OCMEM_ALLOC_SHRINK,
1942 &buffer);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001943 SET_STATE(req, R_WF_SHRINK);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001944 }
1945 }
1946 return;
1947}
1948
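/*
 * process_evict() - passive, client-wide eviction.
 *
 * Evicts every lower priority allocation regardless of overlap, records
 * the eviction in evictions[id] so that process_restore(id) can bring the
 * victims back later, and blocks until all of them have shrunk.  A rough
 * call sequence (the client id is illustrative):
 *
 *	process_evict(OCMEM_GRAPHICS);
 *	... exclusive use of the reclaimed memory ...
 *	process_restore(OCMEM_GRAPHICS);
 */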
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001949int process_evict(int id)
1950{
1951 struct ocmem_eviction_data *edata = NULL;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001952 int rc = 0;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001953
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001954 edata = init_eviction(id);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001955
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001956 if (!edata)
1957 return -EINVAL;
1958
1959 edata->passive = true;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001960
1961 mutex_lock(&sched_mutex);
1962
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001963 rc = __evict_common(edata, NULL);
1964
Naveen Ramaraj89738952013-02-13 15:24:57 -08001965 if (rc == 0)
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001966 goto skip_eviction;
1967
1968 trigger_eviction(edata);
1969
1970 evictions[id] = edata;
1971
1972 mutex_unlock(&sched_mutex);
1973
1974 wait_for_completion(&edata->completion);
1975
1976 return 0;
1977
1978skip_eviction:
1979 evictions[id] = NULL;
1980 mutex_unlock(&sched_mutex);
1981 return 0;
1982}
1983
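/*
 * run_evict() - targeted eviction on behalf of a single request.
 *
 * Used by do_allocate() when the scheduler answers OP_EVICT: only lower
 * priority requests that overlap @req are evicted.  The eviction data is
 * attached to @req (req->edata) so that sched_restore() can re-queue the
 * victims once the new allocation has been placed.
 */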
1984static int run_evict(struct ocmem_req *req)
1985{
1986 struct ocmem_eviction_data *edata = NULL;
1987 int rc = 0;
1988
1989 if (!req)
1990 return -EINVAL;
1991
1992 edata = init_eviction(req->owner);
1993
1994 if (!edata)
1995 return -EINVAL;
1996
1997 edata->passive = false;
1998
Naveen Ramaraj89738952013-02-13 15:24:57 -08001999 mutex_lock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002000 rc = __evict_common(edata, req);
2001
Naveen Ramaraj89738952013-02-13 15:24:57 -08002002 if (rc == 0)
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002003 goto skip_eviction;
2004
2005 trigger_eviction(edata);
2006
2007	pr_debug("ocmem: attaching eviction %p to request %p\n", edata, req);
2008 req->edata = edata;
2009
Naveen Ramaraj89738952013-02-13 15:24:57 -08002010 mutex_unlock(&free_mutex);
2011
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002012 wait_for_completion(&edata->completion);
2013
2014 pr_debug("ocmem: eviction completed successfully\n");
2015 return 0;
2016
2017skip_eviction:
2018	pr_err("ocmem: No requests to evict\n");
2019 free_eviction(edata);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002020 req->edata = NULL;
2021 mutex_unlock(&free_mutex);
2022 return 0;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002023}
2024
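/*
 * __restore_common() - hand evicted requests back to the scheduler.
 *
 * Detaches every victim from the eviction, resets its operation to
 * SCHED_ALLOCATE and re-queues it through sched_enqueue() so the delayed
 * scheduler work can re-allocate it.
 */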
2025static int __restore_common(struct ocmem_eviction_data *edata)
2026{
2027
2028 struct ocmem_req *req = NULL;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002029
2030 if (!edata)
2031 return -EINVAL;
2032
Naveen Ramaraj89738952013-02-13 15:24:57 -08002033 while (!list_empty(&edata->req_list)) {
2034 req = list_first_entry(&edata->req_list, struct ocmem_req,
2035 eviction_list);
2036 list_del_init(&req->eviction_list);
2037 pr_debug("ocmem: restoring evicted request %p\n",
2038 req);
2039 req->edata = NULL;
2040 req->e_handle = NULL;
2041 req->op = SCHED_ALLOCATE;
2042 inc_ocmem_stat(zone_of(req), NR_RESTORES);
2043 sched_enqueue(req);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07002044 }
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002045
2046 pr_debug("Scheduled all evicted regions\n");
2047
2048 return 0;
2049}
2050
2051static int sched_restore(struct ocmem_req *req)
2052{
2053
2054 int rc = 0;
2055
2056 if (!req)
2057 return -EINVAL;
2058
2059 if (!req->edata)
2060 return 0;
2061
Naveen Ramaraj89738952013-02-13 15:24:57 -08002062 mutex_lock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002063 rc = __restore_common(req->edata);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002064 mutex_unlock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002065
2066 if (rc < 0)
2067 return -EINVAL;
2068
2069 free_eviction(req->edata);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002070 req->edata = NULL;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002071 return 0;
2072}
2073
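/*
 * process_restore() - undo a passive eviction started by process_evict().
 *
 * Re-queues the victims recorded in evictions[id], releases the eviction
 * bookkeeping and kicks the delayed scheduler so the re-queued requests
 * are serviced.
 */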
2074int process_restore(int id)
2075{
2076 struct ocmem_eviction_data *edata = evictions[id];
2077 int rc = 0;
2078
2079 if (!edata)
2080 return -EINVAL;
2081
Naveen Ramaraj89738952013-02-13 15:24:57 -08002082 mutex_lock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002083 rc = __restore_common(edata);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002084 mutex_unlock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002085
2086 if (rc < 0) {
2087 pr_err("Failed to restore evicted requests\n");
2088 return -EINVAL;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07002089 }
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002090
2091 free_eviction(edata);
2092 evictions[id] = NULL;
2093 ocmem_schedule_pending();
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07002094 return 0;
2095}
2096
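/*
 * do_allocate() - place a request in OCMEM, evicting if necessary.
 *
 * The scheduler decision is made under sched_mutex; the surrounding
 * allocation_mutex serialises the evict-and-retry loop.  OP_EVICT runs a
 * targeted eviction, restores the victims to the queue and retries the
 * placement.  OP_RESCHED queues the request with an empty buffer,
 * OP_PARTIAL reports the partial placement and queues the request to grow
 * later, and OP_COMPLETE fills in the final device address and size.
 */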
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002097static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait)
2098{
2099 int rc = 0;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002100 int ret = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002101 struct ocmem_buf *buffer = req->buffer;
2102
2103 down_write(&req->rw_sem);
2104
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002105 mutex_lock(&allocation_mutex);
2106retry_allocate:
2107
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002108 /* Take the scheduler mutex */
2109 mutex_lock(&sched_mutex);
2110 rc = __sched_allocate(req, can_block, can_wait);
2111 mutex_unlock(&sched_mutex);
2112
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002113 if (rc == OP_EVICT) {
2114
2115 ret = run_evict(req);
2116
2117 if (ret == 0) {
2118 rc = sched_restore(req);
2119 if (rc < 0) {
2120 pr_err("Failed to restore for req %p\n", req);
2121 goto err_allocate_fail;
2122 }
2123 req->edata = NULL;
2124
2125 pr_debug("Attempting to re-allocate req %p\n", req);
2126 req->req_start = 0x0;
2127 req->req_end = 0x0;
2128 goto retry_allocate;
2129 } else {
2130 goto err_allocate_fail;
2131 }
2132 }
2133
2134 mutex_unlock(&allocation_mutex);
2135
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002136 if (rc == OP_FAIL) {
2137 inc_ocmem_stat(zone_of(req), NR_ALLOCATION_FAILS);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002138 goto err_allocate_fail;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002139 }
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002140
2141 if (rc == OP_RESCHED) {
2142 buffer->addr = 0x0;
2143 buffer->len = 0x0;
2144 pr_debug("ocmem: Enqueuing req %p\n", req);
2145 sched_enqueue(req);
2146 } else if (rc == OP_PARTIAL) {
2147 buffer->addr = device_address(req->owner, req->req_start);
2148 buffer->len = req->req_sz;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002149 inc_ocmem_stat(zone_of(req), NR_RANGE_ALLOCATIONS);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002150 pr_debug("ocmem: Enqueuing req %p\n", req);
2151 sched_enqueue(req);
2152 } else if (rc == OP_COMPLETE) {
2153 buffer->addr = device_address(req->owner, req->req_start);
2154 buffer->len = req->req_sz;
2155 }
2156
2157 up_write(&req->rw_sem);
2158 return 0;
2159err_allocate_fail:
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002160 mutex_unlock(&allocation_mutex);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002161 up_write(&req->rw_sem);
2162 return -EINVAL;
2163}
2164
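/*
 * do_dump() - copy a request's OCMEM contents out to DDR.
 *
 * The request's physical start is converted to an offset into the
 * scheduler's ocmem_vaddr mapping and copied to the caller-supplied
 * address.  The copy is bracketed by ocmem_enable_dump() and
 * ocmem_disable_dump() so the region is accessible while it is read.
 */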
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002165static int do_dump(struct ocmem_req *req, unsigned long addr)
2166{
2167
2168 void __iomem *req_vaddr;
2169 unsigned long offset = 0x0;
Naveen Ramaraje4cc4622012-10-29 17:28:57 -07002170 int rc = 0;
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002171
2172 down_write(&req->rw_sem);
2173
2174 offset = phys_to_offset(req->req_start);
2175
2176 req_vaddr = ocmem_vaddr + offset;
2177
2178 if (!req_vaddr)
2179 goto err_do_dump;
2180
Naveen Ramaraje4cc4622012-10-29 17:28:57 -07002181 rc = ocmem_enable_dump(req->owner, offset, req->req_sz);
2182
2183 if (rc < 0)
2184 goto err_do_dump;
2185
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002186 pr_debug("Dumping client %s buffer ocmem p: %lx (v: %p) to ddr %lx\n",
2187 get_name(req->owner), req->req_start,
2188 req_vaddr, addr);
2189
2190	memcpy_fromio((void *)addr, req_vaddr, req->req_sz);
2191
Naveen Ramaraje4cc4622012-10-29 17:28:57 -07002192 rc = ocmem_disable_dump(req->owner, offset, req->req_sz);
2193
2194 if (rc < 0)
2195 pr_err("Failed to secure request %p of %s after dump\n",
2196 req, get_name(req->owner));
2197
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002198 up_write(&req->rw_sem);
2199 return 0;
2200err_do_dump:
2201 up_write(&req->rw_sem);
2202 return -EINVAL;
2203}
2204
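/*
 * process_allocate() - synchronous allocation entry point for a client.
 *
 * Builds an ocmem_req from the client's min/max/step parameters, runs
 * do_allocate() and, when the placement is non-empty, maps the range and
 * powers on the backing memory macros.  A sketch of a call (client id and
 * sizes are illustrative, not taken from this file):
 *
 *	rc = process_allocate(OCMEM_GRAPHICS, handle, SZ_256K, SZ_1M,
 *			      SZ_64K, true, false);
 */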
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002205int process_allocate(int id, struct ocmem_handle *handle,
2206 unsigned long min, unsigned long max,
2207 unsigned long step, bool can_block, bool can_wait)
2208{
2209
2210 struct ocmem_req *req = NULL;
2211 struct ocmem_buf *buffer = NULL;
2212 int rc = 0;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002213 unsigned long offset = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002214
2215 /* sanity checks */
2216 if (is_blocked(id)) {
2217 pr_err("Client %d cannot request allocation\n", id);
2218 return -EINVAL;
2219 }
2220
2221 if (handle->req != NULL) {
2222 pr_err("Invalid handle passed in\n");
2223 return -EINVAL;
2224 }
2225
2226 buffer = handle_to_buffer(handle);
2227 BUG_ON(buffer == NULL);
2228
2229 /* prepare a request structure to represent this transaction */
2230 req = ocmem_create_req();
2231 if (!req)
2232 return -ENOMEM;
2233
2234 req->owner = id;
2235 req->req_min = min;
2236 req->req_max = max;
2237 req->req_step = step;
2238 req->prio = ocmem_client_table[id].priority;
2239 req->op = SCHED_ALLOCATE;
2240 req->buffer = buffer;
2241
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002242 inc_ocmem_stat(zone_of(req), NR_REQUESTS);
2243
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002244 rc = do_allocate(req, can_block, can_wait);
2245
2246 if (rc < 0)
2247 goto do_allocate_error;
2248
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002249 inc_ocmem_stat(zone_of(req), NR_SYNC_ALLOCATIONS);
2250
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002251 handle->req = req;
2252
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002253 if (req->req_sz != 0) {
2254
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002255 rc = process_map(req, req->req_start, req->req_end);
2256 if (rc < 0)
2257 goto map_error;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002258
2259 offset = phys_to_offset(req->req_start);
2260
2261 rc = ocmem_memory_on(req->owner, offset, req->req_sz);
2262
2263 if (rc < 0) {
2264 pr_err("Failed to switch ON memory macros\n");
2265 goto power_ctl_error;
2266 }
2267 }
2268
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002269 return 0;
2270
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002271power_ctl_error:
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002272 process_unmap(req, req->req_start, req->req_end);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002273map_error:
2274 handle->req = NULL;
2275 do_free(req);
2276do_allocate_error:
2277 ocmem_destroy_req(req);
2278 return -EINVAL;
2279}
2280
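/*
 * process_delayed_allocate() - finish an allocation from the work queue.
 *
 * Asynchronous counterpart of process_allocate() for requests that were
 * queued after OP_RESCHED or OP_PARTIAL.  A request that is still pending
 * simply stays queued; otherwise the new range is mapped, powered on and
 * the owner is notified of the growth via OCMEM_ALLOC_GROW.
 */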
2281int process_delayed_allocate(struct ocmem_req *req)
2282{
2283
2284 struct ocmem_handle *handle = NULL;
2285 int rc = 0;
2286 int id = req->owner;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002287 unsigned long offset = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002288
2289 handle = req_to_handle(req);
2290 BUG_ON(handle == NULL);
2291
2292 rc = do_allocate(req, true, false);
2293
2294 if (rc < 0)
2295 goto do_allocate_error;
2296
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002297 /* The request can still be pending */
2298 if (TEST_STATE(req, R_PENDING))
2299 return 0;
2300
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002301 inc_ocmem_stat(zone_of(req), NR_ASYNC_ALLOCATIONS);
2302
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002303 if (req->req_sz != 0) {
2304
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002305 rc = process_map(req, req->req_start, req->req_end);
2306 if (rc < 0)
2307 goto map_error;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002308
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002309
2310 offset = phys_to_offset(req->req_start);
2311
2312 rc = ocmem_memory_on(req->owner, offset, req->req_sz);
2313
2314 if (rc < 0) {
2315 pr_err("Failed to switch ON memory macros\n");
2316 goto power_ctl_error;
2317 }
2318 }
2319
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002320 /* Notify the client about the buffer growth */
2321 rc = dispatch_notification(id, OCMEM_ALLOC_GROW, req->buffer);
2322 if (rc < 0) {
2323 pr_err("No notifier callback to cater for req %p event: %d\n",
2324 req, OCMEM_ALLOC_GROW);
2325 BUG();
2326 }
2327 return 0;
2328
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002329power_ctl_error:
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002330 process_unmap(req, req->req_start, req->req_end);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002331map_error:
2332 handle->req = NULL;
2333 do_free(req);
2334do_allocate_error:
2335 ocmem_destroy_req(req);
2336 return -EINVAL;
2337}
2338
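/*
 * process_dump() - validated wrapper around do_dump().
 *
 * The buffer must currently be mapped; the dump itself runs under
 * sched_mutex and the per-zone dump statistics are updated around it.
 */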
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002339int process_dump(int id, struct ocmem_handle *handle, unsigned long addr)
2340{
2341 struct ocmem_req *req = NULL;
2342 int rc = 0;
2343
2344 req = handle_to_req(handle);
2345
2346 if (!req)
2347 return -EINVAL;
2348
2349 if (!is_mapped(req)) {
2350 pr_err("Buffer is not mapped\n");
2351 goto dump_error;
2352 }
2353
2354 inc_ocmem_stat(zone_of(req), NR_DUMP_REQUESTS);
2355
2356 mutex_lock(&sched_mutex);
2357 rc = do_dump(req, addr);
2358 mutex_unlock(&sched_mutex);
2359
2360 if (rc < 0)
2361 goto dump_error;
2362
2363 inc_ocmem_stat(zone_of(req), NR_DUMP_COMPLETE);
2364 return 0;
2365
2366dump_error:
2367 pr_err("Dumping OCMEM memory failed for client %d\n", id);
2368 return -EINVAL;
2369}
2370
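/*
 * ocmem_sched_wk_func() - delayed scheduler work.
 *
 * Pulls one pending request off the priority queues and either grows it
 * (SCHED_GROW) or completes a delayed allocation (SCHED_ALLOCATE).
 */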
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002371static void ocmem_sched_wk_func(struct work_struct *work)
2372{
2373
2374 struct ocmem_buf *buffer = NULL;
2375 struct ocmem_handle *handle = NULL;
2376 struct ocmem_req *req = ocmem_fetch_req();
2377
2378 if (!req) {
2379 pr_debug("No Pending Requests found\n");
2380 return;
2381 }
2382
2383 pr_debug("ocmem: sched_wk pending req %p\n", req);
2384 handle = req_to_handle(req);
2385 buffer = handle_to_buffer(handle);
2386 BUG_ON(req->op == SCHED_NOP);
2387
2388 switch (req->op) {
2389 case SCHED_GROW:
2390 process_grow(req);
2391 break;
2392 case SCHED_ALLOCATE:
2393 process_delayed_allocate(req);
2394 break;
2395 default:
2396 pr_err("ocmem: Unknown operation encountered\n");
2397 break;
2398 }
2399 return;
2400}
2401
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002402static int ocmem_allocations_show(struct seq_file *f, void *dummy)
2403{
2404 struct rb_node *rb_node = NULL;
2405 struct ocmem_req *req = NULL;
2406 unsigned j;
2407 mutex_lock(&sched_mutex);
2408 for (rb_node = rb_first(&sched_tree); rb_node;
2409 rb_node = rb_next(rb_node)) {
2410 struct ocmem_region *tmp_region = NULL;
2411 tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
2412 for (j = MAX_OCMEM_PRIO - 1; j > NO_PRIO; j--) {
2413 req = find_req_match(j, tmp_region);
2414 if (req) {
2415 seq_printf(f,
2416 "owner: %s 0x%lx -- 0x%lx size 0x%lx [state: %2lx]\n",
2417 get_name(req->owner),
2418 req->req_start, req->req_end,
2419 req->req_sz, req->state);
2420 }
2421 }
2422 }
2423 mutex_unlock(&sched_mutex);
2424 return 0;
2425}
2426
2427static int ocmem_allocations_open(struct inode *inode, struct file *file)
2428{
2429 return single_open(file, ocmem_allocations_show, inode->i_private);
2430}
2431
2432static const struct file_operations allocations_show_fops = {
2433 .open = ocmem_allocations_open,
2434 .read = seq_read,
2435 .llseek = seq_lseek,
2436 .release = seq_release,
2437};
2438
2439int ocmem_sched_init(struct platform_device *pdev)
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002440{
2441 int i = 0;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002442 struct ocmem_plat_data *pdata = NULL;
2443 struct device *dev = &pdev->dev;
2444
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002445 sched_tree = RB_ROOT;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002446 pdata = platform_get_drvdata(pdev);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002447 mutex_init(&allocation_mutex);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002448 mutex_init(&free_mutex);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002449 mutex_init(&sched_mutex);
2450 mutex_init(&sched_queue_mutex);
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002451 ocmem_vaddr = pdata->vbase;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002452 for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++)
2453 INIT_LIST_HEAD(&sched_queue[i]);
2454
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07002455 mutex_init(&rdm_mutex);
2456 INIT_LIST_HEAD(&rdm_queue);
2457 ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0);
2458 if (!ocmem_rdm_wq)
2459 return -ENOMEM;
2460 ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0);
2461 if (!ocmem_eviction_wq)
2462 return -ENOMEM;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002463
2464 if (!debugfs_create_file("allocations", S_IRUGO, pdata->debug_node,
2465 NULL, &allocations_show_fops)) {
2466 dev_err(dev, "Unable to create debugfs node for scheduler\n");
2467 return -EBUSY;
2468 }
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002469 return 0;
2470}