/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <mach/ocmem_priv.h>

enum request_states {
	R_FREE = 0x0,	/* request is not allocated */
	R_PENDING,	/* request has a pending operation */
	R_ALLOCATED,	/* request has been allocated */
	R_ENQUEUED,	/* request has been enqueued for future retry */
	R_MUST_GROW,	/* request must grow as a part of pending operation */
	R_MUST_SHRINK,	/* request must shrink */
	R_WF_SHRINK,	/* shrink must be ack'ed by a client */
	R_SHRUNK,	/* request was shrunk */
	R_MUST_MAP,	/* request must be mapped before being used */
	R_MUST_UNMAP,	/* request must be unmapped when not being used */
	R_MAPPED,	/* request is mapped and actively used by client */
	R_UNMAPPED,	/* request is not mapped, so it's not in active use */
	R_EVICTED,	/* request is evicted and must be restored */
};

#define SET_STATE(x, val) (set_bit((val), &(x)->state))
#define CLEAR_STATE(x, val) (clear_bit((val), &(x)->state))
#define TEST_STATE(x, val) (test_bit((val), &(x)->state))
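
/*
 * Note: a request's state field is a bitmask, so several R_* flags can be
 * set on the same request at once (e.g. R_ALLOCATED together with
 * R_MUST_MAP); the set_bit()/clear_bit()/test_bit() wrappers above change
 * one flag without clobbering the others.
 */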

enum op_res {
	OP_COMPLETE = 0x0,
	OP_RESCHED,
	OP_PARTIAL,
	OP_EVICT,
	OP_FAIL = ~0x0,
};

/* Represents various client priorities */
/* Note: More than one client can share a priority level */
enum client_prio {
	MIN_PRIO = 0x0,
	NO_PRIO = MIN_PRIO,
	PRIO_SENSORS = 0x1,
	PRIO_OTHER_OS = 0x1,
	PRIO_LP_AUDIO = 0x1,
	PRIO_HP_AUDIO = 0x2,
	PRIO_VOICE = 0x3,
	PRIO_GFX_GROWTH = 0x4,
	PRIO_VIDEO = 0x5,
	PRIO_GFX = 0x6,
	PRIO_OCMEM = 0x7,
	MAX_OCMEM_PRIO = PRIO_OCMEM + 1,
};

static void __iomem *ocmem_vaddr;
static struct list_head sched_queue[MAX_OCMEM_PRIO];
static struct mutex sched_queue_mutex;

/* The duration in msecs before a pending operation is scheduled
 * This allows an idle window between use case boundaries where various
 * hardware state changes can occur. The value will be tweaked on actual
 * hardware.
 */
/* Delay in ms for switching to low power mode for OCMEM */
#define SCHED_DELAY 5000

static struct list_head rdm_queue;
static struct mutex rdm_mutex;
static struct workqueue_struct *ocmem_rdm_wq;
static struct workqueue_struct *ocmem_eviction_wq;

static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX];

struct ocmem_rdm_work {
	int id;
	struct ocmem_map_list *list;
	struct ocmem_handle *handle;
	int direction;
	struct work_struct work;
};

/* OCMEM Operational modes */
enum ocmem_client_modes {
	OCMEM_PERFORMANCE = 1,
	OCMEM_PASSIVE,
	OCMEM_LOW_POWER,
	OCMEM_MODE_MAX = OCMEM_LOW_POWER
};

/* OCMEM Addressing modes */
enum ocmem_interconnects {
	OCMEM_BLOCKED = 0,
	OCMEM_PORT = 1,
	OCMEM_OCMEMNOC = 2,
	OCMEM_SYSNOC = 3,
};

enum ocmem_tz_client {
	TZ_UNUSED = 0x0,
	TZ_GRAPHICS,
	TZ_VIDEO,
	TZ_LP_AUDIO,
	TZ_SENSORS,
	TZ_OTHER_OS,
	TZ_DEBUG,
};

/**
 * Primary OCMEM Arbitration Table
 **/
struct ocmem_table {
	int client_id;
	int priority;
	int mode;
	int hw_interconnect;
	int tz_id;
} ocmem_client_table[OCMEM_CLIENT_MAX] = {
	{OCMEM_GRAPHICS, PRIO_GFX, OCMEM_PERFORMANCE, OCMEM_PORT,
							TZ_GRAPHICS},
	{OCMEM_VIDEO, PRIO_VIDEO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC,
							TZ_VIDEO},
	{OCMEM_CAMERA, NO_PRIO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC,
							TZ_UNUSED},
	{OCMEM_HP_AUDIO, PRIO_HP_AUDIO, OCMEM_PASSIVE, OCMEM_BLOCKED,
							TZ_UNUSED},
	{OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED,
							TZ_UNUSED},
	{OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, OCMEM_SYSNOC,
							TZ_LP_AUDIO},
	{OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC,
							TZ_SENSORS},
	{OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC,
							TZ_OTHER_OS},
};

static struct rb_root sched_tree;
static struct mutex sched_mutex;
static struct mutex allocation_mutex;
static struct mutex free_mutex;

/* A region represents a contiguous interval in OCMEM address space */
struct ocmem_region {
	/* Chain in Interval Tree */
	struct rb_node region_rb;
	/* Hash map of requests */
	struct idr region_idr;
	/* Chain in eviction list */
	struct list_head eviction_list;
	unsigned long r_start;
	unsigned long r_end;
	unsigned long r_sz;
	/* Highest priority of all requests served by this region */
	int max_prio;
};

/* Is OCMEM tightly coupled to the client? */
static inline int is_tcm(int id)
{
	if (ocmem_client_table[id].hw_interconnect == OCMEM_PORT ||
		ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC)
		return 1;
	else
		return 0;
}

static inline int is_iface_access(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC ? 1 : 0;
}

static inline int is_remapped_access(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_SYSNOC ? 1 : 0;
}

static inline int is_blocked(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_BLOCKED ? 1 : 0;
}

inline struct ocmem_buf *handle_to_buffer(struct ocmem_handle *handle)
{
	if (handle)
		return &handle->buffer;
	else
		return NULL;
}

inline struct ocmem_handle *buffer_to_handle(struct ocmem_buf *buffer)
{
	if (buffer)
		return container_of(buffer, struct ocmem_handle, buffer);
	else
		return NULL;
}

inline struct ocmem_req *handle_to_req(struct ocmem_handle *handle)
{
	if (handle)
		return handle->req;
	else
		return NULL;
}

inline struct ocmem_handle *req_to_handle(struct ocmem_req *req)
{
	if (req && req->buffer)
		return container_of(req->buffer, struct ocmem_handle, buffer);
	else
		return NULL;
}

/* Simple wrappers which will have debug features added later */
inline int ocmem_read(void *at)
{
	return readl_relaxed(at);
}

inline int ocmem_write(unsigned long val, void *at)
{
	writel_relaxed(val, at);
	return 0;
}

inline int get_mode(int id)
{
	if (!check_id(id))
		return MODE_NOT_SET;
	else
		return ocmem_client_table[id].mode == OCMEM_PERFORMANCE ?
						WIDE_MODE : THIN_MODE;
}

inline int get_tz_id(int id)
{
	if (!check_id(id))
		return TZ_UNUSED;
	else
		return ocmem_client_table[id].tz_id;
}

/* Returns the address that can be used by a device core to access OCMEM */
static unsigned long device_address(int id, unsigned long addr)
{
	int hw_interconnect = ocmem_client_table[id].hw_interconnect;
	unsigned long ret_addr = 0x0;

	switch (hw_interconnect) {
	case OCMEM_PORT:
	case OCMEM_OCMEMNOC:
		ret_addr = phys_to_offset(addr);
		break;
	case OCMEM_SYSNOC:
		ret_addr = addr;
		break;
	case OCMEM_BLOCKED:
		ret_addr = 0x0;
		break;
	}
	return ret_addr;
}

/* Returns the address as viewed by the core */
static unsigned long core_address(int id, unsigned long addr)
{
	int hw_interconnect = ocmem_client_table[id].hw_interconnect;
	unsigned long ret_addr = 0x0;

	switch (hw_interconnect) {
	case OCMEM_PORT:
	case OCMEM_OCMEMNOC:
		ret_addr = offset_to_phys(addr);
		break;
	case OCMEM_SYSNOC:
		ret_addr = addr;
		break;
	case OCMEM_BLOCKED:
		ret_addr = 0x0;
		break;
	}
	return ret_addr;
}

static inline struct ocmem_zone *zone_of(struct ocmem_req *req)
{
	int owner;
	if (!req)
		return NULL;
	owner = req->owner;
	return get_zone(owner);
}

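/*
 * Regions live in sched_tree, an rb-tree ordered by region start address;
 * insert_region() and find_region() below walk it like an interval search,
 * comparing an address against each region's [r_start, r_end) range.
 */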
static int insert_region(struct ocmem_region *region)
{

	struct rb_root *root = &sched_tree;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ocmem_region *tmp = NULL;
	unsigned long addr = region->r_start;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct ocmem_region, region_rb);

		if (tmp->r_end > addr) {
			if (tmp->r_start <= addr)
				break;
			p = &(*p)->rb_left;
		} else if (tmp->r_end <= addr)
			p = &(*p)->rb_right;
	}
	rb_link_node(&region->region_rb, parent, p);
	rb_insert_color(&region->region_rb, root);
	return 0;
}

static int remove_region(struct ocmem_region *region)
{
	struct rb_root *root = &sched_tree;
	rb_erase(&region->region_rb, root);
	return 0;
}

static struct ocmem_req *ocmem_create_req(void)
{
	struct ocmem_req *p = NULL;

	p = kzalloc(sizeof(struct ocmem_req), GFP_KERNEL);
	if (!p)
		return NULL;

	INIT_LIST_HEAD(&p->zone_list);
	INIT_LIST_HEAD(&p->sched_list);
	init_rwsem(&p->rw_sem);
	SET_STATE(p, R_FREE);
	pr_debug("request %p created\n", p);
	return p;
}

static int ocmem_destroy_req(struct ocmem_req *req)
{
	kfree(req);
	return 0;
}

static struct ocmem_region *create_region(void)
{
	struct ocmem_region *p = NULL;

	p = kzalloc(sizeof(struct ocmem_region), GFP_KERNEL);
	if (!p)
		return NULL;
	idr_init(&p->region_idr);
	INIT_LIST_HEAD(&p->eviction_list);
	p->r_start = p->r_end = p->r_sz = 0x0;
	p->max_prio = NO_PRIO;
	return p;
}

static int destroy_region(struct ocmem_region *region)
{
	kfree(region);
	return 0;
}

static int attach_req(struct ocmem_region *region, struct ocmem_req *req)
{
	int ret, id;

	while (1) {
		if (idr_pre_get(&region->region_idr, GFP_KERNEL) == 0)
			return -ENOMEM;

		ret = idr_get_new_above(&region->region_idr, req, 1, &id);

		if (ret != -EAGAIN)
			break;
	}

	if (!ret) {
		req->req_id = id;
		pr_debug("ocmem: request %p(id:%d) attached to region %p\n",
				req, id, region);
		return 0;
	}
	return -EINVAL;
}

static int detach_req(struct ocmem_region *region, struct ocmem_req *req)
{
	idr_remove(&region->region_idr, req->req_id);
	return 0;
}

static int populate_region(struct ocmem_region *region, struct ocmem_req *req)
{
	region->r_start = req->req_start;
	region->r_end = req->req_end;
	region->r_sz = req->req_end - req->req_start + 1;
	return 0;
}

static int region_req_count(int id, void *ptr, void *data)
{
	int *count = data;
	*count = *count + 1;
	return 0;
}

static int req_count(struct ocmem_region *region)
{
	int count = 0;
	idr_for_each(&region->region_idr, region_req_count, &count);
	return count;
}

static int compute_max_prio(int id, void *ptr, void *data)
{
	int *max = data;
	struct ocmem_req *req = ptr;

	if (req->prio > *max)
		*max = req->prio;
	return 0;
}

static int update_region_prio(struct ocmem_region *region)
{
	int max_prio;
	if (req_count(region) != 0) {
		idr_for_each(&region->region_idr, compute_max_prio, &max_prio);
		region->max_prio = max_prio;
	} else {
		region->max_prio = NO_PRIO;
	}
	pr_debug("ocmem: Updating prio of region %p as %d\n",
			region, max_prio);

	return 0;
}

static struct ocmem_region *find_region(unsigned long addr)
{
	struct ocmem_region *region = NULL;
	struct rb_node *rb_node = NULL;

	rb_node = sched_tree.rb_node;

	while (rb_node) {
		struct ocmem_region *tmp_region = NULL;
		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);

		if (tmp_region->r_end > addr) {
			region = tmp_region;
			if (tmp_region->r_start <= addr)
				break;
			rb_node = rb_node->rb_left;
		} else {
			rb_node = rb_node->rb_right;
		}
	}
	return region;
}

static struct ocmem_region *find_region_intersection(unsigned long start,
					unsigned long end)
{

	struct ocmem_region *region = NULL;
	region = find_region(start);
	if (region && end <= region->r_start)
		region = NULL;
	return region;
}

static struct ocmem_region *find_region_match(unsigned long start,
					unsigned long end)
{

	struct ocmem_region *region = NULL;
	region = find_region(start);
	if (region && start == region->r_start && end == region->r_end)
		return region;
	return NULL;
}

static struct ocmem_req *find_req_match(int owner, struct ocmem_region *region)
{
	struct ocmem_req *req = NULL;

	if (!region)
		return NULL;

	req = idr_find(&region->region_idr, owner);

	return req;
}

/* Must be called with req->sem held */
static inline int is_mapped(struct ocmem_req *req)
{
	return TEST_STATE(req, R_MAPPED);
}

static inline int is_pending_shrink(struct ocmem_req *req)
{
	return TEST_STATE(req, R_MUST_SHRINK) ||
		TEST_STATE(req, R_WF_SHRINK);
}

/* Must be called with sched_mutex held */
static int __sched_unmap(struct ocmem_req *req)
{
	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	if (!is_mapped(req)) {
		pr_err("Request is not currently mapped");
		goto invalid_op_error;
	}

	/* Update the request state */
	CLEAR_STATE(req, R_MAPPED);
	SET_STATE(req, R_MUST_MAP);

	return OP_COMPLETE;

invalid_op_error:
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_map(struct ocmem_req *req)
{
	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	/* Update the request state */
	CLEAR_STATE(req, R_MUST_MAP);
	SET_STATE(req, R_MAPPED);

	return OP_COMPLETE;

invalid_op_error:
	return OP_FAIL;
}

static int do_map(struct ocmem_req *req)
{
	int rc = 0;

	down_write(&req->rw_sem);

	mutex_lock(&sched_mutex);
	rc = __sched_map(req);
	mutex_unlock(&sched_mutex);

	up_write(&req->rw_sem);

	if (rc == OP_FAIL)
		return -EINVAL;

	return 0;
}

static int do_unmap(struct ocmem_req *req)
{
	int rc = 0;

	down_write(&req->rw_sem);

	mutex_lock(&sched_mutex);
	rc = __sched_unmap(req);
	mutex_unlock(&sched_mutex);

	up_write(&req->rw_sem);

	if (rc == OP_FAIL)
		return -EINVAL;

	return 0;
}

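/*
 * process_map() brings a request online in a fixed order: core clock on,
 * optional interface clock on, TZ lock of the OCMEM range, then the map
 * itself; each failure label below unwinds the steps taken so far in
 * reverse order.
 */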
static int process_map(struct ocmem_req *req, unsigned long start,
				unsigned long end)
{
	int rc = 0;

	rc = ocmem_enable_core_clock();

	if (rc < 0)
		goto core_clock_fail;


	if (is_iface_access(req->owner)) {
		rc = ocmem_enable_iface_clock();

		if (rc < 0)
			goto iface_clock_fail;
	}

	rc = ocmem_lock(req->owner, phys_to_offset(req->req_start), req->req_sz,
			get_mode(req->owner));

	if (rc < 0) {
		pr_err("ocmem: Failed to secure request %p for %d\n", req,
				req->owner);
		goto lock_failed;
	}

	rc = do_map(req);

	if (rc < 0) {
		pr_err("ocmem: Failed to map request %p for %d\n",
				req, req->owner);
		goto process_map_fail;

	}
	pr_debug("ocmem: Mapped request %p\n", req);
	return 0;

process_map_fail:
	ocmem_unlock(req->owner, phys_to_offset(req->req_start), req->req_sz);
lock_failed:
	if (is_iface_access(req->owner))
		ocmem_disable_iface_clock();
iface_clock_fail:
	ocmem_disable_core_clock();
core_clock_fail:
	pr_err("ocmem: Failed to map ocmem request\n");
	return rc;
}

static int process_unmap(struct ocmem_req *req, unsigned long start,
				unsigned long end)
{
	int rc = 0;

	rc = do_unmap(req);

	if (rc < 0)
		goto process_unmap_fail;

	rc = ocmem_unlock(req->owner, phys_to_offset(req->req_start),
				req->req_sz);

	if (rc < 0) {
		pr_err("ocmem: Failed to un-secure request %p for %d\n", req,
				req->owner);
		goto unlock_failed;
	}

	if (is_iface_access(req->owner))
		ocmem_disable_iface_clock();
	ocmem_disable_core_clock();
	pr_debug("ocmem: Unmapped request %p\n", req);
	return 0;

unlock_failed:
process_unmap_fail:
	pr_err("ocmem: Failed to unmap ocmem request\n");
	return rc;
}

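/*
 * __sched_grow() tries to extend an existing allocation toward req_max: it
 * frees the current interval, re-allocates curr_sz + growth_sz from the
 * zone allocator, and re-inserts the region in the tree. If a higher
 * priority region is in the way it backs off in req_step-sized decrements
 * and may return OP_PARTIAL or OP_RESCHED instead of OP_COMPLETE.
 */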
static int __sched_grow(struct ocmem_req *req, bool can_block)
{
	unsigned long min = req->req_min;
	unsigned long max = req->req_max;
	unsigned long step = req->req_step;
	int owner = req->owner;
	unsigned long curr_sz = 0;
	unsigned long growth_sz = 0;
	unsigned long curr_start = 0;
	enum client_prio prio = req->prio;
	unsigned long alloc_addr = 0x0;
	bool retry;
	struct ocmem_region *spanned_r = NULL;
	struct ocmem_region *overlap_r = NULL;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	struct ocmem_zone *zone = get_zone(owner);
	struct ocmem_region *region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	curr_sz = matched_req->req_sz;
	curr_start = matched_req->req_start;
	growth_sz = matched_req->req_max - matched_req->req_sz;

	pr_debug("Attempting to grow req %p from %lx to %lx\n",
			req, matched_req->req_sz, matched_req->req_max);

	retry = false;

	pr_debug("ocmem: GROW: growth size %lx\n", growth_sz);

retry_next_step:

	spanned_r = NULL;
	overlap_r = NULL;

	spanned_r = find_region(zone->z_head);
	overlap_r = find_region_intersection(zone->z_head,
				zone->z_head + growth_sz);

	if (overlap_r == NULL) {
		/* no conflicting regions, schedule this region */
		zone->z_ops->free(zone, curr_start, curr_sz);
		alloc_addr = zone->z_ops->allocate(zone, curr_sz + growth_sz);

		if (alloc_addr < 0) {
			pr_err("ocmem: zone allocation operation failed\n");
			goto internal_error;
		}

		curr_sz += growth_sz;
		/* Detach the region from the interval tree */
		/* This is to guarantee that any change in size
		 * causes the tree to be rebalanced if required */

		detach_req(matched_region, req);
		if (req_count(matched_region) == 0) {
			remove_region(matched_region);
			region = matched_region;
		} else {
			region = create_region();
			if (!region) {
				pr_err("ocmem: Unable to create region\n");
				goto region_error;
			}
		}

		/* update the request */
		req->req_start = alloc_addr;
		/* increment the size to reflect new length */
		req->req_sz = curr_sz;
		req->req_end = alloc_addr + req->req_sz - 1;

		/* update request state */
		CLEAR_STATE(req, R_MUST_GROW);
		SET_STATE(req, R_ALLOCATED);
		SET_STATE(req, R_MUST_MAP);
		req->op = SCHED_MAP;

		/* update the region with new req */
		attach_req(region, req);
		populate_region(region, req);
		update_region_prio(region);

		/* update the tree with new region */
		if (insert_region(region)) {
			pr_err("ocmem: Failed to insert the region\n");
			goto region_error;
		}

		if (retry) {
			SET_STATE(req, R_MUST_GROW);
			SET_STATE(req, R_PENDING);
			req->op = SCHED_GROW;
			return OP_PARTIAL;
		}
	} else if (spanned_r != NULL && overlap_r != NULL) {
		/* resolve conflicting regions based on priority */
		if (overlap_r->max_prio < prio) {
			/* Growth cannot be triggered unless a previous
			 * client of lower priority was evicted */
			pr_err("ocmem: Invalid growth scheduled\n");
			/* This is serious enough to fail */
			BUG();
			return OP_FAIL;
		} else if (overlap_r->max_prio > prio) {
			if (min == max) {
				/* Cannot grow at this time, try later */
				SET_STATE(req, R_PENDING);
				SET_STATE(req, R_MUST_GROW);
				return OP_RESCHED;
			} else {
				/* Try to grow in steps */
				growth_sz -= step;
				/* We are OOM at this point so need to retry */
				if (growth_sz <= curr_sz) {
					SET_STATE(req, R_PENDING);
					SET_STATE(req, R_MUST_GROW);
					return OP_RESCHED;
				}
				retry = true;
				pr_debug("ocmem: Attempting with reduced size %lx\n",
						growth_sz);
				goto retry_next_step;
			}
		} else {
			pr_err("ocmem: grow: New Region %p Existing %p\n",
					matched_region, overlap_r);
			pr_err("ocmem: Undetermined behavior\n");
			/* This is serious enough to fail */
			BUG();
		}
	} else if (spanned_r == NULL && overlap_r != NULL) {
		goto err_not_supported;
	}

	return OP_COMPLETE;

err_not_supported:
	pr_err("ocmem: Scheduled unsupported operation\n");
	return OP_FAIL;
region_error:
	zone->z_ops->free(zone, alloc_addr, curr_sz);
	detach_req(region, req);
	update_region_prio(region);
	/* req is going to be destroyed by the caller anyways */
internal_error:
	destroy_region(region);
invalid_op_error:
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_free(struct ocmem_req *req)
{
	int owner = req->owner;
	int ret = 0;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	struct ocmem_zone *zone = get_zone(owner);

	BUG_ON(!zone);

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req)
		goto invalid_op_error;
	if (matched_req != req)
		goto invalid_op_error;

	ret = zone->z_ops->free(zone,
		matched_req->req_start, matched_req->req_sz);

	if (ret < 0)
		goto err_op_fail;

	detach_req(matched_region, matched_req);
	update_region_prio(matched_region);
	if (req_count(matched_region) == 0) {
		remove_region(matched_region);
		destroy_region(matched_region);
	}

	/* Update the request */
	req->req_start = 0x0;
	req->req_sz = 0x0;
	req->req_end = 0x0;
	SET_STATE(req, R_FREE);
	return OP_COMPLETE;
invalid_op_error:
	pr_err("ocmem: free: Failed to find matching region\n");
err_op_fail:
	pr_err("ocmem: free: Failed\n");
	return OP_FAIL;
}

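/*
 * __sched_shrink() gives back part of an allocation: the current interval
 * is released to the zone allocator, a smaller interval of new_sz bytes is
 * allocated in its place, and the request is flagged R_MUST_GROW and
 * R_MUST_MAP so it can be restored and re-mapped later.
 */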
/* Must be called with sched_mutex held */
static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz)
{
	int owner = req->owner;
	int ret = 0;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;
	struct ocmem_region *region = NULL;
	unsigned long alloc_addr = 0x0;

	struct ocmem_zone *zone = get_zone(owner);

	BUG_ON(!zone);

	/* The shrink should not be called for zero size */
	BUG_ON(new_sz == 0);

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req)
		goto invalid_op_error;
	if (matched_req != req)
		goto invalid_op_error;

	ret = zone->z_ops->free(zone,
		matched_req->req_start, matched_req->req_sz);

	if (ret < 0) {
		pr_err("Zone Allocation operation failed\n");
		goto internal_error;
	}

	alloc_addr = zone->z_ops->allocate(zone, new_sz);

	if (alloc_addr < 0) {
		pr_err("Zone Allocation operation failed\n");
		goto internal_error;
	}

	/* Detach the region from the interval tree */
	/* This is to guarantee that the change in size
	 * causes the tree to be rebalanced if required */

	detach_req(matched_region, req);
	if (req_count(matched_region) == 0) {
		remove_region(matched_region);
		region = matched_region;
	} else {
		region = create_region();
		if (!region) {
			pr_err("ocmem: Unable to create region\n");
			goto internal_error;
		}
	}
	/* update the request */
	req->req_start = alloc_addr;
	req->req_sz = new_sz;
	req->req_end = alloc_addr + req->req_sz;

	if (req_count(region) == 0) {
		remove_region(matched_region);
		destroy_region(matched_region);
	}

	/* update request state */
	SET_STATE(req, R_MUST_GROW);
	SET_STATE(req, R_MUST_MAP);
	req->op = SCHED_MAP;

	/* attach the request to the region */
	attach_req(region, req);
	populate_region(region, req);
	update_region_prio(region);

	/* update the tree with new region */
	if (insert_region(region)) {
		pr_err("ocmem: Failed to insert the region\n");
		zone->z_ops->free(zone, alloc_addr, new_sz);
		detach_req(region, req);
		update_region_prio(region);
		/* req will be destroyed by the caller */
		goto region_error;
	}
	return OP_COMPLETE;

region_error:
	destroy_region(region);
internal_error:
	pr_err("ocmem: shrink: Failed\n");
	return OP_FAIL;
invalid_op_error:
	pr_err("ocmem: shrink: Failed to find matching region\n");
	return OP_FAIL;
}

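/*
 * __sched_allocate() services a fresh allocation of up to req_max bytes:
 * it carves space from the owner's zone, records the interval in a new
 * region, and on contention either shrinks the ask in req_step decrements,
 * defers the request (OP_RESCHED), or asks the caller to evict the
 * conflicting lower-priority region (OP_EVICT).
 */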
/* Must be called with sched_mutex held */
static int __sched_allocate(struct ocmem_req *req, bool can_block,
				bool can_wait)
{
	unsigned long min = req->req_min;
	unsigned long max = req->req_max;
	unsigned long step = req->req_step;
	int owner = req->owner;
	unsigned long sz = max;
	enum client_prio prio = req->prio;
	unsigned long alloc_addr = 0x0;
	bool retry;

	struct ocmem_region *spanned_r = NULL;
	struct ocmem_region *overlap_r = NULL;

	struct ocmem_zone *zone = get_zone(owner);
	struct ocmem_region *region = NULL;

	BUG_ON(!zone);

	if (min > (zone->z_end - zone->z_start)) {
		pr_err("ocmem: requested minimum size exceeds quota\n");
		goto invalid_op_error;
	}

	if (max > (zone->z_end - zone->z_start)) {
		pr_err("ocmem: requested maximum size exceeds quota\n");
		goto invalid_op_error;
	}

	if (min > zone->z_free) {
		pr_err("ocmem: out of memory for zone %d\n", owner);
		goto invalid_op_error;
	}

	region = create_region();

	if (!region) {
		pr_err("ocmem: Unable to create region\n");
		goto invalid_op_error;
	}

	retry = false;

	pr_debug("ocmem: do_allocate: %s request %p size %lx\n",
					get_name(owner), req, sz);

retry_next_step:

	spanned_r = NULL;
	overlap_r = NULL;

	spanned_r = find_region(zone->z_head);
	overlap_r = find_region_intersection(zone->z_head, zone->z_head + sz);

	if (overlap_r == NULL) {
		/* no conflicting regions, schedule this region */
		alloc_addr = zone->z_ops->allocate(zone, sz);

		if (alloc_addr < 0) {
			pr_err("Zone Allocation operation failed\n");
			goto internal_error;
		}

		/* update the request */
		req->req_start = alloc_addr;
		req->req_end = alloc_addr + sz - 1;
		req->req_sz = sz;
		req->zone = zone;

		/* update request state */
		CLEAR_STATE(req, R_FREE);
		CLEAR_STATE(req, R_PENDING);
		SET_STATE(req, R_ALLOCATED);
		SET_STATE(req, R_MUST_MAP);
		req->op = SCHED_NOP;

		/* attach the request to the region */
		attach_req(region, req);
		populate_region(region, req);
		update_region_prio(region);

		/* update the tree with new region */
		if (insert_region(region)) {
			pr_err("ocmem: Failed to insert the region\n");
			zone->z_ops->free(zone, alloc_addr, sz);
			detach_req(region, req);
			update_region_prio(region);
			/* req will be destroyed by the caller */
			goto internal_error;
		}

		if (retry) {
			SET_STATE(req, R_MUST_GROW);
			SET_STATE(req, R_PENDING);
			req->op = SCHED_GROW;
			return OP_PARTIAL;
		}
	} else if (spanned_r != NULL && overlap_r != NULL) {
		/* resolve conflicting regions based on priority */
		if (overlap_r->max_prio < prio) {
			if (min == max) {
				req->req_start = zone->z_head;
				req->req_end = zone->z_head + sz - 1;
				req->req_sz = 0x0;
				req->edata = NULL;
				goto trigger_eviction;
			} else {
				/* Try to allocate at least 'min' immediately */
				sz -= step;
				if (sz < min)
					goto err_out_of_mem;
				retry = true;
				pr_debug("ocmem: Attempting with reduced size %lx\n",
						sz);
				goto retry_next_step;
			}
		} else if (overlap_r->max_prio > prio) {
			if (can_block == true) {
				SET_STATE(req, R_PENDING);
				SET_STATE(req, R_MUST_GROW);
				return OP_RESCHED;
			} else {
				if (min == max) {
					pr_err("Cannot allocate %lx synchronously\n",
							sz);
					goto err_out_of_mem;
				} else {
					sz -= step;
					if (sz < min)
						goto err_out_of_mem;
					retry = true;
					pr_debug("ocmem: Attempting reduced size %lx\n",
							sz);
					goto retry_next_step;
				}
			}
		} else {
			pr_err("ocmem: Undetermined behavior\n");
			pr_err("ocmem: New Region %p Existing %p\n", region,
					overlap_r);
			/* This is serious enough to fail */
			BUG();
		}
	} else if (spanned_r == NULL && overlap_r != NULL)
		goto err_not_supported;

	return OP_COMPLETE;

trigger_eviction:
	pr_debug("Trigger eviction of region %p\n", overlap_r);
	destroy_region(region);
	return OP_EVICT;

err_not_supported:
	pr_err("ocmem: Scheduled unsupported operation\n");
	return OP_FAIL;

err_out_of_mem:
	pr_err("ocmem: Out of memory during allocation\n");
internal_error:
	destroy_region(region);
invalid_op_error:
	return OP_FAIL;
}

/* Remove the request from eviction lists */
static void cancel_restore(struct ocmem_req *e_handle,
				struct ocmem_req *req)
{
	struct ocmem_eviction_data *edata = e_handle->edata;

	if (!edata || !req)
		return;

	if (list_empty(&edata->req_list))
		return;

	list_del_init(&req->eviction_list);
	req->e_handle = NULL;

	return;
}

static int sched_enqueue(struct ocmem_req *priv)
{
	struct ocmem_req *next = NULL;
	mutex_lock(&sched_queue_mutex);
	SET_STATE(priv, R_ENQUEUED);
	list_add_tail(&priv->sched_list, &sched_queue[priv->owner]);
	pr_debug("enqueued req %p\n", priv);
	list_for_each_entry(next, &sched_queue[priv->owner], sched_list) {
		pr_debug("pending request %p for client %s\n", next,
				get_name(next->owner));
	}
	mutex_unlock(&sched_queue_mutex);
	return 0;
}

static void sched_dequeue(struct ocmem_req *victim_req)
{
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;
	int id;

	if (!victim_req)
		return;

	id = victim_req->owner;

	mutex_lock(&sched_queue_mutex);

	if (list_empty(&sched_queue[id]))
		goto dequeue_done;

	list_for_each_entry_safe(req, next, &sched_queue[id], sched_list)
	{
		if (req == victim_req) {
			pr_debug("ocmem: Cancelling pending request %p for %s\n",
					req, get_name(req->owner));
			list_del_init(&victim_req->sched_list);
			CLEAR_STATE(victim_req, R_ENQUEUED);
			break;
		}
	}
dequeue_done:
	mutex_unlock(&sched_queue_mutex);
	return;
}

static struct ocmem_req *ocmem_fetch_req(void)
{
	int i;
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;

	mutex_lock(&sched_queue_mutex);
	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
		if (list_empty(&sched_queue[i]))
			continue;
		list_for_each_entry_safe(req, next, &sched_queue[i], sched_list)
		{
			if (req) {
				pr_debug("ocmem: Fetched pending request %p\n",
						req);
				list_del(&req->sched_list);
				CLEAR_STATE(req, R_ENQUEUED);
				break;
			}
		}
	}
	mutex_unlock(&sched_queue_mutex);
	return req;
}


unsigned long process_quota(int id)
{
	struct ocmem_zone *zone = NULL;

	if (is_blocked(id))
		return 0;

	zone = get_zone(id);

	if (zone && zone->z_pool)
		return zone->z_end - zone->z_start;
	else
		return 0;
}

static int do_grow(struct ocmem_req *req)
{
	struct ocmem_buf *buffer = NULL;
	bool can_block = true;
	int rc = 0;

	down_write(&req->rw_sem);
	buffer = req->buffer;

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_grow(req, can_block);
	mutex_unlock(&sched_mutex);

	if (rc == OP_FAIL)
		goto err_op_fail;

	if (rc == OP_RESCHED) {
		pr_debug("ocmem: Enqueue this allocation");
		sched_enqueue(req);
	}

	else if (rc == OP_COMPLETE || rc == OP_PARTIAL) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_op_fail:
	up_write(&req->rw_sem);
	return -EINVAL;
}

static int process_grow(struct ocmem_req *req)
{
	int rc = 0;
	unsigned long offset = 0;

	/* Attempt to grow the region */
	rc = do_grow(req);

	if (rc < 0)
		return -EINVAL;

	rc = process_map(req, req->req_start, req->req_end);
	if (rc < 0)
		return -EINVAL;

	offset = phys_to_offset(req->req_start);

	rc = ocmem_memory_on(req->owner, offset, req->req_sz);

	if (rc < 0) {
		pr_err("Failed to switch ON memory macros\n");
		goto power_ctl_error;
	}

	/* Notify the client about the buffer growth */
	rc = dispatch_notification(req->owner, OCMEM_ALLOC_GROW, req->buffer);
	if (rc < 0) {
		pr_err("No notifier callback to cater for req %p event: %d\n",
				req, OCMEM_ALLOC_GROW);
		BUG();
	}
	return 0;
power_ctl_error:
	return -EINVAL;
}

static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
{

	int rc = 0;
	struct ocmem_buf *buffer = NULL;

	down_write(&req->rw_sem);
	buffer = req->buffer;

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_shrink(req, shrink_size);
	mutex_unlock(&sched_mutex);

	if (rc == OP_FAIL)
		goto err_op_fail;

	else if (rc == OP_COMPLETE) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_op_fail:
	up_write(&req->rw_sem);
	return -EINVAL;
}

static void ocmem_sched_wk_func(struct work_struct *work);
DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func);

static int ocmem_schedule_pending(void)
{

	bool need_sched = false;
	int i = 0;

	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
		if (!list_empty(&sched_queue[i])) {
			need_sched = true;
			break;
		}
	}

	if (need_sched == true) {
		cancel_delayed_work(&ocmem_sched_thread);
		schedule_delayed_work(&ocmem_sched_thread,
					msecs_to_jiffies(SCHED_DELAY));
		pr_debug("ocmem: Scheduled delayed work\n");
	}
	return 0;
}

static int do_free(struct ocmem_req *req)
{
	int rc = 0;
	struct ocmem_buf *buffer = req->buffer;

	down_write(&req->rw_sem);

	if (is_mapped(req)) {
		pr_err("ocmem: Buffer needs to be unmapped before free\n");
		goto err_free_fail;
	}

	pr_debug("ocmem: do_free: client %s req %p\n", get_name(req->owner),
				req);
	/* Grab the sched mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_free(req);
	mutex_unlock(&sched_mutex);

	switch (rc) {

	case OP_COMPLETE:
		buffer->addr = 0x0;
		buffer->len = 0x0;
		break;
	case OP_FAIL:
	default:
		goto err_free_fail;
		break;
	}

	up_write(&req->rw_sem);
	return 0;
err_free_fail:
	up_write(&req->rw_sem);
	pr_err("ocmem: freeing req %p failed\n", req);
	return -EINVAL;
}

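/*
 * process_free() is the top-level free path: it cancels any pending
 * restore or queued operation for the request, unmaps and releases the
 * allocation if needed, powers the backing memory macros off, and finally
 * destroys the request. Callers get -EAGAIN while a shrink is still being
 * acknowledged by the client.
 */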
int process_free(int id, struct ocmem_handle *handle)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	unsigned long offset = 0;
	int rc = 0;

	mutex_lock(&free_mutex);

	if (is_blocked(id)) {
		pr_err("Client %d cannot request free\n", id);
		goto free_invalid;
	}

	req = handle_to_req(handle);
	buffer = handle_to_buffer(handle);

	if (!req) {
		pr_err("ocmem: No valid request to free\n");
		goto free_invalid;
	}

	if (req->req_start != core_address(id, buffer->addr)) {
		pr_err("Invalid buffer handle passed for free\n");
		goto free_invalid;
	}

	if (req->edata != NULL) {
		pr_err("ocmem: Request %p(%2lx) yet to process eviction %p\n",
					req, req->state, req->edata);
		goto free_invalid;
	}

	if (is_pending_shrink(req)) {
		pr_err("ocmem: Request %p(%2lx) yet to process eviction\n",
					req, req->state);
		goto pending_shrink;
	}

	/* Remove the request from any restore lists */
	if (req->e_handle)
		cancel_restore(req->e_handle, req);

	/* Remove the request from any pending operations */
	if (TEST_STATE(req, R_ENQUEUED)) {
		mutex_lock(&sched_mutex);
		sched_dequeue(req);
		mutex_unlock(&sched_mutex);
	}

	if (TEST_STATE(req, R_MAPPED)) {
		/* unmap the interval and clear the memory */
		rc = process_unmap(req, req->req_start, req->req_end);

		if (rc < 0) {
			pr_err("ocmem: Failed to unmap %p\n", req);
			goto free_fail;
		}

		rc = do_free(req);
		if (rc < 0) {
			pr_err("ocmem: Failed to free %p\n", req);
			goto free_fail;
		}
	} else
		pr_debug("request %p was already shrunk to 0\n", req);

	/* Turn off the memory */
	if (req->req_sz != 0) {

		offset = phys_to_offset(req->req_start);

		rc = ocmem_memory_off(req->owner, offset, req->req_sz);

		if (rc < 0) {
			pr_err("Failed to switch OFF memory macros\n");
			goto free_fail;
		}

	}

	if (!TEST_STATE(req, R_FREE)) {
		/* free the allocation */
		rc = do_free(req);
		if (rc < 0)
			return -EINVAL;
	}

	inc_ocmem_stat(zone_of(req), NR_FREES);

	ocmem_destroy_req(req);
	handle->req = NULL;

	ocmem_schedule_pending();
	mutex_unlock(&free_mutex);
	return 0;
free_fail:
free_invalid:
	mutex_unlock(&free_mutex);
	return -EINVAL;
pending_shrink:
	mutex_unlock(&free_mutex);
	return -EAGAIN;
}

static void ocmem_rdm_worker(struct work_struct *work)
{
	int offset = 0;
	int rc = 0;
	int event;
	struct ocmem_rdm_work *work_data = container_of(work,
				struct ocmem_rdm_work, work);
	int id = work_data->id;
	struct ocmem_map_list *list = work_data->list;
	int direction = work_data->direction;
	struct ocmem_handle *handle = work_data->handle;
	struct ocmem_req *req = handle_to_req(handle);
	struct ocmem_buf *buffer = handle_to_buffer(handle);

	down_write(&req->rw_sem);
	offset = phys_to_offset(req->req_start);
	rc = ocmem_rdm_transfer(id, list, offset, direction);
	if (work_data->direction == TO_OCMEM)
		event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
	else
		event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;
	up_write(&req->rw_sem);
	kfree(work_data);
	dispatch_notification(id, event, buffer);
}

int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle,
			struct ocmem_map_list *list, int direction)
{
	struct ocmem_rdm_work *work_data = NULL;

	down_write(&req->rw_sem);

	work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC);
	if (!work_data)
		BUG();

	work_data->handle = handle;
	work_data->list = list;
	work_data->id = req->owner;
	work_data->direction = direction;
	INIT_WORK(&work_data->work, ocmem_rdm_worker);
	up_write(&req->rw_sem);
	queue_work(ocmem_rdm_wq, &work_data->work);
	return 0;
}

int process_xfer_out(int id, struct ocmem_handle *handle,
			struct ocmem_map_list *list)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;

	if (!is_mapped(req)) {
		pr_err("Buffer is not currently mapped\n");
		goto transfer_out_error;
	}

	rc = queue_transfer(req, handle, list, TO_DDR);

	if (rc < 0) {
		pr_err("Failed to queue rdm transfer to DDR\n");
		inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
		goto transfer_out_error;
	}

	inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_DDR);
	return 0;

transfer_out_error:
	return -EINVAL;
}

int process_xfer_in(int id, struct ocmem_handle *handle,
			struct ocmem_map_list *list)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;


	if (!is_mapped(req)) {
		pr_err("Buffer is not already mapped for transfer\n");
		goto transfer_in_error;
	}

	inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_OCMEM);
	rc = queue_transfer(req, handle, list, TO_OCMEM);

	if (rc < 0) {
		pr_err("Failed to queue rdm transfer to OCMEM\n");
		inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
		goto transfer_in_error;
	}

	return 0;
transfer_in_error:
	return -EINVAL;
}

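/*
 * process_shrink() runs when a client acknowledges an OCMEM_ALLOC_SHRINK
 * notification: a size of zero releases the whole allocation, any other
 * size re-allocates the request at the smaller footprint, and the pending
 * counter of the owning eviction is decremented so the evicting client can
 * proceed once every conflicting allocation has shrunk.
 */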
int process_shrink(int id, struct ocmem_handle *handle, unsigned long size)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	struct ocmem_eviction_data *edata = NULL;
	int rc = 0;

	if (is_blocked(id)) {
		pr_err("Client %d cannot request free\n", id);
		return -EINVAL;
	}

	req = handle_to_req(handle);
	buffer = handle_to_buffer(handle);

	if (!req)
		return -EINVAL;

	mutex_lock(&free_mutex);

	if (req->req_start != core_address(id, buffer->addr)) {
		pr_err("Invalid buffer handle passed for shrink\n");
		goto shrink_fail;
	}

	if (!req->e_handle) {
		pr_err("Unable to find evicting request\n");
		goto shrink_fail;
	}

	edata = req->e_handle->edata;

	if (!edata) {
		pr_err("Unable to find eviction data\n");
		goto shrink_fail;
	}

	pr_debug("Found edata %p in request %p\n", edata, req);

	inc_ocmem_stat(zone_of(req), NR_SHRINKS);

	if (size == 0) {
		pr_debug("req %p being shrunk to zero\n", req);
		if (is_mapped(req)) {
			rc = process_unmap(req, req->req_start, req->req_end);
			if (rc < 0)
				goto shrink_fail;
		}
		rc = do_free(req);
		if (rc < 0)
			goto shrink_fail;
		SET_STATE(req, R_FREE);
	} else {
		rc = do_shrink(req, size);
		if (rc < 0)
			goto shrink_fail;
	}

	CLEAR_STATE(req, R_ALLOCATED);
	CLEAR_STATE(req, R_WF_SHRINK);
	SET_STATE(req, R_SHRUNK);

	if (atomic_dec_and_test(&edata->pending)) {
		pr_debug("ocmem: All conflicting allocations were shrunk\n");
		complete(&edata->completion);
	}

	mutex_unlock(&free_mutex);
	return 0;
shrink_fail:
	pr_err("ocmem: Failed to shrink request %p of %s\n",
			req, get_name(req->owner));
	mutex_unlock(&free_mutex);
	return -EINVAL;
}

int process_xfer(int id, struct ocmem_handle *handle,
			struct ocmem_map_list *list, int direction)
{
	int rc = 0;

	if (is_tcm(id)) {
		WARN(1, "Mapping operation is invalid for client\n");
		return -EINVAL;
	}

	if (direction == TO_DDR)
		rc = process_xfer_out(id, handle, list);
	else if (direction == TO_OCMEM)
		rc = process_xfer_in(id, handle, list);
	return rc;
}

static struct ocmem_eviction_data *init_eviction(int id)
{
	struct ocmem_eviction_data *edata = NULL;
	int prio = ocmem_client_table[id].priority;

	edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC);

	if (!edata) {
		pr_err("ocmem: Could not allocate eviction data\n");
		return NULL;
	}

	INIT_LIST_HEAD(&edata->victim_list);
	INIT_LIST_HEAD(&edata->req_list);
	edata->prio = prio;
	atomic_set(&edata->pending, 0);
	return edata;
}

static void free_eviction(struct ocmem_eviction_data *edata)
{

	if (!edata)
		return;

	if (!list_empty(&edata->req_list))
		pr_err("ocmem: Eviction data %p not empty\n", edata);

	kfree(edata);
	edata = NULL;
}

static bool is_overlapping(struct ocmem_req *new, struct ocmem_req *old)
{

	if (!new || !old)
		return false;

	pr_debug("check overlap [%lx -- %lx] on [%lx -- %lx]\n",
			new->req_start, new->req_end,
			old->req_start, old->req_end);

	if ((new->req_start < old->req_start &&
		new->req_end >= old->req_start) ||
		(new->req_start >= old->req_start &&
		new->req_start <= old->req_end &&
		new->req_end >= old->req_end)) {
		pr_debug("request %p overlaps with existing req %p\n",
				new, old);
		return true;
	}
	return false;
}

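/*
 * __evict_common() scans every region in sched_tree whose max_prio is
 * below the evicting client's priority, marks the matching lower-priority
 * requests R_MUST_SHRINK, links them on edata->req_list and returns the
 * number of allocations that still have to shrink (0 means nothing to
 * evict). Passive evictions select every lower-priority request; otherwise
 * only requests overlapping the incoming one are selected.
 */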
1815static int __evict_common(struct ocmem_eviction_data *edata,
1816 struct ocmem_req *req)
1817{
1818 struct rb_node *rb_node = NULL;
1819 struct ocmem_req *e_req = NULL;
1820 bool needs_eviction = false;
1821 int j = 0;
1822
1823 for (rb_node = rb_first(&sched_tree); rb_node;
1824 rb_node = rb_next(rb_node)) {
1825
1826 struct ocmem_region *tmp_region = NULL;
1827
1828 tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
1829
1830 if (tmp_region->max_prio < edata->prio) {
1831 for (j = edata->prio - 1; j > NO_PRIO; j--) {
1832 needs_eviction = false;
1833 e_req = find_req_match(j, tmp_region);
1834 if (!e_req)
1835 continue;
1836 if (edata->passive == true) {
1837 needs_eviction = true;
1838 } else {
1839 needs_eviction = is_overlapping(req,
1840 e_req);
1841 }
1842
1843 if (needs_eviction) {
1844 pr_debug("adding %p in region %p to eviction list\n",
1845 e_req, tmp_region);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001846 SET_STATE(e_req, R_MUST_SHRINK);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001847 list_add_tail(
1848 &e_req->eviction_list,
1849 &edata->req_list);
1850 atomic_inc(&edata->pending);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001851 e_req->e_handle = req;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001852 }
1853 }
1854 } else {
1855 pr_debug("Skipped region %p\n", tmp_region);
1856 }
1857 }
1858
1859 pr_debug("%d requests will be evicted\n", atomic_read(&edata->pending));
1860
Naveen Ramaraj89738952013-02-13 15:24:57 -08001861 return atomic_read(&edata->pending);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001862}
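
/*
 * Victim selection summary: the scheduler tree is walked region by region,
 * and only regions whose highest resident priority is below the incoming
 * priority are considered. Within such a region each lower priority level is
 * scanned for a resident request; in passive mode every one found is a
 * victim, otherwise only those whose address range overlaps the triggering
 * request. Victims are marked R_MUST_SHRINK, linked on edata->req_list and
 * counted in edata->pending, and that count is the return value callers use
 * to decide whether an eviction actually needs to be triggered.
 */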
1863
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001864static void trigger_eviction(struct ocmem_eviction_data *edata)
1865{
1866 struct ocmem_req *req = NULL;
1867 struct ocmem_req *next = NULL;
1868 struct ocmem_buf buffer;
1869
1870 if (!edata)
1871 return;
1872
1873 BUG_ON(atomic_read(&edata->pending) == 0);
1874
1875 init_completion(&edata->completion);
1876
1877 list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
1878 {
1879 if (req) {
1880 pr_debug("ocmem: Evicting request %p\n", req);
1881 buffer.addr = req->req_start;
1882 buffer.len = 0x0;
Naveen Ramaraj89738952013-02-13 15:24:57 -08001883 CLEAR_STATE(req, R_MUST_SHRINK);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001884 dispatch_notification(req->owner, OCMEM_ALLOC_SHRINK,
1885 &buffer);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001886 SET_STATE(req, R_WF_SHRINK);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001887 }
1888 }
1889 return;
1890}
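
/*
 * Shrink handshake: each victim is sent OCMEM_ALLOC_SHRINK with a zero-length
 * buffer and moved to R_WF_SHRINK; the eviction completes only after every
 * victim acknowledges by shrinking, which is what eventually signals
 * edata->completion for the waiters below. A minimal sketch of a client
 * notifier that honours the request is shown here; the handler name and
 * client id are placeholders, and ocmem_shrink() with this signature is an
 * assumption based on the public OCMEM client interface, not this file.
 *
 *	static int example_ocmem_notify(struct notifier_block *nb,
 *					unsigned long action, void *data)
 *	{
 *		struct ocmem_buf *buf = data;
 *
 *		if (action == OCMEM_ALLOC_SHRINK) {
 *			// stop using the buffer, then acknowledge the new size
 *			ocmem_shrink(example_client_id, buf, buf->len);
 *		}
 *		return NOTIFY_OK;
 *	}
 */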
1891
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001892int process_evict(int id)
1893{
1894 struct ocmem_eviction_data *edata = NULL;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001895 int rc = 0;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001896
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001897 edata = init_eviction(id);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001898
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001899 if (!edata)
1900 return -EINVAL;
1901
1902 edata->passive = true;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001903
1904 mutex_lock(&sched_mutex);
1905
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001906 rc = __evict_common(edata, NULL);
1907
Naveen Ramaraj89738952013-02-13 15:24:57 -08001908 if (rc == 0)
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001909 goto skip_eviction;
1910
1911 trigger_eviction(edata);
1912
1913 evictions[id] = edata;
1914
1915 mutex_unlock(&sched_mutex);
1916
1917 wait_for_completion(&edata->completion);
1918
1919 return 0;
1920
1921skip_eviction:
	/* nothing needed eviction; free the unused eviction data to avoid a leak */
	free_eviction(edata);
1922	evictions[id] = NULL;
1923 mutex_unlock(&sched_mutex);
1924 return 0;
1925}
1926
1927static int run_evict(struct ocmem_req *req)
1928{
1929 struct ocmem_eviction_data *edata = NULL;
1930 int rc = 0;
1931
1932 if (!req)
1933 return -EINVAL;
1934
1935 edata = init_eviction(req->owner);
1936
1937 if (!edata)
1938 return -EINVAL;
1939
1940 edata->passive = false;
1941
Naveen Ramaraj89738952013-02-13 15:24:57 -08001942 mutex_lock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001943 rc = __evict_common(edata, req);
1944
Naveen Ramaraj89738952013-02-13 15:24:57 -08001945 if (rc == 0)
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001946 goto skip_eviction;
1947
1948 trigger_eviction(edata);
1949
1950	pr_debug("ocmem: attaching eviction %p to request %p\n", edata, req);
1951 req->edata = edata;
1952
Naveen Ramaraj89738952013-02-13 15:24:57 -08001953 mutex_unlock(&free_mutex);
1954
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001955 wait_for_completion(&edata->completion);
1956
1957 pr_debug("ocmem: eviction completed successfully\n");
1958 return 0;
1959
1960skip_eviction:
1961 pr_err("ocmem: Unable to run eviction\n");
1962 free_eviction(edata);
Naveen Ramaraj89738952013-02-13 15:24:57 -08001963 req->edata = NULL;
1964 mutex_unlock(&free_mutex);
1965 return 0;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001966}
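
/*
 * run_evict() is the synchronous, targeted form of eviction used on the
 * allocation path: victims are picked against one specific request under
 * free_mutex, the shrink notifications are fired, and the caller then blocks
 * on edata->completion (with the mutex dropped) until every victim has
 * shrunk. process_evict() above is the passive form: it victimises every
 * lower-priority request on behalf of a client id and parks its eviction
 * data in evictions[id] so that a later process_restore() can undo it.
 */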
1967
1968static int __restore_common(struct ocmem_eviction_data *edata)
1969{
1970
1971 struct ocmem_req *req = NULL;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001972
1973 if (!edata)
1974 return -EINVAL;
1975
Naveen Ramaraj89738952013-02-13 15:24:57 -08001976 while (!list_empty(&edata->req_list)) {
1977 req = list_first_entry(&edata->req_list, struct ocmem_req,
1978 eviction_list);
1979 list_del_init(&req->eviction_list);
1980 pr_debug("ocmem: restoring evicted request %p\n",
1981 req);
1982 req->edata = NULL;
1983 req->e_handle = NULL;
1984 req->op = SCHED_ALLOCATE;
1985 inc_ocmem_stat(zone_of(req), NR_RESTORES);
1986 sched_enqueue(req);
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07001987 }
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001988
1989 pr_debug("Scheduled all evicted regions\n");
1990
1991 return 0;
1992}
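
/*
 * Restore path: each evicted request is unlinked from the eviction list,
 * reset to SCHED_ALLOCATE and handed to sched_enqueue(), so re-placement in
 * OCMEM happens later through the scheduler work function (which notifies
 * the client with OCMEM_ALLOC_GROW) rather than synchronously here.
 */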
1993
1994static int sched_restore(struct ocmem_req *req)
1995{
1996
1997 int rc = 0;
1998
1999 if (!req)
2000 return -EINVAL;
2001
2002 if (!req->edata)
2003 return 0;
2004
Naveen Ramaraj89738952013-02-13 15:24:57 -08002005 mutex_lock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002006 rc = __restore_common(req->edata);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002007 mutex_unlock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002008
2009 if (rc < 0)
2010 return -EINVAL;
2011
2012 free_eviction(req->edata);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002013 req->edata = NULL;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002014 return 0;
2015}
2016
2017int process_restore(int id)
2018{
2019 struct ocmem_eviction_data *edata = evictions[id];
2020 int rc = 0;
2021
2022 if (!edata)
2023 return -EINVAL;
2024
Naveen Ramaraj89738952013-02-13 15:24:57 -08002025 mutex_lock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002026 rc = __restore_common(edata);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002027 mutex_unlock(&free_mutex);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002028
2029 if (rc < 0) {
2030 pr_err("Failed to restore evicted requests\n");
2031 return -EINVAL;
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07002032 }
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002033
2034 free_eviction(edata);
2035 evictions[id] = NULL;
2036 ocmem_schedule_pending();
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07002037 return 0;
2038}
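
/*
 * Typical pairing, as a hypothetical call sequence (the client id macro is an
 * assumption from the public OCMEM headers and error handling is omitted):
 *
 *	process_evict(OCMEM_GRAPHICS);    // shrink out lower-priority users
 *	...	high-priority use case runs	...
 *	process_restore(OCMEM_GRAPHICS);  // re-queue the evicted requests
 */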
2039
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002040static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait)
2041{
2042 int rc = 0;
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002043 int ret = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002044 struct ocmem_buf *buffer = req->buffer;
2045
2046 down_write(&req->rw_sem);
2047
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002048 mutex_lock(&allocation_mutex);
2049retry_allocate:
2050
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002051 /* Take the scheduler mutex */
2052 mutex_lock(&sched_mutex);
2053 rc = __sched_allocate(req, can_block, can_wait);
2054 mutex_unlock(&sched_mutex);
2055
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002056 if (rc == OP_EVICT) {
2057
2058 ret = run_evict(req);
2059
2060 if (ret == 0) {
2061 rc = sched_restore(req);
2062 if (rc < 0) {
2063 pr_err("Failed to restore for req %p\n", req);
2064 goto err_allocate_fail;
2065 }
2066 req->edata = NULL;
2067
2068 pr_debug("Attempting to re-allocate req %p\n", req);
2069 req->req_start = 0x0;
2070 req->req_end = 0x0;
2071 goto retry_allocate;
2072 } else {
2073 goto err_allocate_fail;
2074 }
2075 }
2076
2077 mutex_unlock(&allocation_mutex);
2078
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002079 if (rc == OP_FAIL) {
2080 inc_ocmem_stat(zone_of(req), NR_ALLOCATION_FAILS);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002081 goto err_allocate_fail;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002082 }
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002083
2084 if (rc == OP_RESCHED) {
2085 buffer->addr = 0x0;
2086 buffer->len = 0x0;
2087 pr_debug("ocmem: Enqueuing req %p\n", req);
2088 sched_enqueue(req);
2089 } else if (rc == OP_PARTIAL) {
2090 buffer->addr = device_address(req->owner, req->req_start);
2091 buffer->len = req->req_sz;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002092 inc_ocmem_stat(zone_of(req), NR_RANGE_ALLOCATIONS);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002093 pr_debug("ocmem: Enqueuing req %p\n", req);
2094 sched_enqueue(req);
2095 } else if (rc == OP_COMPLETE) {
2096 buffer->addr = device_address(req->owner, req->req_start);
2097 buffer->len = req->req_sz;
2098 }
2099
2100 up_write(&req->rw_sem);
2101 return 0;
2102err_allocate_fail:
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002103 mutex_unlock(&allocation_mutex);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002104 up_write(&req->rw_sem);
2105 return -EINVAL;
2106}
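
/*
 * Outcome handling in do_allocate(), for reference:
 *	OP_EVICT    - conflicting lower-priority requests exist; they are
 *		      shrunk via run_evict(), re-queued for later restoration
 *		      via sched_restore(), and the allocation is retried with
 *		      a cleared range, all under allocation_mutex.
 *	OP_FAIL     - hard failure, reported to the caller as -EINVAL.
 *	OP_RESCHED  - nothing fits right now; the request is enqueued and the
 *		      client sees a zero-length buffer until a later grow.
 *	OP_PARTIAL  - only part of the range fits; the partial buffer is
 *		      returned and the remainder is enqueued as a future grow.
 *	OP_COMPLETE - the full request was placed; the buffer describes it.
 */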
2107
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002108static int do_dump(struct ocmem_req *req, unsigned long addr)
2109{
2110
2111 void __iomem *req_vaddr;
2112 unsigned long offset = 0x0;
Naveen Ramaraje4cc4622012-10-29 17:28:57 -07002113 int rc = 0;
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002114
2115 down_write(&req->rw_sem);
2116
2117 offset = phys_to_offset(req->req_start);
2118
2119 req_vaddr = ocmem_vaddr + offset;
2120
2121 if (!req_vaddr)
2122 goto err_do_dump;
2123
Naveen Ramaraje4cc4622012-10-29 17:28:57 -07002124 rc = ocmem_enable_dump(req->owner, offset, req->req_sz);
2125
2126 if (rc < 0)
2127 goto err_do_dump;
2128
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002129 pr_debug("Dumping client %s buffer ocmem p: %lx (v: %p) to ddr %lx\n",
2130 get_name(req->owner), req->req_start,
2131 req_vaddr, addr);
2132
	/* req_vaddr points into an __iomem mapping, so use the io-safe copy */
2133	memcpy_fromio((void *)addr, req_vaddr, req->req_sz);
2134
Naveen Ramaraje4cc4622012-10-29 17:28:57 -07002135 rc = ocmem_disable_dump(req->owner, offset, req->req_sz);
2136
2137 if (rc < 0)
2138 pr_err("Failed to secure request %p of %s after dump\n",
2139 req, get_name(req->owner));
2140
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002141 up_write(&req->rw_sem);
2142 return 0;
2143err_do_dump:
2144 up_write(&req->rw_sem);
2145 return -EINVAL;
2146}
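
/*
 * Address arithmetic in do_dump(), with made-up numbers: for a request whose
 * allocation starts at physical address req->req_start,
 *	offset    = phys_to_offset(req->req_start)	(e.g. 0x20000)
 *	req_vaddr = ocmem_vaddr + offset
 * and req->req_sz bytes are copied from that kernel mapping to the caller's
 * DDR buffer, bracketed by ocmem_enable_dump()/ocmem_disable_dump() so the
 * region is only left accessible for the duration of the copy.
 */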
2147
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002148int process_allocate(int id, struct ocmem_handle *handle,
2149 unsigned long min, unsigned long max,
2150 unsigned long step, bool can_block, bool can_wait)
2151{
2152
2153 struct ocmem_req *req = NULL;
2154 struct ocmem_buf *buffer = NULL;
2155 int rc = 0;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002156 unsigned long offset = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002157
2158 /* sanity checks */
2159 if (is_blocked(id)) {
2160 pr_err("Client %d cannot request allocation\n", id);
2161 return -EINVAL;
2162 }
2163
2164 if (handle->req != NULL) {
2165 pr_err("Invalid handle passed in\n");
2166 return -EINVAL;
2167 }
2168
2169 buffer = handle_to_buffer(handle);
2170 BUG_ON(buffer == NULL);
2171
2172 /* prepare a request structure to represent this transaction */
2173 req = ocmem_create_req();
2174 if (!req)
2175 return -ENOMEM;
2176
2177 req->owner = id;
2178 req->req_min = min;
2179 req->req_max = max;
2180 req->req_step = step;
2181 req->prio = ocmem_client_table[id].priority;
2182 req->op = SCHED_ALLOCATE;
2183 req->buffer = buffer;
2184
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002185 inc_ocmem_stat(zone_of(req), NR_REQUESTS);
2186
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002187 rc = do_allocate(req, can_block, can_wait);
2188
2189 if (rc < 0)
2190 goto do_allocate_error;
2191
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002192 inc_ocmem_stat(zone_of(req), NR_SYNC_ALLOCATIONS);
2193
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002194 handle->req = req;
2195
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002196 if (req->req_sz != 0) {
2197
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002198 rc = process_map(req, req->req_start, req->req_end);
2199 if (rc < 0)
2200 goto map_error;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002201
2202 offset = phys_to_offset(req->req_start);
2203
2204 rc = ocmem_memory_on(req->owner, offset, req->req_sz);
2205
2206 if (rc < 0) {
2207 pr_err("Failed to switch ON memory macros\n");
2208 goto power_ctl_error;
2209 }
2210 }
2211
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002212 return 0;
2213
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002214power_ctl_error:
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002215 process_unmap(req, req->req_start, req->req_end);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002216map_error:
2217 handle->req = NULL;
2218 do_free(req);
2219do_allocate_error:
2220 ocmem_destroy_req(req);
2221 return -EINVAL;
2222}
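
/*
 * Sketch of the client-visible flow that ends up here: a public wrapper such
 * as ocmem_allocate() builds the handle, this function places the request,
 * maps it and powers on the memory macros, and the client finally receives
 * the populated ocmem_buf. The wrapper names and signatures below are
 * assumptions from the OCMEM client interface, not defined in this file.
 *
 *	// hypothetical caller, error handling omitted
 *	buf = ocmem_allocate(client_id, size);
 *	... use buf->addr and buf->len ...
 *	ocmem_free(client_id, buf);
 */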
2223
2224int process_delayed_allocate(struct ocmem_req *req)
2225{
2226
2227 struct ocmem_handle *handle = NULL;
2228 int rc = 0;
2229 int id = req->owner;
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002230 unsigned long offset = 0;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002231
2232 handle = req_to_handle(req);
2233 BUG_ON(handle == NULL);
2234
2235 rc = do_allocate(req, true, false);
2236
2237 if (rc < 0)
2238 goto do_allocate_error;
2239
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002240 /* The request can still be pending */
2241 if (TEST_STATE(req, R_PENDING))
2242 return 0;
2243
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002244 inc_ocmem_stat(zone_of(req), NR_ASYNC_ALLOCATIONS);
2245
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002246 if (req->req_sz != 0) {
2247
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002248 rc = process_map(req, req->req_start, req->req_end);
2249 if (rc < 0)
2250 goto map_error;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002251
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002252
2253 offset = phys_to_offset(req->req_start);
2254
2255 rc = ocmem_memory_on(req->owner, offset, req->req_sz);
2256
2257 if (rc < 0) {
2258 pr_err("Failed to switch ON memory macros\n");
2259 goto power_ctl_error;
2260 }
2261 }
2262
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002263 /* Notify the client about the buffer growth */
2264 rc = dispatch_notification(id, OCMEM_ALLOC_GROW, req->buffer);
2265 if (rc < 0) {
2266 pr_err("No notifier callback to cater for req %p event: %d\n",
2267 req, OCMEM_ALLOC_GROW);
2268 BUG();
2269 }
2270 return 0;
2271
Naveen Ramaraj99b07562012-05-28 20:57:09 -07002272power_ctl_error:
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002273 process_unmap(req, req->req_start, req->req_end);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002274map_error:
2275 handle->req = NULL;
2276 do_free(req);
2277do_allocate_error:
2278 ocmem_destroy_req(req);
2279 return -EINVAL;
2280}
2281
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002282int process_dump(int id, struct ocmem_handle *handle, unsigned long addr)
2283{
2284 struct ocmem_req *req = NULL;
2285 int rc = 0;
2286
2287 req = handle_to_req(handle);
2288
2289 if (!req)
2290 return -EINVAL;
2291
2292 if (!is_mapped(req)) {
2293 pr_err("Buffer is not mapped\n");
2294 goto dump_error;
2295 }
2296
2297 inc_ocmem_stat(zone_of(req), NR_DUMP_REQUESTS);
2298
2299 mutex_lock(&sched_mutex);
2300 rc = do_dump(req, addr);
2301 mutex_unlock(&sched_mutex);
2302
2303 if (rc < 0)
2304 goto dump_error;
2305
2306 inc_ocmem_stat(zone_of(req), NR_DUMP_COMPLETE);
2307 return 0;
2308
2309dump_error:
2310 pr_err("Dumping OCMEM memory failed for client %d\n", id);
2311 return -EINVAL;
2312}
2313
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002314static void ocmem_sched_wk_func(struct work_struct *work)
2315{
2316
2317 struct ocmem_buf *buffer = NULL;
2318 struct ocmem_handle *handle = NULL;
2319 struct ocmem_req *req = ocmem_fetch_req();
2320
2321 if (!req) {
2322 pr_debug("No Pending Requests found\n");
2323 return;
2324 }
2325
2326 pr_debug("ocmem: sched_wk pending req %p\n", req);
2327 handle = req_to_handle(req);
2328 buffer = handle_to_buffer(handle);
2329 BUG_ON(req->op == SCHED_NOP);
2330
2331 switch (req->op) {
2332 case SCHED_GROW:
2333 process_grow(req);
2334 break;
2335 case SCHED_ALLOCATE:
2336 process_delayed_allocate(req);
2337 break;
2338 default:
2339 pr_err("ocmem: Unknown operation encountered\n");
2340 break;
2341 }
2342 return;
2343}
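
/*
 * Deferred work: each invocation of this worker drains one request parked by
 * sched_enqueue() and either grows it or performs the delayed allocation;
 * SCHED_NOP is treated as a bug, and anything other than GROW or ALLOCATE is
 * reported as an unknown operation.
 */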
2344
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002345static int ocmem_allocations_show(struct seq_file *f, void *dummy)
2346{
2347 struct rb_node *rb_node = NULL;
2348 struct ocmem_req *req = NULL;
2349 unsigned j;
2350 mutex_lock(&sched_mutex);
2351 for (rb_node = rb_first(&sched_tree); rb_node;
2352 rb_node = rb_next(rb_node)) {
2353 struct ocmem_region *tmp_region = NULL;
2354 tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
2355 for (j = MAX_OCMEM_PRIO - 1; j > NO_PRIO; j--) {
2356 req = find_req_match(j, tmp_region);
2357 if (req) {
2358 seq_printf(f,
2359 "owner: %s 0x%lx -- 0x%lx size 0x%lx [state: %2lx]\n",
2360 get_name(req->owner),
2361 req->req_start, req->req_end,
2362 req->req_sz, req->state);
2363 }
2364 }
2365 }
2366 mutex_unlock(&sched_mutex);
2367 return 0;
2368}
2369
2370static int ocmem_allocations_open(struct inode *inode, struct file *file)
2371{
2372 return single_open(file, ocmem_allocations_show, inode->i_private);
2373}
2374
2375static const struct file_operations allocations_show_fops = {
2376 .open = ocmem_allocations_open,
2377 .read = seq_read,
2378 .llseek = seq_lseek,
2379 .release = seq_release,
2380};
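
/*
 * The "allocations" debugfs node prints one line per live request, walking
 * the region tree and, within each region, priorities from high to low.
 * Illustrative output (names and values are made up):
 *
 *	owner: graphics 0xfec00000 -- 0xfec80000 size 0x80000 [state: 404]
 *	owner: lp_audio 0xfec80000 -- 0xfeca0000 size 0x20000 [state: 404]
 */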
2381
2382int ocmem_sched_init(struct platform_device *pdev)
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002383{
2384 int i = 0;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002385 struct ocmem_plat_data *pdata = NULL;
2386 struct device *dev = &pdev->dev;
2387
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002388 sched_tree = RB_ROOT;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002389 pdata = platform_get_drvdata(pdev);
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07002390 mutex_init(&allocation_mutex);
Naveen Ramaraj89738952013-02-13 15:24:57 -08002391 mutex_init(&free_mutex);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002392 mutex_init(&sched_mutex);
2393 mutex_init(&sched_queue_mutex);
Naveen Ramaraj55ed8902012-09-26 13:18:06 -07002394 ocmem_vaddr = pdata->vbase;
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002395 for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++)
2396 INIT_LIST_HEAD(&sched_queue[i]);
2397
Naveen Ramarajcc4ec152012-05-14 09:55:29 -07002398 mutex_init(&rdm_mutex);
2399 INIT_LIST_HEAD(&rdm_queue);
2400 ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0);
2401 if (!ocmem_rdm_wq)
2402 return -ENOMEM;
2403	ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0);
2404	if (!ocmem_eviction_wq) {
		/* tear down the RDM workqueue created above before bailing out */
		destroy_workqueue(ocmem_rdm_wq);
2405		return -ENOMEM;
	}
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07002406
2407	if (!debugfs_create_file("allocations", S_IRUGO, pdata->debug_node,
2408 				NULL, &allocations_show_fops)) {
2409		dev_err(dev, "Unable to create debugfs node for scheduler\n");
		/* a failed probe should not leak the scheduler workqueues */
		destroy_workqueue(ocmem_eviction_wq);
		destroy_workqueue(ocmem_rdm_wq);
2410		return -EBUSY;
2411	}
Naveen Ramarajb9da05782012-05-07 09:07:35 -07002412 return 0;
2413}