/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <mach/ocmem_priv.h>

enum request_states {
	R_FREE = 0x0,	/* request is not allocated */
	R_PENDING,	/* request has a pending operation */
	R_ALLOCATED,	/* request has been allocated */
	R_MUST_GROW,	/* request must grow as a part of pending operation */
	R_MUST_SHRINK,	/* request must shrink as a part of pending operation */
	R_MUST_MAP,	/* request must be mapped before being used */
	R_MUST_UNMAP,	/* request must be unmapped when not being used */
	R_MAPPED,	/* request is mapped and actively used by client */
	R_UNMAPPED,	/* request is not mapped, so it's not in active use */
	R_EVICTED,	/* request is evicted and must be restored */
};

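/*
 * Request state is a bitmask: several of the states above can be set on a
 * request at the same time (for example R_ALLOCATED together with R_MUST_MAP
 * while a mapping is still outstanding). The helpers below simply set, clear
 * and test individual state bits in req->state.
 */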
#define SET_STATE(x, val) (set_bit((val), &(x)->state))
#define CLEAR_STATE(x, val) (clear_bit((val), &(x)->state))
#define TEST_STATE(x, val) (test_bit((val), &(x)->state))

enum op_res {
	OP_COMPLETE = 0x0,
	OP_RESCHED,
	OP_PARTIAL,
	OP_FAIL = ~0x0,
};

/* Represents various client priorities */
/* Note: More than one client can share a priority level */
enum client_prio {
	MIN_PRIO = 0x0,
	NO_PRIO = MIN_PRIO,
	PRIO_SENSORS = 0x1,
	PRIO_OTHER_OS = 0x1,
	PRIO_LP_AUDIO = 0x1,
	PRIO_HP_AUDIO = 0x2,
	PRIO_VOICE = 0x3,
	PRIO_GFX_GROWTH = 0x4,
	PRIO_VIDEO = 0x5,
	PRIO_GFX = 0x6,
	PRIO_OCMEM = 0x7,
	MAX_OCMEM_PRIO = PRIO_OCMEM + 1,
};

static struct list_head sched_queue[MAX_OCMEM_PRIO];
static struct mutex sched_queue_mutex;

/* The duration in msecs before a pending operation is scheduled
 * This allows an idle window between use case boundaries where various
 * hardware state changes can occur. The value will be tweaked on actual
 * hardware.
 */
#define SCHED_DELAY 10

static struct list_head rdm_queue;
static struct mutex rdm_mutex;
static struct workqueue_struct *ocmem_rdm_wq;
static struct workqueue_struct *ocmem_eviction_wq;

static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX];

struct ocmem_rdm_work {
	int id;
	struct ocmem_map_list *list;
	struct ocmem_handle *handle;
	int direction;
	struct work_struct work;
};

/* OCMEM Operational modes */
enum ocmem_client_modes {
	OCMEM_PERFORMANCE = 1,
	OCMEM_PASSIVE,
	OCMEM_LOW_POWER,
	OCMEM_MODE_MAX = OCMEM_LOW_POWER
};

/* OCMEM Addressing modes */
enum ocmem_interconnects {
	OCMEM_BLOCKED = 0,
	OCMEM_PORT = 1,
	OCMEM_OCMEMNOC = 2,
	OCMEM_SYSNOC = 3,
};

/**
 * Primary OCMEM Arbitration Table
 **/
struct ocmem_table {
	int client_id;
	int priority;
	int mode;
	int hw_interconnect;
} ocmem_client_table[OCMEM_CLIENT_MAX] = {
	{OCMEM_GRAPHICS, PRIO_GFX, OCMEM_PERFORMANCE, OCMEM_PORT},
	{OCMEM_VIDEO, PRIO_VIDEO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC},
	{OCMEM_CAMERA, NO_PRIO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC},
	{OCMEM_HP_AUDIO, PRIO_HP_AUDIO, OCMEM_PASSIVE, OCMEM_BLOCKED},
	{OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED},
	{OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, OCMEM_SYSNOC},
	{OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
	{OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
};

static struct rb_root sched_tree;
static struct mutex sched_mutex;

/* A region represents a continuous interval in OCMEM address space */
struct ocmem_region {
	/* Chain in Interval Tree */
	struct rb_node region_rb;
	/* Hash map of requests */
	struct idr region_idr;
	/* Chain in eviction list */
	struct list_head eviction_list;
	unsigned long r_start;
	unsigned long r_end;
	unsigned long r_sz;
	/* Highest priority of all requests served by this region */
	int max_prio;
};

/* Is OCMEM tightly coupled to the client? */
static inline int is_tcm(int id)
{
	if (ocmem_client_table[id].hw_interconnect == OCMEM_PORT ||
		ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC)
		return 1;
	else
		return 0;
}

static inline int is_blocked(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_BLOCKED ? 1 : 0;
}

/* Returns the address that can be used by a device core to access OCMEM */
static unsigned long device_address(int id, unsigned long addr)
{
	int hw_interconnect = ocmem_client_table[id].hw_interconnect;
	unsigned long ret_addr = 0x0;

	switch (hw_interconnect) {
	case OCMEM_PORT:
		ret_addr = phys_to_offset(addr);
		break;
	case OCMEM_OCMEMNOC:
	case OCMEM_SYSNOC:
		ret_addr = addr;
		break;
	case OCMEM_BLOCKED:
		ret_addr = 0x0;
		break;
	}
	return ret_addr;
}

/* Returns the address as viewed by the core */
static unsigned long core_address(int id, unsigned long addr)
{
	int hw_interconnect = ocmem_client_table[id].hw_interconnect;
	unsigned long ret_addr = 0x0;

	switch (hw_interconnect) {
	case OCMEM_PORT:
		ret_addr = offset_to_phys(addr);
		break;
	case OCMEM_OCMEMNOC:
	case OCMEM_SYSNOC:
		ret_addr = addr;
		break;
	case OCMEM_BLOCKED:
		ret_addr = 0x0;
		break;
	}
	return ret_addr;
}

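/*
 * Scheduler tree helpers.
 *
 * Allocated regions are kept in a red-black tree ordered by start address.
 * The descent below goes left while the new start lies below an existing
 * region and right once it lies at or beyond that region's end, so
 * non-overlapping regions end up sorted by address; the walk stops early if
 * the new start falls inside an existing region.
 */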
static int insert_region(struct ocmem_region *region)
{

	struct rb_root *root = &sched_tree;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ocmem_region *tmp = NULL;
	unsigned long addr = region->r_start;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct ocmem_region, region_rb);

		if (tmp->r_end > addr) {
			if (tmp->r_start <= addr)
				break;
			p = &(*p)->rb_left;
		} else if (tmp->r_end <= addr)
			p = &(*p)->rb_right;
	}
	rb_link_node(&region->region_rb, parent, p);
	rb_insert_color(&region->region_rb, root);
	return 0;
}

static int remove_region(struct ocmem_region *region)
{
	struct rb_root *root = &sched_tree;
	rb_erase(&region->region_rb, root);
	return 0;
}

static struct ocmem_req *ocmem_create_req(void)
{
	struct ocmem_req *p = NULL;

	p = kzalloc(sizeof(struct ocmem_req), GFP_KERNEL);
	if (!p)
		return NULL;

	INIT_LIST_HEAD(&p->zone_list);
	INIT_LIST_HEAD(&p->sched_list);
	init_rwsem(&p->rw_sem);
	SET_STATE(p, R_FREE);
	return p;
}

static int ocmem_destroy_req(struct ocmem_req *req)
{
	kfree(req);
	return 0;
}

static struct ocmem_region *create_region(void)
{
	struct ocmem_region *p = NULL;

	p = kzalloc(sizeof(struct ocmem_region), GFP_KERNEL);
	if (!p)
		return NULL;
	idr_init(&p->region_idr);
	INIT_LIST_HEAD(&p->eviction_list);
	p->r_start = p->r_end = p->r_sz = 0x0;
	p->max_prio = NO_PRIO;
	return p;
}

static int destroy_region(struct ocmem_region *region)
{
	kfree(region);
	return 0;
}

static int attach_req(struct ocmem_region *region, struct ocmem_req *req)
{
	int ret, id;

	while (1) {
		if (idr_pre_get(&region->region_idr, GFP_KERNEL) == 0)
			return -ENOMEM;

		ret = idr_get_new_above(&region->region_idr, req, 1, &id);

		if (ret != -EAGAIN)
			break;
	}

	if (!ret) {
		req->req_id = id;
		pr_debug("ocmem: request %p(id:%d) attached to region %p\n",
				req, id, region);
		return 0;
	}
	return -EINVAL;
}

static int detach_req(struct ocmem_region *region, struct ocmem_req *req)
{
	idr_remove(&region->region_idr, req->req_id);
	return 0;
}

static int populate_region(struct ocmem_region *region, struct ocmem_req *req)
{
	region->r_start = req->req_start;
	region->r_end = req->req_end;
	region->r_sz = req->req_end - req->req_start + 1;
	return 0;
}

static int region_req_count(int id, void *ptr, void *data)
{
	int *count = data;
	*count = *count + 1;
	return 0;
}

static int req_count(struct ocmem_region *region)
{
	int count = 0;
	idr_for_each(&region->region_idr, region_req_count, &count);
	return count;
}

static int compute_max_prio(int id, void *ptr, void *data)
{
	int *max = data;
	struct ocmem_req *req = ptr;

	if (req->prio > *max)
		*max = req->prio;
	return 0;
}

static int update_region_prio(struct ocmem_region *region)
{
	int max_prio = NO_PRIO;

	if (req_count(region) != 0) {
		idr_for_each(&region->region_idr, compute_max_prio, &max_prio);
		region->max_prio = max_prio;
	} else {
		region->max_prio = NO_PRIO;
	}
	pr_debug("ocmem: Updating prio of region %p as %d\n",
			region, max_prio);

	return 0;
}

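/*
 * Look up the region that contains @addr, or the lowest region that starts
 * above @addr when it falls in a hole. Callers rely on the second case when
 * probing for free space around the zone head.
 */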
static struct ocmem_region *find_region(unsigned long addr)
{
	struct ocmem_region *region = NULL;
	struct rb_node *rb_node = NULL;

	rb_node = sched_tree.rb_node;

	while (rb_node) {
		struct ocmem_region *tmp_region = NULL;
		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);

		if (tmp_region->r_end > addr) {
			region = tmp_region;
			if (tmp_region->r_start <= addr)
				break;
			rb_node = rb_node->rb_left;
		} else {
			rb_node = rb_node->rb_right;
		}
	}
	return region;
}

static struct ocmem_region *find_region_intersection(unsigned long start,
				unsigned long end)
{

	struct ocmem_region *region = NULL;
	region = find_region(start);
	if (region && end <= region->r_start)
		region = NULL;
	return region;
}

static struct ocmem_region *find_region_match(unsigned long start,
				unsigned long end)
{

	struct ocmem_region *region = NULL;
	region = find_region(start);
	if (region && start == region->r_start && end == region->r_end)
		return region;
	return NULL;
}

static struct ocmem_req *find_req_match(int owner, struct ocmem_region *region)
{
	struct ocmem_req *req = NULL;

	if (!region)
		return NULL;

	req = idr_find(&region->region_idr, owner);

	return req;
}

/* Must be called with req->sem held */
static inline int is_mapped(struct ocmem_req *req)
{
	return TEST_STATE(req, R_MAPPED);
}

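/*
 * The map/unmap scheduler hooks below only validate the request against its
 * backing region and flip the R_MAPPED/R_MUST_MAP state bits; any actual data
 * movement is handled separately through the RDM transfer path later in this
 * file.
 */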
/* Must be called with sched_mutex held */
static int __sched_unmap(struct ocmem_req *req)
{
	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	if (!is_mapped(req)) {
		pr_err("Request is not currently mapped");
		goto invalid_op_error;
	}

	/* Update the request state */
	CLEAR_STATE(req, R_MAPPED);
	SET_STATE(req, R_MUST_MAP);

	return OP_COMPLETE;

invalid_op_error:
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_map(struct ocmem_req *req)
{
	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	/* Update the request state */
	CLEAR_STATE(req, R_MUST_MAP);
	SET_STATE(req, R_MAPPED);

	return OP_COMPLETE;

invalid_op_error:
	return OP_FAIL;
}

static int do_map(struct ocmem_req *req)
{
	int rc = 0;

	down_write(&req->rw_sem);

	mutex_lock(&sched_mutex);
	rc = __sched_map(req);
	mutex_unlock(&sched_mutex);

	up_write(&req->rw_sem);

	if (rc == OP_FAIL)
		return -EINVAL;

	return 0;
}

static int do_unmap(struct ocmem_req *req)
{
	int rc = 0;

	down_write(&req->rw_sem);

	mutex_lock(&sched_mutex);
	rc = __sched_unmap(req);
	mutex_unlock(&sched_mutex);

	up_write(&req->rw_sem);

	if (rc == OP_FAIL)
		return -EINVAL;

	return 0;
}

/* process map is a wrapper where power control will be added later */
static int process_map(struct ocmem_req *req, unsigned long start,
				unsigned long end)
{
	return do_map(req);
}

/* process unmap is a wrapper where power control will be added later */
static int process_unmap(struct ocmem_req *req, unsigned long start,
				unsigned long end)
{
	return do_unmap(req);
}

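/*
 * Grow a request toward its req_max. Called from do_grow() with sched_mutex
 * held. The current backing is released and a larger block (current size plus
 * the remaining growth) is carved from the zone head. If a higher-priority
 * region is in the way, the growth is reduced by req_step and retried, or the
 * request is re-queued for later.
 */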
static int __sched_grow(struct ocmem_req *req, bool can_block)
{
	unsigned long min = req->req_min;
	unsigned long max = req->req_max;
	unsigned long step = req->req_step;
	int owner = req->owner;
	unsigned long curr_sz = 0;
	unsigned long growth_sz = 0;
	unsigned long curr_start = 0;
	enum client_prio prio = req->prio;
	long alloc_addr = 0x0;
	bool retry;
	struct ocmem_region *spanned_r = NULL;
	struct ocmem_region *overlap_r = NULL;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	struct ocmem_zone *zone = get_zone(owner);
	struct ocmem_region *region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	curr_sz = matched_req->req_sz;
	curr_start = matched_req->req_start;
	growth_sz = matched_req->req_max - matched_req->req_sz;

	pr_debug("Attempting to grow req %p from %lx to %lx\n",
			req, matched_req->req_sz, matched_req->req_max);

	retry = false;

	pr_debug("ocmem: GROW: growth size %lx\n", growth_sz);

retry_next_step:

	spanned_r = NULL;
	overlap_r = NULL;

	spanned_r = find_region(zone->z_head);
	overlap_r = find_region_intersection(zone->z_head,
				zone->z_head + growth_sz);

	if (overlap_r == NULL) {
		/* no conflicting regions, schedule this region */
		zone->z_ops->free(zone, curr_start, curr_sz);
		alloc_addr = zone->z_ops->allocate(zone, curr_sz + growth_sz);

		if (alloc_addr < 0) {
			pr_err("ocmem: zone allocation operation failed\n");
			goto internal_error;
		}

		curr_sz += growth_sz;
		/* Detach the region from the interval tree */
		/* This is to guarantee that any change in size
		 * causes the tree to be rebalanced if required */

		detach_req(matched_region, req);
		if (req_count(matched_region) == 0) {
			remove_region(matched_region);
			region = matched_region;
		} else {
			region = create_region();
			if (!region) {
				pr_err("ocmem: Unable to create region\n");
				goto region_error;
			}
		}

		/* update the request */
		req->req_start = alloc_addr;
		/* increment the size to reflect new length */
		req->req_sz = curr_sz;
		req->req_end = alloc_addr + req->req_sz - 1;

		/* update request state */
		CLEAR_STATE(req, R_MUST_GROW);
		SET_STATE(req, R_ALLOCATED);
		SET_STATE(req, R_MUST_MAP);
		req->op = SCHED_MAP;

		/* update the region with new req */
		attach_req(region, req);
		populate_region(region, req);
		update_region_prio(region);

		/* update the tree with new region */
		if (insert_region(region)) {
			pr_err("ocmem: Failed to insert the region\n");
			goto region_error;
		}

		if (retry) {
			SET_STATE(req, R_MUST_GROW);
			SET_STATE(req, R_PENDING);
			req->op = SCHED_GROW;
			return OP_PARTIAL;
		}
	} else if (spanned_r != NULL && overlap_r != NULL) {
		/* resolve conflicting regions based on priority */
		if (overlap_r->max_prio < prio) {
			/* Growth cannot be triggered unless a previous
			 * client of lower priority was evicted */
			pr_err("ocmem: Invalid growth scheduled\n");
			/* This is serious enough to fail */
			BUG();
			return OP_FAIL;
		} else if (overlap_r->max_prio > prio) {
			if (min == max) {
				/* Cannot grow at this time, try later */
				SET_STATE(req, R_PENDING);
				SET_STATE(req, R_MUST_GROW);
				return OP_RESCHED;
			} else {
				/* Try to grow in steps */
				growth_sz -= step;
				/* We are OOM at this point so need to retry */
				if (growth_sz <= curr_sz) {
					SET_STATE(req, R_PENDING);
					SET_STATE(req, R_MUST_GROW);
					return OP_RESCHED;
				}
				retry = true;
				pr_debug("ocmem: Attempting with reduced size %lx\n",
						growth_sz);
				goto retry_next_step;
			}
		} else {
			pr_err("ocmem: grow: New Region %p Existing %p\n",
					matched_region, overlap_r);
			pr_err("ocmem: Undetermined behavior\n");
			/* This is serious enough to fail */
			BUG();
		}
	} else if (spanned_r == NULL && overlap_r != NULL) {
		goto err_not_supported;
	}

	return OP_COMPLETE;

err_not_supported:
	pr_err("ocmem: Scheduled unsupported operation\n");
	return OP_FAIL;
region_error:
	zone->z_ops->free(zone, alloc_addr, curr_sz);
	detach_req(region, req);
	update_region_prio(region);
	/* req is going to be destroyed by the caller anyways */
internal_error:
	destroy_region(region);
invalid_op_error:
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_free(struct ocmem_req *req)
{
	int owner = req->owner;
	int ret = 0;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	struct ocmem_zone *zone = get_zone(owner);

	BUG_ON(!zone);

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req)
		goto invalid_op_error;
	if (matched_req != req)
		goto invalid_op_error;

	ret = zone->z_ops->free(zone,
		matched_req->req_start, matched_req->req_sz);

	if (ret < 0)
		goto err_op_fail;

	detach_req(matched_region, matched_req);
	update_region_prio(matched_region);
	if (req_count(matched_region) == 0) {
		remove_region(matched_region);
		destroy_region(matched_region);
	}

	/* Update the request */
	req->req_start = 0x0;
	req->req_sz = 0x0;
	req->req_end = 0x0;
	SET_STATE(req, R_FREE);
	return OP_COMPLETE;
invalid_op_error:
	pr_err("ocmem: free: Failed to find matching region\n");
err_op_fail:
	pr_err("ocmem: free: Failed\n");
	return OP_FAIL;
}

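/*
 * Shrink a request to new_sz, typically in response to an eviction
 * notification (see process_shrink()). The current backing is released, a
 * smaller block is allocated, and the request is flagged R_MUST_GROW and
 * R_MUST_MAP so it can be remapped now and grown back later.
 */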
/* Must be called with sched_mutex held */
static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz)
{
	int owner = req->owner;
	int ret = 0;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;
	struct ocmem_region *region = NULL;
	long alloc_addr = 0x0;

	struct ocmem_zone *zone = get_zone(owner);

	BUG_ON(!zone);

	/* The shrink should not be called for zero size */
	BUG_ON(new_sz == 0);

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req)
		goto invalid_op_error;
	if (matched_req != req)
		goto invalid_op_error;


	ret = zone->z_ops->free(zone,
		matched_req->req_start, matched_req->req_sz);

	if (ret < 0) {
		pr_err("Zone free operation failed\n");
		goto internal_error;
	}

	alloc_addr = zone->z_ops->allocate(zone, new_sz);

	if (alloc_addr < 0) {
		pr_err("Zone allocation operation failed\n");
		goto internal_error;
	}

	/* Detach the region from the interval tree */
	/* This is to guarantee that the change in size
	 * causes the tree to be rebalanced if required */

	detach_req(matched_region, req);
	if (req_count(matched_region) == 0) {
		remove_region(matched_region);
		region = matched_region;
	} else {
		region = create_region();
		if (!region) {
			pr_err("ocmem: Unable to create region\n");
			goto internal_error;
		}
	}
	/* update the request */
	req->req_start = alloc_addr;
	req->req_sz = new_sz;
	req->req_end = alloc_addr + req->req_sz - 1;

	if (req_count(region) == 0) {
		remove_region(matched_region);
		destroy_region(matched_region);
	}

	/* update request state */
	SET_STATE(req, R_MUST_GROW);
	SET_STATE(req, R_MUST_MAP);
	req->op = SCHED_MAP;

	/* attach the request to the region */
	attach_req(region, req);
	populate_region(region, req);
	update_region_prio(region);

	/* update the tree with new region */
	if (insert_region(region)) {
		pr_err("ocmem: Failed to insert the region\n");
		zone->z_ops->free(zone, alloc_addr, new_sz);
		detach_req(region, req);
		update_region_prio(region);
		/* req will be destroyed by the caller */
		goto region_error;
	}
	return OP_COMPLETE;

region_error:
	destroy_region(region);
internal_error:
	pr_err("ocmem: shrink: Failed\n");
	return OP_FAIL;
invalid_op_error:
	pr_err("ocmem: shrink: Failed to find matching region\n");
	return OP_FAIL;
}

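/*
 * Allocate OCMEM for a request, preferring req_max and backing off in
 * req_step decrements (never below req_min) when the zone head is blocked by
 * an existing region. A partially satisfied request is marked R_MUST_GROW and
 * re-queued so the scheduler can grow it later.
 */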
/* Must be called with sched_mutex held */
static int __sched_allocate(struct ocmem_req *req, bool can_block,
				bool can_wait)
{
	unsigned long min = req->req_min;
	unsigned long max = req->req_max;
	unsigned long step = req->req_step;
	int owner = req->owner;
	unsigned long sz = max;
	enum client_prio prio = req->prio;
	long alloc_addr = 0x0;
	bool retry;

	struct ocmem_region *spanned_r = NULL;
	struct ocmem_region *overlap_r = NULL;

	struct ocmem_zone *zone = get_zone(owner);
	struct ocmem_region *region = NULL;

	BUG_ON(!zone);

	if (min > (zone->z_end - zone->z_start)) {
		pr_err("ocmem: requested minimum size exceeds quota\n");
		goto invalid_op_error;
	}

	if (max > (zone->z_end - zone->z_start)) {
		pr_err("ocmem: requested maximum size exceeds quota\n");
		goto invalid_op_error;
	}

	if (min > zone->z_free) {
		pr_err("ocmem: out of memory for zone %d\n", owner);
		goto invalid_op_error;
	}

	region = create_region();

	if (!region) {
		pr_err("ocmem: Unable to create region\n");
		goto invalid_op_error;
	}

	retry = false;

	pr_debug("ocmem: ALLOCATE: request size %lx\n", sz);

retry_next_step:

	spanned_r = NULL;
	overlap_r = NULL;

	spanned_r = find_region(zone->z_head);
	overlap_r = find_region_intersection(zone->z_head, zone->z_head + sz);

	if (overlap_r == NULL) {
		/* no conflicting regions, schedule this region */
		alloc_addr = zone->z_ops->allocate(zone, sz);

		if (alloc_addr < 0) {
			pr_err("Zone allocation operation failed\n");
			goto internal_error;
		}

		/* update the request */
		req->req_start = alloc_addr;
		req->req_end = alloc_addr + sz - 1;
		req->req_sz = sz;
		req->zone = zone;

		/* update request state */
		CLEAR_STATE(req, R_FREE);
		SET_STATE(req, R_ALLOCATED);
		SET_STATE(req, R_MUST_MAP);
		req->op = SCHED_NOP;

		/* attach the request to the region */
		attach_req(region, req);
		populate_region(region, req);
		update_region_prio(region);

		/* update the tree with new region */
		if (insert_region(region)) {
			pr_err("ocmem: Failed to insert the region\n");
			zone->z_ops->free(zone, alloc_addr, sz);
			detach_req(region, req);
			update_region_prio(region);
			/* req will be destroyed by the caller */
			goto internal_error;
		}

		if (retry) {
			SET_STATE(req, R_MUST_GROW);
			SET_STATE(req, R_PENDING);
			req->op = SCHED_GROW;
			return OP_PARTIAL;
		}
	} else if (spanned_r != NULL && overlap_r != NULL) {
		/* resolve conflicting regions based on priority */
		if (overlap_r->max_prio < prio) {
			if (min == max) {
				pr_err("ocmem: Requires eviction support\n");
				goto err_not_supported;
			} else {
				/* Try to allocate at least 'min' immediately */
				sz -= step;
				if (sz < min)
					goto err_out_of_mem;
				retry = true;
				pr_debug("ocmem: Attempting with reduced size %lx\n",
						sz);
				goto retry_next_step;
			}
		} else if (overlap_r->max_prio > prio) {
			if (can_block == true) {
				SET_STATE(req, R_PENDING);
				SET_STATE(req, R_MUST_GROW);
				return OP_RESCHED;
			} else {
				if (min == max) {
					pr_err("Cannot allocate %lx synchronously\n",
							sz);
					goto err_out_of_mem;
				} else {
					sz -= step;
					if (sz < min)
						goto err_out_of_mem;
					retry = true;
					pr_debug("ocmem: Attempting reduced size %lx\n",
							sz);
					goto retry_next_step;
				}
			}
		} else {
			pr_err("ocmem: Undetermined behavior\n");
			pr_err("ocmem: New Region %p Existing %p\n", region,
					overlap_r);
			/* This is serious enough to fail */
			BUG();
		}
	} else if (spanned_r == NULL && overlap_r != NULL)
		goto err_not_supported;

	return OP_COMPLETE;

err_not_supported:
	pr_err("ocmem: Scheduled unsupported operation\n");
	return OP_FAIL;

err_out_of_mem:
	pr_err("ocmem: Out of memory during allocation\n");
internal_error:
	destroy_region(region);
invalid_op_error:
	return OP_FAIL;
}

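/*
 * Requests that could not be satisfied immediately are parked on a pending
 * queue (indexed by the owning client id) and retried by the delayed
 * scheduler worker (ocmem_sched_wk_func), which runs SCHED_DELAY msecs after
 * a free or a restore makes space available.
 */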
static int sched_enqueue(struct ocmem_req *priv)
{
	struct ocmem_req *next = NULL;
	mutex_lock(&sched_queue_mutex);
	list_add_tail(&priv->sched_list, &sched_queue[priv->owner]);
	pr_debug("enqueued req %p\n", priv);
	list_for_each_entry(next, &sched_queue[priv->owner], sched_list) {
		pr_debug("pending requests for client %p\n", next);
	}
	mutex_unlock(&sched_queue_mutex);
	return 0;
}

static struct ocmem_req *ocmem_fetch_req(void)
{
	int i;
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;

	mutex_lock(&sched_queue_mutex);
	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
		if (list_empty(&sched_queue[i]))
			continue;
		list_for_each_entry_safe(req, next, &sched_queue[i], sched_list)
		{
			if (req) {
				pr_debug("ocmem: Fetched pending request %p\n",
						req);
				list_del(&req->sched_list);
				break;
			}
		}
		/* fetch at most one request per invocation */
		break;
	}
	mutex_unlock(&sched_queue_mutex);
	return req;
}

unsigned long process_quota(int id)
{
	struct ocmem_zone *zone = NULL;

	if (is_blocked(id))
		return 0;

	zone = get_zone(id);

	if (zone && zone->z_pool)
		return zone->z_end - zone->z_start;
	else
		return 0;
}

static int do_grow(struct ocmem_req *req)
{
	struct ocmem_buf *buffer = NULL;
	bool can_block = true;
	int rc = 0;

	down_write(&req->rw_sem);
	buffer = req->buffer;

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_grow(req, can_block);
	mutex_unlock(&sched_mutex);

	if (rc == OP_FAIL)
		goto err_op_fail;

	if (rc == OP_RESCHED) {
		pr_debug("ocmem: Enqueue this allocation");
		sched_enqueue(req);
	} else if (rc == OP_COMPLETE || rc == OP_PARTIAL) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_op_fail:
	up_write(&req->rw_sem);
	return -EINVAL;
}

static int process_grow(struct ocmem_req *req)
{
	int rc = 0;

	/* Attempt to grow the region */
	rc = do_grow(req);

	if (rc < 0)
		return -EINVAL;

	/* Map the newly grown region */
	if (is_tcm(req->owner)) {
		rc = process_map(req, req->req_start, req->req_end);
		if (rc < 0)
			return -EINVAL;
	}

	/* Notify the client about the buffer growth */
	rc = dispatch_notification(req->owner, OCMEM_ALLOC_GROW, req->buffer);
	if (rc < 0) {
		pr_err("No notifier callback to cater for req %p event: %d\n",
				req, OCMEM_ALLOC_GROW);
		BUG();
	}
	return 0;
}

static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
{

	int rc = 0;
	struct ocmem_buf *buffer = NULL;

	down_write(&req->rw_sem);
	buffer = req->buffer;

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_shrink(req, shrink_size);
	mutex_unlock(&sched_mutex);

	if (rc == OP_FAIL)
		goto err_op_fail;
	else if (rc == OP_COMPLETE) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_op_fail:
	up_write(&req->rw_sem);
	return -EINVAL;
}

static void ocmem_sched_wk_func(struct work_struct *work);
DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func);

static int ocmem_schedule_pending(void)
{
	schedule_delayed_work(&ocmem_sched_thread,
				msecs_to_jiffies(SCHED_DELAY));
	return 0;
}

static int do_free(struct ocmem_req *req)
{
	int rc = 0;
	struct ocmem_buf *buffer = req->buffer;

	down_write(&req->rw_sem);

	if (is_mapped(req)) {
		pr_err("ocmem: Buffer needs to be unmapped before free\n");
		goto err_free_fail;
	}

	/* Grab the sched mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_free(req);
	mutex_unlock(&sched_mutex);

	switch (rc) {

	case OP_COMPLETE:
		buffer->addr = 0x0;
		buffer->len = 0x0;
		break;
	case OP_FAIL:
	default:
		goto err_free_fail;
		break;
	}

	up_write(&req->rw_sem);
	return 0;
err_free_fail:
	up_write(&req->rw_sem);
	pr_err("ocmem: freeing req %p failed\n", req);
	return -EINVAL;
}

int process_free(int id, struct ocmem_handle *handle)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	int rc = 0;

	if (is_blocked(id)) {
		pr_err("Client %d cannot request free\n", id);
		return -EINVAL;
	}

	req = handle_to_req(handle);
	buffer = handle_to_buffer(handle);

	if (!req)
		return -EINVAL;

	if (req->req_start != core_address(id, buffer->addr)) {
		pr_err("Invalid buffer handle passed for free\n");
		return -EINVAL;
	}

	if (is_tcm(req->owner)) {
		rc = process_unmap(req, req->req_start, req->req_end);
		if (rc < 0)
			return -EINVAL;
	}

	rc = do_free(req);

	if (rc < 0)
		return -EINVAL;

	ocmem_destroy_req(req);
	handle->req = NULL;

	ocmem_schedule_pending();
	return 0;
}

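/*
 * RDM (data mover) path: queue_transfer() packages a map list into an
 * ocmem_rdm_work item, and ocmem_rdm_worker() performs the actual transfer
 * between DDR and OCMEM before notifying the client of the map/unmap result.
 */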
static void ocmem_rdm_worker(struct work_struct *work)
{
	int offset = 0;
	int rc = 0;
	int event;
	struct ocmem_rdm_work *work_data = container_of(work,
				struct ocmem_rdm_work, work);
	int id = work_data->id;
	struct ocmem_map_list *list = work_data->list;
	int direction = work_data->direction;
	struct ocmem_handle *handle = work_data->handle;
	struct ocmem_req *req = handle_to_req(handle);
	struct ocmem_buf *buffer = handle_to_buffer(handle);

	down_write(&req->rw_sem);
	offset = phys_to_offset(req->req_start);
	rc = ocmem_rdm_transfer(id, list, offset, direction);
	if (work_data->direction == TO_OCMEM)
		event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
	else
		event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;

	up_write(&req->rw_sem);
	kfree(work_data);
	dispatch_notification(id, event, buffer);
}

int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle,
			struct ocmem_map_list *list, int direction)
{
	struct ocmem_rdm_work *work_data = NULL;

	down_write(&req->rw_sem);

	work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC);
	if (!work_data)
		BUG();

	work_data->handle = handle;
	work_data->list = list;
	work_data->id = req->owner;
	work_data->direction = direction;
	INIT_WORK(&work_data->work, ocmem_rdm_worker);
	up_write(&req->rw_sem);
	queue_work(ocmem_rdm_wq, &work_data->work);
	return 0;
}

int process_xfer_out(int id, struct ocmem_handle *handle,
			struct ocmem_map_list *list)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;

	if (!is_mapped(req)) {
		pr_err("Buffer is not currently mapped\n");
		goto transfer_out_error;
	}

	rc = process_unmap(req, req->req_start, req->req_end);
	if (rc < 0) {
		pr_err("Unmapping the buffer failed\n");
		goto transfer_out_error;
	}

	rc = queue_transfer(req, handle, list, TO_DDR);

	if (rc < 0) {
		pr_err("Failed to queue rdm transfer to DDR\n");
		goto transfer_out_error;
	}

	return 0;

transfer_out_error:
	return -EINVAL;
}

int process_xfer_in(int id, struct ocmem_handle *handle,
			struct ocmem_map_list *list)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;

	if (is_mapped(req)) {
		pr_err("Buffer is already mapped\n");
		goto transfer_in_error;
	}

	rc = process_map(req, req->req_start, req->req_end);
	if (rc < 0) {
		pr_err("Mapping the buffer failed\n");
		goto transfer_in_error;
	}

	rc = queue_transfer(req, handle, list, TO_OCMEM);

	if (rc < 0) {
		pr_err("Failed to queue rdm transfer to OCMEM\n");
		goto transfer_in_error;
	}

	return 0;
transfer_in_error:
	return -EINVAL;
}

int process_shrink(int id, struct ocmem_handle *handle, unsigned long size)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	struct ocmem_eviction_data *edata = NULL;
	int rc = 0;

	if (is_blocked(id)) {
		pr_err("Client %d cannot request shrink\n", id);
		return -EINVAL;
	}

	req = handle_to_req(handle);
	buffer = handle_to_buffer(handle);

	if (!req)
		return -EINVAL;

	if (req->req_start != core_address(id, buffer->addr)) {
		pr_err("Invalid buffer handle passed for shrink\n");
		return -EINVAL;
	}

	edata = req->edata;

	if (is_tcm(req->owner))
		do_unmap(req);

	if (size == 0) {
		pr_info("req %p being shrunk to zero\n", req);
		rc = do_free(req);
		if (rc < 0)
			return -EINVAL;
	} else {
		rc = do_shrink(req, size);
		if (rc < 0)
			return -EINVAL;
	}

	edata->pending--;
	if (edata->pending == 0) {
		pr_debug("All regions evicted\n");
		complete(&edata->completion);
	}

	return 0;
}

int process_xfer(int id, struct ocmem_handle *handle,
		struct ocmem_map_list *list, int direction)
{
	int rc = 0;

	if (is_tcm(id)) {
		WARN(1, "Mapping operation is invalid for client\n");
		return -EINVAL;
	}

	if (direction == TO_DDR)
		rc = process_xfer_out(id, handle, list);
	else if (direction == TO_OCMEM)
		rc = process_xfer_in(id, handle, list);
	return rc;
}

int ocmem_eviction_thread(struct work_struct *work)
{
	return 0;
}

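/*
 * Evict lower-priority regions on behalf of client @id: every region in the
 * scheduler tree whose priority is below the caller's is asked to shrink
 * (OCMEM_ALLOC_SHRINK notification), and the caller blocks until all of the
 * notified owners have completed their shrink.
 */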
int process_evict(int id)
{
	struct ocmem_eviction_data *edata = NULL;
	int prio = ocmem_client_table[id].priority;
	struct rb_node *rb_node = NULL;
	struct ocmem_req *req = NULL;
	struct ocmem_buf buffer;
	int j = 0;

	edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC);
	if (!edata)
		return -ENOMEM;

	INIT_LIST_HEAD(&edata->victim_list);
	INIT_LIST_HEAD(&edata->req_list);
	edata->prio = prio;
	edata->pending = 0;
	edata->passive = 1;
	evictions[id] = edata;

	mutex_lock(&sched_mutex);

	for (rb_node = rb_first(&sched_tree); rb_node;
				rb_node = rb_next(rb_node)) {
		struct ocmem_region *tmp_region = NULL;
		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
		if (tmp_region->max_prio < prio) {
			for (j = id - 1; j > NO_PRIO; j--) {
				req = find_req_match(j, tmp_region);
				if (req) {
					pr_info("adding %p to eviction list\n",
							tmp_region);
					list_add_tail(
						&tmp_region->eviction_list,
						&edata->victim_list);
					list_add_tail(
						&req->eviction_list,
						&edata->req_list);
					edata->pending++;
					req->edata = edata;
					buffer.addr = req->req_start;
					buffer.len = 0x0;
					dispatch_notification(req->owner,
						OCMEM_ALLOC_SHRINK, &buffer);
				}
			}
		} else {
			pr_info("skipping %p from eviction\n", tmp_region);
		}
	}
	mutex_unlock(&sched_mutex);
	pr_debug("Waiting for all regions to be shrunk\n");
	if (edata->pending > 0) {
		init_completion(&edata->completion);
		wait_for_completion(&edata->completion);
	}
	return 0;
}

static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait)
{
	int rc = 0;
	struct ocmem_buf *buffer = req->buffer;

	down_write(&req->rw_sem);

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_allocate(req, can_block, can_wait);
	mutex_unlock(&sched_mutex);

	if (rc == OP_FAIL)
		goto err_allocate_fail;

	if (rc == OP_RESCHED) {
		buffer->addr = 0x0;
		buffer->len = 0x0;
		pr_debug("ocmem: Enqueuing req %p\n", req);
		sched_enqueue(req);
	} else if (rc == OP_PARTIAL) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
		pr_debug("ocmem: Enqueuing req %p\n", req);
		sched_enqueue(req);
	} else if (rc == OP_COMPLETE) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_allocate_fail:
	up_write(&req->rw_sem);
	return -EINVAL;
}

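/*
 * Undo a prior process_evict() for client @id: every request that was forced
 * to shrink is re-queued as a SCHED_ALLOCATE operation so the delayed
 * scheduler worker can allocate it again once space is available.
 */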
int process_restore(int id)
{
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;
	struct ocmem_eviction_data *edata = evictions[id];

	if (!edata)
		return 0;

	list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
	{
		if (req) {
			pr_debug("ocmem: Fetched evicted request %p\n",
					req);
			list_del(&req->eviction_list);
			req->op = SCHED_ALLOCATE;
			sched_enqueue(req);
		}
	}
	kfree(edata);
	evictions[id] = NULL;
	pr_debug("Restore all evicted regions\n");
	ocmem_schedule_pending();
	return 0;
}

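/*
 * Client entry point for a new allocation: build an ocmem_req from the
 * handle, run it through the scheduler, and map the backing memory for
 * tightly coupled (TCM) clients before returning the buffer.
 */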
int process_allocate(int id, struct ocmem_handle *handle,
			unsigned long min, unsigned long max,
			unsigned long step, bool can_block, bool can_wait)
{

	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	int rc = 0;

	/* sanity checks */
	if (is_blocked(id)) {
		pr_err("Client %d cannot request allocation\n", id);
		return -EINVAL;
	}

	if (handle->req != NULL) {
		pr_err("Invalid handle passed in\n");
		return -EINVAL;
	}

	buffer = handle_to_buffer(handle);
	BUG_ON(buffer == NULL);

	/* prepare a request structure to represent this transaction */
	req = ocmem_create_req();
	if (!req)
		return -ENOMEM;

	req->owner = id;
	req->req_min = min;
	req->req_max = max;
	req->req_step = step;
	req->prio = ocmem_client_table[id].priority;
	req->op = SCHED_ALLOCATE;
	req->buffer = buffer;

	rc = do_allocate(req, can_block, can_wait);

	if (rc < 0)
		goto do_allocate_error;

	handle->req = req;

	if (is_tcm(id)) {
		rc = process_map(req, req->req_start, req->req_end);
		if (rc < 0)
			goto map_error;
	}

	return 0;

map_error:
	handle->req = NULL;
	do_free(req);
do_allocate_error:
	ocmem_destroy_req(req);
	return -EINVAL;
}

int process_delayed_allocate(struct ocmem_req *req)
{

	struct ocmem_handle *handle = NULL;
	int rc = 0;
	int id = req->owner;

	handle = req_to_handle(req);
	BUG_ON(handle == NULL);

	rc = do_allocate(req, true, false);

	if (rc < 0)
		goto do_allocate_error;

	if (is_tcm(id)) {
		rc = process_map(req, req->req_start, req->req_end);
		if (rc < 0)
			goto map_error;
	}

	/* Notify the client about the buffer growth */
	rc = dispatch_notification(id, OCMEM_ALLOC_GROW, req->buffer);
	if (rc < 0) {
		pr_err("No notifier callback to cater for req %p event: %d\n",
				req, OCMEM_ALLOC_GROW);
		BUG();
	}
	return 0;

map_error:
	handle->req = NULL;
	do_free(req);
do_allocate_error:
	ocmem_destroy_req(req);
	return -EINVAL;
}

static void ocmem_sched_wk_func(struct work_struct *work)
{

	struct ocmem_buf *buffer = NULL;
	struct ocmem_handle *handle = NULL;
	struct ocmem_req *req = ocmem_fetch_req();

	if (!req) {
		pr_debug("No Pending Requests found\n");
		return;
	}

	pr_debug("ocmem: sched_wk pending req %p\n", req);
	handle = req_to_handle(req);
	buffer = handle_to_buffer(handle);
	BUG_ON(req->op == SCHED_NOP);

	switch (req->op) {
	case SCHED_GROW:
		process_grow(req);
		break;
	case SCHED_ALLOCATE:
		process_delayed_allocate(req);
		break;
	default:
		pr_err("ocmem: Unknown operation encountered\n");
		break;
	}
	return;
}

int ocmem_sched_init(void)
{
	int i = 0;
	sched_tree = RB_ROOT;
	mutex_init(&sched_mutex);
	mutex_init(&sched_queue_mutex);
	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++)
		INIT_LIST_HEAD(&sched_queue[i]);

	mutex_init(&rdm_mutex);
	INIT_LIST_HEAD(&rdm_queue);
	ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0);
	if (!ocmem_rdm_wq)
		return -ENOMEM;
	ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0);
	if (!ocmem_eviction_wq)
		return -ENOMEM;
	return 0;
}