 1/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/slab.h>
16#include <linux/mm.h>
17#include <linux/rbtree.h>
18#include <linux/idr.h>
19#include <linux/genalloc.h>
20#include <linux/of.h>
21#include <linux/io.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/seq_file.h>
25#include <mach/ocmem_priv.h>
26
27enum request_states {
28 R_FREE = 0x0, /* request is not allocated */
29 R_PENDING, /* request has a pending operation */
30 R_ALLOCATED, /* request has been allocated */
31 R_MUST_GROW, /* request must grow as a part of pending operation */
32 R_MUST_SHRINK, /* request must shrink as a part of pending operation */
33 R_MUST_MAP, /* request must be mapped before being used */
34 R_MUST_UNMAP, /* request must be unmapped when not being used */
35 R_MAPPED, /* request is mapped and actively used by client */
36 R_UNMAPPED, /* request is not mapped, so it's not in active use */
37 R_EVICTED, /* request is evicted and must be restored */
38};
39
40#define SET_STATE(x, val) (set_bit((val), &(x)->state))
41#define CLEAR_STATE(x, val) (clear_bit((val), &(x)->state))
42#define TEST_STATE(x, val) (test_bit((val), &(x)->state))
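/*
 * A request's state lives in the req->state bitmask, so a request can
 * carry several states at once (for example R_ALLOCATED together with
 * R_MUST_MAP while its mapping is still pending).  The macros above
 * simply wrap the atomic set_bit/clear_bit/test_bit helpers; larger
 * state transitions are serialized by sched_mutex and req->rw_sem.
 */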
43
44enum op_res {
45 OP_COMPLETE = 0x0,
46 OP_RESCHED,
47 OP_PARTIAL,
48 OP_FAIL = ~0x0,
49};
50
51/* Represents various client priorities */
52/* Note: More than one client can share a priority level */
53enum client_prio {
54 MIN_PRIO = 0x0,
55 NO_PRIO = MIN_PRIO,
56 PRIO_SENSORS = 0x1,
 57 PRIO_OTHER_OS = 0x1,
 58 PRIO_LP_AUDIO = 0x1,
59 PRIO_HP_AUDIO = 0x2,
60 PRIO_VOICE = 0x3,
61 PRIO_GFX_GROWTH = 0x4,
62 PRIO_VIDEO = 0x5,
63 PRIO_GFX = 0x6,
64 PRIO_OCMEM = 0x7,
65 MAX_OCMEM_PRIO = PRIO_OCMEM + 1,
66};
67
68static struct list_head sched_queue[MAX_OCMEM_PRIO];
69static struct mutex sched_queue_mutex;
70
 71/* The delay in msecs before a pending operation is scheduled.
72 * This allows an idle window between use case boundaries where various
73 * hardware state changes can occur. The value will be tweaked on actual
74 * hardware.
75*/
76#define SCHED_DELAY 10
77
 78static struct list_head rdm_queue;
79static struct mutex rdm_mutex;
80static struct workqueue_struct *ocmem_rdm_wq;
81static struct workqueue_struct *ocmem_eviction_wq;
82
83static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX];
84
85struct ocmem_rdm_work {
86 int id;
87 struct ocmem_map_list *list;
88 struct ocmem_handle *handle;
89 int direction;
90 struct work_struct work;
91};
92
 93/* OCMEM Operational modes */
94enum ocmem_client_modes {
95 OCMEM_PERFORMANCE = 1,
96 OCMEM_PASSIVE,
97 OCMEM_LOW_POWER,
98 OCMEM_MODE_MAX = OCMEM_LOW_POWER
99};
100
101/* OCMEM Addressing modes */
102enum ocmem_interconnects {
103 OCMEM_BLOCKED = 0,
104 OCMEM_PORT = 1,
105 OCMEM_OCMEMNOC = 2,
106 OCMEM_SYSNOC = 3,
107};
108
109/**
110 * Primary OCMEM Arbitration Table
111 **/
112struct ocmem_table {
113 int client_id;
114 int priority;
115 int mode;
116 int hw_interconnect;
117} ocmem_client_table[OCMEM_CLIENT_MAX] = {
118 {OCMEM_GRAPHICS, PRIO_GFX, OCMEM_PERFORMANCE, OCMEM_PORT},
 119 {OCMEM_VIDEO, PRIO_VIDEO, OCMEM_PERFORMANCE, OCMEM_PORT},
 120 {OCMEM_CAMERA, NO_PRIO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC},
121 {OCMEM_HP_AUDIO, PRIO_HP_AUDIO, OCMEM_PASSIVE, OCMEM_BLOCKED},
122 {OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED},
123 {OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, OCMEM_SYSNOC},
124 {OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
 125 {OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
 126};
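/*
 * Each entry pairs a client with its arbitration priority, operational
 * mode and the interconnect it uses to reach OCMEM.  The helpers below
 * (is_tcm(), is_blocked(), get_mode()) key off these fields to decide
 * whether a request needs mapping/locking or must be rejected outright.
 */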
127
128static struct rb_root sched_tree;
129static struct mutex sched_mutex;
130
 131/* A region represents a contiguous interval in OCMEM address space */
132struct ocmem_region {
133 /* Chain in Interval Tree */
134 struct rb_node region_rb;
135 /* Hash map of requests */
136 struct idr region_idr;
 137 /* Chain in eviction list */
 138 struct list_head eviction_list;
 139 unsigned long r_start;
140 unsigned long r_end;
141 unsigned long r_sz;
142 /* Highest priority of all requests served by this region */
143 int max_prio;
144};
145
 146/* Is OCMEM tightly coupled to the client? */
147static inline int is_tcm(int id)
148{
149 if (ocmem_client_table[id].hw_interconnect == OCMEM_PORT ||
150 ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC)
151 return 1;
152 else
153 return 0;
154}
155
156static inline int is_blocked(int id)
157{
158 return ocmem_client_table[id].hw_interconnect == OCMEM_BLOCKED ? 1 : 0;
159}
160
 161inline struct ocmem_buf *handle_to_buffer(struct ocmem_handle *handle)
162{
163 if (handle)
164 return &handle->buffer;
165 else
166 return NULL;
167}
168
169inline struct ocmem_handle *buffer_to_handle(struct ocmem_buf *buffer)
170{
171 if (buffer)
172 return container_of(buffer, struct ocmem_handle, buffer);
173 else
174 return NULL;
175}
176
177inline struct ocmem_req *handle_to_req(struct ocmem_handle *handle)
178{
179 if (handle)
180 return handle->req;
181 else
182 return NULL;
183}
184
185inline struct ocmem_handle *req_to_handle(struct ocmem_req *req)
186{
187 if (req && req->buffer)
188 return container_of(req->buffer, struct ocmem_handle, buffer);
189 else
190 return NULL;
191}
192
193/* Simple wrappers which will have debug features added later */
194inline int ocmem_read(void *at)
195{
196 return readl_relaxed(at);
197}
198
199inline int ocmem_write(unsigned long val, void *at)
200{
201 writel_relaxed(val, at);
202 return 0;
203}
204
 205inline int get_mode(int id)
206{
207 if (!check_id(id))
208 return MODE_NOT_SET;
209 else
210 return ocmem_client_table[id].mode == OCMEM_PERFORMANCE ?
211 WIDE_MODE : THIN_MODE;
212}
213
 214/* Returns the address that can be used by a device core to access OCMEM */
215static unsigned long device_address(int id, unsigned long addr)
216{
217 int hw_interconnect = ocmem_client_table[id].hw_interconnect;
218 unsigned long ret_addr = 0x0;
219
220 switch (hw_interconnect) {
221 case OCMEM_PORT:
222 ret_addr = phys_to_offset(addr);
223 break;
224 case OCMEM_OCMEMNOC:
225 case OCMEM_SYSNOC:
226 ret_addr = addr;
227 break;
228 case OCMEM_BLOCKED:
229 ret_addr = 0x0;
230 break;
231 }
232 return ret_addr;
233}
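/*
 * Only OCMEM_PORT clients address OCMEM through the core port, so
 * their addresses are converted to OCMEM offsets; NOC based clients
 * use the physical address unchanged and blocked clients get no usable
 * address at all.  core_address() below performs the inverse
 * translation and is used to validate buffer handles passed back in by
 * clients.
 */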
234
235/* Returns the address as viewed by the core */
236static unsigned long core_address(int id, unsigned long addr)
237{
238 int hw_interconnect = ocmem_client_table[id].hw_interconnect;
239 unsigned long ret_addr = 0x0;
240
241 switch (hw_interconnect) {
242 case OCMEM_PORT:
243 ret_addr = offset_to_phys(addr);
244 break;
245 case OCMEM_OCMEMNOC:
246 case OCMEM_SYSNOC:
247 ret_addr = addr;
248 break;
249 case OCMEM_BLOCKED:
250 ret_addr = 0x0;
251 break;
252 }
253 return ret_addr;
254}
255
 256static inline struct ocmem_zone *zone_of(struct ocmem_req *req)
257{
258 int owner;
259 if (!req)
260 return NULL;
261 owner = req->owner;
262 return get_zone(owner);
263}
264
 265static int insert_region(struct ocmem_region *region)
266{
267
268 struct rb_root *root = &sched_tree;
269 struct rb_node **p = &root->rb_node;
270 struct rb_node *parent = NULL;
271 struct ocmem_region *tmp = NULL;
272 unsigned long addr = region->r_start;
273
274 while (*p) {
275 parent = *p;
276 tmp = rb_entry(parent, struct ocmem_region, region_rb);
277
278 if (tmp->r_end > addr) {
279 if (tmp->r_start <= addr)
280 break;
281 p = &(*p)->rb_left;
282 } else if (tmp->r_end <= addr)
283 p = &(*p)->rb_right;
284 }
285 rb_link_node(&region->region_rb, parent, p);
286 rb_insert_color(&region->region_rb, root);
287 return 0;
288}
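/*
 * The scheduler tree orders regions by address.  The walk above
 * descends left while the new start lies below an existing region's
 * start, descends right once it lies at or beyond that region's end,
 * and stops early when an existing region already spans the start
 * address.
 */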
289
290static int remove_region(struct ocmem_region *region)
291{
292 struct rb_root *root = &sched_tree;
293 rb_erase(&region->region_rb, root);
294 return 0;
295}
296
297static struct ocmem_req *ocmem_create_req(void)
298{
299 struct ocmem_req *p = NULL;
300
301 p = kzalloc(sizeof(struct ocmem_req), GFP_KERNEL);
302 if (!p)
303 return NULL;
304
305 INIT_LIST_HEAD(&p->zone_list);
306 INIT_LIST_HEAD(&p->sched_list);
307 init_rwsem(&p->rw_sem);
308 SET_STATE(p, R_FREE);
309 return p;
310}
311
312static int ocmem_destroy_req(struct ocmem_req *req)
313{
314 kfree(req);
315 return 0;
316}
317
318static struct ocmem_region *create_region(void)
319{
320 struct ocmem_region *p = NULL;
321
322 p = kzalloc(sizeof(struct ocmem_region), GFP_KERNEL);
323 if (!p)
324 return NULL;
325 idr_init(&p->region_idr);
 326 INIT_LIST_HEAD(&p->eviction_list);
 327 p->r_start = p->r_end = p->r_sz = 0x0;
328 p->max_prio = NO_PRIO;
329 return p;
330}
331
332static int destroy_region(struct ocmem_region *region)
333{
334 kfree(region);
335 return 0;
336}
337
338static int attach_req(struct ocmem_region *region, struct ocmem_req *req)
339{
340 int ret, id;
341
342 while (1) {
343 if (idr_pre_get(&region->region_idr, GFP_KERNEL) == 0)
344 return -ENOMEM;
345
346 ret = idr_get_new_above(&region->region_idr, req, 1, &id);
347
348 if (ret != -EAGAIN)
349 break;
350 }
351
352 if (!ret) {
353 req->req_id = id;
354 pr_debug("ocmem: request %p(id:%d) attached to region %p\n",
355 req, id, region);
356 return 0;
357 }
358 return -EINVAL;
359}
360
361static int detach_req(struct ocmem_region *region, struct ocmem_req *req)
362{
363 idr_remove(&region->region_idr, req->req_id);
364 return 0;
365}
366
367static int populate_region(struct ocmem_region *region, struct ocmem_req *req)
368{
369 region->r_start = req->req_start;
370 region->r_end = req->req_end;
371 region->r_sz = req->req_end - req->req_start + 1;
372 return 0;
373}
374
375static int region_req_count(int id, void *ptr, void *data)
376{
377 int *count = data;
378 *count = *count + 1;
379 return 0;
380}
381
382static int req_count(struct ocmem_region *region)
383{
384 int count = 0;
385 idr_for_each(&region->region_idr, region_req_count, &count);
386 return count;
387}
388
389static int compute_max_prio(int id, void *ptr, void *data)
390{
391 int *max = data;
392 struct ocmem_req *req = ptr;
393
394 if (req->prio > *max)
395 *max = req->prio;
396 return 0;
397}
398
399static int update_region_prio(struct ocmem_region *region)
400{
 401 int max_prio = NO_PRIO;
402 if (req_count(region) != 0) {
403 idr_for_each(&region->region_idr, compute_max_prio, &max_prio);
404 region->max_prio = max_prio;
405 } else {
406 region->max_prio = NO_PRIO;
407 }
408 pr_debug("ocmem: Updating prio of region %p as %d\n",
409 region, max_prio);
410
411 return 0;
412}
413
414static struct ocmem_region *find_region(unsigned long addr)
415{
416 struct ocmem_region *region = NULL;
417 struct rb_node *rb_node = NULL;
418
419 rb_node = sched_tree.rb_node;
420
421 while (rb_node) {
422 struct ocmem_region *tmp_region = NULL;
423 tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
424
425 if (tmp_region->r_end > addr) {
426 region = tmp_region;
427 if (tmp_region->r_start <= addr)
428 break;
429 rb_node = rb_node->rb_left;
430 } else {
431 rb_node = rb_node->rb_right;
432 }
433 }
434 return region;
435}
436
437static struct ocmem_region *find_region_intersection(unsigned long start,
438 unsigned long end)
439{
440
441 struct ocmem_region *region = NULL;
442 region = find_region(start);
443 if (region && end <= region->r_start)
444 region = NULL;
445 return region;
446}
447
448static struct ocmem_region *find_region_match(unsigned long start,
449 unsigned long end)
450{
451
452 struct ocmem_region *region = NULL;
453 region = find_region(start);
454 if (region && start == region->r_start && end == region->r_end)
455 return region;
456 return NULL;
457}
458
459static struct ocmem_req *find_req_match(int owner, struct ocmem_region *region)
460{
461 struct ocmem_req *req = NULL;
462
463 if (!region)
464 return NULL;
465
466 req = idr_find(&region->region_idr, owner);
467
468 return req;
469}
470
471/* Must be called with req->sem held */
472static inline int is_mapped(struct ocmem_req *req)
473{
474 return TEST_STATE(req, R_MAPPED);
475}
476
477/* Must be called with sched_mutex held */
478static int __sched_unmap(struct ocmem_req *req)
479{
480 struct ocmem_req *matched_req = NULL;
481 struct ocmem_region *matched_region = NULL;
482
483 matched_region = find_region_match(req->req_start, req->req_end);
484 matched_req = find_req_match(req->req_id, matched_region);
485
486 if (!matched_region || !matched_req) {
487 pr_err("Could not find backing region for req");
488 goto invalid_op_error;
489 }
490
491 if (matched_req != req) {
492 pr_err("Request does not match backing req");
493 goto invalid_op_error;
494 }
495
496 if (!is_mapped(req)) {
497 pr_err("Request is not currently mapped");
498 goto invalid_op_error;
499 }
500
501 /* Update the request state */
502 CLEAR_STATE(req, R_MAPPED);
503 SET_STATE(req, R_MUST_MAP);
504
505 return OP_COMPLETE;
506
507invalid_op_error:
508 return OP_FAIL;
509}
510
511/* Must be called with sched_mutex held */
512static int __sched_map(struct ocmem_req *req)
513{
514 struct ocmem_req *matched_req = NULL;
515 struct ocmem_region *matched_region = NULL;
516
517 matched_region = find_region_match(req->req_start, req->req_end);
518 matched_req = find_req_match(req->req_id, matched_region);
519
520 if (!matched_region || !matched_req) {
521 pr_err("Could not find backing region for req");
522 goto invalid_op_error;
523 }
524
525 if (matched_req != req) {
526 pr_err("Request does not match backing req");
527 goto invalid_op_error;
528 }
529
530 /* Update the request state */
531 CLEAR_STATE(req, R_MUST_MAP);
532 SET_STATE(req, R_MAPPED);
533
534 return OP_COMPLETE;
535
536invalid_op_error:
537 return OP_FAIL;
538}
539
540static int do_map(struct ocmem_req *req)
541{
542 int rc = 0;
543
 544 down_write(&req->rw_sem);
 545
 546 mutex_lock(&sched_mutex);
547 rc = __sched_map(req);
548 mutex_unlock(&sched_mutex);
549
 550 up_write(&req->rw_sem);
 551
 552 if (rc == OP_FAIL)
553 return -EINVAL;
554
555 return 0;
556}
557
558static int do_unmap(struct ocmem_req *req)
559{
560 int rc = 0;
561
 562 down_write(&req->rw_sem);
 563
 564 mutex_lock(&sched_mutex);
565 rc = __sched_unmap(req);
566 mutex_unlock(&sched_mutex);
567
 568 up_write(&req->rw_sem);
 569
 570 if (rc == OP_FAIL)
571 return -EINVAL;
572
573 return 0;
574}
575
 576static int process_map(struct ocmem_req *req, unsigned long start,
577 unsigned long end)
578{
 579 int rc = 0;
580
581 rc = ocmem_enable_core_clock();
582
583 if (rc < 0)
584 goto core_clock_fail;
585
586 rc = ocmem_enable_iface_clock();
587
588 if (rc < 0)
 589 goto iface_clock_fail;
590
591 rc = ocmem_enable_br_clock();
592
593 if (rc < 0)
594 goto br_clock_fail;
595
596 rc = do_map(req);
597
 598 if (rc < 0) {
 599 pr_err("ocmem: Failed to map request %p for %d\n",
 600 req, req->owner);
 601 goto process_map_fail;
 602
 603 }
 604
 605 if (ocmem_lock(req->owner, phys_to_offset(req->req_start), req->req_sz,
606 get_mode(req->owner))) {
607 pr_err("ocmem: Failed to secure request %p for %d\n", req,
608 req->owner);
609 rc = -EINVAL;
610 goto lock_failed;
611 }
612
613 return 0;
614lock_failed:
615 do_unmap(req);
 616process_map_fail:
 617 ocmem_disable_br_clock();
618br_clock_fail:
619 ocmem_disable_iface_clock();
620iface_clock_fail:
 621 ocmem_disable_core_clock();
622core_clock_fail:
623 pr_err("ocmem: Failed to map ocmem request\n");
624 return rc;
 625}
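/*
 * process_map() brings up the core, interface and branch clocks in
 * that order, maps the request and finally secures (locks) the region
 * for its owner; the error path unwinds in exactly the reverse order
 * so a partial failure never leaves a clock enabled.
 */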
626
 627static int process_unmap(struct ocmem_req *req, unsigned long start,
628 unsigned long end)
629{
 630 int rc = 0;
631
 632 if (ocmem_unlock(req->owner, phys_to_offset(req->req_start),
633 req->req_sz)) {
634 pr_err("ocmem: Failed to un-secure request %p for %d\n", req,
635 req->owner);
636 rc = -EINVAL;
637 goto unlock_failed;
638 }
639
 640 rc = do_unmap(req);
641
642 if (rc < 0)
643 goto process_unmap_fail;
644
 645 ocmem_disable_br_clock();
 646 ocmem_disable_iface_clock();
647 ocmem_disable_core_clock();
 648 return 0;
 649
 650unlock_failed:
 651process_unmap_fail:
652 pr_err("ocmem: Failed to unmap ocmem request\n");
653 return rc;
 654}
655
656static int __sched_grow(struct ocmem_req *req, bool can_block)
657{
658 unsigned long min = req->req_min;
659 unsigned long max = req->req_max;
660 unsigned long step = req->req_step;
661 int owner = req->owner;
662 unsigned long curr_sz = 0;
663 unsigned long growth_sz = 0;
664 unsigned long curr_start = 0;
665 enum client_prio prio = req->prio;
666 unsigned long alloc_addr = 0x0;
667 bool retry;
668 struct ocmem_region *spanned_r = NULL;
669 struct ocmem_region *overlap_r = NULL;
670
671 struct ocmem_req *matched_req = NULL;
672 struct ocmem_region *matched_region = NULL;
673
674 struct ocmem_zone *zone = get_zone(owner);
675 struct ocmem_region *region = NULL;
676
677 matched_region = find_region_match(req->req_start, req->req_end);
678 matched_req = find_req_match(req->req_id, matched_region);
679
680 if (!matched_region || !matched_req) {
681 pr_err("Could not find backing region for req");
682 goto invalid_op_error;
683 }
684
685 if (matched_req != req) {
686 pr_err("Request does not match backing req");
687 goto invalid_op_error;
688 }
689
690 curr_sz = matched_req->req_sz;
691 curr_start = matched_req->req_start;
692 growth_sz = matched_req->req_max - matched_req->req_sz;
693
694 pr_debug("Attempting to grow req %p from %lx to %lx\n",
695 req, matched_req->req_sz, matched_req->req_max);
696
697 retry = false;
698
699 pr_debug("ocmem: GROW: growth size %lx\n", growth_sz);
700
701retry_next_step:
702
703 spanned_r = NULL;
704 overlap_r = NULL;
705
706 spanned_r = find_region(zone->z_head);
707 overlap_r = find_region_intersection(zone->z_head,
708 zone->z_head + growth_sz);
709
710 if (overlap_r == NULL) {
711 /* no conflicting regions, schedule this region */
712 zone->z_ops->free(zone, curr_start, curr_sz);
713 alloc_addr = zone->z_ops->allocate(zone, curr_sz + growth_sz);
714
715 if (alloc_addr < 0) {
716 pr_err("ocmem: zone allocation operation failed\n");
717 goto internal_error;
718 }
719
720 curr_sz += growth_sz;
721 /* Detach the region from the interval tree */
722 /* This is to guarantee that any change in size
723 * causes the tree to be rebalanced if required */
724
725 detach_req(matched_region, req);
726 if (req_count(matched_region) == 0) {
727 remove_region(matched_region);
728 region = matched_region;
729 } else {
730 region = create_region();
731 if (!region) {
732 pr_err("ocmem: Unable to create region\n");
733 goto region_error;
734 }
735 }
736
737 /* update the request */
738 req->req_start = alloc_addr;
739 /* increment the size to reflect new length */
740 req->req_sz = curr_sz;
741 req->req_end = alloc_addr + req->req_sz - 1;
742
743 /* update request state */
744 CLEAR_STATE(req, R_MUST_GROW);
745 SET_STATE(req, R_ALLOCATED);
746 SET_STATE(req, R_MUST_MAP);
747 req->op = SCHED_MAP;
748
749 /* update the region with new req */
750 attach_req(region, req);
751 populate_region(region, req);
752 update_region_prio(region);
753
754 /* update the tree with new region */
755 if (insert_region(region)) {
756 pr_err("ocmem: Failed to insert the region\n");
757 goto region_error;
758 }
759
760 if (retry) {
761 SET_STATE(req, R_MUST_GROW);
762 SET_STATE(req, R_PENDING);
763 req->op = SCHED_GROW;
764 return OP_PARTIAL;
765 }
766 } else if (spanned_r != NULL && overlap_r != NULL) {
767 /* resolve conflicting regions based on priority */
768 if (overlap_r->max_prio < prio) {
769 /* Growth cannot be triggered unless a previous
770 * client of lower priority was evicted */
771 pr_err("ocmem: Invalid growth scheduled\n");
772 /* This is serious enough to fail */
773 BUG();
774 return OP_FAIL;
775 } else if (overlap_r->max_prio > prio) {
776 if (min == max) {
777 /* Cannot grow at this time, try later */
778 SET_STATE(req, R_PENDING);
779 SET_STATE(req, R_MUST_GROW);
780 return OP_RESCHED;
781 } else {
782 /* Try to grow in steps */
783 growth_sz -= step;
784 /* We are OOM at this point so need to retry */
785 if (growth_sz <= curr_sz) {
786 SET_STATE(req, R_PENDING);
787 SET_STATE(req, R_MUST_GROW);
788 return OP_RESCHED;
789 }
790 retry = true;
791 pr_debug("ocmem: Attempting with reduced size %lx\n",
792 growth_sz);
793 goto retry_next_step;
794 }
795 } else {
796 pr_err("ocmem: grow: New Region %p Existing %p\n",
797 matched_region, overlap_r);
798 pr_err("ocmem: Undetermined behavior\n");
799 /* This is serious enough to fail */
800 BUG();
801 }
802 } else if (spanned_r == NULL && overlap_r != NULL) {
803 goto err_not_supported;
804 }
805
806 return OP_COMPLETE;
807
808err_not_supported:
809 pr_err("ocmem: Scheduled unsupported operation\n");
810 return OP_FAIL;
811region_error:
812 zone->z_ops->free(zone, alloc_addr, curr_sz);
813 detach_req(region, req);
814 update_region_prio(region);
815 /* req is going to be destroyed by the caller anyways */
816internal_error:
817 destroy_region(region);
818invalid_op_error:
819 return OP_FAIL;
820}
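/*
 * Growth frees the current allocation and re-allocates it at
 * curr_sz + growth_sz from the zone head.  When a higher priority
 * region is in the way the growth size is stepped down in req_step
 * decrements: a reduced allocation that still succeeds returns
 * OP_PARTIAL with R_MUST_GROW/SCHED_GROW left pending, while no
 * progress at all returns OP_RESCHED so the request stays queued.
 */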
821
822/* Must be called with sched_mutex held */
823static int __sched_free(struct ocmem_req *req)
824{
825 int owner = req->owner;
826 int ret = 0;
827
828 struct ocmem_req *matched_req = NULL;
829 struct ocmem_region *matched_region = NULL;
830
831 struct ocmem_zone *zone = get_zone(owner);
832
833 BUG_ON(!zone);
834
835 matched_region = find_region_match(req->req_start, req->req_end);
836 matched_req = find_req_match(req->req_id, matched_region);
837
838 if (!matched_region || !matched_req)
839 goto invalid_op_error;
840 if (matched_req != req)
841 goto invalid_op_error;
842
843 ret = zone->z_ops->free(zone,
844 matched_req->req_start, matched_req->req_sz);
845
846 if (ret < 0)
847 goto err_op_fail;
848
849 detach_req(matched_region, matched_req);
850 update_region_prio(matched_region);
851 if (req_count(matched_region) == 0) {
852 remove_region(matched_region);
853 destroy_region(matched_region);
854 }
855
856 /* Update the request */
857 req->req_start = 0x0;
858 req->req_sz = 0x0;
859 req->req_end = 0x0;
860 SET_STATE(req, R_FREE);
861 return OP_COMPLETE;
862invalid_op_error:
863 pr_err("ocmem: free: Failed to find matching region\n");
864err_op_fail:
865 pr_err("ocmem: free: Failed\n");
866 return OP_FAIL;
867}
868
869/* Must be called with sched_mutex held */
 870static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz)
871{
872 int owner = req->owner;
873 int ret = 0;
874
875 struct ocmem_req *matched_req = NULL;
876 struct ocmem_region *matched_region = NULL;
877 struct ocmem_region *region = NULL;
878 unsigned long alloc_addr = 0x0;
879
880 struct ocmem_zone *zone = get_zone(owner);
881
882 BUG_ON(!zone);
883
884 /* The shrink should not be called for zero size */
885 BUG_ON(new_sz == 0);
886
887 matched_region = find_region_match(req->req_start, req->req_end);
888 matched_req = find_req_match(req->req_id, matched_region);
889
890 if (!matched_region || !matched_req)
891 goto invalid_op_error;
892 if (matched_req != req)
893 goto invalid_op_error;
894
 895 ret = zone->z_ops->free(zone,
896 matched_req->req_start, matched_req->req_sz);
897
898 if (ret < 0) {
899 pr_err("Zone Allocation operation failed\n");
900 goto internal_error;
901 }
902
903 alloc_addr = zone->z_ops->allocate(zone, new_sz);
904
905 if (alloc_addr < 0) {
906 pr_err("Zone Allocation operation failed\n");
907 goto internal_error;
908 }
909
910 /* Detach the region from the interval tree */
911 /* This is to guarantee that the change in size
912 * causes the tree to be rebalanced if required */
913
914 detach_req(matched_region, req);
915 if (req_count(matched_region) == 0) {
916 remove_region(matched_region);
917 region = matched_region;
918 } else {
919 region = create_region();
920 if (!region) {
921 pr_err("ocmem: Unable to create region\n");
922 goto internal_error;
923 }
924 }
925 /* update the request */
926 req->req_start = alloc_addr;
927 req->req_sz = new_sz;
 928 req->req_end = alloc_addr + req->req_sz - 1;
929
930 if (req_count(region) == 0) {
931 remove_region(matched_region);
932 destroy_region(matched_region);
933 }
934
935 /* update request state */
936 SET_STATE(req, R_MUST_GROW);
937 SET_STATE(req, R_MUST_MAP);
938 req->op = SCHED_MAP;
939
940 /* attach the request to the region */
941 attach_req(region, req);
942 populate_region(region, req);
943 update_region_prio(region);
944
945 /* update the tree with new region */
946 if (insert_region(region)) {
947 pr_err("ocmem: Failed to insert the region\n");
948 zone->z_ops->free(zone, alloc_addr, new_sz);
949 detach_req(region, req);
950 update_region_prio(region);
951 /* req will be destroyed by the caller */
952 goto region_error;
953 }
954 return OP_COMPLETE;
955
956region_error:
957 destroy_region(region);
958internal_error:
959 pr_err("ocmem: shrink: Failed\n");
960 return OP_FAIL;
961invalid_op_error:
962 pr_err("ocmem: shrink: Failed to find matching region\n");
963 return OP_FAIL;
964}
965
966/* Must be called with sched_mutex held */
 967static int __sched_allocate(struct ocmem_req *req, bool can_block,
968 bool can_wait)
969{
970 unsigned long min = req->req_min;
971 unsigned long max = req->req_max;
972 unsigned long step = req->req_step;
973 int owner = req->owner;
974 unsigned long sz = max;
975 enum client_prio prio = req->prio;
976 unsigned long alloc_addr = 0x0;
977 bool retry;
978
979 struct ocmem_region *spanned_r = NULL;
980 struct ocmem_region *overlap_r = NULL;
981
982 struct ocmem_zone *zone = get_zone(owner);
983 struct ocmem_region *region = NULL;
984
985 BUG_ON(!zone);
986
987 if (min > (zone->z_end - zone->z_start)) {
988 pr_err("ocmem: requested minimum size exceeds quota\n");
989 goto invalid_op_error;
990 }
991
992 if (max > (zone->z_end - zone->z_start)) {
993 pr_err("ocmem: requested maximum size exceeds quota\n");
994 goto invalid_op_error;
995 }
996
997 if (min > zone->z_free) {
998 pr_err("ocmem: out of memory for zone %d\n", owner);
999 goto invalid_op_error;
1000 }
1001
1002 region = create_region();
1003
1004 if (!region) {
1005 pr_err("ocmem: Unable to create region\n");
1006 goto invalid_op_error;
1007 }
1008
1009 retry = false;
1010
1011 pr_debug("ocmem: ALLOCATE: request size %lx\n", sz);
1012
1013retry_next_step:
1014
1015 spanned_r = NULL;
1016 overlap_r = NULL;
1017
1018 spanned_r = find_region(zone->z_head);
1019 overlap_r = find_region_intersection(zone->z_head, zone->z_head + sz);
1020
1021 if (overlap_r == NULL) {
1022 /* no conflicting regions, schedule this region */
1023 alloc_addr = zone->z_ops->allocate(zone, sz);
1024
1025 if (alloc_addr < 0) {
1026 pr_err("Zone Allocation operation failed\n");
1027 goto internal_error;
1028 }
1029
1030 /* update the request */
1031 req->req_start = alloc_addr;
1032 req->req_end = alloc_addr + sz - 1;
1033 req->req_sz = sz;
1034 req->zone = zone;
1035
1036 /* update request state */
1037 CLEAR_STATE(req, R_FREE);
1038 SET_STATE(req, R_ALLOCATED);
1039 SET_STATE(req, R_MUST_MAP);
1040 req->op = SCHED_NOP;
1041
1042 /* attach the request to the region */
1043 attach_req(region, req);
1044 populate_region(region, req);
1045 update_region_prio(region);
1046
1047 /* update the tree with new region */
1048 if (insert_region(region)) {
1049 pr_err("ocmem: Failed to insert the region\n");
1050 zone->z_ops->free(zone, alloc_addr, sz);
1051 detach_req(region, req);
1052 update_region_prio(region);
1053 /* req will be destroyed by the caller */
1054 goto internal_error;
1055 }
1056
1057 if (retry) {
1058 SET_STATE(req, R_MUST_GROW);
1059 SET_STATE(req, R_PENDING);
1060 req->op = SCHED_GROW;
1061 return OP_PARTIAL;
1062 }
1063 } else if (spanned_r != NULL && overlap_r != NULL) {
1064 /* resolve conflicting regions based on priority */
1065 if (overlap_r->max_prio < prio) {
1066 if (min == max) {
1067 pr_err("ocmem: Requires eviction support\n");
1068 goto err_not_supported;
1069 } else {
 1070 /* Try to allocate at least 'min' immediately */
1071 sz -= step;
1072 if (sz < min)
1073 goto err_out_of_mem;
1074 retry = true;
1075 pr_debug("ocmem: Attempting with reduced size %lx\n",
1076 sz);
1077 goto retry_next_step;
1078 }
1079 } else if (overlap_r->max_prio > prio) {
1080 if (can_block == true) {
1081 SET_STATE(req, R_PENDING);
1082 SET_STATE(req, R_MUST_GROW);
1083 return OP_RESCHED;
1084 } else {
1085 if (min == max) {
1086 pr_err("Cannot allocate %lx synchronously\n",
1087 sz);
1088 goto err_out_of_mem;
1089 } else {
1090 sz -= step;
1091 if (sz < min)
1092 goto err_out_of_mem;
1093 retry = true;
1094 pr_debug("ocmem: Attempting reduced size %lx\n",
1095 sz);
1096 goto retry_next_step;
1097 }
1098 }
1099 } else {
1100 pr_err("ocmem: Undetermined behavior\n");
1101 pr_err("ocmem: New Region %p Existing %p\n", region,
1102 overlap_r);
1103 /* This is serious enough to fail */
1104 BUG();
1105 }
1106 } else if (spanned_r == NULL && overlap_r != NULL)
1107 goto err_not_supported;
1108
1109 return OP_COMPLETE;
1110
1111err_not_supported:
1112 pr_err("ocmem: Scheduled unsupported operation\n");
1113 return OP_FAIL;
1114
1115err_out_of_mem:
1116 pr_err("ocmem: Out of memory during allocation\n");
1117internal_error:
1118 destroy_region(region);
1119invalid_op_error:
1120 return OP_FAIL;
1121}
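/*
 * Illustrative example (sizes are hypothetical): a request with
 * min = 256K, max = 1M, step = 256K first tries to carve 1M from the
 * zone head.  If the conflicting region has lower priority the size is
 * stepped down 256K at a time until it fits (returned as OP_PARTIAL
 * and grown later) or falls below 256K (out of memory).  If the
 * conflicting region has higher priority the request is parked with
 * OP_RESCHED when it may block, otherwise it is stepped down the same
 * way.
 */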
1122
1123static int sched_enqueue(struct ocmem_req *priv)
1124{
1125 struct ocmem_req *next = NULL;
1126 mutex_lock(&sched_queue_mutex);
1127 list_add_tail(&priv->sched_list, &sched_queue[priv->owner]);
1128 pr_debug("enqueued req %p\n", priv);
1129 list_for_each_entry(next, &sched_queue[priv->owner], sched_list) {
1130 pr_debug("pending requests for client %p\n", next);
1131 }
1132 mutex_unlock(&sched_queue_mutex);
1133 return 0;
1134}
1135
1136static struct ocmem_req *ocmem_fetch_req(void)
1137{
1138 int i;
1139 struct ocmem_req *req = NULL;
1140 struct ocmem_req *next = NULL;
1141
1142 mutex_lock(&sched_queue_mutex);
1143 for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
1144 if (list_empty(&sched_queue[i]))
1145 continue;
1146 list_for_each_entry_safe(req, next, &sched_queue[i], sched_list)
1147 {
1148 if (req) {
1149 pr_debug("ocmem: Fetched pending request %p\n",
1150 req);
1151 list_del(&req->sched_list);
1152 break;
1153 }
1154 }
1155 }
1156 mutex_unlock(&sched_queue_mutex);
1157 return req;
1158}
1159
 1160
1161unsigned long process_quota(int id)
1162{
1163 struct ocmem_zone *zone = NULL;
1164
1165 if (is_blocked(id))
1166 return 0;
1167
1168 zone = get_zone(id);
1169
1170 if (zone && zone->z_pool)
1171 return zone->z_end - zone->z_start;
1172 else
1173 return 0;
1174}
1175
1176static int do_grow(struct ocmem_req *req)
1177{
1178 struct ocmem_buf *buffer = NULL;
1179 bool can_block = true;
1180 int rc = 0;
1181
1182 down_write(&req->rw_sem);
1183 buffer = req->buffer;
1184
1185 /* Take the scheduler mutex */
1186 mutex_lock(&sched_mutex);
1187 rc = __sched_grow(req, can_block);
1188 mutex_unlock(&sched_mutex);
1189
1190 if (rc == OP_FAIL)
1191 goto err_op_fail;
1192
1193 if (rc == OP_RESCHED) {
1194 pr_debug("ocmem: Enqueue this allocation");
1195 sched_enqueue(req);
1196 }
1197
1198 else if (rc == OP_COMPLETE || rc == OP_PARTIAL) {
1199 buffer->addr = device_address(req->owner, req->req_start);
1200 buffer->len = req->req_sz;
1201 }
1202
1203 up_write(&req->rw_sem);
1204 return 0;
1205err_op_fail:
1206 up_write(&req->rw_sem);
1207 return -EINVAL;
1208}
1209
1210static int process_grow(struct ocmem_req *req)
1211{
1212 int rc = 0;
 1213 unsigned long offset = 0;
 1214
1215 /* Attempt to grow the region */
1216 rc = do_grow(req);
1217
1218 if (rc < 0)
1219 return -EINVAL;
1220
1221 /* Map the newly grown region */
1222 if (is_tcm(req->owner)) {
1223 rc = process_map(req, req->req_start, req->req_end);
1224 if (rc < 0)
1225 return -EINVAL;
1226 }
1227
 1228 offset = phys_to_offset(req->req_start);
1229
1230 rc = ocmem_memory_on(req->owner, offset, req->req_sz);
1231
1232 if (rc < 0) {
1233 pr_err("Failed to switch ON memory macros\n");
1234 goto power_ctl_error;
1235 }
1236
 1237 /* Notify the client about the buffer growth */
1238 rc = dispatch_notification(req->owner, OCMEM_ALLOC_GROW, req->buffer);
1239 if (rc < 0) {
1240 pr_err("No notifier callback to cater for req %p event: %d\n",
1241 req, OCMEM_ALLOC_GROW);
1242 BUG();
1243 }
1244 return 0;
 1245power_ctl_error:
 1246 return -EINVAL;
 1247}
1248
 1249static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
1250{
1251
1252 int rc = 0;
1253 struct ocmem_buf *buffer = NULL;
1254
1255 down_write(&req->rw_sem);
1256 buffer = req->buffer;
1257
1258 /* Take the scheduler mutex */
1259 mutex_lock(&sched_mutex);
1260 rc = __sched_shrink(req, shrink_size);
1261 mutex_unlock(&sched_mutex);
1262
1263 if (rc == OP_FAIL)
1264 goto err_op_fail;
1265
1266 else if (rc == OP_COMPLETE) {
1267 buffer->addr = device_address(req->owner, req->req_start);
1268 buffer->len = req->req_sz;
1269 }
1270
1271 up_write(&req->rw_sem);
1272 return 0;
1273err_op_fail:
1274 up_write(&req->rw_sem);
1275 return -EINVAL;
1276}
1277
 1278static void ocmem_sched_wk_func(struct work_struct *work);
1279DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func);
1280
1281static int ocmem_schedule_pending(void)
1282{
1283 schedule_delayed_work(&ocmem_sched_thread,
1284 msecs_to_jiffies(SCHED_DELAY));
1285 return 0;
1286}
1287
1288static int do_free(struct ocmem_req *req)
1289{
1290 int rc = 0;
1291 struct ocmem_buf *buffer = req->buffer;
1292
1293 down_write(&req->rw_sem);
1294
1295 if (is_mapped(req)) {
1296 pr_err("ocmem: Buffer needs to be unmapped before free\n");
1297 goto err_free_fail;
1298 }
1299
1300 /* Grab the sched mutex */
1301 mutex_lock(&sched_mutex);
1302 rc = __sched_free(req);
1303 mutex_unlock(&sched_mutex);
1304
1305 switch (rc) {
1306
1307 case OP_COMPLETE:
1308 buffer->addr = 0x0;
1309 buffer->len = 0x0;
1310 break;
1311 case OP_FAIL:
1312 default:
1313 goto err_free_fail;
1314 break;
1315 }
1316
1317 up_write(&req->rw_sem);
1318 return 0;
1319err_free_fail:
1320 up_write(&req->rw_sem);
1321 pr_err("ocmem: freeing req %p failed\n", req);
1322 return -EINVAL;
1323}
1324
1325int process_free(int id, struct ocmem_handle *handle)
1326{
1327 struct ocmem_req *req = NULL;
1328 struct ocmem_buf *buffer = NULL;
 1329 unsigned long offset = 0;
 1330 int rc = 0;
1331
1332 if (is_blocked(id)) {
1333 pr_err("Client %d cannot request free\n", id);
1334 return -EINVAL;
1335 }
1336
1337 req = handle_to_req(handle);
1338 buffer = handle_to_buffer(handle);
1339
1340 if (!req)
1341 return -EINVAL;
1342
1343 if (req->req_start != core_address(id, buffer->addr)) {
1344 pr_err("Invalid buffer handle passed for free\n");
1345 return -EINVAL;
1346 }
1347
1348 if (is_tcm(req->owner)) {
1349 rc = process_unmap(req, req->req_start, req->req_end);
1350 if (rc < 0)
1351 return -EINVAL;
1352 }
1353
 1354 if (req->req_sz != 0) {
1355
1356 offset = phys_to_offset(req->req_start);
1357
1358 rc = ocmem_memory_off(req->owner, offset, req->req_sz);
1359
1360 if (rc < 0) {
1361 pr_err("Failed to switch OFF memory macros\n");
1362 return -EINVAL;
1363 }
1364
1365 }
1366
 1367 rc = do_free(req);
 1368 if (rc < 0)
1369 return -EINVAL;
1370
 1371 inc_ocmem_stat(zone_of(req), NR_FREES);
 1372
 1373 ocmem_destroy_req(req);
1374 handle->req = NULL;
1375
1376 ocmem_schedule_pending();
1377 return 0;
1378}
1379
 1380static void ocmem_rdm_worker(struct work_struct *work)
1381{
1382 int offset = 0;
1383 int rc = 0;
1384 int event;
1385 struct ocmem_rdm_work *work_data = container_of(work,
1386 struct ocmem_rdm_work, work);
1387 int id = work_data->id;
1388 struct ocmem_map_list *list = work_data->list;
1389 int direction = work_data->direction;
1390 struct ocmem_handle *handle = work_data->handle;
1391 struct ocmem_req *req = handle_to_req(handle);
1392 struct ocmem_buf *buffer = handle_to_buffer(handle);
1393
1394 down_write(&req->rw_sem);
1395 offset = phys_to_offset(req->req_start);
1396 rc = ocmem_rdm_transfer(id, list, offset, direction);
1397 if (work_data->direction == TO_OCMEM)
1398 event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
1399 else
1400 event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;
 1401 up_write(&req->rw_sem);
1402 kfree(work_data);
1403 dispatch_notification(id, event, buffer);
1404}
1405
1406int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle,
1407 struct ocmem_map_list *list, int direction)
1408{
1409 struct ocmem_rdm_work *work_data = NULL;
1410
1411 down_write(&req->rw_sem);
1412
1413 work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC);
1414 if (!work_data)
1415 BUG();
1416
1417 work_data->handle = handle;
1418 work_data->list = list;
1419 work_data->id = req->owner;
1420 work_data->direction = direction;
1421 INIT_WORK(&work_data->work, ocmem_rdm_worker);
1422 up_write(&req->rw_sem);
1423 queue_work(ocmem_rdm_wq, &work_data->work);
1424 return 0;
1425}
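/*
 * Data moves are not performed inline: queue_transfer() hands the map
 * list to ocmem_rdm_wq and ocmem_rdm_worker() later performs the RDM
 * transfer and reports OCMEM_MAP_DONE/FAIL or OCMEM_UNMAP_DONE/FAIL
 * back to the client through dispatch_notification().
 */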
1426
1427int process_xfer_out(int id, struct ocmem_handle *handle,
1428 struct ocmem_map_list *list)
1429{
1430 struct ocmem_req *req = NULL;
1431 int rc = 0;
1432
1433 req = handle_to_req(handle);
1434
1435 if (!req)
1436 return -EINVAL;
1437
1438 if (!is_mapped(req)) {
1439 pr_err("Buffer is not already mapped\n");
1440 goto transfer_out_error;
1441 }
1442
1443 rc = process_unmap(req, req->req_start, req->req_end);
1444 if (rc < 0) {
1445 pr_err("Unmapping the buffer failed\n");
1446 goto transfer_out_error;
1447 }
1448
 1449 inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_DDR);
 1450
 1451 rc = queue_transfer(req, handle, list, TO_DDR);
 1452
 1453 if (rc < 0) {
 1454 pr_err("Failed to queue rdm transfer to DDR\n");
 1455 inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
 1456 goto transfer_out_error;
1457 }
1458
1459 return 0;
1460
1461transfer_out_error:
1462 return -EINVAL;
1463}
1464
1465int process_xfer_in(int id, struct ocmem_handle *handle,
1466 struct ocmem_map_list *list)
1467{
1468 struct ocmem_req *req = NULL;
1469 int rc = 0;
1470
1471 req = handle_to_req(handle);
1472
1473 if (!req)
1474 return -EINVAL;
1475
1476 if (is_mapped(req)) {
1477 pr_err("Buffer is already mapped\n");
1478 goto transfer_in_error;
1479 }
1480
1481 rc = process_map(req, req->req_start, req->req_end);
1482 if (rc < 0) {
1483 pr_err("Mapping the buffer failed\n");
1484 goto transfer_in_error;
1485 }
1486
 1487 inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_OCMEM);
 1488
 1489 rc = queue_transfer(req, handle, list, TO_OCMEM);
 1490
 1491 if (rc < 0) {
 1492 pr_err("Failed to queue rdm transfer to OCMEM\n");
 1493 inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
 1494 goto transfer_in_error;
1495 }
1496
1497 return 0;
1498transfer_in_error:
1499 return -EINVAL;
1500}
1501
1502int process_shrink(int id, struct ocmem_handle *handle, unsigned long size)
1503{
1504 struct ocmem_req *req = NULL;
1505 struct ocmem_buf *buffer = NULL;
1506 struct ocmem_eviction_data *edata = NULL;
1507 int rc = 0;
1508
1509 if (is_blocked(id)) {
1510 pr_err("Client %d cannot request free\n", id);
1511 return -EINVAL;
1512 }
1513
1514 req = handle_to_req(handle);
1515 buffer = handle_to_buffer(handle);
1516
1517 if (!req)
1518 return -EINVAL;
1519
1520 if (req->req_start != core_address(id, buffer->addr)) {
1521 pr_err("Invalid buffer handle passed for shrink\n");
1522 return -EINVAL;
1523 }
1524
1525 edata = req->edata;
1526
1527 if (is_tcm(req->owner))
1528 do_unmap(req);
1529
 1530 inc_ocmem_stat(zone_of(req), NR_SHRINKS);
 1531
 1532 if (size == 0) {
1533 pr_info("req %p being shrunk to zero\n", req);
1534 rc = do_free(req);
1535 if (rc < 0)
1536 return -EINVAL;
1537 } else {
1538 rc = do_shrink(req, size);
1539 if (rc < 0)
1540 return -EINVAL;
1541 }
1542
1543 edata->pending--;
1544 if (edata->pending == 0) {
1545 pr_debug("All regions evicted");
1546 complete(&edata->completion);
1547 }
1548
1549 return 0;
1550}
1551
1552int process_xfer(int id, struct ocmem_handle *handle,
1553 struct ocmem_map_list *list, int direction)
1554{
1555 int rc = 0;
1556
1557 if (is_tcm(id)) {
1558 WARN(1, "Mapping operation is invalid for client\n");
1559 return -EINVAL;
1560 }
1561
1562 if (direction == TO_DDR)
1563 rc = process_xfer_out(id, handle, list);
1564 else if (direction == TO_OCMEM)
1565 rc = process_xfer_in(id, handle, list);
1566 return rc;
1567}
1568
1569int ocmem_eviction_thread(struct work_struct *work)
1570{
1571 return 0;
1572}
1573
1574int process_evict(int id)
1575{
1576 struct ocmem_eviction_data *edata = NULL;
1577 int prio = ocmem_client_table[id].priority;
1578 struct rb_node *rb_node = NULL;
1579 struct ocmem_req *req = NULL;
1580 struct ocmem_buf buffer;
1581 int j = 0;
1582
1583 edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC);
1584
1585 INIT_LIST_HEAD(&edata->victim_list);
1586 INIT_LIST_HEAD(&edata->req_list);
1587 edata->prio = prio;
1588 edata->pending = 0;
1589 edata->passive = 1;
1590 evictions[id] = edata;
1591
1592 mutex_lock(&sched_mutex);
1593
1594 for (rb_node = rb_first(&sched_tree); rb_node;
1595 rb_node = rb_next(rb_node)) {
1596 struct ocmem_region *tmp_region = NULL;
1597 tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
1598 if (tmp_region->max_prio < prio) {
1599 for (j = id - 1; j > NO_PRIO; j--) {
1600 req = find_req_match(j, tmp_region);
1601 if (req) {
1602 pr_info("adding %p to eviction list\n",
1603 tmp_region);
1604 list_add_tail(
1605 &tmp_region->eviction_list,
1606 &edata->victim_list);
1607 list_add_tail(
1608 &req->eviction_list,
1609 &edata->req_list);
1610 edata->pending++;
1611 req->edata = edata;
1612 buffer.addr = req->req_start;
1613 buffer.len = 0x0;
 1614 inc_ocmem_stat(zone_of(req),
 1615 NR_EVICTIONS);
 1616 dispatch_notification(req->owner,
1617 OCMEM_ALLOC_SHRINK, &buffer);
1618 }
1619 }
1620 } else {
1621 pr_info("skipping %p from eviction\n", tmp_region);
1622 }
1623 }
1624 mutex_unlock(&sched_mutex);
1625 pr_debug("Waiting for all regions to be shrunk\n");
1626 if (edata->pending > 0) {
1627 init_completion(&edata->completion);
1628 wait_for_completion(&edata->completion);
1629 }
1630 return 0;
1631}
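/*
 * Eviction walks the whole scheduler tree and, for every region whose
 * highest priority is below the evicting client's, sends the owner an
 * OCMEM_ALLOC_SHRINK notification with a zero length buffer.  Each
 * victim is expected to call process_shrink(); the final shrink drops
 * edata->pending to zero and completes the eviction, after which
 * process_restore() re-queues the shrunk requests.
 */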
1632
 1633static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait)
1634{
1635 int rc = 0;
1636 struct ocmem_buf *buffer = req->buffer;
1637
1638 down_write(&req->rw_sem);
1639
1640 /* Take the scheduler mutex */
1641 mutex_lock(&sched_mutex);
1642 rc = __sched_allocate(req, can_block, can_wait);
1643 mutex_unlock(&sched_mutex);
1644
 1645 if (rc == OP_FAIL) {
 1646 inc_ocmem_stat(zone_of(req), NR_ALLOCATION_FAILS);
 1647 goto err_allocate_fail;
 1648 }
 1649
1650 if (rc == OP_RESCHED) {
1651 buffer->addr = 0x0;
1652 buffer->len = 0x0;
1653 pr_debug("ocmem: Enqueuing req %p\n", req);
1654 sched_enqueue(req);
1655 } else if (rc == OP_PARTIAL) {
1656 buffer->addr = device_address(req->owner, req->req_start);
1657 buffer->len = req->req_sz;
 1658 inc_ocmem_stat(zone_of(req), NR_RANGE_ALLOCATIONS);
 1659 pr_debug("ocmem: Enqueuing req %p\n", req);
1660 sched_enqueue(req);
1661 } else if (rc == OP_COMPLETE) {
1662 buffer->addr = device_address(req->owner, req->req_start);
1663 buffer->len = req->req_sz;
1664 }
1665
1666 up_write(&req->rw_sem);
1667 return 0;
1668err_allocate_fail:
1669 up_write(&req->rw_sem);
1670 return -EINVAL;
1671}
1672
 1673int process_restore(int id)
1674{
1675 struct ocmem_req *req = NULL;
1676 struct ocmem_req *next = NULL;
1677 struct ocmem_eviction_data *edata = evictions[id];
1678
1679 if (!edata)
1680 return 0;
1681
1682 list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
1683 {
1684 if (req) {
1685 pr_debug("ocmem: Fetched evicted request %p\n",
1686 req);
1687 list_del(&req->sched_list);
1688 req->op = SCHED_ALLOCATE;
1689 sched_enqueue(req);
 1690 inc_ocmem_stat(zone_of(req), NR_RESTORES);
 1691 }
1692 }
1693 kfree(edata);
1694 evictions[id] = NULL;
1695 pr_debug("Restore all evicted regions\n");
1696 ocmem_schedule_pending();
1697 return 0;
1698}
 1699
1700int process_allocate(int id, struct ocmem_handle *handle,
1701 unsigned long min, unsigned long max,
1702 unsigned long step, bool can_block, bool can_wait)
1703{
1704
1705 struct ocmem_req *req = NULL;
1706 struct ocmem_buf *buffer = NULL;
1707 int rc = 0;
 1708 unsigned long offset = 0;
 1709
1710 /* sanity checks */
1711 if (is_blocked(id)) {
1712 pr_err("Client %d cannot request allocation\n", id);
1713 return -EINVAL;
1714 }
1715
1716 if (handle->req != NULL) {
1717 pr_err("Invalid handle passed in\n");
1718 return -EINVAL;
1719 }
1720
1721 buffer = handle_to_buffer(handle);
1722 BUG_ON(buffer == NULL);
1723
1724 /* prepare a request structure to represent this transaction */
1725 req = ocmem_create_req();
1726 if (!req)
1727 return -ENOMEM;
1728
1729 req->owner = id;
1730 req->req_min = min;
1731 req->req_max = max;
1732 req->req_step = step;
1733 req->prio = ocmem_client_table[id].priority;
1734 req->op = SCHED_ALLOCATE;
1735 req->buffer = buffer;
1736
 1737 inc_ocmem_stat(zone_of(req), NR_REQUESTS);
 1738
 1739 rc = do_allocate(req, can_block, can_wait);
1740
1741 if (rc < 0)
1742 goto do_allocate_error;
1743
 1744 inc_ocmem_stat(zone_of(req), NR_SYNC_ALLOCATIONS);
 1745
 1746 handle->req = req;
1747
1748 if (is_tcm(id)) {
1749 rc = process_map(req, req->req_start, req->req_end);
1750 if (rc < 0)
1751 goto map_error;
1752 }
1753
 1754 if (req->req_sz != 0) {
1755
1756 offset = phys_to_offset(req->req_start);
1757
1758 rc = ocmem_memory_on(req->owner, offset, req->req_sz);
1759
1760 if (rc < 0) {
1761 pr_err("Failed to switch ON memory macros\n");
1762 goto power_ctl_error;
1763 }
1764 }
1765
 1766 return 0;
 1767
 1768power_ctl_error:
 1769map_error:
1770 handle->req = NULL;
1771 do_free(req);
1772do_allocate_error:
1773 ocmem_destroy_req(req);
1774 return -EINVAL;
1775}
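/*
 * Typical client lifecycle as implemented above (sketch): allocate a
 * request, map it if the client is tightly coupled, switch the backing
 * memory macros on, use the buffer, and tear it down in the reverse
 * order through process_free().  Grow, shrink and eviction adjust an
 * existing allocation in between, driven by the scheduler work queue
 * and client notifications.
 */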
1776
1777int process_delayed_allocate(struct ocmem_req *req)
1778{
1779
1780 struct ocmem_handle *handle = NULL;
1781 int rc = 0;
1782 int id = req->owner;
 1783 unsigned long offset = 0;
 1784
1785 handle = req_to_handle(req);
1786 BUG_ON(handle == NULL);
1787
1788 rc = do_allocate(req, true, false);
1789
1790 if (rc < 0)
1791 goto do_allocate_error;
1792
 1793 inc_ocmem_stat(zone_of(req), NR_ASYNC_ALLOCATIONS);
 1794
 1795 if (is_tcm(id)) {
1796 rc = process_map(req, req->req_start, req->req_end);
1797 if (rc < 0)
1798 goto map_error;
1799 }
1800
 1801 if (req->req_sz != 0) {
1802
1803 offset = phys_to_offset(req->req_start);
1804
1805 rc = ocmem_memory_on(req->owner, offset, req->req_sz);
1806
1807 if (rc < 0) {
1808 pr_err("Failed to switch ON memory macros\n");
1809 goto power_ctl_error;
1810 }
1811 }
1812
 1813 /* Notify the client about the buffer growth */
1814 rc = dispatch_notification(id, OCMEM_ALLOC_GROW, req->buffer);
1815 if (rc < 0) {
1816 pr_err("No notifier callback to cater for req %p event: %d\n",
1817 req, OCMEM_ALLOC_GROW);
1818 BUG();
1819 }
1820 return 0;
1821
 1822power_ctl_error:
 1823map_error:
1824 handle->req = NULL;
1825 do_free(req);
1826do_allocate_error:
1827 ocmem_destroy_req(req);
1828 return -EINVAL;
1829}
1830
1831static void ocmem_sched_wk_func(struct work_struct *work)
1832{
1833
1834 struct ocmem_buf *buffer = NULL;
1835 struct ocmem_handle *handle = NULL;
1836 struct ocmem_req *req = ocmem_fetch_req();
1837
1838 if (!req) {
1839 pr_debug("No Pending Requests found\n");
1840 return;
1841 }
1842
1843 pr_debug("ocmem: sched_wk pending req %p\n", req);
1844 handle = req_to_handle(req);
1845 buffer = handle_to_buffer(handle);
1846 BUG_ON(req->op == SCHED_NOP);
1847
1848 switch (req->op) {
1849 case SCHED_GROW:
1850 process_grow(req);
1851 break;
1852 case SCHED_ALLOCATE:
1853 process_delayed_allocate(req);
1854 break;
1855 default:
1856 pr_err("ocmem: Unknown operation encountered\n");
1857 break;
1858 }
1859 return;
1860}
1861
 1862static int ocmem_allocations_show(struct seq_file *f, void *dummy)
1863{
1864 struct rb_node *rb_node = NULL;
1865 struct ocmem_req *req = NULL;
1866 unsigned j;
1867 mutex_lock(&sched_mutex);
1868 for (rb_node = rb_first(&sched_tree); rb_node;
1869 rb_node = rb_next(rb_node)) {
1870 struct ocmem_region *tmp_region = NULL;
1871 tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
1872 for (j = MAX_OCMEM_PRIO - 1; j > NO_PRIO; j--) {
1873 req = find_req_match(j, tmp_region);
1874 if (req) {
1875 seq_printf(f,
1876 "owner: %s 0x%lx -- 0x%lx size 0x%lx [state: %2lx]\n",
1877 get_name(req->owner),
1878 req->req_start, req->req_end,
1879 req->req_sz, req->state);
1880 }
1881 }
1882 }
1883 mutex_unlock(&sched_mutex);
1884 return 0;
1885}
1886
1887static int ocmem_allocations_open(struct inode *inode, struct file *file)
1888{
1889 return single_open(file, ocmem_allocations_show, inode->i_private);
1890}
1891
1892static const struct file_operations allocations_show_fops = {
1893 .open = ocmem_allocations_open,
1894 .read = seq_read,
1895 .llseek = seq_lseek,
1896 .release = seq_release,
1897};
1898
1899int ocmem_sched_init(struct platform_device *pdev)
 1900{
 1901 int i = 0;
 1902 struct ocmem_plat_data *pdata = NULL;
1903 struct device *dev = &pdev->dev;
1904
 1905 sched_tree = RB_ROOT;
 1906 pdata = platform_get_drvdata(pdev);
 1907 mutex_init(&sched_mutex);
1908 mutex_init(&sched_queue_mutex);
1909 for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++)
1910 INIT_LIST_HEAD(&sched_queue[i]);
1911
 1912 mutex_init(&rdm_mutex);
1913 INIT_LIST_HEAD(&rdm_queue);
1914 ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0);
1915 if (!ocmem_rdm_wq)
1916 return -ENOMEM;
1917 ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0);
1918 if (!ocmem_eviction_wq)
1919 return -ENOMEM;
 1920
1921 if (!debugfs_create_file("allocations", S_IRUGO, pdata->debug_node,
1922 NULL, &allocations_show_fops)) {
1923 dev_err(dev, "Unable to create debugfs node for scheduler\n");
1924 return -EBUSY;
1925 }
 1926 return 0;
1927}