/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <mach/ocmem_priv.h>

enum request_states {
	R_FREE = 0x0,	/* request is not allocated */
	R_PENDING,	/* request has a pending operation */
	R_ALLOCATED,	/* request has been allocated */
	R_MUST_GROW,	/* request must grow as a part of pending operation */
	R_MUST_SHRINK,	/* request must shrink as a part of pending operation */
	R_MUST_MAP,	/* request must be mapped before being used */
	R_MUST_UNMAP,	/* request must be unmapped when not being used */
	R_MAPPED,	/* request is mapped and actively used by client */
	R_UNMAPPED,	/* request is not mapped, so it's not in active use */
	R_EVICTED,	/* request is evicted and must be restored */
};

#define SET_STATE(x, val) (set_bit((val), &(x)->state))
#define CLEAR_STATE(x, val) (clear_bit((val), &(x)->state))
#define TEST_STATE(x, val) (test_bit((val), &(x)->state))

enum op_res {
	OP_COMPLETE = 0x0,
	OP_RESCHED,
	OP_PARTIAL,
	OP_EVICT,
	OP_FAIL = ~0x0,
};

/* Represents various client priorities */
/* Note: More than one client can share a priority level */
enum client_prio {
	MIN_PRIO = 0x0,
	NO_PRIO = MIN_PRIO,
	PRIO_SENSORS = 0x1,
	PRIO_OTHER_OS = 0x1,
	PRIO_LP_AUDIO = 0x1,
	PRIO_HP_AUDIO = 0x2,
	PRIO_VOICE = 0x3,
	PRIO_GFX_GROWTH = 0x4,
	PRIO_VIDEO = 0x5,
	PRIO_GFX = 0x6,
	PRIO_OCMEM = 0x7,
	MAX_OCMEM_PRIO = PRIO_OCMEM + 1,
};

static void __iomem *ocmem_vaddr;
static struct list_head sched_queue[MAX_OCMEM_PRIO];
static struct mutex sched_queue_mutex;

/* The duration in msecs before a pending operation is scheduled
 * This allows an idle window between use case boundaries where various
 * hardware state changes can occur. The value will be tweaked on actual
 * hardware.
*/
/* Delay in ms for switching to low power mode for OCMEM */
#define SCHED_DELAY 5000

static struct list_head rdm_queue;
static struct mutex rdm_mutex;
static struct workqueue_struct *ocmem_rdm_wq;
static struct workqueue_struct *ocmem_eviction_wq;

static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX];

struct ocmem_rdm_work {
	int id;
	struct ocmem_map_list *list;
	struct ocmem_handle *handle;
	int direction;
	struct work_struct work;
};

/* OCMEM Operational modes */
enum ocmem_client_modes {
	OCMEM_PERFORMANCE = 1,
	OCMEM_PASSIVE,
	OCMEM_LOW_POWER,
	OCMEM_MODE_MAX = OCMEM_LOW_POWER
};

/* OCMEM Addressing modes */
enum ocmem_interconnects {
	OCMEM_BLOCKED = 0,
	OCMEM_PORT = 1,
	OCMEM_OCMEMNOC = 2,
	OCMEM_SYSNOC = 3,
};

/**
 * Primary OCMEM Arbitration Table
 **/
struct ocmem_table {
	int client_id;
	int priority;
	int mode;
	int hw_interconnect;
} ocmem_client_table[OCMEM_CLIENT_MAX] = {
	{OCMEM_GRAPHICS, PRIO_GFX, OCMEM_PERFORMANCE, OCMEM_PORT},
	{OCMEM_VIDEO, PRIO_VIDEO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC},
	{OCMEM_CAMERA, NO_PRIO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC},
	{OCMEM_HP_AUDIO, PRIO_HP_AUDIO, OCMEM_PASSIVE, OCMEM_BLOCKED},
	{OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED},
	{OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, OCMEM_SYSNOC},
	{OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
	{OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
};

static struct rb_root sched_tree;
static struct mutex sched_mutex;
static struct mutex allocation_mutex;

/* A region represents a continuous interval in OCMEM address space */
struct ocmem_region {
	/* Chain in Interval Tree */
	struct rb_node region_rb;
	/* Hash map of requests */
	struct idr region_idr;
	/* Chain in eviction list */
	struct list_head eviction_list;
	unsigned long r_start;
	unsigned long r_end;
	unsigned long r_sz;
	/* Highest priority of all requests served by this region */
	int max_prio;
};

/* Is OCMEM tightly coupled to the client? */
static inline int is_tcm(int id)
{
	if (ocmem_client_table[id].hw_interconnect == OCMEM_PORT ||
		ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC)
		return 1;
	else
		return 0;
}

static inline int is_iface_access(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC ? 1 : 0;
}

static inline int is_remapped_access(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_SYSNOC ? 1 : 0;
}

static inline int is_blocked(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_BLOCKED ? 1 : 0;
}

inline struct ocmem_buf *handle_to_buffer(struct ocmem_handle *handle)
{
	if (handle)
		return &handle->buffer;
	else
		return NULL;
}

inline struct ocmem_handle *buffer_to_handle(struct ocmem_buf *buffer)
{
	if (buffer)
		return container_of(buffer, struct ocmem_handle, buffer);
	else
		return NULL;
}

inline struct ocmem_req *handle_to_req(struct ocmem_handle *handle)
{
	if (handle)
		return handle->req;
	else
		return NULL;
}

inline struct ocmem_handle *req_to_handle(struct ocmem_req *req)
{
	if (req && req->buffer)
		return container_of(req->buffer, struct ocmem_handle, buffer);
	else
		return NULL;
}

/* Simple wrappers which will have debug features added later */
inline int ocmem_read(void *at)
{
	return readl_relaxed(at);
}

inline int ocmem_write(unsigned long val, void *at)
{
	writel_relaxed(val, at);
	return 0;
}

inline int get_mode(int id)
{
	if (!check_id(id))
		return MODE_NOT_SET;
	else
		return ocmem_client_table[id].mode == OCMEM_PERFORMANCE ?
							WIDE_MODE : THIN_MODE;
}

/* Returns the address that can be used by a device core to access OCMEM */
static unsigned long device_address(int id, unsigned long addr)
{
	int hw_interconnect = ocmem_client_table[id].hw_interconnect;
	unsigned long ret_addr = 0x0;

	switch (hw_interconnect) {
	case OCMEM_PORT:
	case OCMEM_OCMEMNOC:
		ret_addr = phys_to_offset(addr);
		break;
	case OCMEM_SYSNOC:
		ret_addr = addr;
		break;
	case OCMEM_BLOCKED:
		ret_addr = 0x0;
		break;
	}
	return ret_addr;
}

/* Returns the address as viewed by the core */
static unsigned long core_address(int id, unsigned long addr)
{
	int hw_interconnect = ocmem_client_table[id].hw_interconnect;
	unsigned long ret_addr = 0x0;

	switch (hw_interconnect) {
	case OCMEM_PORT:
	case OCMEM_OCMEMNOC:
		ret_addr = offset_to_phys(addr);
		break;
	case OCMEM_SYSNOC:
		ret_addr = addr;
		break;
	case OCMEM_BLOCKED:
		ret_addr = 0x0;
		break;
	}
	return ret_addr;
}

static inline struct ocmem_zone *zone_of(struct ocmem_req *req)
{
	int owner;
	if (!req)
		return NULL;
	owner = req->owner;
	return get_zone(owner);
}

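/*
 * Interval-tree helpers: regions live in an rb-tree (sched_tree) keyed by
 * their start address so that point and overlap lookups stay O(log n).
 * The tree is manipulated by the __sched_* operations under sched_mutex.
 */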
static int insert_region(struct ocmem_region *region)
{

	struct rb_root *root = &sched_tree;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ocmem_region *tmp = NULL;
	unsigned long addr = region->r_start;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct ocmem_region, region_rb);

		if (tmp->r_end > addr) {
			if (tmp->r_start <= addr)
				break;
			p = &(*p)->rb_left;
		} else if (tmp->r_end <= addr)
			p = &(*p)->rb_right;
	}
	rb_link_node(&region->region_rb, parent, p);
	rb_insert_color(&region->region_rb, root);
	return 0;
}

static int remove_region(struct ocmem_region *region)
{
	struct rb_root *root = &sched_tree;
	rb_erase(&region->region_rb, root);
	return 0;
}

static struct ocmem_req *ocmem_create_req(void)
{
	struct ocmem_req *p = NULL;

	p = kzalloc(sizeof(struct ocmem_req), GFP_KERNEL);
	if (!p)
		return NULL;

	INIT_LIST_HEAD(&p->zone_list);
	INIT_LIST_HEAD(&p->sched_list);
	init_rwsem(&p->rw_sem);
	SET_STATE(p, R_FREE);
	pr_debug("request %p created\n", p);
	return p;
}

static int ocmem_destroy_req(struct ocmem_req *req)
{
	kfree(req);
	return 0;
}

static struct ocmem_region *create_region(void)
{
	struct ocmem_region *p = NULL;

	p = kzalloc(sizeof(struct ocmem_region), GFP_KERNEL);
	if (!p)
		return NULL;
	idr_init(&p->region_idr);
	INIT_LIST_HEAD(&p->eviction_list);
	p->r_start = p->r_end = p->r_sz = 0x0;
	p->max_prio = NO_PRIO;
	return p;
}

static int destroy_region(struct ocmem_region *region)
{
	kfree(region);
	return 0;
}

static int attach_req(struct ocmem_region *region, struct ocmem_req *req)
{
	int ret, id;

	while (1) {
		if (idr_pre_get(&region->region_idr, GFP_KERNEL) == 0)
			return -ENOMEM;

		ret = idr_get_new_above(&region->region_idr, req, 1, &id);

		if (ret != -EAGAIN)
			break;
	}

	if (!ret) {
		req->req_id = id;
		pr_debug("ocmem: request %p(id:%d) attached to region %p\n",
				req, id, region);
		return 0;
	}
	return -EINVAL;
}

static int detach_req(struct ocmem_region *region, struct ocmem_req *req)
{
	idr_remove(&region->region_idr, req->req_id);
	return 0;
}

static int populate_region(struct ocmem_region *region, struct ocmem_req *req)
{
	region->r_start = req->req_start;
	region->r_end = req->req_end;
	region->r_sz = req->req_end - req->req_start + 1;
	return 0;
}

static int region_req_count(int id, void *ptr, void *data)
{
	int *count = data;
	*count = *count + 1;
	return 0;
}

static int req_count(struct ocmem_region *region)
{
	int count = 0;
	idr_for_each(&region->region_idr, region_req_count, &count);
	return count;
}

static int compute_max_prio(int id, void *ptr, void *data)
{
	int *max = data;
	struct ocmem_req *req = ptr;

	if (req->prio > *max)
		*max = req->prio;
	return 0;
}

static int update_region_prio(struct ocmem_region *region)
{
	/* Start from NO_PRIO so the computed maximum (and the debug print
	 * below) is well defined even for an empty region */
	int max_prio = NO_PRIO;
	if (req_count(region) != 0) {
		idr_for_each(&region->region_idr, compute_max_prio, &max_prio);
		region->max_prio = max_prio;
	} else {
		region->max_prio = NO_PRIO;
	}
	pr_debug("ocmem: Updating prio of region %p as %d\n",
			region, max_prio);

	return 0;
}

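/*
 * Region lookups: find_region() returns a region whose end lies above
 * @addr (the containing region when one exists); find_region_intersection()
 * narrows that to regions overlapping [start, end) and find_region_match()
 * to a region that spans exactly [start, end].
 */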
static struct ocmem_region *find_region(unsigned long addr)
{
	struct ocmem_region *region = NULL;
	struct rb_node *rb_node = NULL;

	rb_node = sched_tree.rb_node;

	while (rb_node) {
		struct ocmem_region *tmp_region = NULL;
		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);

		if (tmp_region->r_end > addr) {
			region = tmp_region;
			if (tmp_region->r_start <= addr)
				break;
			rb_node = rb_node->rb_left;
		} else {
			rb_node = rb_node->rb_right;
		}
	}
	return region;
}

static struct ocmem_region *find_region_intersection(unsigned long start,
					unsigned long end)
{

	struct ocmem_region *region = NULL;
	region = find_region(start);
	if (region && end <= region->r_start)
		region = NULL;
	return region;
}

static struct ocmem_region *find_region_match(unsigned long start,
					unsigned long end)
{

	struct ocmem_region *region = NULL;
	region = find_region(start);
	if (region && start == region->r_start && end == region->r_end)
		return region;
	return NULL;
}

static struct ocmem_req *find_req_match(int owner, struct ocmem_region *region)
{
	struct ocmem_req *req = NULL;

	if (!region)
		return NULL;

	req = idr_find(&region->region_idr, owner);

	return req;
}

/* Must be called with req->sem held */
static inline int is_mapped(struct ocmem_req *req)
{
	return TEST_STATE(req, R_MAPPED);
}

/* Must be called with sched_mutex held */
static int __sched_unmap(struct ocmem_req *req)
{
	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	if (!is_mapped(req)) {
		pr_err("Request is not currently mapped");
		goto invalid_op_error;
	}

	/* Update the request state */
	CLEAR_STATE(req, R_MAPPED);
	SET_STATE(req, R_MUST_MAP);

	return OP_COMPLETE;

invalid_op_error:
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_map(struct ocmem_req *req)
{
	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	/* Update the request state */
	CLEAR_STATE(req, R_MUST_MAP);
	SET_STATE(req, R_MAPPED);

	return OP_COMPLETE;

invalid_op_error:
	return OP_FAIL;
}

static int do_map(struct ocmem_req *req)
{
	int rc = 0;

	down_write(&req->rw_sem);

	mutex_lock(&sched_mutex);
	rc = __sched_map(req);
	mutex_unlock(&sched_mutex);

	up_write(&req->rw_sem);

	if (rc == OP_FAIL)
		return -EINVAL;

	return 0;
}

static int do_unmap(struct ocmem_req *req)
{
	int rc = 0;

	down_write(&req->rw_sem);

	mutex_lock(&sched_mutex);
	rc = __sched_unmap(req);
	mutex_unlock(&sched_mutex);

	up_write(&req->rw_sem);

	if (rc == OP_FAIL)
		return -EINVAL;

	return 0;
}

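/*
 * process_map(): bring a secured allocation online. Enables the core clock
 * (plus the interface/branch clocks the client's interconnect needs), locks
 * the OCMEM range for the owner and marks the request mapped. Each failure
 * label unwinds the steps taken before it.
 */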
static int process_map(struct ocmem_req *req, unsigned long start,
				unsigned long end)
{
	int rc = 0;

	rc = ocmem_enable_core_clock();

	if (rc < 0)
		goto core_clock_fail;


	if (is_iface_access(req->owner)) {
		rc = ocmem_enable_iface_clock();

		if (rc < 0)
			goto iface_clock_fail;
	}

	if (is_remapped_access(req->owner)) {
		rc = ocmem_enable_br_clock();

		if (rc < 0)
			goto br_clock_fail;
	}

	rc = ocmem_lock(req->owner, phys_to_offset(req->req_start), req->req_sz,
							get_mode(req->owner));

	if (rc < 0) {
		pr_err("ocmem: Failed to secure request %p for %d\n", req,
				req->owner);
		goto lock_failed;
	}

	rc = do_map(req);

	if (rc < 0) {
		pr_err("ocmem: Failed to map request %p for %d\n",
				req, req->owner);
		goto process_map_fail;

	}
	pr_debug("ocmem: Mapped request %p\n", req);
	return 0;

process_map_fail:
	ocmem_unlock(req->owner, phys_to_offset(req->req_start), req->req_sz);
lock_failed:
	if (is_remapped_access(req->owner))
		ocmem_disable_br_clock();
br_clock_fail:
	if (is_iface_access(req->owner))
		ocmem_disable_iface_clock();
iface_clock_fail:
	ocmem_disable_core_clock();
core_clock_fail:
	pr_err("ocmem: Failed to map ocmem request\n");
	return rc;
}

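/*
 * process_unmap(): the inverse of process_map(). Unmaps the request,
 * releases the hardware lock on the range and drops the clock votes taken
 * when the request was mapped.
 */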
static int process_unmap(struct ocmem_req *req, unsigned long start,
				unsigned long end)
{
	int rc = 0;

	rc = do_unmap(req);

	if (rc < 0)
		goto process_unmap_fail;

	rc = ocmem_unlock(req->owner, phys_to_offset(req->req_start),
						req->req_sz);

	if (rc < 0) {
		pr_err("ocmem: Failed to un-secure request %p for %d\n", req,
				req->owner);
		goto unlock_failed;
	}

	if (is_remapped_access(req->owner))
		ocmem_disable_br_clock();
	if (is_iface_access(req->owner))
		ocmem_disable_iface_clock();
	ocmem_disable_core_clock();
	pr_debug("ocmem: Unmapped request %p\n", req);
	return 0;

unlock_failed:
process_unmap_fail:
	pr_err("ocmem: Failed to unmap ocmem request\n");
	return rc;
}

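/*
 * Must be called with sched_mutex held.
 * Grow an allocated request towards req->req_max: the current backing is
 * freed and re-allocated at the larger size. If a higher-priority region is
 * in the way the growth is retried in smaller steps or re-queued.
 */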
static int __sched_grow(struct ocmem_req *req, bool can_block)
{
	unsigned long min = req->req_min;
	unsigned long max = req->req_max;
	unsigned long step = req->req_step;
	int owner = req->owner;
	unsigned long curr_sz = 0;
	unsigned long growth_sz = 0;
	unsigned long curr_start = 0;
	enum client_prio prio = req->prio;
	unsigned long alloc_addr = 0x0;
	bool retry;
	struct ocmem_region *spanned_r = NULL;
	struct ocmem_region *overlap_r = NULL;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	struct ocmem_zone *zone = get_zone(owner);
	struct ocmem_region *region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	curr_sz = matched_req->req_sz;
	curr_start = matched_req->req_start;
	growth_sz = matched_req->req_max - matched_req->req_sz;

	pr_debug("Attempting to grow req %p from %lx to %lx\n",
			req, matched_req->req_sz, matched_req->req_max);

	retry = false;

	pr_debug("ocmem: GROW: growth size %lx\n", growth_sz);

retry_next_step:

	spanned_r = NULL;
	overlap_r = NULL;

	spanned_r = find_region(zone->z_head);
	overlap_r = find_region_intersection(zone->z_head,
				zone->z_head + growth_sz);

	if (overlap_r == NULL) {
		/* no conflicting regions, schedule this region */
		zone->z_ops->free(zone, curr_start, curr_sz);
		alloc_addr = zone->z_ops->allocate(zone, curr_sz + growth_sz);

		if (alloc_addr < 0) {
			pr_err("ocmem: zone allocation operation failed\n");
			goto internal_error;
		}

		curr_sz += growth_sz;
		/* Detach the region from the interval tree */
		/* This is to guarantee that any change in size
		 * causes the tree to be rebalanced if required */

		detach_req(matched_region, req);
		if (req_count(matched_region) == 0) {
			remove_region(matched_region);
			region = matched_region;
		} else {
			region = create_region();
			if (!region) {
				pr_err("ocmem: Unable to create region\n");
				goto region_error;
			}
		}

		/* update the request */
		req->req_start = alloc_addr;
		/* increment the size to reflect new length */
		req->req_sz = curr_sz;
		req->req_end = alloc_addr + req->req_sz - 1;

		/* update request state */
		CLEAR_STATE(req, R_MUST_GROW);
		SET_STATE(req, R_ALLOCATED);
		SET_STATE(req, R_MUST_MAP);
		req->op = SCHED_MAP;

		/* update the region with new req */
		attach_req(region, req);
		populate_region(region, req);
		update_region_prio(region);

		/* update the tree with new region */
		if (insert_region(region)) {
			pr_err("ocmem: Failed to insert the region\n");
			goto region_error;
		}

		if (retry) {
			SET_STATE(req, R_MUST_GROW);
			SET_STATE(req, R_PENDING);
			req->op = SCHED_GROW;
			return OP_PARTIAL;
		}
	} else if (spanned_r != NULL && overlap_r != NULL) {
		/* resolve conflicting regions based on priority */
		if (overlap_r->max_prio < prio) {
			/* Growth cannot be triggered unless a previous
			 * client of lower priority was evicted */
			pr_err("ocmem: Invalid growth scheduled\n");
			/* This is serious enough to fail */
			BUG();
			return OP_FAIL;
		} else if (overlap_r->max_prio > prio) {
			if (min == max) {
				/* Cannot grow at this time, try later */
				SET_STATE(req, R_PENDING);
				SET_STATE(req, R_MUST_GROW);
				return OP_RESCHED;
			} else {
				/* Try to grow in steps */
				growth_sz -= step;
				/* We are OOM at this point so need to retry */
				if (growth_sz <= curr_sz) {
					SET_STATE(req, R_PENDING);
					SET_STATE(req, R_MUST_GROW);
					return OP_RESCHED;
				}
				retry = true;
				pr_debug("ocmem: Attempting with reduced size %lx\n",
						growth_sz);
				goto retry_next_step;
			}
		} else {
			pr_err("ocmem: grow: New Region %p Existing %p\n",
					matched_region, overlap_r);
			pr_err("ocmem: Undetermined behavior\n");
			/* This is serious enough to fail */
			BUG();
		}
	} else if (spanned_r == NULL && overlap_r != NULL) {
		goto err_not_supported;
	}

	return OP_COMPLETE;

err_not_supported:
	pr_err("ocmem: Scheduled unsupported operation\n");
	return OP_FAIL;
region_error:
	zone->z_ops->free(zone, alloc_addr, curr_sz);
	detach_req(region, req);
	update_region_prio(region);
	/* req is going to be destroyed by the caller anyways */
internal_error:
	destroy_region(region);
invalid_op_error:
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_free(struct ocmem_req *req)
{
	int owner = req->owner;
	int ret = 0;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	struct ocmem_zone *zone = get_zone(owner);

	BUG_ON(!zone);

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req)
		goto invalid_op_error;
	if (matched_req != req)
		goto invalid_op_error;

	ret = zone->z_ops->free(zone,
		matched_req->req_start, matched_req->req_sz);

	if (ret < 0)
		goto err_op_fail;

	detach_req(matched_region, matched_req);
	update_region_prio(matched_region);
	if (req_count(matched_region) == 0) {
		remove_region(matched_region);
		destroy_region(matched_region);
	}

	/* Update the request */
	req->req_start = 0x0;
	req->req_sz = 0x0;
	req->req_end = 0x0;
	SET_STATE(req, R_FREE);
	return OP_COMPLETE;
invalid_op_error:
	pr_err("ocmem: free: Failed to find matching region\n");
err_op_fail:
	pr_err("ocmem: free: Failed\n");
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz)
{
	int owner = req->owner;
	int ret = 0;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;
	struct ocmem_region *region = NULL;
	unsigned long alloc_addr = 0x0;

	struct ocmem_zone *zone = get_zone(owner);

	BUG_ON(!zone);

	/* The shrink should not be called for zero size */
	BUG_ON(new_sz == 0);

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req)
		goto invalid_op_error;
	if (matched_req != req)
		goto invalid_op_error;

	ret = zone->z_ops->free(zone,
		matched_req->req_start, matched_req->req_sz);

	if (ret < 0) {
		pr_err("Zone Allocation operation failed\n");
		goto internal_error;
	}

	alloc_addr = zone->z_ops->allocate(zone, new_sz);

	if (alloc_addr < 0) {
		pr_err("Zone Allocation operation failed\n");
		goto internal_error;
	}

	/* Detach the region from the interval tree */
	/* This is to guarantee that the change in size
	 * causes the tree to be rebalanced if required */

	detach_req(matched_region, req);
	if (req_count(matched_region) == 0) {
		remove_region(matched_region);
		region = matched_region;
	} else {
		region = create_region();
		if (!region) {
			pr_err("ocmem: Unable to create region\n");
			goto internal_error;
		}
	}
	/* update the request */
	req->req_start = alloc_addr;
	req->req_sz = new_sz;
	req->req_end = alloc_addr + req->req_sz;

	if (req_count(region) == 0) {
		remove_region(matched_region);
		destroy_region(matched_region);
	}

	/* update request state */
	SET_STATE(req, R_MUST_GROW);
	SET_STATE(req, R_MUST_MAP);
	req->op = SCHED_MAP;

	/* attach the request to the region */
	attach_req(region, req);
	populate_region(region, req);
	update_region_prio(region);

	/* update the tree with new region */
	if (insert_region(region)) {
		pr_err("ocmem: Failed to insert the region\n");
		zone->z_ops->free(zone, alloc_addr, new_sz);
		detach_req(region, req);
		update_region_prio(region);
		/* req will be destroyed by the caller */
		goto region_error;
	}
	return OP_COMPLETE;

region_error:
	destroy_region(region);
internal_error:
	pr_err("ocmem: shrink: Failed\n");
	return OP_FAIL;
invalid_op_error:
	pr_err("ocmem: shrink: Failed to find matching region\n");
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_allocate(struct ocmem_req *req, bool can_block,
				bool can_wait)
{
	unsigned long min = req->req_min;
	unsigned long max = req->req_max;
	unsigned long step = req->req_step;
	int owner = req->owner;
	unsigned long sz = max;
	enum client_prio prio = req->prio;
	unsigned long alloc_addr = 0x0;
	bool retry;

	struct ocmem_region *spanned_r = NULL;
	struct ocmem_region *overlap_r = NULL;

	struct ocmem_zone *zone = get_zone(owner);
	struct ocmem_region *region = NULL;

	BUG_ON(!zone);

	if (min > (zone->z_end - zone->z_start)) {
		pr_err("ocmem: requested minimum size exceeds quota\n");
		goto invalid_op_error;
	}

	if (max > (zone->z_end - zone->z_start)) {
		pr_err("ocmem: requested maximum size exceeds quota\n");
		goto invalid_op_error;
	}

	if (min > zone->z_free) {
		pr_err("ocmem: out of memory for zone %d\n", owner);
		goto invalid_op_error;
	}

	region = create_region();

	if (!region) {
		pr_err("ocmem: Unable to create region\n");
		goto invalid_op_error;
	}

	retry = false;

	pr_debug("ocmem: do_allocate: %s request size %lx\n",
						get_name(owner), sz);

retry_next_step:

	spanned_r = NULL;
	overlap_r = NULL;

	spanned_r = find_region(zone->z_head);
	overlap_r = find_region_intersection(zone->z_head, zone->z_head + sz);

	if (overlap_r == NULL) {
		/* no conflicting regions, schedule this region */
		alloc_addr = zone->z_ops->allocate(zone, sz);

		if (alloc_addr < 0) {
			pr_err("Zone Allocation operation failed\n");
			goto internal_error;
		}

		/* update the request */
		req->req_start = alloc_addr;
		req->req_end = alloc_addr + sz - 1;
		req->req_sz = sz;
		req->zone = zone;

		/* update request state */
		CLEAR_STATE(req, R_FREE);
		CLEAR_STATE(req, R_PENDING);
		SET_STATE(req, R_ALLOCATED);
		SET_STATE(req, R_MUST_MAP);
		req->op = SCHED_NOP;

		/* attach the request to the region */
		attach_req(region, req);
		populate_region(region, req);
		update_region_prio(region);

		/* update the tree with new region */
		if (insert_region(region)) {
			pr_err("ocmem: Failed to insert the region\n");
			zone->z_ops->free(zone, alloc_addr, sz);
			detach_req(region, req);
			update_region_prio(region);
			/* req will be destroyed by the caller */
			goto internal_error;
		}

		if (retry) {
			SET_STATE(req, R_MUST_GROW);
			SET_STATE(req, R_PENDING);
			req->op = SCHED_GROW;
			return OP_PARTIAL;
		}
	} else if (spanned_r != NULL && overlap_r != NULL) {
		/* resolve conflicting regions based on priority */
		if (overlap_r->max_prio < prio) {
			if (min == max) {
				req->req_start = zone->z_head;
				req->req_end = zone->z_head + sz - 1;
				req->req_sz = 0x0;
				req->edata = NULL;
				goto trigger_eviction;
			} else {
				/* Try to allocate at least >= 'min' immediately */
				sz -= step;
				if (sz < min)
					goto err_out_of_mem;
				retry = true;
				pr_debug("ocmem: Attempting with reduced size %lx\n",
						sz);
				goto retry_next_step;
			}
		} else if (overlap_r->max_prio > prio) {
			if (can_block == true) {
				SET_STATE(req, R_PENDING);
				SET_STATE(req, R_MUST_GROW);
				return OP_RESCHED;
			} else {
				if (min == max) {
					pr_err("Cannot allocate %lx synchronously\n",
							sz);
					goto err_out_of_mem;
				} else {
					sz -= step;
					if (sz < min)
						goto err_out_of_mem;
					retry = true;
					pr_debug("ocmem: Attempting reduced size %lx\n",
							sz);
					goto retry_next_step;
				}
			}
		} else {
			pr_err("ocmem: Undetermined behavior\n");
			pr_err("ocmem: New Region %p Existing %p\n", region,
					overlap_r);
			/* This is serious enough to fail */
			BUG();
		}
	} else if (spanned_r == NULL && overlap_r != NULL)
		goto err_not_supported;

	return OP_COMPLETE;

trigger_eviction:
	pr_debug("Trigger eviction of region %p\n", overlap_r);
	destroy_region(region);
	return OP_EVICT;

err_not_supported:
	pr_err("ocmem: Scheduled unsupported operation\n");
	return OP_FAIL;

err_out_of_mem:
	pr_err("ocmem: Out of memory during allocation\n");
internal_error:
	destroy_region(region);
invalid_op_error:
	return OP_FAIL;
}

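/*
 * Requests that cannot be satisfied immediately are parked on the
 * sched_queue[] lists under sched_queue_mutex and re-attempted later by
 * the delayed scheduler work.
 */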
static int sched_enqueue(struct ocmem_req *priv)
{
	struct ocmem_req *next = NULL;
	mutex_lock(&sched_queue_mutex);
	list_add_tail(&priv->sched_list, &sched_queue[priv->owner]);
	pr_debug("enqueued req %p\n", priv);
	list_for_each_entry(next, &sched_queue[priv->owner], sched_list) {
		pr_debug("pending requests for client %p\n", next);
	}
	mutex_unlock(&sched_queue_mutex);
	return 0;
}

static void sched_dequeue(struct ocmem_req *victim_req)
{
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;
	int id;

	if (!victim_req)
		return;

	id = victim_req->owner;

	mutex_lock(&sched_queue_mutex);

	if (list_empty(&sched_queue[id]))
		goto dequeue_done;

	list_for_each_entry_safe(req, next, &sched_queue[id], sched_list)
	{
		if (req == victim_req) {
			pr_debug("ocmem: Cancelling pending request %p\n",
						req);
			list_del(&req->sched_list);
			goto dequeue_done;
		}
	}

dequeue_done:
	mutex_unlock(&sched_queue_mutex);
	return;
}

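/* Fetch a pending request from the scheduler queues, if any is queued */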
static struct ocmem_req *ocmem_fetch_req(void)
{
	int i;
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;

	mutex_lock(&sched_queue_mutex);
	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
		if (list_empty(&sched_queue[i]))
			continue;
		list_for_each_entry_safe(req, next, &sched_queue[i], sched_list)
		{
			if (req) {
				pr_debug("ocmem: Fetched pending request %p\n",
									req);
				list_del(&req->sched_list);
				break;
			}
		}
	}
	mutex_unlock(&sched_queue_mutex);
	return req;
}


unsigned long process_quota(int id)
{
	struct ocmem_zone *zone = NULL;

	if (is_blocked(id))
		return 0;

	zone = get_zone(id);

	if (zone && zone->z_pool)
		return zone->z_end - zone->z_start;
	else
		return 0;
}

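/*
 * do_grow(): take the request write-lock, run __sched_grow() under
 * sched_mutex and publish the (possibly partial) new address and length
 * to the client-visible buffer.
 */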
static int do_grow(struct ocmem_req *req)
{
	struct ocmem_buf *buffer = NULL;
	bool can_block = true;
	int rc = 0;

	down_write(&req->rw_sem);
	buffer = req->buffer;

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_grow(req, can_block);
	mutex_unlock(&sched_mutex);

	if (rc == OP_FAIL)
		goto err_op_fail;

	if (rc == OP_RESCHED) {
		pr_debug("ocmem: Enqueue this allocation");
		sched_enqueue(req);
	}

	else if (rc == OP_COMPLETE || rc == OP_PARTIAL) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_op_fail:
	up_write(&req->rw_sem);
	return -EINVAL;
}

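/*
 * process_grow(): grow the allocation, map the new range, power on the
 * memory macros backing it and notify the client with OCMEM_ALLOC_GROW.
 */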
static int process_grow(struct ocmem_req *req)
{
	int rc = 0;
	unsigned long offset = 0;

	/* Attempt to grow the region */
	rc = do_grow(req);

	if (rc < 0)
		return -EINVAL;

	rc = process_map(req, req->req_start, req->req_end);
	if (rc < 0)
		return -EINVAL;

	offset = phys_to_offset(req->req_start);

	rc = ocmem_memory_on(req->owner, offset, req->req_sz);

	if (rc < 0) {
		pr_err("Failed to switch ON memory macros\n");
		goto power_ctl_error;
	}

	/* Notify the client about the buffer growth */
	rc = dispatch_notification(req->owner, OCMEM_ALLOC_GROW, req->buffer);
	if (rc < 0) {
		pr_err("No notifier callback to cater for req %p event: %d\n",
				req, OCMEM_ALLOC_GROW);
		BUG();
	}
	return 0;
power_ctl_error:
	return -EINVAL;
}

static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
{

	int rc = 0;
	struct ocmem_buf *buffer = NULL;

	down_write(&req->rw_sem);
	buffer = req->buffer;

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_shrink(req, shrink_size);
	mutex_unlock(&sched_mutex);

	if (rc == OP_FAIL)
		goto err_op_fail;

	else if (rc == OP_COMPLETE) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_op_fail:
	up_write(&req->rw_sem);
	return -EINVAL;
}

static void ocmem_sched_wk_func(struct work_struct *work);
DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func);

static int ocmem_schedule_pending(void)
{

	bool need_sched = false;
	int i = 0;

	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
		if (!list_empty(&sched_queue[i])) {
			need_sched = true;
			break;
		}
	}

	if (need_sched == true) {
		cancel_delayed_work(&ocmem_sched_thread);
		schedule_delayed_work(&ocmem_sched_thread,
					msecs_to_jiffies(SCHED_DELAY));
		pr_debug("ocmem: Scheduled delayed work\n");
	}
	return 0;
}

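/*
 * do_free(): release the backing allocation of an (already unmapped)
 * request under sched_mutex and clear the client-visible buffer fields.
 */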
static int do_free(struct ocmem_req *req)
{
	int rc = 0;
	struct ocmem_buf *buffer = req->buffer;

	down_write(&req->rw_sem);

	if (is_mapped(req)) {
		pr_err("ocmem: Buffer needs to be unmapped before free\n");
		goto err_free_fail;
	}

	pr_debug("ocmem: do_free: client %s req %p\n", get_name(req->owner),
					req);
	/* Grab the sched mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_free(req);
	mutex_unlock(&sched_mutex);

	switch (rc) {

	case OP_COMPLETE:
		buffer->addr = 0x0;
		buffer->len = 0x0;
		break;
	case OP_FAIL:
	default:
		goto err_free_fail;
		break;
	}

	up_write(&req->rw_sem);
	return 0;
err_free_fail:
	up_write(&req->rw_sem);
	pr_err("ocmem: freeing req %p failed\n", req);
	return -EINVAL;
}

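/*
 * process_free(): client-facing free path. Cancels any queued operation
 * for the request, unmaps it if still mapped, powers the memory macros
 * off, releases the allocation and finally kicks any pending requests.
 */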
int process_free(int id, struct ocmem_handle *handle)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	unsigned long offset = 0;
	int rc = 0;

	if (is_blocked(id)) {
		pr_err("Client %d cannot request free\n", id);
		return -EINVAL;
	}

	req = handle_to_req(handle);
	buffer = handle_to_buffer(handle);

	if (!req)
		return -EINVAL;

	if (req->req_start != core_address(id, buffer->addr)) {
		pr_err("Invalid buffer handle passed for free\n");
		return -EINVAL;
	}

	mutex_lock(&sched_mutex);
	sched_dequeue(req);
	mutex_unlock(&sched_mutex);

	if (TEST_STATE(req, R_MAPPED)) {
		/* unmap the interval and clear the memory */
		rc = process_unmap(req, req->req_start, req->req_end);
		if (rc < 0)
			return -EINVAL;
	}

	/* Turn off the memory */
	if (req->req_sz != 0) {

		offset = phys_to_offset(req->req_start);

		rc = ocmem_memory_off(req->owner, offset, req->req_sz);

		if (rc < 0) {
			pr_err("Failed to switch OFF memory macros\n");
			return -EINVAL;
		}

	}

	if (!TEST_STATE(req, R_FREE)) {
		/* free the allocation */
		rc = do_free(req);
		if (rc < 0)
			return -EINVAL;
	}

	inc_ocmem_stat(zone_of(req), NR_FREES);

	ocmem_destroy_req(req);
	handle->req = NULL;

	ocmem_schedule_pending();
	return 0;
}

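/*
 * RDM (DMA) transfer path: queue_transfer() packages a map list into an
 * ocmem_rdm_work item and the worker below performs the transfer between
 * DDR and OCMEM, then notifies the client of the result.
 */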
static void ocmem_rdm_worker(struct work_struct *work)
{
	int offset = 0;
	int rc = 0;
	int event;
	struct ocmem_rdm_work *work_data = container_of(work,
				struct ocmem_rdm_work, work);
	int id = work_data->id;
	struct ocmem_map_list *list = work_data->list;
	int direction = work_data->direction;
	struct ocmem_handle *handle = work_data->handle;
	struct ocmem_req *req = handle_to_req(handle);
	struct ocmem_buf *buffer = handle_to_buffer(handle);

	down_write(&req->rw_sem);
	offset = phys_to_offset(req->req_start);
	rc = ocmem_rdm_transfer(id, list, offset, direction);
	if (work_data->direction == TO_OCMEM)
		event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
	else
		event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;
	up_write(&req->rw_sem);
	kfree(work_data);
	dispatch_notification(id, event, buffer);
}

int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle,
			struct ocmem_map_list *list, int direction)
{
	struct ocmem_rdm_work *work_data = NULL;

	down_write(&req->rw_sem);

	work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC);
	if (!work_data)
		BUG();

	work_data->handle = handle;
	work_data->list = list;
	work_data->id = req->owner;
	work_data->direction = direction;
	INIT_WORK(&work_data->work, ocmem_rdm_worker);
	up_write(&req->rw_sem);
	queue_work(ocmem_rdm_wq, &work_data->work);
	return 0;
}

int process_xfer_out(int id, struct ocmem_handle *handle,
			struct ocmem_map_list *list)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;

	if (!is_mapped(req)) {
		pr_err("Buffer is not currently mapped\n");
		goto transfer_out_error;
	}

	rc = queue_transfer(req, handle, list, TO_DDR);

	if (rc < 0) {
		pr_err("Failed to queue rdm transfer to DDR\n");
		inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
		goto transfer_out_error;
	}

	inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_DDR);
	return 0;

transfer_out_error:
	return -EINVAL;
}

int process_xfer_in(int id, struct ocmem_handle *handle,
			struct ocmem_map_list *list)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;


	if (!is_mapped(req)) {
		pr_err("Buffer is not already mapped for transfer\n");
		goto transfer_in_error;
	}


	inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_OCMEM);
	rc = queue_transfer(req, handle, list, TO_OCMEM);

	if (rc < 0) {
		pr_err("Failed to queue rdm transfer to OCMEM\n");
		inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
		goto transfer_in_error;
	}

	return 0;
transfer_in_error:
	return -EINVAL;
}

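/*
 * process_shrink(): called when an evicted client gives back part or all
 * of its allocation. A shrink to zero unmaps and frees the buffer; a
 * partial shrink re-allocates it at the smaller size. The last shrink to
 * complete wakes up the evictor waiting on edata->completion.
 */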
int process_shrink(int id, struct ocmem_handle *handle, unsigned long size)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	struct ocmem_eviction_data *edata = NULL;
	int rc = 0;

	if (is_blocked(id)) {
		pr_err("Client %d cannot request free\n", id);
		return -EINVAL;
	}

	req = handle_to_req(handle);
	buffer = handle_to_buffer(handle);

	if (!req)
		return -EINVAL;

	if (req->req_start != core_address(id, buffer->addr)) {
		pr_err("Invalid buffer handle passed for shrink\n");
		return -EINVAL;
	}

	edata = req->edata;

	if (!edata) {
		pr_err("Unable to find eviction data\n");
		return -EINVAL;
	}

	pr_debug("Found edata %p in request %p\n", edata, req);

	inc_ocmem_stat(zone_of(req), NR_SHRINKS);

	if (size == 0) {
		pr_debug("req %p being shrunk to zero\n", req);
		if (is_mapped(req))
			rc = process_unmap(req, req->req_start, req->req_end);
		if (rc < 0)
			return -EINVAL;
		rc = do_free(req);
		if (rc < 0)
			return -EINVAL;
	} else {
		rc = do_shrink(req, size);
		if (rc < 0)
			return -EINVAL;
	}

	req->edata = NULL;
	CLEAR_STATE(req, R_ALLOCATED);
	SET_STATE(req, R_FREE);

	if (atomic_dec_and_test(&edata->pending)) {
		pr_debug("ocmem: All conflicting allocations were shrunk\n");
		complete(&edata->completion);
	}

	return 0;
}

int process_xfer(int id, struct ocmem_handle *handle,
		struct ocmem_map_list *list, int direction)
{
	int rc = 0;

	if (is_tcm(id)) {
		WARN(1, "Mapping operation is invalid for client\n");
		return -EINVAL;
	}

	if (direction == TO_DDR)
		rc = process_xfer_out(id, handle, list);
	else if (direction == TO_OCMEM)
		rc = process_xfer_in(id, handle, list);
	return rc;
}

static struct ocmem_eviction_data *init_eviction(int id)
{
	struct ocmem_eviction_data *edata = NULL;
	int prio = ocmem_client_table[id].priority;

	edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC);

	if (!edata) {
		pr_err("ocmem: Could not allocate eviction data\n");
		return NULL;
	}

	INIT_LIST_HEAD(&edata->victim_list);
	INIT_LIST_HEAD(&edata->req_list);
	edata->prio = prio;
	atomic_set(&edata->pending, 0);
	return edata;
}

static void free_eviction(struct ocmem_eviction_data *edata)
{

	if (!edata)
		return;

	if (!list_empty(&edata->req_list))
		pr_err("ocmem: Eviction data %p not empty\n", edata);

	kfree(edata);
	edata = NULL;
}

static bool is_overlapping(struct ocmem_req *new, struct ocmem_req *old)
{

	if (!new || !old)
		return false;

	pr_debug("check overlap [%lx -- %lx] on [%lx -- %lx]\n",
			new->req_start, new->req_end,
			old->req_start, old->req_end);

	if ((new->req_start < old->req_start &&
		new->req_end >= old->req_start) ||
		(new->req_start >= old->req_start &&
		new->req_start <= old->req_end &&
		new->req_end >= old->req_end)) {
		pr_debug("request %p overlaps with existing req %p\n",
				new, old);
		return true;
	}
	return false;
}

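/*
 * Walk the scheduler tree and collect, on edata->req_list, every
 * lower-priority request that must make way: all of them for a passive
 * eviction, or only those overlapping @req otherwise. Returns -EINVAL
 * when nothing needs to be evicted.
 */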
static int __evict_common(struct ocmem_eviction_data *edata,
						struct ocmem_req *req)
{
	struct rb_node *rb_node = NULL;
	struct ocmem_req *e_req = NULL;
	bool needs_eviction = false;
	int j = 0;

	for (rb_node = rb_first(&sched_tree); rb_node;
				rb_node = rb_next(rb_node)) {

		struct ocmem_region *tmp_region = NULL;

		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);

		if (tmp_region->max_prio < edata->prio) {
			for (j = edata->prio - 1; j > NO_PRIO; j--) {
				needs_eviction = false;
				e_req = find_req_match(j, tmp_region);
				if (!e_req)
					continue;
				if (edata->passive == true) {
					needs_eviction = true;
				} else {
					needs_eviction = is_overlapping(req,
								e_req);
				}

				if (needs_eviction) {
					pr_debug("adding %p in region %p to eviction list\n",
							e_req, tmp_region);
					list_add_tail(
						&e_req->eviction_list,
						&edata->req_list);
					atomic_inc(&edata->pending);
					e_req->edata = edata;
				}
			}
		} else {
			pr_debug("Skipped region %p\n", tmp_region);
		}
	}

	pr_debug("%d requests will be evicted\n", atomic_read(&edata->pending));

	if (!atomic_read(&edata->pending))
		return -EINVAL;
	return 0;
}

static void trigger_eviction(struct ocmem_eviction_data *edata)
{
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;
	struct ocmem_buf buffer;

	if (!edata)
		return;

	BUG_ON(atomic_read(&edata->pending) == 0);

	init_completion(&edata->completion);

	list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
	{
		if (req) {
			pr_debug("ocmem: Evicting request %p\n", req);
			buffer.addr = req->req_start;
			buffer.len = 0x0;
			dispatch_notification(req->owner, OCMEM_ALLOC_SHRINK,
								&buffer);
		}
	}
	return;
}

int process_evict(int id)
{
	struct ocmem_eviction_data *edata = NULL;
	int rc = 0;

	edata = init_eviction(id);

	if (!edata)
		return -EINVAL;

	edata->passive = true;

	mutex_lock(&sched_mutex);

	rc = __evict_common(edata, NULL);

	if (rc < 0)
		goto skip_eviction;

	trigger_eviction(edata);

	evictions[id] = edata;

	mutex_unlock(&sched_mutex);

	wait_for_completion(&edata->completion);

	return 0;

skip_eviction:
	evictions[id] = NULL;
	mutex_unlock(&sched_mutex);
	return 0;
}

static int run_evict(struct ocmem_req *req)
{
	struct ocmem_eviction_data *edata = NULL;
	int rc = 0;

	if (!req)
		return -EINVAL;

	edata = init_eviction(req->owner);

	if (!edata)
		return -EINVAL;

	edata->passive = false;

	rc = __evict_common(edata, req);

	if (rc < 0)
		goto skip_eviction;

	trigger_eviction(edata);

	pr_debug("ocmem: attaching eviction %p to request %p", edata, req);
	req->edata = edata;

	wait_for_completion(&edata->completion);

	pr_debug("ocmem: eviction completed successfully\n");
	return 0;

skip_eviction:
	pr_err("ocmem: Unable to run eviction\n");
	free_eviction(edata);
	return -EINVAL;
}

static int __restore_common(struct ocmem_eviction_data *edata)
{

	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;

	if (!edata)
		return -EINVAL;

	list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
	{
		if (req) {
			pr_debug("ocmem: restoring evicted request %p\n",
								req);
			list_del(&req->eviction_list);
			req->op = SCHED_ALLOCATE;
			sched_enqueue(req);
			inc_ocmem_stat(zone_of(req), NR_RESTORES);
		}
	}

	pr_debug("Scheduled all evicted regions\n");

	return 0;
}

static int sched_restore(struct ocmem_req *req)
{

	int rc = 0;

	if (!req)
		return -EINVAL;

	if (!req->edata)
		return 0;

	rc = __restore_common(req->edata);

	if (rc < 0)
		return -EINVAL;

	free_eviction(req->edata);
	return 0;
}

int process_restore(int id)
{
	struct ocmem_eviction_data *edata = evictions[id];
	int rc = 0;

	if (!edata)
		return -EINVAL;

	rc = __restore_common(edata);

	if (rc < 0) {
		pr_err("Failed to restore evicted requests\n");
		return -EINVAL;
	}

	free_eviction(edata);
	evictions[id] = NULL;
	ocmem_schedule_pending();
	return 0;
}

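/*
 * do_allocate(): core allocation path. Runs __sched_allocate() under
 * sched_mutex; when the scheduler asks for an eviction (OP_EVICT) the
 * conflicting lower-priority requests are evicted, queued for a later
 * restore, and the allocation is retried. allocation_mutex serializes
 * the whole evict-and-retry sequence.
 */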
static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait)
{
	int rc = 0;
	int ret = 0;
	struct ocmem_buf *buffer = req->buffer;

	down_write(&req->rw_sem);

	mutex_lock(&allocation_mutex);
retry_allocate:

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_allocate(req, can_block, can_wait);
	mutex_unlock(&sched_mutex);

	if (rc == OP_EVICT) {

		ret = run_evict(req);

		if (ret == 0) {
			rc = sched_restore(req);
			if (rc < 0) {
				pr_err("Failed to restore for req %p\n", req);
				goto err_allocate_fail;
			}
			req->edata = NULL;

			pr_debug("Attempting to re-allocate req %p\n", req);
			req->req_start = 0x0;
			req->req_end = 0x0;
			goto retry_allocate;
		} else {
			goto err_allocate_fail;
		}
	}

	if (rc == OP_FAIL) {
		inc_ocmem_stat(zone_of(req), NR_ALLOCATION_FAILS);
		goto err_allocate_fail;
	}

	mutex_unlock(&allocation_mutex);

	if (rc == OP_RESCHED) {
		buffer->addr = 0x0;
		buffer->len = 0x0;
		pr_debug("ocmem: Enqueuing req %p\n", req);
		sched_enqueue(req);
	} else if (rc == OP_PARTIAL) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
		inc_ocmem_stat(zone_of(req), NR_RANGE_ALLOCATIONS);
		pr_debug("ocmem: Enqueuing req %p\n", req);
		sched_enqueue(req);
	} else if (rc == OP_COMPLETE) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_allocate_fail:
	mutex_unlock(&allocation_mutex);
	up_write(&req->rw_sem);
	return -EINVAL;
}

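/*
 * Copy the contents of an OCMEM allocation into the DDR buffer at @addr.
 * The request's OCMEM offset is translated to a kernel virtual address via
 * the mapping set up at init time (ocmem_vaddr) before the copy is done.
 */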
static int do_dump(struct ocmem_req *req, unsigned long addr)
{
	void __iomem *req_vaddr;
	unsigned long offset = 0x0;

	down_write(&req->rw_sem);

	offset = phys_to_offset(req->req_start);

	req_vaddr = ocmem_vaddr + offset;

	if (!req_vaddr)
		goto err_do_dump;

	pr_debug("Dumping client %s buffer ocmem p: %lx (v: %p) to ddr %lx\n",
			get_name(req->owner), req->req_start,
			req_vaddr, addr);

	memcpy((void *)addr, req_vaddr, req->req_sz);

	up_write(&req->rw_sem);
	return 0;
err_do_dump:
	up_write(&req->rw_sem);
	return -EINVAL;
}

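/*
 * Entry point for synchronous client allocations. Validates the client and
 * handle, builds an ocmem_req from the client's min/max/step parameters,
 * performs the allocation and, if any memory was actually assigned, maps
 * the range and switches on the corresponding memory macros.
 */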
int process_allocate(int id, struct ocmem_handle *handle,
			unsigned long min, unsigned long max,
			unsigned long step, bool can_block, bool can_wait)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	int rc = 0;
	unsigned long offset = 0;

	/* sanity checks */
	if (is_blocked(id)) {
		pr_err("Client %d cannot request allocation\n", id);
		return -EINVAL;
	}

	if (handle->req != NULL) {
		pr_err("Invalid handle passed in\n");
		return -EINVAL;
	}

	buffer = handle_to_buffer(handle);
	BUG_ON(buffer == NULL);

	/* prepare a request structure to represent this transaction */
	req = ocmem_create_req();
	if (!req)
		return -ENOMEM;

	req->owner = id;
	req->req_min = min;
	req->req_max = max;
	req->req_step = step;
	req->prio = ocmem_client_table[id].priority;
	req->op = SCHED_ALLOCATE;
	req->buffer = buffer;

	inc_ocmem_stat(zone_of(req), NR_REQUESTS);

	rc = do_allocate(req, can_block, can_wait);

	if (rc < 0)
		goto do_allocate_error;

	inc_ocmem_stat(zone_of(req), NR_SYNC_ALLOCATIONS);

	handle->req = req;

	if (req->req_sz != 0) {

		rc = process_map(req, req->req_start, req->req_end);
		if (rc < 0)
			goto map_error;

		offset = phys_to_offset(req->req_start);

		rc = ocmem_memory_on(req->owner, offset, req->req_sz);

		if (rc < 0) {
			pr_err("Failed to switch ON memory macros\n");
			goto power_ctl_error;
		}
	}

	return 0;

power_ctl_error:
	process_unmap(req, req->req_start, req->req_end);
map_error:
	handle->req = NULL;
	do_free(req);
do_allocate_error:
	ocmem_destroy_req(req);
	return -EINVAL;
}

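/*
 * Worker-context counterpart of process_allocate() for requests that were
 * queued earlier. On success the mapped, powered-on buffer is announced to
 * the client through an OCMEM_ALLOC_GROW notification; a still-pending
 * request is simply left on the queue.
 */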
int process_delayed_allocate(struct ocmem_req *req)
{
	struct ocmem_handle *handle = NULL;
	int rc = 0;
	int id = req->owner;
	unsigned long offset = 0;

	handle = req_to_handle(req);
	BUG_ON(handle == NULL);

	rc = do_allocate(req, true, false);

	if (rc < 0)
		goto do_allocate_error;

	/* The request can still be pending */
	if (TEST_STATE(req, R_PENDING))
		return 0;

	inc_ocmem_stat(zone_of(req), NR_ASYNC_ALLOCATIONS);

	if (req->req_sz != 0) {

		rc = process_map(req, req->req_start, req->req_end);
		if (rc < 0)
			goto map_error;

		offset = phys_to_offset(req->req_start);

		rc = ocmem_memory_on(req->owner, offset, req->req_sz);

		if (rc < 0) {
			pr_err("Failed to switch ON memory macros\n");
			goto power_ctl_error;
		}
	}

	/* Notify the client about the buffer growth */
	rc = dispatch_notification(id, OCMEM_ALLOC_GROW, req->buffer);
	if (rc < 0) {
		pr_err("No notifier callback to cater for req %p event: %d\n",
				req, OCMEM_ALLOC_GROW);
		BUG();
	}
	return 0;

power_ctl_error:
	process_unmap(req, req->req_start, req->req_end);
map_error:
	handle->req = NULL;
	do_free(req);
do_allocate_error:
	ocmem_destroy_req(req);
	return -EINVAL;
}

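/*
 * Dump the mapped buffer backing @handle to DDR at @addr on behalf of
 * client @id, updating the dump request/completion statistics on the way.
 */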
int process_dump(int id, struct ocmem_handle *handle, unsigned long addr)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;

	if (!is_mapped(req)) {
		pr_err("Buffer is not mapped\n");
		goto dump_error;
	}

	inc_ocmem_stat(zone_of(req), NR_DUMP_REQUESTS);

	mutex_lock(&sched_mutex);
	rc = do_dump(req, addr);
	mutex_unlock(&sched_mutex);

	if (rc < 0)
		goto dump_error;

	inc_ocmem_stat(zone_of(req), NR_DUMP_COMPLETE);
	return 0;

dump_error:
	pr_err("Dumping OCMEM memory failed for client %d\n", id);
	return -EINVAL;
}

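/*
 * Scheduler work function. Pulls one pending request off the priority
 * queues and dispatches it: SCHED_GROW requests are grown, SCHED_ALLOCATE
 * requests go through the delayed allocation path.
 */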
static void ocmem_sched_wk_func(struct work_struct *work)
{
	struct ocmem_buf *buffer = NULL;
	struct ocmem_handle *handle = NULL;
	struct ocmem_req *req = ocmem_fetch_req();

	if (!req) {
		pr_debug("No Pending Requests found\n");
		return;
	}

	pr_debug("ocmem: sched_wk pending req %p\n", req);
	handle = req_to_handle(req);
	buffer = handle_to_buffer(handle);
	BUG_ON(req->op == SCHED_NOP);

	switch (req->op) {
	case SCHED_GROW:
		process_grow(req);
		break;
	case SCHED_ALLOCATE:
		process_delayed_allocate(req);
		break;
	default:
		pr_err("ocmem: Unknown operation encountered\n");
		break;
	}
	return;
}

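/*
 * debugfs "allocations" backend. Walks the region rb-tree in descending
 * priority order and prints one line per active request: owner, address
 * range, size and state bits.
 */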
static int ocmem_allocations_show(struct seq_file *f, void *dummy)
{
	struct rb_node *rb_node = NULL;
	struct ocmem_req *req = NULL;
	unsigned j;

	mutex_lock(&sched_mutex);
	for (rb_node = rb_first(&sched_tree); rb_node;
			rb_node = rb_next(rb_node)) {
		struct ocmem_region *tmp_region = NULL;
		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
		for (j = MAX_OCMEM_PRIO - 1; j > NO_PRIO; j--) {
			req = find_req_match(j, tmp_region);
			if (req) {
				seq_printf(f,
					"owner: %s 0x%lx -- 0x%lx size 0x%lx [state: %2lx]\n",
					get_name(req->owner),
					req->req_start, req->req_end,
					req->req_sz, req->state);
			}
		}
	}
	mutex_unlock(&sched_mutex);
	return 0;
}

static int ocmem_allocations_open(struct inode *inode, struct file *file)
{
	return single_open(file, ocmem_allocations_show, inode->i_private);
}

static const struct file_operations allocations_show_fops = {
	.open = ocmem_allocations_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

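/*
 * One-time scheduler initialization: set up the region tree, locks and
 * priority queues, record the OCMEM virtual base from the platform data,
 * create the RDM and eviction workqueues, and expose the "allocations"
 * debugfs node under the platform driver's debug directory.
 */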
int ocmem_sched_init(struct platform_device *pdev)
{
	int i = 0;
	struct ocmem_plat_data *pdata = NULL;
	struct device *dev = &pdev->dev;

	sched_tree = RB_ROOT;
	pdata = platform_get_drvdata(pdev);
	mutex_init(&allocation_mutex);
	mutex_init(&sched_mutex);
	mutex_init(&sched_queue_mutex);
	ocmem_vaddr = pdata->vbase;
	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++)
		INIT_LIST_HEAD(&sched_queue[i]);

	mutex_init(&rdm_mutex);
	INIT_LIST_HEAD(&rdm_queue);
	ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0);
	if (!ocmem_rdm_wq)
		return -ENOMEM;
	ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0);
	if (!ocmem_eviction_wq)
		return -ENOMEM;

	if (!debugfs_create_file("allocations", S_IRUGO, pdata->debug_node,
					NULL, &allocations_show_fops)) {
		dev_err(dev, "Unable to create debugfs node for scheduler\n");
		return -EBUSY;
	}
	return 0;
}