1/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/slab.h>
16#include <linux/mm.h>
17#include <linux/rbtree.h>
18#include <linux/idr.h>
19#include <linux/genalloc.h>
20#include <linux/of.h>
21#include <linux/io.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/seq_file.h>
25#include <mach/ocmem_priv.h>
26
27enum request_states {
28 R_FREE = 0x0, /* request is not allocated */
29 R_PENDING, /* request has a pending operation */
30 R_ALLOCATED, /* request has been allocated */
31 R_MUST_GROW, /* request must grow as a part of pending operation */
32 R_MUST_SHRINK, /* request must shrink as a part of pending operation */
33 R_MUST_MAP, /* request must be mapped before being used */
34 R_MUST_UNMAP, /* request must be unmapped when not being used */
35 R_MAPPED, /* request is mapped and actively used by client */
36 R_UNMAPPED, /* request is not mapped, so it's not in active use */
37 R_EVICTED, /* request is evicted and must be restored */
38};
39
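/*
 * Request state helpers: a request's state field is a bitmask, so several
 * of the request_states bits (e.g. R_ALLOCATED together with R_MUST_MAP)
 * can be held at once via set_bit()/clear_bit()/test_bit().
 */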
40#define SET_STATE(x, val) (set_bit((val), &(x)->state))
41#define CLEAR_STATE(x, val) (clear_bit((val), &(x)->state))
42#define TEST_STATE(x, val) (test_bit((val), &(x)->state))
43
44enum op_res {
45 OP_COMPLETE = 0x0,
46 OP_RESCHED,
47 OP_PARTIAL,
48 OP_EVICT,
49 OP_FAIL = ~0x0,
50};
51
52/* Represents various client priorities */
53/* Note: More than one client can share a priority level */
54enum client_prio {
55 MIN_PRIO = 0x0,
56 NO_PRIO = MIN_PRIO,
57 PRIO_SENSORS = 0x1,
58 PRIO_OTHER_OS = 0x1,
59 PRIO_LP_AUDIO = 0x1,
60 PRIO_HP_AUDIO = 0x2,
61 PRIO_VOICE = 0x3,
62 PRIO_GFX_GROWTH = 0x4,
63 PRIO_VIDEO = 0x5,
64 PRIO_GFX = 0x6,
65 PRIO_OCMEM = 0x7,
66 MAX_OCMEM_PRIO = PRIO_OCMEM + 1,
67};
68
69static void __iomem *ocmem_vaddr;
70static struct list_head sched_queue[MAX_OCMEM_PRIO];
71static struct mutex sched_queue_mutex;
72
73/* The duration in msecs before a pending operation is scheduled
74 * This allows an idle window between use case boundaries where various
75 * hardware state changes can occur. The value will be tweaked on actual
76 * hardware.
77*/
78/* Delay in ms for switching to low power mode for OCMEM */
79#define SCHED_DELAY 5000
80
81static struct list_head rdm_queue;
82static struct mutex rdm_mutex;
83static struct workqueue_struct *ocmem_rdm_wq;
84static struct workqueue_struct *ocmem_eviction_wq;
85
86static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX];
87
88struct ocmem_rdm_work {
89 int id;
90 struct ocmem_map_list *list;
91 struct ocmem_handle *handle;
92 int direction;
93 struct work_struct work;
94};
95
96/* OCMEM Operational modes */
97enum ocmem_client_modes {
98 OCMEM_PERFORMANCE = 1,
99 OCMEM_PASSIVE,
100 OCMEM_LOW_POWER,
101 OCMEM_MODE_MAX = OCMEM_LOW_POWER
102};
103
104/* OCMEM Addressing modes */
105enum ocmem_interconnects {
106 OCMEM_BLOCKED = 0,
107 OCMEM_PORT = 1,
108 OCMEM_OCMEMNOC = 2,
109 OCMEM_SYSNOC = 3,
110};
111
112/**
113 * Primary OCMEM Arbitration Table
114 **/
115struct ocmem_table {
116 int client_id;
117 int priority;
118 int mode;
119 int hw_interconnect;
120} ocmem_client_table[OCMEM_CLIENT_MAX] = {
121 {OCMEM_GRAPHICS, PRIO_GFX, OCMEM_PERFORMANCE, OCMEM_PORT},
122 {OCMEM_VIDEO, PRIO_VIDEO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC},
123 {OCMEM_CAMERA, NO_PRIO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC},
124 {OCMEM_HP_AUDIO, PRIO_HP_AUDIO, OCMEM_PASSIVE, OCMEM_BLOCKED},
125 {OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED},
126 {OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, OCMEM_SYSNOC},
127 {OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
128 {OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
129};
130
131static struct rb_root sched_tree;
132static struct mutex sched_mutex;
133static struct mutex allocation_mutex;
134
135/* A region represents a contiguous interval in the OCMEM address space */
136struct ocmem_region {
137 /* Chain in Interval Tree */
138 struct rb_node region_rb;
139 /* Hash map of requests */
140 struct idr region_idr;
141 /* Chain in eviction list */
142 struct list_head eviction_list;
143 unsigned long r_start;
144 unsigned long r_end;
145 unsigned long r_sz;
146 /* Highest priority of all requests served by this region */
147 int max_prio;
148};
149
150/* Is OCMEM tightly coupled to the client? */
151static inline int is_tcm(int id)
152{
153 if (ocmem_client_table[id].hw_interconnect == OCMEM_PORT ||
154 ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC)
155 return 1;
156 else
157 return 0;
158}
159
160static inline int is_iface_access(int id)
161{
162 return ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC ? 1 : 0;
163}
164
165static inline int is_remapped_access(int id)
166{
167 return ocmem_client_table[id].hw_interconnect == OCMEM_SYSNOC ? 1 : 0;
168}
169
170static inline int is_blocked(int id)
171{
172 return ocmem_client_table[id].hw_interconnect == OCMEM_BLOCKED ? 1 : 0;
173}
174
175inline struct ocmem_buf *handle_to_buffer(struct ocmem_handle *handle)
176{
177 if (handle)
178 return &handle->buffer;
179 else
180 return NULL;
181}
182
183inline struct ocmem_handle *buffer_to_handle(struct ocmem_buf *buffer)
184{
185 if (buffer)
186 return container_of(buffer, struct ocmem_handle, buffer);
187 else
188 return NULL;
189}
190
191inline struct ocmem_req *handle_to_req(struct ocmem_handle *handle)
192{
193 if (handle)
194 return handle->req;
195 else
196 return NULL;
197}
198
199inline struct ocmem_handle *req_to_handle(struct ocmem_req *req)
200{
201 if (req && req->buffer)
202 return container_of(req->buffer, struct ocmem_handle, buffer);
203 else
204 return NULL;
205}
206
207/* Simple wrappers which will have debug features added later */
208inline int ocmem_read(void *at)
209{
210 return readl_relaxed(at);
211}
212
213inline int ocmem_write(unsigned long val, void *at)
214{
215 writel_relaxed(val, at);
216 return 0;
217}
218
219inline int get_mode(int id)
220{
221 if (!check_id(id))
222 return MODE_NOT_SET;
223 else
224 return ocmem_client_table[id].mode == OCMEM_PERFORMANCE ?
225 WIDE_MODE : THIN_MODE;
226}
227
228/* Returns the address that can be used by a device core to access OCMEM */
229static unsigned long device_address(int id, unsigned long addr)
230{
231 int hw_interconnect = ocmem_client_table[id].hw_interconnect;
232 unsigned long ret_addr = 0x0;
233
234 switch (hw_interconnect) {
235 case OCMEM_PORT:
236 case OCMEM_OCMEMNOC:
237 ret_addr = phys_to_offset(addr);
238 break;
239 case OCMEM_SYSNOC:
240 ret_addr = addr;
241 break;
242 case OCMEM_BLOCKED:
243 ret_addr = 0x0;
244 break;
245 }
246 return ret_addr;
247}
248
249/* Returns the address as viewed by the core */
250static unsigned long core_address(int id, unsigned long addr)
251{
252 int hw_interconnect = ocmem_client_table[id].hw_interconnect;
253 unsigned long ret_addr = 0x0;
254
255 switch (hw_interconnect) {
256 case OCMEM_PORT:
257 case OCMEM_OCMEMNOC:
258 ret_addr = offset_to_phys(addr);
259 break;
260 case OCMEM_SYSNOC:
261 ret_addr = addr;
262 break;
263 case OCMEM_BLOCKED:
264 ret_addr = 0x0;
265 break;
266 }
267 return ret_addr;
268}
269
270static inline struct ocmem_zone *zone_of(struct ocmem_req *req)
271{
272 int owner;
273 if (!req)
274 return NULL;
275 owner = req->owner;
276 return get_zone(owner);
277}
278
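/*
 * Link a region into the scheduler interval tree (sched_tree). The walk
 * descends left or right depending on how the new region's start address
 * compares with the intervals already in the tree.
 */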
279static int insert_region(struct ocmem_region *region)
280{
281
282 struct rb_root *root = &sched_tree;
283 struct rb_node **p = &root->rb_node;
284 struct rb_node *parent = NULL;
285 struct ocmem_region *tmp = NULL;
286 unsigned long addr = region->r_start;
287
288 while (*p) {
289 parent = *p;
290 tmp = rb_entry(parent, struct ocmem_region, region_rb);
291
292 if (tmp->r_end > addr) {
293 if (tmp->r_start <= addr)
294 break;
295 p = &(*p)->rb_left;
296 } else if (tmp->r_end <= addr)
297 p = &(*p)->rb_right;
298 }
299 rb_link_node(&region->region_rb, parent, p);
300 rb_insert_color(&region->region_rb, root);
301 return 0;
302}
303
304static int remove_region(struct ocmem_region *region)
305{
306 struct rb_root *root = &sched_tree;
307 rb_erase(&region->region_rb, root);
308 return 0;
309}
310
311static struct ocmem_req *ocmem_create_req(void)
312{
313 struct ocmem_req *p = NULL;
314
315 p = kzalloc(sizeof(struct ocmem_req), GFP_KERNEL);
316 if (!p)
317 return NULL;
318
319 INIT_LIST_HEAD(&p->zone_list);
320 INIT_LIST_HEAD(&p->sched_list);
321 init_rwsem(&p->rw_sem);
322 SET_STATE(p, R_FREE);
323 pr_debug("request %p created\n", p);
324 return p;
325}
326
327static int ocmem_destroy_req(struct ocmem_req *req)
328{
329 kfree(req);
330 return 0;
331}
332
333static struct ocmem_region *create_region(void)
334{
335 struct ocmem_region *p = NULL;
336
337 p = kzalloc(sizeof(struct ocmem_region), GFP_KERNEL);
338 if (!p)
339 return NULL;
340 idr_init(&p->region_idr);
341 INIT_LIST_HEAD(&p->eviction_list);
342 p->r_start = p->r_end = p->r_sz = 0x0;
343 p->max_prio = NO_PRIO;
344 return p;
345}
346
347static int destroy_region(struct ocmem_region *region)
348{
349 kfree(region);
350 return 0;
351}
352
353static int attach_req(struct ocmem_region *region, struct ocmem_req *req)
354{
355 int ret, id;
356
357 while (1) {
358 if (idr_pre_get(&region->region_idr, GFP_KERNEL) == 0)
359 return -ENOMEM;
360
361 ret = idr_get_new_above(&region->region_idr, req, 1, &id);
362
363 if (ret != -EAGAIN)
364 break;
365 }
366
367 if (!ret) {
368 req->req_id = id;
369 pr_debug("ocmem: request %p(id:%d) attached to region %p\n",
370 req, id, region);
371 return 0;
372 }
373 return -EINVAL;
374}
375
376static int detach_req(struct ocmem_region *region, struct ocmem_req *req)
377{
378 idr_remove(&region->region_idr, req->req_id);
379 return 0;
380}
381
382static int populate_region(struct ocmem_region *region, struct ocmem_req *req)
383{
384 region->r_start = req->req_start;
385 region->r_end = req->req_end;
386 region->r_sz = req->req_end - req->req_start + 1;
387 return 0;
388}
389
390static int region_req_count(int id, void *ptr, void *data)
391{
392 int *count = data;
393 *count = *count + 1;
394 return 0;
395}
396
397static int req_count(struct ocmem_region *region)
398{
399 int count = 0;
400 idr_for_each(&region->region_idr, region_req_count, &count);
401 return count;
402}
403
404static int compute_max_prio(int id, void *ptr, void *data)
405{
406 int *max = data;
407 struct ocmem_req *req = ptr;
408
409 if (req->prio > *max)
410 *max = req->prio;
411 return 0;
412}
413
414static int update_region_prio(struct ocmem_region *region)
415{
416 int max_prio;
417 if (req_count(region) != 0) {
418 idr_for_each(&region->region_idr, compute_max_prio, &max_prio);
419 region->max_prio = max_prio;
420 } else {
421 region->max_prio = NO_PRIO;
422 }
423 pr_debug("ocmem: Updating prio of region %p as %d\n",
424 region, max_prio);
425
426 return 0;
427}
428
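/*
 * Find the region containing 'addr'. If no region contains it, the lowest
 * region that ends above 'addr' is returned instead, which is what
 * find_region_intersection() relies on to detect partial overlaps.
 */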
429static struct ocmem_region *find_region(unsigned long addr)
430{
431 struct ocmem_region *region = NULL;
432 struct rb_node *rb_node = NULL;
433
434 rb_node = sched_tree.rb_node;
435
436 while (rb_node) {
437 struct ocmem_region *tmp_region = NULL;
438 tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
439
440 if (tmp_region->r_end > addr) {
441 region = tmp_region;
442 if (tmp_region->r_start <= addr)
443 break;
444 rb_node = rb_node->rb_left;
445 } else {
446 rb_node = rb_node->rb_right;
447 }
448 }
449 return region;
450}
451
452static struct ocmem_region *find_region_intersection(unsigned long start,
453 unsigned long end)
454{
455
456 struct ocmem_region *region = NULL;
457 region = find_region(start);
458 if (region && end <= region->r_start)
459 region = NULL;
460 return region;
461}
462
463static struct ocmem_region *find_region_match(unsigned long start,
464 unsigned long end)
465{
466
467 struct ocmem_region *region = NULL;
468 region = find_region(start);
469 if (region && start == region->r_start && end == region->r_end)
470 return region;
471 return NULL;
472}
473
474static struct ocmem_req *find_req_match(int owner, struct ocmem_region *region)
475{
476 struct ocmem_req *req = NULL;
477
478 if (!region)
479 return NULL;
480
481 req = idr_find(&region->region_idr, owner);
482
483 return req;
484}
485
486/* Must be called with req->sem held */
487static inline int is_mapped(struct ocmem_req *req)
488{
489 return TEST_STATE(req, R_MAPPED);
490}
491
492/* Must be called with sched_mutex held */
493static int __sched_unmap(struct ocmem_req *req)
494{
495 struct ocmem_req *matched_req = NULL;
496 struct ocmem_region *matched_region = NULL;
497
498 matched_region = find_region_match(req->req_start, req->req_end);
499 matched_req = find_req_match(req->req_id, matched_region);
500
501 if (!matched_region || !matched_req) {
502 pr_err("Could not find backing region for req");
503 goto invalid_op_error;
504 }
505
506 if (matched_req != req) {
507 pr_err("Request does not match backing req");
508 goto invalid_op_error;
509 }
510
511 if (!is_mapped(req)) {
512 pr_err("Request is not currently mapped");
513 goto invalid_op_error;
514 }
515
516 /* Update the request state */
517 CLEAR_STATE(req, R_MAPPED);
518 SET_STATE(req, R_MUST_MAP);
519
520 return OP_COMPLETE;
521
522invalid_op_error:
523 return OP_FAIL;
524}
525
526/* Must be called with sched_mutex held */
527static int __sched_map(struct ocmem_req *req)
528{
529 struct ocmem_req *matched_req = NULL;
530 struct ocmem_region *matched_region = NULL;
531
532 matched_region = find_region_match(req->req_start, req->req_end);
533 matched_req = find_req_match(req->req_id, matched_region);
534
535 if (!matched_region || !matched_req) {
536 pr_err("Could not find backing region for req");
537 goto invalid_op_error;
538 }
539
540 if (matched_req != req) {
541 pr_err("Request does not match backing req");
542 goto invalid_op_error;
543 }
544
545 /* Update the request state */
546 CLEAR_STATE(req, R_MUST_MAP);
547 SET_STATE(req, R_MAPPED);
548
549 return OP_COMPLETE;
550
551invalid_op_error:
552 return OP_FAIL;
553}
554
555static int do_map(struct ocmem_req *req)
556{
557 int rc = 0;
558
559 down_write(&req->rw_sem);
560
561 mutex_lock(&sched_mutex);
562 rc = __sched_map(req);
563 mutex_unlock(&sched_mutex);
564
565 up_write(&req->rw_sem);
566
567 if (rc == OP_FAIL)
568 return -EINVAL;
569
570 return 0;
571}
572
573static int do_unmap(struct ocmem_req *req)
574{
575 int rc = 0;
576
577 down_write(&req->rw_sem);
578
579 mutex_lock(&sched_mutex);
580 rc = __sched_unmap(req);
581 mutex_unlock(&sched_mutex);
582
583 up_write(&req->rw_sem);
584
585 if (rc == OP_FAIL)
586 return -EINVAL;
587
588 return 0;
589}
590
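/*
 * Map path for a request: enable the core clock plus the iface/br clocks the
 * owner's access path needs, lock the OCMEM region for the owner, then
 * perform the scheduler map. The error labels unwind only the steps that
 * already succeeded.
 */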
591static int process_map(struct ocmem_req *req, unsigned long start,
592 unsigned long end)
593{
594 int rc = 0;
595
596 rc = ocmem_enable_core_clock();
597
598 if (rc < 0)
599 goto core_clock_fail;
600
601
602 if (is_iface_access(req->owner)) {
603 rc = ocmem_enable_iface_clock();
604
605 if (rc < 0)
606 goto iface_clock_fail;
607 }
608
609 if (is_remapped_access(req->owner)) {
610 rc = ocmem_enable_br_clock();
611
612 if (rc < 0)
613 goto br_clock_fail;
614 }
615
616 rc = ocmem_lock(req->owner, phys_to_offset(req->req_start), req->req_sz,
617 get_mode(req->owner));
618
619 if (rc < 0) {
620 pr_err("ocmem: Failed to secure request %p for %d\n", req,
621 req->owner);
622 goto lock_failed;
623 }
624
625 rc = do_map(req);
626
627 if (rc < 0) {
628 pr_err("ocmem: Failed to map request %p for %d\n",
629 req, req->owner);
630 goto process_map_fail;
631
632 }
633 pr_debug("ocmem: Mapped request %p\n", req);
634 return 0;
635
636process_map_fail:
637 ocmem_unlock(req->owner, phys_to_offset(req->req_start), req->req_sz);
638lock_failed:
639 if (is_remapped_access(req->owner))
640 ocmem_disable_br_clock();
641br_clock_fail:
642 if (is_iface_access(req->owner))
643 ocmem_disable_iface_clock();
644iface_clock_fail:
645 ocmem_disable_core_clock();
646core_clock_fail:
647 pr_err("ocmem: Failed to map ocmem request\n");
648 return rc;
649}
650
651static int process_unmap(struct ocmem_req *req, unsigned long start,
652 unsigned long end)
653{
654 int rc = 0;
655
656 rc = do_unmap(req);
657
658 if (rc < 0)
659 goto process_unmap_fail;
660
661 rc = ocmem_unlock(req->owner, phys_to_offset(req->req_start),
662 req->req_sz);
663
664 if (rc < 0) {
665 pr_err("ocmem: Failed to un-secure request %p for %d\n", req,
666 req->owner);
667 goto unlock_failed;
668 }
669
670 if (is_remapped_access(req->owner))
671 ocmem_disable_br_clock();
672 if (is_iface_access(req->owner))
673 ocmem_disable_iface_clock();
674 ocmem_disable_core_clock();
675 pr_debug("ocmem: Unmapped request %p\n", req);
676 return 0;
677
678unlock_failed:
679process_unmap_fail:
680 pr_err("ocmem: Failed to unmap ocmem request\n");
681 return rc;
682}
683
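/*
 * Called with sched_mutex held (see do_grow()). Grows an existing allocation
 * towards req_max: the current backing is freed and re-allocated at the zone
 * head. If a higher priority region is in the way the growth is reduced in
 * req_step units and retried, or the request is re-queued (OP_RESCHED).
 */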
684static int __sched_grow(struct ocmem_req *req, bool can_block)
685{
686 unsigned long min = req->req_min;
687 unsigned long max = req->req_max;
688 unsigned long step = req->req_step;
689 int owner = req->owner;
690 unsigned long curr_sz = 0;
691 unsigned long growth_sz = 0;
692 unsigned long curr_start = 0;
693 enum client_prio prio = req->prio;
694 unsigned long alloc_addr = 0x0;
695 bool retry;
696 struct ocmem_region *spanned_r = NULL;
697 struct ocmem_region *overlap_r = NULL;
698
699 struct ocmem_req *matched_req = NULL;
700 struct ocmem_region *matched_region = NULL;
701
702 struct ocmem_zone *zone = get_zone(owner);
703 struct ocmem_region *region = NULL;
704
705 matched_region = find_region_match(req->req_start, req->req_end);
706 matched_req = find_req_match(req->req_id, matched_region);
707
708 if (!matched_region || !matched_req) {
709 pr_err("Could not find backing region for req");
710 goto invalid_op_error;
711 }
712
713 if (matched_req != req) {
714 pr_err("Request does not match backing req");
715 goto invalid_op_error;
716 }
717
718 curr_sz = matched_req->req_sz;
719 curr_start = matched_req->req_start;
720 growth_sz = matched_req->req_max - matched_req->req_sz;
721
722 pr_debug("Attempting to grow req %p from %lx to %lx\n",
723 req, matched_req->req_sz, matched_req->req_max);
724
725 retry = false;
726
727 pr_debug("ocmem: GROW: growth size %lx\n", growth_sz);
728
729retry_next_step:
730
731 spanned_r = NULL;
732 overlap_r = NULL;
733
734 spanned_r = find_region(zone->z_head);
735 overlap_r = find_region_intersection(zone->z_head,
736 zone->z_head + growth_sz);
737
738 if (overlap_r == NULL) {
739 /* no conflicting regions, schedule this region */
740 zone->z_ops->free(zone, curr_start, curr_sz);
741 alloc_addr = zone->z_ops->allocate(zone, curr_sz + growth_sz);
742
743 if (alloc_addr < 0) {
744 pr_err("ocmem: zone allocation operation failed\n");
745 goto internal_error;
746 }
747
748 curr_sz += growth_sz;
749 /* Detach the region from the interval tree */
750 /* This is to guarantee that any change in size
751 * causes the tree to be rebalanced if required */
752
753 detach_req(matched_region, req);
754 if (req_count(matched_region) == 0) {
755 remove_region(matched_region);
756 region = matched_region;
757 } else {
758 region = create_region();
759 if (!region) {
760 pr_err("ocmem: Unable to create region\n");
761 goto region_error;
762 }
763 }
764
765 /* update the request */
766 req->req_start = alloc_addr;
767 /* increment the size to reflect new length */
768 req->req_sz = curr_sz;
769 req->req_end = alloc_addr + req->req_sz - 1;
770
771 /* update request state */
772 CLEAR_STATE(req, R_MUST_GROW);
773 SET_STATE(req, R_ALLOCATED);
774 SET_STATE(req, R_MUST_MAP);
775 req->op = SCHED_MAP;
776
777 /* update the region with new req */
778 attach_req(region, req);
779 populate_region(region, req);
780 update_region_prio(region);
781
782 /* update the tree with new region */
783 if (insert_region(region)) {
784 pr_err("ocmem: Failed to insert the region\n");
785 goto region_error;
786 }
787
788 if (retry) {
789 SET_STATE(req, R_MUST_GROW);
790 SET_STATE(req, R_PENDING);
791 req->op = SCHED_GROW;
792 return OP_PARTIAL;
793 }
794 } else if (spanned_r != NULL && overlap_r != NULL) {
795 /* resolve conflicting regions based on priority */
796 if (overlap_r->max_prio < prio) {
797 /* Growth cannot be triggered unless a previous
798 * client of lower priority was evicted */
799 pr_err("ocmem: Invalid growth scheduled\n");
800 /* This is serious enough to fail */
801 BUG();
802 return OP_FAIL;
803 } else if (overlap_r->max_prio > prio) {
804 if (min == max) {
805 /* Cannot grow at this time, try later */
806 SET_STATE(req, R_PENDING);
807 SET_STATE(req, R_MUST_GROW);
808 return OP_RESCHED;
809 } else {
810 /* Try to grow in steps */
811 growth_sz -= step;
812 /* We are OOM at this point so need to retry */
813 if (growth_sz <= curr_sz) {
814 SET_STATE(req, R_PENDING);
815 SET_STATE(req, R_MUST_GROW);
816 return OP_RESCHED;
817 }
818 retry = true;
819 pr_debug("ocmem: Attempting with reduced size %lx\n",
820 growth_sz);
821 goto retry_next_step;
822 }
823 } else {
824 pr_err("ocmem: grow: New Region %p Existing %p\n",
825 matched_region, overlap_r);
826 pr_err("ocmem: Undetermined behavior\n");
827 /* This is serious enough to fail */
828 BUG();
829 }
830 } else if (spanned_r == NULL && overlap_r != NULL) {
831 goto err_not_supported;
832 }
833
834 return OP_COMPLETE;
835
836err_not_supported:
837 pr_err("ocmem: Scheduled unsupported operation\n");
838 return OP_FAIL;
839region_error:
840 zone->z_ops->free(zone, alloc_addr, curr_sz);
841 detach_req(region, req);
842 update_region_prio(region);
843 /* req is going to be destroyed by the caller anyways */
844internal_error:
845 destroy_region(region);
846invalid_op_error:
847 return OP_FAIL;
848}
849
850/* Must be called with sched_mutex held */
851static int __sched_free(struct ocmem_req *req)
852{
853 int owner = req->owner;
854 int ret = 0;
855
856 struct ocmem_req *matched_req = NULL;
857 struct ocmem_region *matched_region = NULL;
858
859 struct ocmem_zone *zone = get_zone(owner);
860
861 BUG_ON(!zone);
862
863 matched_region = find_region_match(req->req_start, req->req_end);
864 matched_req = find_req_match(req->req_id, matched_region);
865
866 if (!matched_region || !matched_req)
867 goto invalid_op_error;
868 if (matched_req != req)
869 goto invalid_op_error;
870
871 ret = zone->z_ops->free(zone,
872 matched_req->req_start, matched_req->req_sz);
873
874 if (ret < 0)
875 goto err_op_fail;
876
877 detach_req(matched_region, matched_req);
878 update_region_prio(matched_region);
879 if (req_count(matched_region) == 0) {
880 remove_region(matched_region);
881 destroy_region(matched_region);
882 }
883
884 /* Update the request */
885 req->req_start = 0x0;
886 req->req_sz = 0x0;
887 req->req_end = 0x0;
888 SET_STATE(req, R_FREE);
889 return OP_COMPLETE;
890invalid_op_error:
891 pr_err("ocmem: free: Failed to find matching region\n");
892err_op_fail:
893 pr_err("ocmem: free: Failed\n");
894 return OP_FAIL;
895}
896
897/* Must be called with sched_mutex held */
898static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz)
899{
900 int owner = req->owner;
901 int ret = 0;
902
903 struct ocmem_req *matched_req = NULL;
904 struct ocmem_region *matched_region = NULL;
905 struct ocmem_region *region = NULL;
906 unsigned long alloc_addr = 0x0;
907
908 struct ocmem_zone *zone = get_zone(owner);
909
910 BUG_ON(!zone);
911
912 /* The shrink should not be called for zero size */
913 BUG_ON(new_sz == 0);
914
915 matched_region = find_region_match(req->req_start, req->req_end);
916 matched_req = find_req_match(req->req_id, matched_region);
917
918 if (!matched_region || !matched_req)
919 goto invalid_op_error;
920 if (matched_req != req)
921 goto invalid_op_error;
922
923 ret = zone->z_ops->free(zone,
924 matched_req->req_start, matched_req->req_sz);
925
926 if (ret < 0) {
927 pr_err("Zone Allocation operation failed\n");
928 goto internal_error;
929 }
930
931 alloc_addr = zone->z_ops->allocate(zone, new_sz);
932
933 if (alloc_addr < 0) {
934 pr_err("Zone Allocation operation failed\n");
935 goto internal_error;
936 }
937
938 /* Detach the region from the interval tree */
939 /* This is to guarantee that the change in size
940 * causes the tree to be rebalanced if required */
941
942 detach_req(matched_region, req);
943 if (req_count(matched_region) == 0) {
944 remove_region(matched_region);
945 region = matched_region;
946 } else {
947 region = create_region();
948 if (!region) {
949 pr_err("ocmem: Unable to create region\n");
950 goto internal_error;
951 }
952 }
953 /* update the request */
954 req->req_start = alloc_addr;
955 req->req_sz = new_sz;
956 req->req_end = alloc_addr + req->req_sz;
957
958 if (req_count(region) == 0) {
959 remove_region(matched_region);
960 destroy_region(matched_region);
961 }
962
963 /* update request state */
964 SET_STATE(req, R_MUST_GROW);
965 SET_STATE(req, R_MUST_MAP);
966 req->op = SCHED_MAP;
967
968 /* attach the request to the region */
969 attach_req(region, req);
970 populate_region(region, req);
971 update_region_prio(region);
972
973 /* update the tree with new region */
974 if (insert_region(region)) {
975 pr_err("ocmem: Failed to insert the region\n");
976 zone->z_ops->free(zone, alloc_addr, new_sz);
977 detach_req(region, req);
978 update_region_prio(region);
979 /* req will be destroyed by the caller */
980 goto region_error;
981 }
982 return OP_COMPLETE;
983
984region_error:
985 destroy_region(region);
986internal_error:
987 pr_err("ocmem: shrink: Failed\n");
988 return OP_FAIL;
989invalid_op_error:
990 pr_err("ocmem: shrink: Failed to find matching region\n");
991 return OP_FAIL;
992}
993
994/* Must be called with sched_mutex held */
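/*
 * Allocates OCMEM for a request, starting at req_max and backing off in
 * req_step units down to req_min. A conflict with a higher priority region
 * either re-queues the request or shrinks the attempt; a conflict with a
 * lower priority region triggers eviction (OP_EVICT).
 */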
995static int __sched_allocate(struct ocmem_req *req, bool can_block,
996 bool can_wait)
997{
998 unsigned long min = req->req_min;
999 unsigned long max = req->req_max;
1000 unsigned long step = req->req_step;
1001 int owner = req->owner;
1002 unsigned long sz = max;
1003 enum client_prio prio = req->prio;
1004 unsigned long alloc_addr = 0x0;
1005 bool retry;
1006
1007 struct ocmem_region *spanned_r = NULL;
1008 struct ocmem_region *overlap_r = NULL;
1009
1010 struct ocmem_zone *zone = get_zone(owner);
1011 struct ocmem_region *region = NULL;
1012
1013 BUG_ON(!zone);
1014
1015 if (min > (zone->z_end - zone->z_start)) {
1016 pr_err("ocmem: requested minimum size exceeds quota\n");
1017 goto invalid_op_error;
1018 }
1019
1020 if (max > (zone->z_end - zone->z_start)) {
1021 pr_err("ocmem: requested maximum size exceeds quota\n");
1022 goto invalid_op_error;
1023 }
1024
1025 if (min > zone->z_free) {
1026 pr_err("ocmem: out of memory for zone %d\n", owner);
1027 goto invalid_op_error;
1028 }
1029
1030 region = create_region();
1031
1032 if (!region) {
1033 pr_err("ocmem: Unable to create region\n");
1034 goto invalid_op_error;
1035 }
1036
1037 retry = false;
1038
1039 pr_debug("ocmem: do_allocate: %s request size %lx\n",
1040 get_name(owner), sz);
1041
1042retry_next_step:
1043
1044 spanned_r = NULL;
1045 overlap_r = NULL;
1046
1047 spanned_r = find_region(zone->z_head);
1048 overlap_r = find_region_intersection(zone->z_head, zone->z_head + sz);
1049
1050 if (overlap_r == NULL) {
1051 /* no conflicting regions, schedule this region */
1052 alloc_addr = zone->z_ops->allocate(zone, sz);
1053
1054 if (alloc_addr < 0) {
1055 pr_err("Zone Allocation operation failed\n");
1056 goto internal_error;
1057 }
1058
1059 /* update the request */
1060 req->req_start = alloc_addr;
1061 req->req_end = alloc_addr + sz - 1;
1062 req->req_sz = sz;
1063 req->zone = zone;
1064
1065 /* update request state */
1066 CLEAR_STATE(req, R_FREE);
1067 CLEAR_STATE(req, R_PENDING);
1068 SET_STATE(req, R_ALLOCATED);
1069 SET_STATE(req, R_MUST_MAP);
1070 req->op = SCHED_NOP;
1071
1072 /* attach the request to the region */
1073 attach_req(region, req);
1074 populate_region(region, req);
1075 update_region_prio(region);
1076
1077 /* update the tree with new region */
1078 if (insert_region(region)) {
1079 pr_err("ocmem: Failed to insert the region\n");
1080 zone->z_ops->free(zone, alloc_addr, sz);
1081 detach_req(region, req);
1082 update_region_prio(region);
1083 /* req will be destroyed by the caller */
1084 goto internal_error;
1085 }
1086
1087 if (retry) {
1088 SET_STATE(req, R_MUST_GROW);
1089 SET_STATE(req, R_PENDING);
1090 req->op = SCHED_GROW;
1091 return OP_PARTIAL;
1092 }
1093 } else if (spanned_r != NULL && overlap_r != NULL) {
1094 /* resolve conflicting regions based on priority */
1095 if (overlap_r->max_prio < prio) {
1096 if (min == max) {
1097 req->req_start = zone->z_head;
1098 req->req_end = zone->z_head + sz - 1;
1099 req->req_sz = 0x0;
1100 req->edata = NULL;
1101 goto trigger_eviction;
1102 } else {
1103 /* Try to allocate at least 'min' immediately */
1104 sz -= step;
1105 if (sz < min)
1106 goto err_out_of_mem;
1107 retry = true;
1108 pr_debug("ocmem: Attempting with reduced size %lx\n",
1109 sz);
1110 goto retry_next_step;
1111 }
1112 } else if (overlap_r->max_prio > prio) {
1113 if (can_block == true) {
1114 SET_STATE(req, R_PENDING);
1115 SET_STATE(req, R_MUST_GROW);
1116 return OP_RESCHED;
1117 } else {
1118 if (min == max) {
1119 pr_err("Cannot allocate %lx synchronously\n",
1120 sz);
1121 goto err_out_of_mem;
1122 } else {
1123 sz -= step;
1124 if (sz < min)
1125 goto err_out_of_mem;
1126 retry = true;
1127 pr_debug("ocmem: Attempting reduced size %lx\n",
1128 sz);
1129 goto retry_next_step;
1130 }
1131 }
1132 } else {
1133 pr_err("ocmem: Undetermined behavior\n");
1134 pr_err("ocmem: New Region %p Existing %p\n", region,
1135 overlap_r);
1136 /* This is serious enough to fail */
1137 BUG();
1138 }
1139 } else if (spanned_r == NULL && overlap_r != NULL)
1140 goto err_not_supported;
1141
1142 return OP_COMPLETE;
1143
1144trigger_eviction:
1145 pr_debug("Trigger eviction of region %p\n", overlap_r);
1146 destroy_region(region);
1147 return OP_EVICT;
1148
1149err_not_supported:
1150 pr_err("ocmem: Scheduled unsupported operation\n");
1151 return OP_FAIL;
1152
1153err_out_of_mem:
1154 pr_err("ocmem: Out of memory during allocation\n");
1155internal_error:
1156 destroy_region(region);
1157invalid_op_error:
1158 return OP_FAIL;
1159}
1160
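/*
 * Park a request on the pending scheduler queue for its owner; entries are
 * picked up again later via ocmem_fetch_req().
 */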
1161static int sched_enqueue(struct ocmem_req *priv)
1162{
1163 struct ocmem_req *next = NULL;
1164 mutex_lock(&sched_queue_mutex);
1165 list_add_tail(&priv->sched_list, &sched_queue[priv->owner]);
1166 pr_debug("enqueued req %p\n", priv);
1167 list_for_each_entry(next, &sched_queue[priv->owner], sched_list) {
1168 pr_debug("pending requests for client %p\n", next);
1169 }
1170 mutex_unlock(&sched_queue_mutex);
1171 return 0;
1172}
1173
1174static void sched_dequeue(struct ocmem_req *victim_req)
1175{
1176 struct ocmem_req *req = NULL;
1177 struct ocmem_req *next = NULL;
1178 int id;
1179
1180 if (!victim_req)
1181 return;
1182
1183 id = victim_req->owner;
1184
1185 mutex_lock(&sched_queue_mutex);
1186
1187 if (list_empty(&sched_queue[id]))
1188 goto dequeue_done;
1189
1190 list_for_each_entry_safe(req, next, &sched_queue[id], sched_list)
1191 {
1192 if (req == victim_req) {
1193 pr_debug("ocmem: Cancelling pending request %p\n",
1194 req);
1195 list_del(&req->sched_list);
1196 goto dequeue_done;
1197 }
1198 }
1199
1200dequeue_done:
1201 mutex_unlock(&sched_queue_mutex);
1202 return;
1203}
1204
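/*
 * Pull the next pending request off the scheduler queues, scanning in
 * priority order and removing the first entry found.
 */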
1205static struct ocmem_req *ocmem_fetch_req(void)
1206{
1207 int i;
1208 struct ocmem_req *req = NULL;
1209 struct ocmem_req *next = NULL;
1210
1211 mutex_lock(&sched_queue_mutex);
1212 for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
1213 if (list_empty(&sched_queue[i]))
1214 continue;
1215 list_for_each_entry_safe(req, next, &sched_queue[i], sched_list)
1216 {
1217 if (req) {
1218 pr_debug("ocmem: Fetched pending request %p\n",
1219 req);
1220 list_del(&req->sched_list);
1221 break;
1222 }
1223 }
1224 }
1225 mutex_unlock(&sched_queue_mutex);
1226 return req;
1227}
1228
1229
1230unsigned long process_quota(int id)
1231{
1232 struct ocmem_zone *zone = NULL;
1233
1234 if (is_blocked(id))
1235 return 0;
1236
1237 zone = get_zone(id);
1238
1239 if (zone && zone->z_pool)
1240 return zone->z_end - zone->z_start;
1241 else
1242 return 0;
1243}
1244
1245static int do_grow(struct ocmem_req *req)
1246{
1247 struct ocmem_buf *buffer = NULL;
1248 bool can_block = true;
1249 int rc = 0;
1250
1251 down_write(&req->rw_sem);
1252 buffer = req->buffer;
1253
1254 /* Take the scheduler mutex */
1255 mutex_lock(&sched_mutex);
1256 rc = __sched_grow(req, can_block);
1257 mutex_unlock(&sched_mutex);
1258
1259 if (rc == OP_FAIL)
1260 goto err_op_fail;
1261
1262 if (rc == OP_RESCHED) {
1263 pr_debug("ocmem: Enqueue this allocation");
1264 sched_enqueue(req);
1265 }
1266
1267 else if (rc == OP_COMPLETE || rc == OP_PARTIAL) {
1268 buffer->addr = device_address(req->owner, req->req_start);
1269 buffer->len = req->req_sz;
1270 }
1271
1272 up_write(&req->rw_sem);
1273 return 0;
1274err_op_fail:
1275 up_write(&req->rw_sem);
1276 return -EINVAL;
1277}
1278
1279static int process_grow(struct ocmem_req *req)
1280{
1281 int rc = 0;
1282 unsigned long offset = 0;
1283
1284 /* Attempt to grow the region */
1285 rc = do_grow(req);
1286
1287 if (rc < 0)
1288 return -EINVAL;
1289
1290 rc = process_map(req, req->req_start, req->req_end);
1291 if (rc < 0)
1292 return -EINVAL;
1293
1294 offset = phys_to_offset(req->req_start);
1295
1296 rc = ocmem_memory_on(req->owner, offset, req->req_sz);
1297
1298 if (rc < 0) {
1299 pr_err("Failed to switch ON memory macros\n");
1300 goto power_ctl_error;
1301 }
1302
1303 /* Notify the client about the buffer growth */
1304 rc = dispatch_notification(req->owner, OCMEM_ALLOC_GROW, req->buffer);
1305 if (rc < 0) {
1306 pr_err("No notifier callback to cater for req %p event: %d\n",
1307 req, OCMEM_ALLOC_GROW);
1308 BUG();
1309 }
1310 return 0;
1311power_ctl_error:
1312 return -EINVAL;
1313}
1314
1315static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
1316{
1317
1318 int rc = 0;
1319 struct ocmem_buf *buffer = NULL;
1320
1321 down_write(&req->rw_sem);
1322 buffer = req->buffer;
1323
1324 /* Take the scheduler mutex */
1325 mutex_lock(&sched_mutex);
1326 rc = __sched_shrink(req, shrink_size);
1327 mutex_unlock(&sched_mutex);
1328
1329 if (rc == OP_FAIL)
1330 goto err_op_fail;
1331
1332 else if (rc == OP_COMPLETE) {
1333 buffer->addr = device_address(req->owner, req->req_start);
1334 buffer->len = req->req_sz;
1335 }
1336
1337 up_write(&req->rw_sem);
1338 return 0;
1339err_op_fail:
1340 up_write(&req->rw_sem);
1341 return -EINVAL;
1342}
1343
1344static void ocmem_sched_wk_func(struct work_struct *work);
1345DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func);
1346
1347static int ocmem_schedule_pending(void)
1348{
1349
1350 bool need_sched = false;
1351 int i = 0;
1352
1353 for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
1354 if (!list_empty(&sched_queue[i])) {
1355 need_sched = true;
1356 break;
1357 }
1358 }
1359
1360 if (need_sched == true) {
1361 cancel_delayed_work(&ocmem_sched_thread);
1362 schedule_delayed_work(&ocmem_sched_thread,
1363 msecs_to_jiffies(SCHED_DELAY));
1364 pr_debug("ocmem: Scheduled delayed work\n");
1365 }
1366 return 0;
1367}
1368
1369static int do_free(struct ocmem_req *req)
1370{
1371 int rc = 0;
1372 struct ocmem_buf *buffer = req->buffer;
1373
1374 down_write(&req->rw_sem);
1375
1376 if (is_mapped(req)) {
1377 pr_err("ocmem: Buffer needs to be unmapped before free\n");
1378 goto err_free_fail;
1379 }
1380
1381 pr_debug("ocmem: do_free: client %s req %p\n", get_name(req->owner),
1382 req);
1383 /* Grab the sched mutex */
1384 mutex_lock(&sched_mutex);
1385 rc = __sched_free(req);
1386 mutex_unlock(&sched_mutex);
1387
1388 switch (rc) {
1389
1390 case OP_COMPLETE:
1391 buffer->addr = 0x0;
1392 buffer->len = 0x0;
1393 break;
1394 case OP_FAIL:
1395 default:
1396 goto err_free_fail;
1397 break;
1398 }
1399
1400 up_write(&req->rw_sem);
1401 return 0;
1402err_free_fail:
1403 up_write(&req->rw_sem);
1404 pr_err("ocmem: freeing req %p failed\n", req);
1405 return -EINVAL;
1406}
1407
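/*
 * Client-visible free path: cancel any pending scheduler entry for the
 * request, unmap and release the backing memory if it is still allocated,
 * switch the memory macros off and finally destroy the request.
 */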
1408int process_free(int id, struct ocmem_handle *handle)
1409{
1410 struct ocmem_req *req = NULL;
1411 struct ocmem_buf *buffer = NULL;
1412 unsigned long offset = 0;
1413 int rc = 0;
1414
1415 if (is_blocked(id)) {
1416 pr_err("Client %d cannot request free\n", id);
1417 return -EINVAL;
1418 }
1419
1420 req = handle_to_req(handle);
1421 buffer = handle_to_buffer(handle);
1422
1423 if (!req)
1424 return -EINVAL;
1425
1426 if (req->req_start != core_address(id, buffer->addr)) {
1427 pr_err("Invalid buffer handle passed for free\n");
1428 return -EINVAL;
1429 }
1430
1431 mutex_lock(&sched_mutex);
1432 sched_dequeue(req);
1433 mutex_unlock(&sched_mutex);
1434
1435 if (!TEST_STATE(req, R_FREE)) {
1436
1437 rc = process_unmap(req, req->req_start, req->req_end);
1438 if (rc < 0)
1439 return -EINVAL;
1440
1441 rc = do_free(req);
1442 if (rc < 0)
1443 return -EINVAL;
1444 }
1445
1446 if (req->req_sz != 0) {
1447
1448 offset = phys_to_offset(req->req_start);
1449
1450 rc = ocmem_memory_off(req->owner, offset, req->req_sz);
1451
1452 if (rc < 0) {
1453 pr_err("Failed to switch OFF memory macros\n");
1454 return -EINVAL;
1455 }
1456
1457 }
1458
1459 inc_ocmem_stat(zone_of(req), NR_FREES);
1460
1461 ocmem_destroy_req(req);
1462 handle->req = NULL;
1463
1464 ocmem_schedule_pending();
1465 return 0;
1466}
1467
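/*
 * Work item that performs the queued RDM transfer between DDR and OCMEM for
 * a map list and then notifies the client of the result.
 */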
1468static void ocmem_rdm_worker(struct work_struct *work)
1469{
1470 int offset = 0;
1471 int rc = 0;
1472 int event;
1473 struct ocmem_rdm_work *work_data = container_of(work,
1474 struct ocmem_rdm_work, work);
1475 int id = work_data->id;
1476 struct ocmem_map_list *list = work_data->list;
1477 int direction = work_data->direction;
1478 struct ocmem_handle *handle = work_data->handle;
1479 struct ocmem_req *req = handle_to_req(handle);
1480 struct ocmem_buf *buffer = handle_to_buffer(handle);
1481
1482 down_write(&req->rw_sem);
1483 offset = phys_to_offset(req->req_start);
1484 rc = ocmem_rdm_transfer(id, list, offset, direction);
1485 if (work_data->direction == TO_OCMEM)
1486 event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
1487 else
1488 event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;
1489 up_write(&req->rw_sem);
1490 kfree(work_data);
1491 dispatch_notification(id, event, buffer);
1492}
1493
1494int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle,
1495 struct ocmem_map_list *list, int direction)
1496{
1497 struct ocmem_rdm_work *work_data = NULL;
1498
1499 down_write(&req->rw_sem);
1500
1501 work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC);
1502 if (!work_data)
1503 BUG();
1504
1505 work_data->handle = handle;
1506 work_data->list = list;
1507 work_data->id = req->owner;
1508 work_data->direction = direction;
1509 INIT_WORK(&work_data->work, ocmem_rdm_worker);
1510 up_write(&req->rw_sem);
1511 queue_work(ocmem_rdm_wq, &work_data->work);
1512 return 0;
1513}
1514
1515int process_xfer_out(int id, struct ocmem_handle *handle,
1516 struct ocmem_map_list *list)
1517{
1518 struct ocmem_req *req = NULL;
1519 int rc = 0;
1520
1521 req = handle_to_req(handle);
1522
1523 if (!req)
1524 return -EINVAL;
1525
1526 if (!is_mapped(req)) {
1527 pr_err("Buffer is not currently mapped\n");
1528 goto transfer_out_error;
1529 }
1530
1531 rc = queue_transfer(req, handle, list, TO_DDR);
1532
1533 if (rc < 0) {
1534 pr_err("Failed to queue rdm transfer to DDR\n");
1535 inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
1536 goto transfer_out_error;
1537 }
1538
1539 inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_DDR);
1540 return 0;
1541
1542transfer_out_error:
1543 return -EINVAL;
1544}
1545
1546int process_xfer_in(int id, struct ocmem_handle *handle,
1547 struct ocmem_map_list *list)
1548{
1549 struct ocmem_req *req = NULL;
1550 int rc = 0;
1551
1552 req = handle_to_req(handle);
1553
1554 if (!req)
1555 return -EINVAL;
1556
1557
1558 if (!is_mapped(req)) {
1559 pr_err("Buffer is not already mapped for transfer\n");
1560 goto transfer_in_error;
1561 }
1562
1563
1564 inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_OCMEM);
1565 rc = queue_transfer(req, handle, list, TO_OCMEM);
1566
1567 if (rc < 0) {
1568 pr_err("Failed to queue rdm transfer to OCMEM\n");
1569 inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
1570 goto transfer_in_error;
1571 }
1572
1573 return 0;
1574transfer_in_error:
1575 return -EINVAL;
1576}
1577
1578int process_shrink(int id, struct ocmem_handle *handle, unsigned long size)
1579{
1580 struct ocmem_req *req = NULL;
1581 struct ocmem_buf *buffer = NULL;
1582 struct ocmem_eviction_data *edata = NULL;
1583 int rc = 0;
1584
1585 if (is_blocked(id)) {
1586 pr_err("Client %d cannot request free\n", id);
1587 return -EINVAL;
1588 }
1589
1590 req = handle_to_req(handle);
1591 buffer = handle_to_buffer(handle);
1592
1593 if (!req)
1594 return -EINVAL;
1595
1596 if (req->req_start != core_address(id, buffer->addr)) {
1597 pr_err("Invalid buffer handle passed for shrink\n");
1598 return -EINVAL;
1599 }
1600
1601 edata = req->edata;
1602
1603 if (!edata) {
1604 pr_err("Unable to find eviction data\n");
1605 return -EINVAL;
1606 }
1607
1608 pr_debug("Found edata %p in request %p\n", edata, req);
1609
1610 inc_ocmem_stat(zone_of(req), NR_SHRINKS);
1611
1612 if (size == 0) {
1613 pr_debug("req %p being shrunk to zero\n", req);
1614 if (is_mapped(req))
1615 rc = process_unmap(req, req->req_start, req->req_end);
1616 if (rc < 0)
1617 return -EINVAL;
1618 rc = do_free(req);
1619 if (rc < 0)
1620 return -EINVAL;
1621 } else {
1622 rc = do_shrink(req, size);
1623 if (rc < 0)
1624 return -EINVAL;
1625 }
1626
1627 req->edata = NULL;
1628 CLEAR_STATE(req, R_ALLOCATED);
1629 SET_STATE(req, R_FREE);
1630
1631 if (atomic_dec_and_test(&edata->pending)) {
1632 pr_debug("ocmem: All conflicting allocations were shrunk\n");
1633 complete(&edata->completion);
1634 }
1635
1636 return 0;
1637}
1638
1639int process_xfer(int id, struct ocmem_handle *handle,
1640 struct ocmem_map_list *list, int direction)
1641{
1642 int rc = 0;
1643
1644 if (is_tcm(id)) {
1645 WARN(1, "Mapping operation is invalid for client\n");
1646 return -EINVAL;
1647 }
1648
1649 if (direction == TO_DDR)
1650 rc = process_xfer_out(id, handle, list);
1651 else if (direction == TO_OCMEM)
1652 rc = process_xfer_in(id, handle, list);
1653 return rc;
1654}
1655
1656static struct ocmem_eviction_data *init_eviction(int id)
1657{
1658 struct ocmem_eviction_data *edata = NULL;
1659 int prio = ocmem_client_table[id].priority;
1660
1661 edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC);
1662
1663 if (!edata) {
1664 pr_err("ocmem: Could not allocate eviction data\n");
1665 return NULL;
1666 }
1667
1668 INIT_LIST_HEAD(&edata->victim_list);
1669 INIT_LIST_HEAD(&edata->req_list);
1670 edata->prio = prio;
1671 atomic_set(&edata->pending, 0);
1672 return edata;
1673}
1674
1675static void free_eviction(struct ocmem_eviction_data *edata)
1676{
1677
1678 if (!edata)
1679 return;
1680
1681 if (!list_empty(&edata->req_list))
1682 pr_err("ocmem: Eviction data %p not empty\n", edata);
1683
1684 kfree(edata);
1685 edata = NULL;
1686}
1687
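/*
 * Returns true when the address ranges of 'new' and 'old' intersect. Used by
 * __evict_common() to pick victims for a targeted (non-passive) eviction.
 */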
1688static bool is_overlapping(struct ocmem_req *new, struct ocmem_req *old)
1689{
1690
1691 if (!new || !old)
1692 return false;
1693
1694 pr_debug("check overlap [%lx -- %lx] on [%lx -- %lx]\n",
1695 new->req_start, new->req_end,
1696 old->req_start, old->req_end);
1697
1698 if ((new->req_start < old->req_start &&
1699 new->req_end >= old->req_start) ||
1700 (new->req_start >= old->req_start &&
1701 new->req_start <= old->req_end &&
1702 new->req_end >= old->req_end)) {
1703 pr_debug("request %p overlaps with existing req %p\n",
1704 new, old);
1705 return true;
1706 }
1707 return false;
1708}
1709
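/*
 * Collect every request that must make way for an allocation at edata->prio.
 * Passive evictions take all lower priority requests; targeted evictions take
 * only those overlapping 'req'. Victims are chained on edata->req_list and
 * counted in edata->pending.
 */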
1710static int __evict_common(struct ocmem_eviction_data *edata,
1711 struct ocmem_req *req)
1712{
1713 struct rb_node *rb_node = NULL;
1714 struct ocmem_req *e_req = NULL;
1715 bool needs_eviction = false;
1716 int j = 0;
1717
1718 for (rb_node = rb_first(&sched_tree); rb_node;
1719 rb_node = rb_next(rb_node)) {
1720
1721 struct ocmem_region *tmp_region = NULL;
1722
1723 tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
1724
1725 if (tmp_region->max_prio < edata->prio) {
1726 for (j = edata->prio - 1; j > NO_PRIO; j--) {
1727 needs_eviction = false;
1728 e_req = find_req_match(j, tmp_region);
1729 if (!e_req)
1730 continue;
1731 if (edata->passive == true) {
1732 needs_eviction = true;
1733 } else {
1734 needs_eviction = is_overlapping(req,
1735 e_req);
1736 }
1737
1738 if (needs_eviction) {
1739 pr_debug("adding %p in region %p to eviction list\n",
1740 e_req, tmp_region);
1741 list_add_tail(
1742 &e_req->eviction_list,
1743 &edata->req_list);
1744 atomic_inc(&edata->pending);
1745 e_req->edata = edata;
1746 }
1747 }
1748 } else {
1749 pr_debug("Skipped region %p\n", tmp_region);
1750 }
1751 }
1752
1753 pr_debug("%d requests will be evicted\n", atomic_read(&edata->pending));
1754
1755 if (!atomic_read(&edata->pending))
1756 return -EINVAL;
1757 return 0;
1758}
1759
1760static void trigger_eviction(struct ocmem_eviction_data *edata)
1761{
1762 struct ocmem_req *req = NULL;
1763 struct ocmem_req *next = NULL;
1764 struct ocmem_buf buffer;
1765
1766 if (!edata)
1767 return;
1768
1769 BUG_ON(atomic_read(&edata->pending) == 0);
1770
1771 init_completion(&edata->completion);
1772
1773 list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
1774 {
1775 if (req) {
1776 pr_debug("ocmem: Evicting request %p\n", req);
1777 buffer.addr = req->req_start;
1778 buffer.len = 0x0;
1779 dispatch_notification(req->owner, OCMEM_ALLOC_SHRINK,
1780 &buffer);
1781 }
1782 }
1783 return;
1784}
1785
1786int process_evict(int id)
1787{
1788 struct ocmem_eviction_data *edata = NULL;
1789 int rc = 0;
1790
1791 edata = init_eviction(id);
1792
1793 if (!edata)
1794 return -EINVAL;
1795
1796 edata->passive = true;
1797
1798 mutex_lock(&sched_mutex);
1799
1800 rc = __evict_common(edata, NULL);
1801
1802 if (rc < 0)
1803 goto skip_eviction;
1804
1805 trigger_eviction(edata);
1806
1807 evictions[id] = edata;
1808
1809 mutex_unlock(&sched_mutex);
1810
1811 wait_for_completion(&edata->completion);
1812
1813 return 0;
1814
1815skip_eviction:
1816 evictions[id] = NULL;
1817 mutex_unlock(&sched_mutex);
1818 return 0;
1819}
1820
1821static int run_evict(struct ocmem_req *req)
1822{
1823 struct ocmem_eviction_data *edata = NULL;
1824 int rc = 0;
1825
1826 if (!req)
1827 return -EINVAL;
1828
1829 edata = init_eviction(req->owner);
1830
1831 if (!edata)
1832 return -EINVAL;
1833
1834 edata->passive = false;
1835
1836 rc = __evict_common(edata, req);
1837
1838 if (rc < 0)
1839 goto skip_eviction;
1840
1841 trigger_eviction(edata);
1842
1843 pr_debug("ocmem: attaching eviction %p to request %p", edata, req);
1844 req->edata = edata;
1845
1846 wait_for_completion(&edata->completion);
1847
1848 pr_debug("ocmem: eviction completed successfully\n");
1849 return 0;
1850
1851skip_eviction:
1852 pr_err("ocmem: Unable to run eviction\n");
1853 free_eviction(edata);
1854 return -EINVAL;
1855}
1856
1857static int __restore_common(struct ocmem_eviction_data *edata)
1858{
1859
1860 struct ocmem_req *req = NULL;
1861 struct ocmem_req *next = NULL;
1862
1863 if (!edata)
1864 return -EINVAL;
1865
1866 list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
1867 {
1868 if (req) {
1869 pr_debug("ocmem: restoring evicted request %p\n",
1870 req);
1871 list_del(&req->eviction_list);
1872 req->op = SCHED_ALLOCATE;
1873 sched_enqueue(req);
1874 inc_ocmem_stat(zone_of(req), NR_RESTORES);
1875 }
1876 }
1877
1878 pr_debug("Scheduled all evicted regions\n");
1879
1880 return 0;
1881}
1882
1883static int sched_restore(struct ocmem_req *req)
1884{
1885
1886 int rc = 0;
1887
1888 if (!req)
1889 return -EINVAL;
1890
1891 if (!req->edata)
1892 return 0;
1893
1894 rc = __restore_common(req->edata);
1895
1896 if (rc < 0)
1897 return -EINVAL;
1898
1899 free_eviction(req->edata);
1900 return 0;
1901}
1902
1903int process_restore(int id)
1904{
1905 struct ocmem_eviction_data *edata = evictions[id];
1906 int rc = 0;
1907
1908 if (!edata)
1909 return -EINVAL;
1910
1911 rc = __restore_common(edata);
1912
1913 if (rc < 0) {
1914 pr_err("Failed to restore evicted requests\n");
1915 return -EINVAL;
1916 }
1917
1918 free_eviction(edata);
1919 evictions[id] = NULL;
1920 ocmem_schedule_pending();
1921 return 0;
1922}
1923
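/*
 * Top-level allocation path: runs the scheduler allocation and, when it
 * returns OP_EVICT, evicts the conflicting lower priority requests, queues
 * them for restore and retries the allocation, all under allocation_mutex.
 */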
1924static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait)
1925{
1926 int rc = 0;
1927 int ret = 0;
1928 struct ocmem_buf *buffer = req->buffer;
1929
1930 down_write(&req->rw_sem);
1931
1932 mutex_lock(&allocation_mutex);
1933retry_allocate:
1934
1935 /* Take the scheduler mutex */
1936 mutex_lock(&sched_mutex);
1937 rc = __sched_allocate(req, can_block, can_wait);
1938 mutex_unlock(&sched_mutex);
1939
1940 if (rc == OP_EVICT) {
1941
1942 ret = run_evict(req);
1943
1944 if (ret == 0) {
1945 rc = sched_restore(req);
1946 if (rc < 0) {
1947 pr_err("Failed to restore for req %p\n", req);
1948 goto err_allocate_fail;
1949 }
1950 req->edata = NULL;
1951
1952 pr_debug("Attempting to re-allocate req %p\n", req);
1953 req->req_start = 0x0;
1954 req->req_end = 0x0;
1955 goto retry_allocate;
1956 } else {
1957 goto err_allocate_fail;
1958 }
1959 }
1960
1961 mutex_unlock(&allocation_mutex);
1962
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001963 if (rc == OP_FAIL) {
1964 inc_ocmem_stat(zone_of(req), NR_ALLOCATION_FAILS);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001965 goto err_allocate_fail;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001966 }
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001967
1968 if (rc == OP_RESCHED) {
1969 buffer->addr = 0x0;
1970 buffer->len = 0x0;
1971 pr_debug("ocmem: Enqueuing req %p\n", req);
1972 sched_enqueue(req);
1973 } else if (rc == OP_PARTIAL) {
1974 buffer->addr = device_address(req->owner, req->req_start);
1975 buffer->len = req->req_sz;
Naveen Ramaraj6a92b262012-07-30 17:36:24 -07001976 inc_ocmem_stat(zone_of(req), NR_RANGE_ALLOCATIONS);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001977 pr_debug("ocmem: Enqueuing req %p\n", req);
1978 sched_enqueue(req);
1979 } else if (rc == OP_COMPLETE) {
1980 buffer->addr = device_address(req->owner, req->req_start);
1981 buffer->len = req->req_sz;
1982 }
1983
1984 up_write(&req->rw_sem);
1985 return 0;
1986err_allocate_fail:
Naveen Ramaraj0cd880a2012-10-16 17:38:06 -07001987 mutex_unlock(&allocation_mutex);
Naveen Ramarajb9da05782012-05-07 09:07:35 -07001988 up_write(&req->rw_sem);
1989 return -EINVAL;
1990}
1991
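/*
 * Copy the request's OCMEM contents into the DDR buffer at @addr using the
 * io-mapped OCMEM base (ocmem_vaddr) plus the request's offset.
 */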
static int do_dump(struct ocmem_req *req, unsigned long addr)
{
	void __iomem *req_vaddr;
	unsigned long offset = 0x0;

	down_write(&req->rw_sem);

	offset = phys_to_offset(req->req_start);

	req_vaddr = ocmem_vaddr + offset;

	if (!req_vaddr)
		goto err_do_dump;

	pr_debug("Dumping client %s buffer ocmem p: %lx (v: %p) to ddr %lx\n",
			get_name(req->owner), req->req_start,
			req_vaddr, addr);

	memcpy((void *)addr, req_vaddr, req->req_sz);

	up_write(&req->rw_sem);
	return 0;
err_do_dump:
	up_write(&req->rw_sem);
	return -EINVAL;
}

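/*
 * Synchronous allocation entry point for client @id: build an ocmem_req
 * for the handle, run do_allocate(), and if any memory was granted map the
 * region and switch on the backing memory macros before returning.
 */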
int process_allocate(int id, struct ocmem_handle *handle,
			unsigned long min, unsigned long max,
			unsigned long step, bool can_block, bool can_wait)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	int rc = 0;
	unsigned long offset = 0;

	/* sanity checks */
	if (is_blocked(id)) {
		pr_err("Client %d cannot request allocation\n", id);
		return -EINVAL;
	}

	if (handle->req != NULL) {
		pr_err("Invalid handle passed in\n");
		return -EINVAL;
	}

	buffer = handle_to_buffer(handle);
	BUG_ON(buffer == NULL);

	/* prepare a request structure to represent this transaction */
	req = ocmem_create_req();
	if (!req)
		return -ENOMEM;

	req->owner = id;
	req->req_min = min;
	req->req_max = max;
	req->req_step = step;
	req->prio = ocmem_client_table[id].priority;
	req->op = SCHED_ALLOCATE;
	req->buffer = buffer;

	inc_ocmem_stat(zone_of(req), NR_REQUESTS);

	rc = do_allocate(req, can_block, can_wait);

	if (rc < 0)
		goto do_allocate_error;

	inc_ocmem_stat(zone_of(req), NR_SYNC_ALLOCATIONS);

	handle->req = req;

	if (req->req_sz != 0) {

		rc = process_map(req, req->req_start, req->req_end);
		if (rc < 0)
			goto map_error;

		offset = phys_to_offset(req->req_start);

		rc = ocmem_memory_on(req->owner, offset, req->req_sz);

		if (rc < 0) {
			pr_err("Failed to switch ON memory macros\n");
			goto power_ctl_error;
		}
	}

	return 0;

power_ctl_error:
	process_unmap(req, req->req_start, req->req_end);
map_error:
	handle->req = NULL;
	do_free(req);
do_allocate_error:
	ocmem_destroy_req(req);
	return -EINVAL;
}

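/*
 * Deferred allocation path, run from the scheduler work queue for requests
 * that were previously enqueued. On success the owner is notified of the
 * buffer growth via OCMEM_ALLOC_GROW.
 */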
int process_delayed_allocate(struct ocmem_req *req)
{
	struct ocmem_handle *handle = NULL;
	int rc = 0;
	int id = req->owner;
	unsigned long offset = 0;

	handle = req_to_handle(req);
	BUG_ON(handle == NULL);

	rc = do_allocate(req, true, false);

	if (rc < 0)
		goto do_allocate_error;

	/* The request can still be pending */
	if (TEST_STATE(req, R_PENDING))
		return 0;

	inc_ocmem_stat(zone_of(req), NR_ASYNC_ALLOCATIONS);

	if (req->req_sz != 0) {

		rc = process_map(req, req->req_start, req->req_end);
		if (rc < 0)
			goto map_error;

		offset = phys_to_offset(req->req_start);

		rc = ocmem_memory_on(req->owner, offset, req->req_sz);

		if (rc < 0) {
			pr_err("Failed to switch ON memory macros\n");
			goto power_ctl_error;
		}
	}

	/* Notify the client about the buffer growth */
	rc = dispatch_notification(id, OCMEM_ALLOC_GROW, req->buffer);
	if (rc < 0) {
		pr_err("No notifier callback to cater for req %p event: %d\n",
				req, OCMEM_ALLOC_GROW);
		BUG();
	}
	return 0;

power_ctl_error:
	process_unmap(req, req->req_start, req->req_end);
map_error:
	handle->req = NULL;
	do_free(req);
do_allocate_error:
	ocmem_destroy_req(req);
	return -EINVAL;
}

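/*
 * Dump a client's mapped OCMEM buffer into DDR at @addr. The copy is done
 * under sched_mutex so the region cannot be rearranged mid-dump.
 */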
int process_dump(int id, struct ocmem_handle *handle, unsigned long addr)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;

	if (!is_mapped(req)) {
		pr_err("Buffer is not mapped\n");
		goto dump_error;
	}

	inc_ocmem_stat(zone_of(req), NR_DUMP_REQUESTS);

	mutex_lock(&sched_mutex);
	rc = do_dump(req, addr);
	mutex_unlock(&sched_mutex);

	if (rc < 0)
		goto dump_error;

	inc_ocmem_stat(zone_of(req), NR_DUMP_COMPLETE);
	return 0;

dump_error:
	pr_err("Dumping OCMEM memory failed for client %d\n", id);
	return -EINVAL;
}

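/*
 * Scheduler work function: fetch one pending request and run its deferred
 * grow or allocate operation.
 */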
static void ocmem_sched_wk_func(struct work_struct *work)
{
	struct ocmem_buf *buffer = NULL;
	struct ocmem_handle *handle = NULL;
	struct ocmem_req *req = ocmem_fetch_req();

	if (!req) {
		pr_debug("No Pending Requests found\n");
		return;
	}

	pr_debug("ocmem: sched_wk pending req %p\n", req);
	handle = req_to_handle(req);
	buffer = handle_to_buffer(handle);
	BUG_ON(req->op == SCHED_NOP);

	switch (req->op) {
	case SCHED_GROW:
		process_grow(req);
		break;
	case SCHED_ALLOCATE:
		process_delayed_allocate(req);
		break;
	default:
		pr_err("ocmem: Unknown operation encountered\n");
		break;
	}
}

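/*
 * debugfs "allocations" dump: one line per active request in the region
 * tree, walked from highest to lowest priority, e.g.
 *
 *   owner: graphics 0x180000 -- 0x1fffff size 0x80000 [state: 24]
 *
 * (the client name and numbers above are purely illustrative).
 */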
static int ocmem_allocations_show(struct seq_file *f, void *dummy)
{
	struct rb_node *rb_node = NULL;
	struct ocmem_req *req = NULL;
	unsigned j;

	mutex_lock(&sched_mutex);
	for (rb_node = rb_first(&sched_tree); rb_node;
			rb_node = rb_next(rb_node)) {
		struct ocmem_region *tmp_region = NULL;
		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
		for (j = MAX_OCMEM_PRIO - 1; j > NO_PRIO; j--) {
			req = find_req_match(j, tmp_region);
			if (req) {
				seq_printf(f,
					"owner: %s 0x%lx -- 0x%lx size 0x%lx [state: %2lx]\n",
					get_name(req->owner),
					req->req_start, req->req_end,
					req->req_sz, req->state);
			}
		}
	}
	mutex_unlock(&sched_mutex);
	return 0;
}

static int ocmem_allocations_open(struct inode *inode, struct file *file)
{
	return single_open(file, ocmem_allocations_show, inode->i_private);
}

static const struct file_operations allocations_show_fops = {
	.open = ocmem_allocations_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

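/*
 * One-time scheduler setup at probe time: initialise the locks, queues and
 * work queues, cache the io-mapped OCMEM base from the platform data and
 * expose the "allocations" debugfs node.
 */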
int ocmem_sched_init(struct platform_device *pdev)
{
	int i = 0;
	struct ocmem_plat_data *pdata = NULL;
	struct device *dev = &pdev->dev;

	sched_tree = RB_ROOT;
	pdata = platform_get_drvdata(pdev);
	mutex_init(&allocation_mutex);
	mutex_init(&sched_mutex);
	mutex_init(&sched_queue_mutex);
	ocmem_vaddr = pdata->vbase;
	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++)
		INIT_LIST_HEAD(&sched_queue[i]);

	mutex_init(&rdm_mutex);
	INIT_LIST_HEAD(&rdm_queue);
	ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0);
	if (!ocmem_rdm_wq)
		return -ENOMEM;
	ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0);
	if (!ocmem_eviction_wq)
		return -ENOMEM;

	if (!debugfs_create_file("allocations", S_IRUGO, pdata->debug_node,
					NULL, &allocations_show_fops)) {
		dev_err(dev, "Unable to create debugfs node for scheduler\n");
		return -EBUSY;
	}
	return 0;
}