/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <mach/ocmem_priv.h>

enum request_states {
	R_FREE = 0x0,		/* request is not allocated */
	R_PENDING,		/* request has a pending operation */
	R_ALLOCATED,		/* request has been allocated */
	R_MUST_GROW,		/* request must grow as a part of pending operation */
	R_MUST_SHRINK,		/* request must shrink as a part of pending operation */
	R_MUST_MAP,		/* request must be mapped before being used */
	R_MUST_UNMAP,		/* request must be unmapped when not being used */
	R_MAPPED,		/* request is mapped and actively used by client */
	R_UNMAPPED,		/* request is not mapped, so it's not in active use */
	R_EVICTED,		/* request is evicted and must be restored */
};

#define SET_STATE(x, val) (set_bit((val), &(x)->state))
#define CLEAR_STATE(x, val) (clear_bit((val), &(x)->state))
#define TEST_STATE(x, val) (test_bit((val), &(x)->state))
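/*
 * Note: the request state is a bitmask (set_bit/test_bit above), so a
 * request can hold several states at once, e.g. R_ALLOCATED together with
 * R_MUST_MAP while the mapping operation is still pending.
 */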

enum op_res {
	OP_COMPLETE = 0x0,
	OP_RESCHED,
	OP_PARTIAL,
	OP_EVICT,
	OP_FAIL = ~0x0,
};

/* Represents various client priorities */
/* Note: More than one client can share a priority level */
enum client_prio {
	MIN_PRIO = 0x0,
	NO_PRIO = MIN_PRIO,
	PRIO_SENSORS = 0x1,
	PRIO_OTHER_OS = 0x1,
	PRIO_LP_AUDIO = 0x1,
	PRIO_HP_AUDIO = 0x2,
	PRIO_VOICE = 0x3,
	PRIO_GFX_GROWTH = 0x4,
	PRIO_VIDEO = 0x5,
	PRIO_GFX = 0x6,
	PRIO_OCMEM = 0x7,
	MAX_OCMEM_PRIO = PRIO_OCMEM + 1,
};

static void __iomem *ocmem_vaddr;
static struct list_head sched_queue[MAX_OCMEM_PRIO];
static struct mutex sched_queue_mutex;

/* The duration in msecs before a pending operation is scheduled.
 * This allows an idle window between use case boundaries where various
 * hardware state changes can occur. The value will be tweaked on actual
 * hardware.
 */
/* Delay in ms for switching to low power mode for OCMEM */
#define SCHED_DELAY 5000

static struct list_head rdm_queue;
static struct mutex rdm_mutex;
static struct workqueue_struct *ocmem_rdm_wq;
static struct workqueue_struct *ocmem_eviction_wq;

static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX];

struct ocmem_rdm_work {
	int id;
	struct ocmem_map_list *list;
	struct ocmem_handle *handle;
	int direction;
	struct work_struct work;
};

/* OCMEM Operational modes */
enum ocmem_client_modes {
	OCMEM_PERFORMANCE = 1,
	OCMEM_PASSIVE,
	OCMEM_LOW_POWER,
	OCMEM_MODE_MAX = OCMEM_LOW_POWER
};

/* OCMEM Addressing modes */
enum ocmem_interconnects {
	OCMEM_BLOCKED = 0,
	OCMEM_PORT = 1,
	OCMEM_OCMEMNOC = 2,
	OCMEM_SYSNOC = 3,
};

/**
 * Primary OCMEM Arbitration Table
 **/
struct ocmem_table {
	int client_id;
	int priority;
	int mode;
	int hw_interconnect;
} ocmem_client_table[OCMEM_CLIENT_MAX] = {
	{OCMEM_GRAPHICS, PRIO_GFX, OCMEM_PERFORMANCE, OCMEM_PORT},
	{OCMEM_VIDEO, PRIO_VIDEO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC},
	{OCMEM_CAMERA, NO_PRIO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC},
	{OCMEM_HP_AUDIO, PRIO_HP_AUDIO, OCMEM_PASSIVE, OCMEM_BLOCKED},
	{OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED},
	{OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, OCMEM_SYSNOC},
	{OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
	{OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
};
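/*
 * Each row above feeds the helpers below: 'priority' seeds req->prio and
 * region arbitration, 'mode' selects WIDE_MODE/THIN_MODE in get_mode(),
 * and 'hw_interconnect' decides both the address translation
 * (device_address/core_address) and whether the client is OCMEM-coupled
 * (is_tcm/is_iface_access/is_blocked).
 */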

static struct rb_root sched_tree;
static struct mutex sched_mutex;
static struct mutex allocation_mutex;

/* A region represents a continuous interval in OCMEM address space */
struct ocmem_region {
	/* Chain in Interval Tree */
	struct rb_node region_rb;
	/* Hash map of requests */
	struct idr region_idr;
	/* Chain in eviction list */
	struct list_head eviction_list;
	unsigned long r_start;
	unsigned long r_end;
	unsigned long r_sz;
	/* Highest priority of all requests served by this region */
	int max_prio;
};
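/*
 * Regions are chained in 'sched_tree', an interval tree ordered by
 * r_start, and every request carved out of a region is attached to the
 * region's IDR (region_idr). max_prio caches the highest priority among
 * the requests a region currently serves so arbitration can compare
 * whole regions.
 */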

/* Is OCMEM tightly coupled to the client? */
static inline int is_tcm(int id)
{
	if (ocmem_client_table[id].hw_interconnect == OCMEM_PORT ||
		ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC)
		return 1;
	else
		return 0;
}

static inline int is_iface_access(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC ? 1 : 0;
}

static inline int is_remapped_access(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_SYSNOC ? 1 : 0;
}

static inline int is_blocked(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_BLOCKED ? 1 : 0;
}

inline struct ocmem_buf *handle_to_buffer(struct ocmem_handle *handle)
{
	if (handle)
		return &handle->buffer;
	else
		return NULL;
}

inline struct ocmem_handle *buffer_to_handle(struct ocmem_buf *buffer)
{
	if (buffer)
		return container_of(buffer, struct ocmem_handle, buffer);
	else
		return NULL;
}

inline struct ocmem_req *handle_to_req(struct ocmem_handle *handle)
{
	if (handle)
		return handle->req;
	else
		return NULL;
}

inline struct ocmem_handle *req_to_handle(struct ocmem_req *req)
{
	if (req && req->buffer)
		return container_of(req->buffer, struct ocmem_handle, buffer);
	else
		return NULL;
}

/* Simple wrappers which will have debug features added later */
inline int ocmem_read(void *at)
{
	return readl_relaxed(at);
}

inline int ocmem_write(unsigned long val, void *at)
{
	writel_relaxed(val, at);
	return 0;
}

inline int get_mode(int id)
{
	if (!check_id(id))
		return MODE_NOT_SET;
	else
		return ocmem_client_table[id].mode == OCMEM_PERFORMANCE ?
					WIDE_MODE : THIN_MODE;
}

/* Returns the address that can be used by a device core to access OCMEM */
static unsigned long device_address(int id, unsigned long addr)
{
	int hw_interconnect = ocmem_client_table[id].hw_interconnect;
	unsigned long ret_addr = 0x0;

	switch (hw_interconnect) {
	case OCMEM_PORT:
	case OCMEM_OCMEMNOC:
		ret_addr = phys_to_offset(addr);
		break;
	case OCMEM_SYSNOC:
		ret_addr = addr;
		break;
	case OCMEM_BLOCKED:
		ret_addr = 0x0;
		break;
	}
	return ret_addr;
}

/* Returns the address as viewed by the core */
static unsigned long core_address(int id, unsigned long addr)
{
	int hw_interconnect = ocmem_client_table[id].hw_interconnect;
	unsigned long ret_addr = 0x0;

	switch (hw_interconnect) {
	case OCMEM_PORT:
	case OCMEM_OCMEMNOC:
		ret_addr = offset_to_phys(addr);
		break;
	case OCMEM_SYSNOC:
		ret_addr = addr;
		break;
	case OCMEM_BLOCKED:
		ret_addr = 0x0;
		break;
	}
	return ret_addr;
}
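/*
 * Address translation note: clients on the OCMEM port or the OCMEM NOC
 * address the memory by offset into OCMEM, so the two helpers above
 * convert between the physical address kept in the request and the
 * offset view seen by the core. SYSNOC clients use the physical address
 * directly, and blocked clients get no address at all.
 */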

static inline struct ocmem_zone *zone_of(struct ocmem_req *req)
{
	int owner;
	if (!req)
		return NULL;
	owner = req->owner;
	return get_zone(owner);
}

static int insert_region(struct ocmem_region *region)
{

	struct rb_root *root = &sched_tree;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ocmem_region *tmp = NULL;
	unsigned long addr = region->r_start;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct ocmem_region, region_rb);

		if (tmp->r_end > addr) {
			if (tmp->r_start <= addr)
				break;
			p = &(*p)->rb_left;
		} else if (tmp->r_end <= addr)
			p = &(*p)->rb_right;
	}
	rb_link_node(&region->region_rb, parent, p);
	rb_insert_color(&region->region_rb, root);
	return 0;
}

static int remove_region(struct ocmem_region *region)
{
	struct rb_root *root = &sched_tree;
	rb_erase(&region->region_rb, root);
	return 0;
}

static struct ocmem_req *ocmem_create_req(void)
{
	struct ocmem_req *p = NULL;

	p = kzalloc(sizeof(struct ocmem_req), GFP_KERNEL);
	if (!p)
		return NULL;

	INIT_LIST_HEAD(&p->zone_list);
	INIT_LIST_HEAD(&p->sched_list);
	init_rwsem(&p->rw_sem);
	SET_STATE(p, R_FREE);
	pr_debug("request %p created\n", p);
	return p;
}

static int ocmem_destroy_req(struct ocmem_req *req)
{
	kfree(req);
	return 0;
}

static struct ocmem_region *create_region(void)
{
	struct ocmem_region *p = NULL;

	p = kzalloc(sizeof(struct ocmem_region), GFP_KERNEL);
	if (!p)
		return NULL;
	idr_init(&p->region_idr);
	INIT_LIST_HEAD(&p->eviction_list);
	p->r_start = p->r_end = p->r_sz = 0x0;
	p->max_prio = NO_PRIO;
	return p;
}

static int destroy_region(struct ocmem_region *region)
{
	kfree(region);
	return 0;
}

static int attach_req(struct ocmem_region *region, struct ocmem_req *req)
{
	int ret, id;

	while (1) {
		if (idr_pre_get(&region->region_idr, GFP_KERNEL) == 0)
			return -ENOMEM;

		ret = idr_get_new_above(&region->region_idr, req, 1, &id);

		if (ret != -EAGAIN)
			break;
	}

	if (!ret) {
		req->req_id = id;
		pr_debug("ocmem: request %p(id:%d) attached to region %p\n",
				req, id, region);
		return 0;
	}
	return -EINVAL;
}

static int detach_req(struct ocmem_region *region, struct ocmem_req *req)
{
	idr_remove(&region->region_idr, req->req_id);
	return 0;
}
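/*
 * attach_req() uses the legacy two-step IDR API: idr_pre_get() preloads
 * memory and idr_get_new_above() hands out an id starting at 1, retrying
 * on -EAGAIN. The id doubles as req->req_id and is what detach_req() and
 * find_req_match() later use to look the request up again.
 */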

static int populate_region(struct ocmem_region *region, struct ocmem_req *req)
{
	region->r_start = req->req_start;
	region->r_end = req->req_end;
	region->r_sz = req->req_end - req->req_start + 1;
	return 0;
}

static int region_req_count(int id, void *ptr, void *data)
{
	int *count = data;
	*count = *count + 1;
	return 0;
}

static int req_count(struct ocmem_region *region)
{
	int count = 0;
	idr_for_each(&region->region_idr, region_req_count, &count);
	return count;
}

static int compute_max_prio(int id, void *ptr, void *data)
{
	int *max = data;
	struct ocmem_req *req = ptr;

	if (req->prio > *max)
		*max = req->prio;
	return 0;
}

static int update_region_prio(struct ocmem_region *region)
{
	int max_prio;
	if (req_count(region) != 0) {
		idr_for_each(&region->region_idr, compute_max_prio, &max_prio);
		region->max_prio = max_prio;
	} else {
		region->max_prio = NO_PRIO;
	}
	pr_debug("ocmem: Updating prio of region %p as %d\n",
			region, max_prio);

	return 0;
}

static struct ocmem_region *find_region(unsigned long addr)
{
	struct ocmem_region *region = NULL;
	struct rb_node *rb_node = NULL;

	rb_node = sched_tree.rb_node;

	while (rb_node) {
		struct ocmem_region *tmp_region = NULL;
		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);

		if (tmp_region->r_end > addr) {
			region = tmp_region;
			if (tmp_region->r_start <= addr)
				break;
			rb_node = rb_node->rb_left;
		} else {
			rb_node = rb_node->rb_right;
		}
	}
	return region;
}

static struct ocmem_region *find_region_intersection(unsigned long start,
				unsigned long end)
{

	struct ocmem_region *region = NULL;
	region = find_region(start);
	if (region && end <= region->r_start)
		region = NULL;
	return region;
}

static struct ocmem_region *find_region_match(unsigned long start,
				unsigned long end)
{

	struct ocmem_region *region = NULL;
	region = find_region(start);
	if (region && start == region->r_start && end == region->r_end)
		return region;
	return NULL;
}

static struct ocmem_req *find_req_match(int owner, struct ocmem_region *region)
{
	struct ocmem_req *req = NULL;

	if (!region)
		return NULL;

	req = idr_find(&region->region_idr, owner);

	return req;
}

/* Must be called with req->sem held */
static inline int is_mapped(struct ocmem_req *req)
{
	return TEST_STATE(req, R_MAPPED);
}

/* Must be called with sched_mutex held */
static int __sched_unmap(struct ocmem_req *req)
{
	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	if (!is_mapped(req)) {
		pr_err("Request is not currently mapped");
		goto invalid_op_error;
	}

	/* Update the request state */
	CLEAR_STATE(req, R_MAPPED);
	SET_STATE(req, R_MUST_MAP);

	return OP_COMPLETE;

invalid_op_error:
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_map(struct ocmem_req *req)
{
	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	/* Update the request state */
	CLEAR_STATE(req, R_MUST_MAP);
	SET_STATE(req, R_MAPPED);

	return OP_COMPLETE;

invalid_op_error:
	return OP_FAIL;
}

static int do_map(struct ocmem_req *req)
{
	int rc = 0;

	down_write(&req->rw_sem);

	mutex_lock(&sched_mutex);
	rc = __sched_map(req);
	mutex_unlock(&sched_mutex);

	up_write(&req->rw_sem);

	if (rc == OP_FAIL)
		return -EINVAL;

	return 0;
}

static int do_unmap(struct ocmem_req *req)
{
	int rc = 0;

	down_write(&req->rw_sem);

	mutex_lock(&sched_mutex);
	rc = __sched_unmap(req);
	mutex_unlock(&sched_mutex);

	up_write(&req->rw_sem);

	if (rc == OP_FAIL)
		return -EINVAL;

	return 0;
}
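/*
 * do_map()/do_unmap() only flip the R_MAPPED/R_MUST_MAP state bits under
 * sched_mutex while holding the request's rw_sem; the actual data
 * movement into or out of OCMEM happens separately through the RDM
 * transfer path (queue_transfer/ocmem_rdm_worker below).
 */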

static int process_map(struct ocmem_req *req, unsigned long start,
				unsigned long end)
{
	int rc = 0;

	rc = ocmem_enable_core_clock();

	if (rc < 0)
		goto core_clock_fail;

	if (is_iface_access(req->owner)) {
		rc = ocmem_enable_iface_clock();

		if (rc < 0)
			goto iface_clock_fail;
	}

	rc = ocmem_lock(req->owner, phys_to_offset(req->req_start), req->req_sz,
							get_mode(req->owner));

	if (rc < 0) {
		pr_err("ocmem: Failed to secure request %p for %d\n", req,
				req->owner);
		goto lock_failed;
	}

	rc = do_map(req);

	if (rc < 0) {
		pr_err("ocmem: Failed to map request %p for %d\n",
							req, req->owner);
		goto process_map_fail;
	}
	pr_debug("ocmem: Mapped request %p\n", req);
	return 0;

process_map_fail:
	ocmem_unlock(req->owner, phys_to_offset(req->req_start), req->req_sz);
lock_failed:
	if (is_iface_access(req->owner))
		ocmem_disable_iface_clock();
iface_clock_fail:
	ocmem_disable_core_clock();
core_clock_fail:
	pr_err("ocmem: Failed to map ocmem request\n");
	return rc;
}
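/*
 * process_map() sequence: enable the core clock, enable the interface
 * clock only for OCMEM-NOC clients, lock (secure) the interval for the
 * owner, then mark the request mapped. Each failure unwinds exactly the
 * steps that already succeeded via the goto ladder above; process_unmap()
 * below performs the mirror-image teardown.
 */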

static int process_unmap(struct ocmem_req *req, unsigned long start,
				unsigned long end)
{
	int rc = 0;

	rc = do_unmap(req);

	if (rc < 0)
		goto process_unmap_fail;

	rc = ocmem_unlock(req->owner, phys_to_offset(req->req_start),
						req->req_sz);

	if (rc < 0) {
		pr_err("ocmem: Failed to un-secure request %p for %d\n", req,
				req->owner);
		goto unlock_failed;
	}

	if (is_iface_access(req->owner))
		ocmem_disable_iface_clock();
	ocmem_disable_core_clock();
	pr_debug("ocmem: Unmapped request %p\n", req);
	return 0;

unlock_failed:
process_unmap_fail:
	pr_err("ocmem: Failed to unmap ocmem request\n");
	return rc;
}

static int __sched_grow(struct ocmem_req *req, bool can_block)
{
	unsigned long min = req->req_min;
	unsigned long max = req->req_max;
	unsigned long step = req->req_step;
	int owner = req->owner;
	unsigned long curr_sz = 0;
	unsigned long growth_sz = 0;
	unsigned long curr_start = 0;
	enum client_prio prio = req->prio;
	unsigned long alloc_addr = 0x0;
	bool retry;
	struct ocmem_region *spanned_r = NULL;
	struct ocmem_region *overlap_r = NULL;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	struct ocmem_zone *zone = get_zone(owner);
	struct ocmem_region *region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	curr_sz = matched_req->req_sz;
	curr_start = matched_req->req_start;
	growth_sz = matched_req->req_max - matched_req->req_sz;

	pr_debug("Attempting to grow req %p from %lx to %lx\n",
			req, matched_req->req_sz, matched_req->req_max);

	retry = false;

	pr_debug("ocmem: GROW: growth size %lx\n", growth_sz);

retry_next_step:

	spanned_r = NULL;
	overlap_r = NULL;

	spanned_r = find_region(zone->z_head);
	overlap_r = find_region_intersection(zone->z_head,
				zone->z_head + growth_sz);

	if (overlap_r == NULL) {
		/* no conflicting regions, schedule this region */
		zone->z_ops->free(zone, curr_start, curr_sz);
		alloc_addr = zone->z_ops->allocate(zone, curr_sz + growth_sz);

		if (alloc_addr < 0) {
			pr_err("ocmem: zone allocation operation failed\n");
			goto internal_error;
		}

		curr_sz += growth_sz;
		/* Detach the region from the interval tree */
		/* This is to guarantee that any change in size
		 * causes the tree to be rebalanced if required */

		detach_req(matched_region, req);
		if (req_count(matched_region) == 0) {
			remove_region(matched_region);
			region = matched_region;
		} else {
			region = create_region();
			if (!region) {
				pr_err("ocmem: Unable to create region\n");
				goto region_error;
			}
		}

		/* update the request */
		req->req_start = alloc_addr;
		/* increment the size to reflect new length */
		req->req_sz = curr_sz;
		req->req_end = alloc_addr + req->req_sz - 1;

		/* update request state */
		CLEAR_STATE(req, R_MUST_GROW);
		SET_STATE(req, R_ALLOCATED);
		SET_STATE(req, R_MUST_MAP);
		req->op = SCHED_MAP;

		/* update the region with new req */
		attach_req(region, req);
		populate_region(region, req);
		update_region_prio(region);

		/* update the tree with new region */
		if (insert_region(region)) {
			pr_err("ocmem: Failed to insert the region\n");
			goto region_error;
		}

		if (retry) {
			SET_STATE(req, R_MUST_GROW);
			SET_STATE(req, R_PENDING);
			req->op = SCHED_GROW;
			return OP_PARTIAL;
		}
	} else if (spanned_r != NULL && overlap_r != NULL) {
		/* resolve conflicting regions based on priority */
		if (overlap_r->max_prio < prio) {
			/* Growth cannot be triggered unless a previous
			 * client of lower priority was evicted */
			pr_err("ocmem: Invalid growth scheduled\n");
			/* This is serious enough to fail */
			BUG();
			return OP_FAIL;
		} else if (overlap_r->max_prio > prio) {
			if (min == max) {
				/* Cannot grow at this time, try later */
				SET_STATE(req, R_PENDING);
				SET_STATE(req, R_MUST_GROW);
				return OP_RESCHED;
			} else {
				/* Try to grow in steps */
				growth_sz -= step;
				/* We are OOM at this point so need to retry */
				if (growth_sz <= curr_sz) {
					SET_STATE(req, R_PENDING);
					SET_STATE(req, R_MUST_GROW);
					return OP_RESCHED;
				}
				retry = true;
				pr_debug("ocmem: Attempting with reduced size %lx\n",
						growth_sz);
				goto retry_next_step;
			}
		} else {
			pr_err("ocmem: grow: New Region %p Existing %p\n",
					matched_region, overlap_r);
			pr_err("ocmem: Undetermined behavior\n");
			/* This is serious enough to fail */
			BUG();
		}
	} else if (spanned_r == NULL && overlap_r != NULL) {
		goto err_not_supported;
	}

	return OP_COMPLETE;

err_not_supported:
	pr_err("ocmem: Scheduled unsupported operation\n");
	return OP_FAIL;
region_error:
	zone->z_ops->free(zone, alloc_addr, curr_sz);
	detach_req(region, req);
	update_region_prio(region);
	/* req is going to be destroyed by the caller anyways */
internal_error:
	destroy_region(region);
invalid_op_error:
	return OP_FAIL;
}
838/* Must be called with sched_mutex held */
839static int __sched_free(struct ocmem_req *req)
840{
841 int owner = req->owner;
842 int ret = 0;
843
844 struct ocmem_req *matched_req = NULL;
845 struct ocmem_region *matched_region = NULL;
846
847 struct ocmem_zone *zone = get_zone(owner);
848
849 BUG_ON(!zone);
850
851 matched_region = find_region_match(req->req_start, req->req_end);
852 matched_req = find_req_match(req->req_id, matched_region);
853
854 if (!matched_region || !matched_req)
855 goto invalid_op_error;
856 if (matched_req != req)
857 goto invalid_op_error;
858
859 ret = zone->z_ops->free(zone,
860 matched_req->req_start, matched_req->req_sz);
861
862 if (ret < 0)
863 goto err_op_fail;
864
865 detach_req(matched_region, matched_req);
866 update_region_prio(matched_region);
867 if (req_count(matched_region) == 0) {
868 remove_region(matched_region);
869 destroy_region(matched_region);
870 }
871
872 /* Update the request */
873 req->req_start = 0x0;
874 req->req_sz = 0x0;
875 req->req_end = 0x0;
876 SET_STATE(req, R_FREE);
877 return OP_COMPLETE;
878invalid_op_error:
879 pr_err("ocmem: free: Failed to find matching region\n");
880err_op_fail:
881 pr_err("ocmem: free: Failed\n");
882 return OP_FAIL;
883}
884
885/* Must be called with sched_mutex held */
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700886static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz)
887{
888 int owner = req->owner;
889 int ret = 0;
890
891 struct ocmem_req *matched_req = NULL;
892 struct ocmem_region *matched_region = NULL;
893 struct ocmem_region *region = NULL;
894 unsigned long alloc_addr = 0x0;
895
896 struct ocmem_zone *zone = get_zone(owner);
897
898 BUG_ON(!zone);
899
900 /* The shrink should not be called for zero size */
901 BUG_ON(new_sz == 0);
902
903 matched_region = find_region_match(req->req_start, req->req_end);
904 matched_req = find_req_match(req->req_id, matched_region);
905
906 if (!matched_region || !matched_req)
907 goto invalid_op_error;
908 if (matched_req != req)
909 goto invalid_op_error;
910
Naveen Ramarajcc4ec152012-05-14 09:55:29 -0700911 ret = zone->z_ops->free(zone,
912 matched_req->req_start, matched_req->req_sz);
913
914 if (ret < 0) {
915 pr_err("Zone Allocation operation failed\n");
916 goto internal_error;
917 }
918
919 alloc_addr = zone->z_ops->allocate(zone, new_sz);
920
921 if (alloc_addr < 0) {
922 pr_err("Zone Allocation operation failed\n");
923 goto internal_error;
924 }
925
926 /* Detach the region from the interval tree */
927 /* This is to guarantee that the change in size
928 * causes the tree to be rebalanced if required */
929
930 detach_req(matched_region, req);
931 if (req_count(matched_region) == 0) {
932 remove_region(matched_region);
933 region = matched_region;
934 } else {
935 region = create_region();
936 if (!region) {
937 pr_err("ocmem: Unable to create region\n");
938 goto internal_error;
939 }
940 }
941 /* update the request */
942 req->req_start = alloc_addr;
943 req->req_sz = new_sz;
944 req->req_end = alloc_addr + req->req_sz;
945
946 if (req_count(region) == 0) {
947 remove_region(matched_region);
948 destroy_region(matched_region);
949 }
950
951 /* update request state */
952 SET_STATE(req, R_MUST_GROW);
953 SET_STATE(req, R_MUST_MAP);
954 req->op = SCHED_MAP;
955
956 /* attach the request to the region */
957 attach_req(region, req);
958 populate_region(region, req);
959 update_region_prio(region);
960
961 /* update the tree with new region */
962 if (insert_region(region)) {
963 pr_err("ocmem: Failed to insert the region\n");
964 zone->z_ops->free(zone, alloc_addr, new_sz);
965 detach_req(region, req);
966 update_region_prio(region);
967 /* req will be destroyed by the caller */
968 goto region_error;
969 }
970 return OP_COMPLETE;
971
972region_error:
973 destroy_region(region);
974internal_error:
975 pr_err("ocmem: shrink: Failed\n");
976 return OP_FAIL;
977invalid_op_error:
978 pr_err("ocmem: shrink: Failed to find matching region\n");
979 return OP_FAIL;
980}
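/*
 * Shrinking follows the same free-then-reallocate pattern as growth: the
 * old interval is returned to the zone allocator, a smaller one is
 * allocated, and the request is re-attached to a (possibly new) region
 * with R_MUST_GROW and R_MUST_MAP set so it can be mapped and grown back
 * later.
 */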

/* Must be called with sched_mutex held */
static int __sched_allocate(struct ocmem_req *req, bool can_block,
				bool can_wait)
{
	unsigned long min = req->req_min;
	unsigned long max = req->req_max;
	unsigned long step = req->req_step;
	int owner = req->owner;
	unsigned long sz = max;
	enum client_prio prio = req->prio;
	unsigned long alloc_addr = 0x0;
	bool retry;

	struct ocmem_region *spanned_r = NULL;
	struct ocmem_region *overlap_r = NULL;

	struct ocmem_zone *zone = get_zone(owner);
	struct ocmem_region *region = NULL;

	BUG_ON(!zone);

	if (min > (zone->z_end - zone->z_start)) {
		pr_err("ocmem: requested minimum size exceeds quota\n");
		goto invalid_op_error;
	}

	if (max > (zone->z_end - zone->z_start)) {
		pr_err("ocmem: requested maximum size exceeds quota\n");
		goto invalid_op_error;
	}

	if (min > zone->z_free) {
		pr_err("ocmem: out of memory for zone %d\n", owner);
		goto invalid_op_error;
	}

	region = create_region();

	if (!region) {
		pr_err("ocmem: Unable to create region\n");
		goto invalid_op_error;
	}

	retry = false;

	pr_debug("ocmem: do_allocate: %s request size %lx\n",
						get_name(owner), sz);

retry_next_step:

	spanned_r = NULL;
	overlap_r = NULL;

	spanned_r = find_region(zone->z_head);
	overlap_r = find_region_intersection(zone->z_head, zone->z_head + sz);

	if (overlap_r == NULL) {
		/* no conflicting regions, schedule this region */
		alloc_addr = zone->z_ops->allocate(zone, sz);

		if (alloc_addr < 0) {
			pr_err("Zone Allocation operation failed\n");
			goto internal_error;
		}

		/* update the request */
		req->req_start = alloc_addr;
		req->req_end = alloc_addr + sz - 1;
		req->req_sz = sz;
		req->zone = zone;

		/* update request state */
		CLEAR_STATE(req, R_FREE);
		CLEAR_STATE(req, R_PENDING);
		SET_STATE(req, R_ALLOCATED);
		SET_STATE(req, R_MUST_MAP);
		req->op = SCHED_NOP;

		/* attach the request to the region */
		attach_req(region, req);
		populate_region(region, req);
		update_region_prio(region);

		/* update the tree with new region */
		if (insert_region(region)) {
			pr_err("ocmem: Failed to insert the region\n");
			zone->z_ops->free(zone, alloc_addr, sz);
			detach_req(region, req);
			update_region_prio(region);
			/* req will be destroyed by the caller */
			goto internal_error;
		}

		if (retry) {
			SET_STATE(req, R_MUST_GROW);
			SET_STATE(req, R_PENDING);
			req->op = SCHED_GROW;
			return OP_PARTIAL;
		}
	} else if (spanned_r != NULL && overlap_r != NULL) {
		/* resolve conflicting regions based on priority */
		if (overlap_r->max_prio < prio) {
			if (min == max) {
				req->req_start = zone->z_head;
				req->req_end = zone->z_head + sz - 1;
				req->req_sz = 0x0;
				req->edata = NULL;
				goto trigger_eviction;
			} else {
				/* Try to allocate at least 'min' immediately */
				sz -= step;
				if (sz < min)
					goto err_out_of_mem;
				retry = true;
				pr_debug("ocmem: Attempting with reduced size %lx\n",
						sz);
				goto retry_next_step;
			}
		} else if (overlap_r->max_prio > prio) {
			if (can_block == true) {
				SET_STATE(req, R_PENDING);
				SET_STATE(req, R_MUST_GROW);
				return OP_RESCHED;
			} else {
				if (min == max) {
					pr_err("Cannot allocate %lx synchronously\n",
							sz);
					goto err_out_of_mem;
				} else {
					sz -= step;
					if (sz < min)
						goto err_out_of_mem;
					retry = true;
					pr_debug("ocmem: Attempting reduced size %lx\n",
							sz);
					goto retry_next_step;
				}
			}
		} else {
			pr_err("ocmem: Undetermined behavior\n");
			pr_err("ocmem: New Region %p Existing %p\n", region,
					overlap_r);
			/* This is serious enough to fail */
			BUG();
		}
	} else if (spanned_r == NULL && overlap_r != NULL)
		goto err_not_supported;

	return OP_COMPLETE;

trigger_eviction:
	pr_debug("Trigger eviction of region %p\n", overlap_r);
	destroy_region(region);
	return OP_EVICT;

err_not_supported:
	pr_err("ocmem: Scheduled unsupported operation\n");
	return OP_FAIL;

err_out_of_mem:
	pr_err("ocmem: Out of memory during allocation\n");
internal_error:
	destroy_region(region);
invalid_op_error:
	return OP_FAIL;
}
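/*
 * Allocation policy above: start from req_max and walk the size down in
 * 'step' decrements towards req_min whenever the zone head is contended.
 * If the overlapping region has lower priority and no smaller size is
 * acceptable, OP_EVICT is returned so do_allocate() can evict the
 * victims and retry; against a higher-priority region the request is
 * either rescheduled (can_block) or trimmed until it fits or fails.
 */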

static int sched_enqueue(struct ocmem_req *priv)
{
	struct ocmem_req *next = NULL;
	mutex_lock(&sched_queue_mutex);
	list_add_tail(&priv->sched_list, &sched_queue[priv->owner]);
	pr_debug("enqueued req %p\n", priv);
	list_for_each_entry(next, &sched_queue[priv->owner], sched_list) {
		pr_debug("pending requests for client %p\n", next);
	}
	mutex_unlock(&sched_queue_mutex);
	return 0;
}

static void sched_dequeue(struct ocmem_req *victim_req)
{
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;
	int id;

	if (!victim_req)
		return;

	id = victim_req->owner;

	mutex_lock(&sched_queue_mutex);

	if (list_empty(&sched_queue[id]))
		goto dequeue_done;

	list_for_each_entry_safe(req, next, &sched_queue[id], sched_list)
	{
		if (req == victim_req) {
			pr_debug("ocmem: Cancelling pending request %p\n",
						req);
			list_del(&req->sched_list);
			goto dequeue_done;
		}
	}

dequeue_done:
	mutex_unlock(&sched_queue_mutex);
	return;
}

static struct ocmem_req *ocmem_fetch_req(void)
{
	int i;
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;

	mutex_lock(&sched_queue_mutex);
	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
		if (list_empty(&sched_queue[i]))
			continue;
		list_for_each_entry_safe(req, next, &sched_queue[i], sched_list)
		{
			if (req) {
				pr_debug("ocmem: Fetched pending request %p\n",
									req);
				list_del(&req->sched_list);
				break;
			}
		}
	}
	mutex_unlock(&sched_queue_mutex);
	return req;
}

unsigned long process_quota(int id)
{
	struct ocmem_zone *zone = NULL;

	if (is_blocked(id))
		return 0;

	zone = get_zone(id);

	if (zone && zone->z_pool)
		return zone->z_end - zone->z_start;
	else
		return 0;
}

static int do_grow(struct ocmem_req *req)
{
	struct ocmem_buf *buffer = NULL;
	bool can_block = true;
	int rc = 0;

	down_write(&req->rw_sem);
	buffer = req->buffer;

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_grow(req, can_block);
	mutex_unlock(&sched_mutex);

	if (rc == OP_FAIL)
		goto err_op_fail;

	if (rc == OP_RESCHED) {
		pr_debug("ocmem: Enqueue this allocation");
		sched_enqueue(req);
	}

	else if (rc == OP_COMPLETE || rc == OP_PARTIAL) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_op_fail:
	up_write(&req->rw_sem);
	return -EINVAL;
}

static int process_grow(struct ocmem_req *req)
{
	int rc = 0;
	unsigned long offset = 0;

	/* Attempt to grow the region */
	rc = do_grow(req);

	if (rc < 0)
		return -EINVAL;

	rc = process_map(req, req->req_start, req->req_end);
	if (rc < 0)
		return -EINVAL;

	offset = phys_to_offset(req->req_start);

	rc = ocmem_memory_on(req->owner, offset, req->req_sz);

	if (rc < 0) {
		pr_err("Failed to switch ON memory macros\n");
		goto power_ctl_error;
	}

	/* Notify the client about the buffer growth */
	rc = dispatch_notification(req->owner, OCMEM_ALLOC_GROW, req->buffer);
	if (rc < 0) {
		pr_err("No notifier callback to cater for req %p event: %d\n",
				req, OCMEM_ALLOC_GROW);
		BUG();
	}
	return 0;
power_ctl_error:
	return -EINVAL;
}

static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
{

	int rc = 0;
	struct ocmem_buf *buffer = NULL;

	down_write(&req->rw_sem);
	buffer = req->buffer;

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_shrink(req, shrink_size);
	mutex_unlock(&sched_mutex);

	if (rc == OP_FAIL)
		goto err_op_fail;

	else if (rc == OP_COMPLETE) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_op_fail:
	up_write(&req->rw_sem);
	return -EINVAL;
}

static void ocmem_sched_wk_func(struct work_struct *work);
DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func);

static int ocmem_schedule_pending(void)
{

	bool need_sched = false;
	int i = 0;

	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
		if (!list_empty(&sched_queue[i])) {
			need_sched = true;
			break;
		}
	}

	if (need_sched == true) {
		cancel_delayed_work(&ocmem_sched_thread);
		schedule_delayed_work(&ocmem_sched_thread,
					msecs_to_jiffies(SCHED_DELAY));
		pr_debug("ocmem: Scheduled delayed work\n");
	}
	return 0;
}

static int do_free(struct ocmem_req *req)
{
	int rc = 0;
	struct ocmem_buf *buffer = req->buffer;

	down_write(&req->rw_sem);

	if (is_mapped(req)) {
		pr_err("ocmem: Buffer needs to be unmapped before free\n");
		goto err_free_fail;
	}

	pr_debug("ocmem: do_free: client %s req %p\n", get_name(req->owner),
				req);
	/* Grab the sched mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_free(req);
	mutex_unlock(&sched_mutex);

	switch (rc) {

	case OP_COMPLETE:
		buffer->addr = 0x0;
		buffer->len = 0x0;
		break;
	case OP_FAIL:
	default:
		goto err_free_fail;
		break;
	}

	up_write(&req->rw_sem);
	return 0;
err_free_fail:
	up_write(&req->rw_sem);
	pr_err("ocmem: freeing req %p failed\n", req);
	return -EINVAL;
}

int process_free(int id, struct ocmem_handle *handle)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	unsigned long offset = 0;
	int rc = 0;

	if (is_blocked(id)) {
		pr_err("Client %d cannot request free\n", id);
		return -EINVAL;
	}

	req = handle_to_req(handle);
	buffer = handle_to_buffer(handle);

	if (!req)
		return -EINVAL;

	if (req->req_start != core_address(id, buffer->addr)) {
		pr_err("Invalid buffer handle passed for free\n");
		return -EINVAL;
	}

	mutex_lock(&sched_mutex);
	sched_dequeue(req);
	mutex_unlock(&sched_mutex);

	if (TEST_STATE(req, R_MAPPED)) {
		/* unmap the interval and clear the memory */
		rc = process_unmap(req, req->req_start, req->req_end);
		if (rc < 0)
			return -EINVAL;
	}

	/* Turn off the memory */
	if (req->req_sz != 0) {

		offset = phys_to_offset(req->req_start);

		rc = ocmem_memory_off(req->owner, offset, req->req_sz);

		if (rc < 0) {
			pr_err("Failed to switch OFF memory macros\n");
			return -EINVAL;
		}

	}

	if (!TEST_STATE(req, R_FREE)) {
		/* free the allocation */
		rc = do_free(req);
		if (rc < 0)
			return -EINVAL;
	}

	inc_ocmem_stat(zone_of(req), NR_FREES);

	ocmem_destroy_req(req);
	handle->req = NULL;

	ocmem_schedule_pending();
	return 0;
}
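/*
 * Free sequence above: cancel any queued operation for the request,
 * unmap and unlock the interval if it is still mapped, power the memory
 * macros off for the freed range, release the zone allocation, and
 * finally destroy the request before kicking any pending allocations.
 */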

static void ocmem_rdm_worker(struct work_struct *work)
{
	int offset = 0;
	int rc = 0;
	int event;
	struct ocmem_rdm_work *work_data = container_of(work,
				struct ocmem_rdm_work, work);
	int id = work_data->id;
	struct ocmem_map_list *list = work_data->list;
	int direction = work_data->direction;
	struct ocmem_handle *handle = work_data->handle;
	struct ocmem_req *req = handle_to_req(handle);
	struct ocmem_buf *buffer = handle_to_buffer(handle);

	down_write(&req->rw_sem);
	offset = phys_to_offset(req->req_start);
	rc = ocmem_rdm_transfer(id, list, offset, direction);
	if (work_data->direction == TO_OCMEM)
		event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
	else
		event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;
	up_write(&req->rw_sem);
	kfree(work_data);
	dispatch_notification(id, event, buffer);
}

int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle,
			struct ocmem_map_list *list, int direction)
{
	struct ocmem_rdm_work *work_data = NULL;

	down_write(&req->rw_sem);

	work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC);
	if (!work_data)
		BUG();

	work_data->handle = handle;
	work_data->list = list;
	work_data->id = req->owner;
	work_data->direction = direction;
	INIT_WORK(&work_data->work, ocmem_rdm_worker);
	up_write(&req->rw_sem);
	queue_work(ocmem_rdm_wq, &work_data->work);
	return 0;
}

int process_xfer_out(int id, struct ocmem_handle *handle,
			struct ocmem_map_list *list)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;

	if (!is_mapped(req)) {
		pr_err("Buffer is not currently mapped\n");
		goto transfer_out_error;
	}

	rc = queue_transfer(req, handle, list, TO_DDR);

	if (rc < 0) {
		pr_err("Failed to queue rdm transfer to DDR\n");
		inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
		goto transfer_out_error;
	}

	inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_DDR);
	return 0;

transfer_out_error:
	return -EINVAL;
}

int process_xfer_in(int id, struct ocmem_handle *handle,
			struct ocmem_map_list *list)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;

	if (!is_mapped(req)) {
		pr_err("Buffer is not already mapped for transfer\n");
		goto transfer_in_error;
	}

	inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_OCMEM);
	rc = queue_transfer(req, handle, list, TO_OCMEM);

	if (rc < 0) {
		pr_err("Failed to queue rdm transfer to OCMEM\n");
		inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
		goto transfer_in_error;
	}

	return 0;
transfer_in_error:
	return -EINVAL;
}

int process_shrink(int id, struct ocmem_handle *handle, unsigned long size)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	struct ocmem_eviction_data *edata = NULL;
	int rc = 0;

	if (is_blocked(id)) {
		pr_err("Client %d cannot request free\n", id);
		return -EINVAL;
	}

	req = handle_to_req(handle);
	buffer = handle_to_buffer(handle);

	if (!req)
		return -EINVAL;

	if (req->req_start != core_address(id, buffer->addr)) {
		pr_err("Invalid buffer handle passed for shrink\n");
		return -EINVAL;
	}

	edata = req->edata;

	if (!edata) {
		pr_err("Unable to find eviction data\n");
		return -EINVAL;
	}

	pr_debug("Found edata %p in request %p\n", edata, req);

	inc_ocmem_stat(zone_of(req), NR_SHRINKS);

	if (size == 0) {
		pr_debug("req %p being shrunk to zero\n", req);
		if (is_mapped(req))
			rc = process_unmap(req, req->req_start, req->req_end);
		if (rc < 0)
			return -EINVAL;
		rc = do_free(req);
		if (rc < 0)
			return -EINVAL;
	} else {
		rc = do_shrink(req, size);
		if (rc < 0)
			return -EINVAL;
	}

	req->edata = NULL;
	CLEAR_STATE(req, R_ALLOCATED);
	SET_STATE(req, R_FREE);

	if (atomic_dec_and_test(&edata->pending)) {
		pr_debug("ocmem: All conflicting allocations were shrunk\n");
		complete(&edata->completion);
	}

	return 0;
}

int process_xfer(int id, struct ocmem_handle *handle,
		struct ocmem_map_list *list, int direction)
{
	int rc = 0;

	if (is_tcm(id)) {
		WARN(1, "Mapping operation is invalid for client\n");
		return -EINVAL;
	}

	if (direction == TO_DDR)
		rc = process_xfer_out(id, handle, list);
	else if (direction == TO_OCMEM)
		rc = process_xfer_in(id, handle, list);
	return rc;
}

static struct ocmem_eviction_data *init_eviction(int id)
{
	struct ocmem_eviction_data *edata = NULL;
	int prio = ocmem_client_table[id].priority;

	edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC);

	if (!edata) {
		pr_err("ocmem: Could not allocate eviction data\n");
		return NULL;
	}

	INIT_LIST_HEAD(&edata->victim_list);
	INIT_LIST_HEAD(&edata->req_list);
	edata->prio = prio;
	atomic_set(&edata->pending, 0);
	return edata;
}

static void free_eviction(struct ocmem_eviction_data *edata)
{

	if (!edata)
		return;

	if (!list_empty(&edata->req_list))
		pr_err("ocmem: Eviction data %p not empty\n", edata);

	kfree(edata);
	edata = NULL;
}

static bool is_overlapping(struct ocmem_req *new, struct ocmem_req *old)
{

	if (!new || !old)
		return false;

	pr_debug("check overlap [%lx -- %lx] on [%lx -- %lx]\n",
			new->req_start, new->req_end,
			old->req_start, old->req_end);

	if ((new->req_start < old->req_start &&
		new->req_end >= old->req_start) ||
		(new->req_start >= old->req_start &&
		new->req_start <= old->req_end &&
		new->req_end >= old->req_end)) {
		pr_debug("request %p overlaps with existing req %p\n",
				new, old);
		return true;
	}
	return false;
}

static int __evict_common(struct ocmem_eviction_data *edata,
				struct ocmem_req *req)
{
	struct rb_node *rb_node = NULL;
	struct ocmem_req *e_req = NULL;
	bool needs_eviction = false;
	int j = 0;

	for (rb_node = rb_first(&sched_tree); rb_node;
				rb_node = rb_next(rb_node)) {

		struct ocmem_region *tmp_region = NULL;

		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);

		if (tmp_region->max_prio < edata->prio) {
			for (j = edata->prio - 1; j > NO_PRIO; j--) {
				needs_eviction = false;
				e_req = find_req_match(j, tmp_region);
				if (!e_req)
					continue;
				if (edata->passive == true) {
					needs_eviction = true;
				} else {
					needs_eviction = is_overlapping(req,
								e_req);
				}

				if (needs_eviction) {
					pr_debug("adding %p in region %p to eviction list\n",
							e_req, tmp_region);
					list_add_tail(
						&e_req->eviction_list,
						&edata->req_list);
					atomic_inc(&edata->pending);
					e_req->edata = edata;
				}
			}
		} else {
			pr_debug("Skipped region %p\n", tmp_region);
		}
	}

	pr_debug("%d requests will be evicted\n", atomic_read(&edata->pending));

	if (!atomic_read(&edata->pending))
		return -EINVAL;
	return 0;
}

static void trigger_eviction(struct ocmem_eviction_data *edata)
{
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;
	struct ocmem_buf buffer;

	if (!edata)
		return;

	BUG_ON(atomic_read(&edata->pending) == 0);

	init_completion(&edata->completion);

	list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
	{
		if (req) {
			pr_debug("ocmem: Evicting request %p\n", req);
			buffer.addr = req->req_start;
			buffer.len = 0x0;
			dispatch_notification(req->owner, OCMEM_ALLOC_SHRINK,
								&buffer);
		}
	}
	return;
}

int process_evict(int id)
{
	struct ocmem_eviction_data *edata = NULL;
	int rc = 0;

	edata = init_eviction(id);

	if (!edata)
		return -EINVAL;

	edata->passive = true;

	mutex_lock(&sched_mutex);

	rc = __evict_common(edata, NULL);

	if (rc < 0)
		goto skip_eviction;

	trigger_eviction(edata);

	evictions[id] = edata;

	mutex_unlock(&sched_mutex);

	wait_for_completion(&edata->completion);

	return 0;

skip_eviction:
	evictions[id] = NULL;
	mutex_unlock(&sched_mutex);
	return 0;
}

static int run_evict(struct ocmem_req *req)
{
	struct ocmem_eviction_data *edata = NULL;
	int rc = 0;

	if (!req)
		return -EINVAL;

	edata = init_eviction(req->owner);

	if (!edata)
		return -EINVAL;

	edata->passive = false;

	rc = __evict_common(edata, req);

	if (rc < 0)
		goto skip_eviction;

	trigger_eviction(edata);

	pr_debug("ocmem: attaching eviction %p to request %p", edata, req);
	req->edata = edata;

	wait_for_completion(&edata->completion);

	pr_debug("ocmem: eviction completed successfully\n");
	return 0;

skip_eviction:
	pr_err("ocmem: Unable to run eviction\n");
	free_eviction(edata);
	return -EINVAL;
}

static int __restore_common(struct ocmem_eviction_data *edata)
{

	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;

	if (!edata)
		return -EINVAL;

	list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
	{
		if (req) {
			pr_debug("ocmem: restoring evicted request %p\n",
								req);
			list_del(&req->eviction_list);
			req->op = SCHED_ALLOCATE;
			sched_enqueue(req);
			inc_ocmem_stat(zone_of(req), NR_RESTORES);
		}
	}

	pr_debug("Scheduled all evicted regions\n");

	return 0;
}

static int sched_restore(struct ocmem_req *req)
{

	int rc = 0;

	if (!req)
		return -EINVAL;

	if (!req->edata)
		return 0;

	rc = __restore_common(req->edata);

	if (rc < 0)
		return -EINVAL;

	free_eviction(req->edata);
	return 0;
}

int process_restore(int id)
{
	struct ocmem_eviction_data *edata = evictions[id];
	int rc = 0;

	if (!edata)
		return -EINVAL;

	rc = __restore_common(edata);

	if (rc < 0) {
		pr_err("Failed to restore evicted requests\n");
		return -EINVAL;
	}

	free_eviction(edata);
	evictions[id] = NULL;
	ocmem_schedule_pending();
	return 0;
}

static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait)
{
	int rc = 0;
	int ret = 0;
	struct ocmem_buf *buffer = req->buffer;

	down_write(&req->rw_sem);

	mutex_lock(&allocation_mutex);
retry_allocate:

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_allocate(req, can_block, can_wait);
	mutex_unlock(&sched_mutex);

	if (rc == OP_EVICT) {

		ret = run_evict(req);

		if (ret == 0) {
			rc = sched_restore(req);
			if (rc < 0) {
				pr_err("Failed to restore for req %p\n", req);
				goto err_allocate_fail;
			}
			req->edata = NULL;

			pr_debug("Attempting to re-allocate req %p\n", req);
			req->req_start = 0x0;
			req->req_end = 0x0;
			goto retry_allocate;
		} else {
			goto err_allocate_fail;
		}
	}

	mutex_unlock(&allocation_mutex);

	if (rc == OP_FAIL) {
		inc_ocmem_stat(zone_of(req), NR_ALLOCATION_FAILS);
		goto err_allocate_fail;
	}

	if (rc == OP_RESCHED) {
		buffer->addr = 0x0;
		buffer->len = 0x0;
		pr_debug("ocmem: Enqueuing req %p\n", req);
		sched_enqueue(req);
	} else if (rc == OP_PARTIAL) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
		inc_ocmem_stat(zone_of(req), NR_RANGE_ALLOCATIONS);
		pr_debug("ocmem: Enqueuing req %p\n", req);
		sched_enqueue(req);
	} else if (rc == OP_COMPLETE) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_allocate_fail:
	mutex_unlock(&allocation_mutex);
	up_write(&req->rw_sem);
	return -EINVAL;
}
1983
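/*
 * do_dump(): copy the contents of the request's OCMEM region into the
 * caller-supplied DDR buffer at @addr. The OCMEM offset is derived from
 * the request's physical start address and read through the scheduler's
 * ocmem_vaddr mapping while holding the request's rw_sem.
 */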
static int do_dump(struct ocmem_req *req, unsigned long addr)
{
        void __iomem *req_vaddr;
        unsigned long offset = 0x0;

        down_write(&req->rw_sem);

        offset = phys_to_offset(req->req_start);

        req_vaddr = ocmem_vaddr + offset;

        if (!req_vaddr)
                goto err_do_dump;

        pr_debug("Dumping client %s buffer ocmem p: %lx (v: %p) to ddr %lx\n",
                        get_name(req->owner), req->req_start,
                        req_vaddr, addr);

        memcpy((void *)addr, req_vaddr, req->req_sz);

        up_write(&req->rw_sem);
        return 0;
err_do_dump:
        up_write(&req->rw_sem);
        return -EINVAL;
}

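/*
 * process_allocate(): synchronous allocation entry point for client @id.
 * Validates the handle, builds an ocmem_req describing the min/max/step
 * constraints, runs do_allocate() and, if a range was actually placed
 * (req_sz != 0), maps it for the client and switches on the backing memory
 * macros before returning the populated handle.
 */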
int process_allocate(int id, struct ocmem_handle *handle,
                        unsigned long min, unsigned long max,
                        unsigned long step, bool can_block, bool can_wait)
{
        struct ocmem_req *req = NULL;
        struct ocmem_buf *buffer = NULL;
        int rc = 0;
        unsigned long offset = 0;

        /* sanity checks */
        if (is_blocked(id)) {
                pr_err("Client %d cannot request allocation\n", id);
                return -EINVAL;
        }

        if (handle->req != NULL) {
                pr_err("Invalid handle passed in\n");
                return -EINVAL;
        }

        buffer = handle_to_buffer(handle);
        BUG_ON(buffer == NULL);

        /* prepare a request structure to represent this transaction */
        req = ocmem_create_req();
        if (!req)
                return -ENOMEM;

        req->owner = id;
        req->req_min = min;
        req->req_max = max;
        req->req_step = step;
        req->prio = ocmem_client_table[id].priority;
        req->op = SCHED_ALLOCATE;
        req->buffer = buffer;

        inc_ocmem_stat(zone_of(req), NR_REQUESTS);

        rc = do_allocate(req, can_block, can_wait);

        if (rc < 0)
                goto do_allocate_error;

        inc_ocmem_stat(zone_of(req), NR_SYNC_ALLOCATIONS);

        handle->req = req;

        if (req->req_sz != 0) {

                rc = process_map(req, req->req_start, req->req_end);
                if (rc < 0)
                        goto map_error;

                offset = phys_to_offset(req->req_start);

                rc = ocmem_memory_on(req->owner, offset, req->req_sz);

                if (rc < 0) {
                        pr_err("Failed to switch ON memory macros\n");
                        goto power_ctl_error;
                }
        }

        return 0;

power_ctl_error:
        process_unmap(req, req->req_start, req->req_end);
map_error:
        handle->req = NULL;
        do_free(req);
do_allocate_error:
        ocmem_destroy_req(req);
        return -EINVAL;
}

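/*
 * process_delayed_allocate(): worker-context counterpart of
 * process_allocate() for SCHED_ALLOCATE requests picked up by the
 * scheduler work. On a successful (non-pending) allocation the region is
 * mapped, powered on and the owner is notified of the growth through
 * OCMEM_ALLOC_GROW.
 */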
int process_delayed_allocate(struct ocmem_req *req)
{
        struct ocmem_handle *handle = NULL;
        int rc = 0;
        int id = req->owner;
        unsigned long offset = 0;

        handle = req_to_handle(req);
        BUG_ON(handle == NULL);

        rc = do_allocate(req, true, false);

        if (rc < 0)
                goto do_allocate_error;

        /* The request can still be pending */
        if (TEST_STATE(req, R_PENDING))
                return 0;

        inc_ocmem_stat(zone_of(req), NR_ASYNC_ALLOCATIONS);

        if (req->req_sz != 0) {

                rc = process_map(req, req->req_start, req->req_end);
                if (rc < 0)
                        goto map_error;

                offset = phys_to_offset(req->req_start);

                rc = ocmem_memory_on(req->owner, offset, req->req_sz);

                if (rc < 0) {
                        pr_err("Failed to switch ON memory macros\n");
                        goto power_ctl_error;
                }
        }

        /* Notify the client about the buffer growth */
        rc = dispatch_notification(id, OCMEM_ALLOC_GROW, req->buffer);
        if (rc < 0) {
                pr_err("No notifier callback to cater for req %p event: %d\n",
                                req, OCMEM_ALLOC_GROW);
                BUG();
        }
        return 0;

power_ctl_error:
        process_unmap(req, req->req_start, req->req_end);
map_error:
        handle->req = NULL;
        do_free(req);
do_allocate_error:
        ocmem_destroy_req(req);
        return -EINVAL;
}

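/*
 * process_dump(): debug helper that copies a client's mapped OCMEM buffer
 * to DDR at @addr. The buffer must currently be mapped; the copy itself is
 * done by do_dump() under sched_mutex and accounted in the zone statistics.
 */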
int process_dump(int id, struct ocmem_handle *handle, unsigned long addr)
{
        struct ocmem_req *req = NULL;
        int rc = 0;

        req = handle_to_req(handle);

        if (!req)
                return -EINVAL;

        if (!is_mapped(req)) {
                pr_err("Buffer is not mapped\n");
                goto dump_error;
        }

        inc_ocmem_stat(zone_of(req), NR_DUMP_REQUESTS);

        mutex_lock(&sched_mutex);
        rc = do_dump(req, addr);
        mutex_unlock(&sched_mutex);

        if (rc < 0)
                goto dump_error;

        inc_ocmem_stat(zone_of(req), NR_DUMP_COMPLETE);
        return 0;

dump_error:
        pr_err("Dumping OCMEM memory failed for client %d\n", id);
        return -EINVAL;
}

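/*
 * ocmem_sched_wk_func(): scheduler work function. Fetches one pending
 * request via ocmem_fetch_req() and dispatches it: SCHED_GROW requests are
 * grown in place, SCHED_ALLOCATE requests go through the delayed
 * allocation path.
 */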
static void ocmem_sched_wk_func(struct work_struct *work)
{
        struct ocmem_buf *buffer = NULL;
        struct ocmem_handle *handle = NULL;
        struct ocmem_req *req = ocmem_fetch_req();

        if (!req) {
                pr_debug("No Pending Requests found\n");
                return;
        }

        pr_debug("ocmem: sched_wk pending req %p\n", req);
        handle = req_to_handle(req);
        buffer = handle_to_buffer(handle);
        BUG_ON(req->op == SCHED_NOP);

        switch (req->op) {
        case SCHED_GROW:
                process_grow(req);
                break;
        case SCHED_ALLOCATE:
                process_delayed_allocate(req);
                break;
        default:
                pr_err("ocmem: Unknown operation encountered\n");
                break;
        }
        return;
}

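/*
 * debugfs support: the "allocations" node walks the region tree and prints
 * one line per active request with its owner, address range, size and
 * state bits. Assuming debugfs is mounted at the usual location and the
 * platform's debug directory is named "ocmem", it can be read with
 * something like:
 *
 *   cat /sys/kernel/debug/ocmem/allocations
 *
 * (the actual parent directory is whatever pdata->debug_node points at).
 */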
static int ocmem_allocations_show(struct seq_file *f, void *dummy)
{
        struct rb_node *rb_node = NULL;
        struct ocmem_req *req = NULL;
        unsigned j;

        mutex_lock(&sched_mutex);
        for (rb_node = rb_first(&sched_tree); rb_node;
                        rb_node = rb_next(rb_node)) {
                struct ocmem_region *tmp_region = NULL;
                tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
                for (j = MAX_OCMEM_PRIO - 1; j > NO_PRIO; j--) {
                        req = find_req_match(j, tmp_region);
                        if (req) {
                                seq_printf(f,
                                        "owner: %s 0x%lx -- 0x%lx size 0x%lx [state: %2lx]\n",
                                        get_name(req->owner),
                                        req->req_start, req->req_end,
                                        req->req_sz, req->state);
                        }
                }
        }
        mutex_unlock(&sched_mutex);
        return 0;
}

static int ocmem_allocations_open(struct inode *inode, struct file *file)
{
        return single_open(file, ocmem_allocations_show, inode->i_private);
}

static const struct file_operations allocations_show_fops = {
        .open = ocmem_allocations_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

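/*
 * ocmem_sched_init(): one-time scheduler setup at probe. Initialises the
 * region tree, locks and per-priority queues, caches the OCMEM virtual
 * base from the platform data, creates the RDM and eviction workqueues and
 * registers the "allocations" debugfs node.
 */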
int ocmem_sched_init(struct platform_device *pdev)
{
        int i = 0;
        struct ocmem_plat_data *pdata = NULL;
        struct device *dev = &pdev->dev;

        sched_tree = RB_ROOT;
        pdata = platform_get_drvdata(pdev);
        mutex_init(&allocation_mutex);
        mutex_init(&sched_mutex);
        mutex_init(&sched_queue_mutex);
        ocmem_vaddr = pdata->vbase;
        for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++)
                INIT_LIST_HEAD(&sched_queue[i]);

        mutex_init(&rdm_mutex);
        INIT_LIST_HEAD(&rdm_queue);
        ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0);
        if (!ocmem_rdm_wq)
                return -ENOMEM;
        ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0);
        if (!ocmem_eviction_wq)
                return -ENOMEM;

        if (!debugfs_create_file("allocations", S_IRUGO, pdata->debug_node,
                                        NULL, &allocations_show_fops)) {
                dev_err(dev, "Unable to create debugfs node for scheduler\n");
                return -EBUSY;
        }
        return 0;
}