/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <mach/ocmem_priv.h>

enum request_states {
	R_FREE = 0x0,	/* request is not allocated */
	R_PENDING,	/* request has a pending operation */
	R_ALLOCATED,	/* request has been allocated */
	R_MUST_GROW,	/* request must grow as a part of pending operation */
	R_MUST_SHRINK,	/* request must shrink as a part of pending operation */
	R_MUST_MAP,	/* request must be mapped before being used */
	R_MUST_UNMAP,	/* request must be unmapped when not being used */
	R_MAPPED,	/* request is mapped and actively used by client */
	R_UNMAPPED,	/* request is not mapped, so it's not in active use */
	R_EVICTED,	/* request is evicted and must be restored */
};

#define SET_STATE(x, val) (set_bit((val), &(x)->state))
#define CLEAR_STATE(x, val) (clear_bit((val), &(x)->state))
#define TEST_STATE(x, val) (test_bit((val), &(x)->state))

enum op_res {
	OP_COMPLETE = 0x0,
	OP_RESCHED,
	OP_PARTIAL,
	OP_EVICT,
	OP_FAIL = ~0x0,
};

/* Represents various client priorities */
/* Note: More than one client can share a priority level */
enum client_prio {
	MIN_PRIO = 0x0,
	NO_PRIO = MIN_PRIO,
	PRIO_SENSORS = 0x1,
	PRIO_OTHER_OS = 0x1,
	PRIO_LP_AUDIO = 0x1,
	PRIO_HP_AUDIO = 0x2,
	PRIO_VOICE = 0x3,
	PRIO_GFX_GROWTH = 0x4,
	PRIO_VIDEO = 0x5,
	PRIO_GFX = 0x6,
	PRIO_OCMEM = 0x7,
	MAX_OCMEM_PRIO = PRIO_OCMEM + 1,
};

static void __iomem *ocmem_vaddr;
static struct list_head sched_queue[MAX_OCMEM_PRIO];
static struct mutex sched_queue_mutex;
/*
 * The delay in msecs before a pending operation is scheduled, and hence
 * before OCMEM is switched to its low power mode. This leaves an idle
 * window between use-case boundaries in which the various hardware state
 * changes can occur. The value will be tweaked on actual hardware.
 */
#define SCHED_DELAY 5000

static struct list_head rdm_queue;
static struct mutex rdm_mutex;
static struct workqueue_struct *ocmem_rdm_wq;
static struct workqueue_struct *ocmem_eviction_wq;

static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX];

struct ocmem_rdm_work {
	int id;
	struct ocmem_map_list *list;
	struct ocmem_handle *handle;
	int direction;
	struct work_struct work;
};

/* OCMEM Operational modes */
enum ocmem_client_modes {
	OCMEM_PERFORMANCE = 1,
	OCMEM_PASSIVE,
	OCMEM_LOW_POWER,
	OCMEM_MODE_MAX = OCMEM_LOW_POWER
};

/* OCMEM Addressing modes */
enum ocmem_interconnects {
	OCMEM_BLOCKED = 0,
	OCMEM_PORT = 1,
	OCMEM_OCMEMNOC = 2,
	OCMEM_SYSNOC = 3,
};

enum ocmem_tz_client {
	TZ_UNUSED = 0x0,
	TZ_GRAPHICS,
	TZ_VIDEO,
	TZ_LP_AUDIO,
	TZ_SENSORS,
	TZ_OTHER_OS,
	TZ_DEBUG,
};

/**
 * Primary OCMEM Arbitration Table
 **/
struct ocmem_table {
	int client_id;
	int priority;
	int mode;
	int hw_interconnect;
	int tz_id;
} ocmem_client_table[OCMEM_CLIENT_MAX] = {
	{OCMEM_GRAPHICS, PRIO_GFX, OCMEM_PERFORMANCE, OCMEM_PORT,
							TZ_GRAPHICS},
	{OCMEM_VIDEO, PRIO_VIDEO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC,
							TZ_VIDEO},
	{OCMEM_CAMERA, NO_PRIO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC,
							TZ_UNUSED},
	{OCMEM_HP_AUDIO, PRIO_HP_AUDIO, OCMEM_PASSIVE, OCMEM_BLOCKED,
							TZ_UNUSED},
	{OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED,
							TZ_UNUSED},
	{OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, OCMEM_SYSNOC,
							TZ_LP_AUDIO},
	{OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC,
							TZ_SENSORS},
	{OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC,
							TZ_OTHER_OS},
};

static struct rb_root sched_tree;
static struct mutex sched_mutex;
static struct mutex allocation_mutex;

/* A region represents a contiguous interval in OCMEM address space */
struct ocmem_region {
	/* Chain in Interval Tree */
	struct rb_node region_rb;
	/* Hash map of requests */
	struct idr region_idr;
	/* Chain in eviction list */
	struct list_head eviction_list;
	unsigned long r_start;
	unsigned long r_end;
	unsigned long r_sz;
	/* Highest priority of all requests served by this region */
	int max_prio;
};

/* Is OCMEM tightly coupled to the client? */
static inline int is_tcm(int id)
{
	if (ocmem_client_table[id].hw_interconnect == OCMEM_PORT ||
		ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC)
		return 1;
	else
		return 0;
}

static inline int is_iface_access(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC ? 1 : 0;
}

static inline int is_remapped_access(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_SYSNOC ? 1 : 0;
}

static inline int is_blocked(int id)
{
	return ocmem_client_table[id].hw_interconnect == OCMEM_BLOCKED ? 1 : 0;
}

inline struct ocmem_buf *handle_to_buffer(struct ocmem_handle *handle)
{
	if (handle)
		return &handle->buffer;
	else
		return NULL;
}

inline struct ocmem_handle *buffer_to_handle(struct ocmem_buf *buffer)
{
	if (buffer)
		return container_of(buffer, struct ocmem_handle, buffer);
	else
		return NULL;
}

inline struct ocmem_req *handle_to_req(struct ocmem_handle *handle)
{
	if (handle)
		return handle->req;
	else
		return NULL;
}

inline struct ocmem_handle *req_to_handle(struct ocmem_req *req)
{
	if (req && req->buffer)
		return container_of(req->buffer, struct ocmem_handle, buffer);
	else
		return NULL;
}

/* Simple wrappers which will have debug features added later */
inline int ocmem_read(void *at)
{
	return readl_relaxed(at);
}

inline int ocmem_write(unsigned long val, void *at)
{
	writel_relaxed(val, at);
	return 0;
}

inline int get_mode(int id)
{
	if (!check_id(id))
		return MODE_NOT_SET;
	else
		return ocmem_client_table[id].mode == OCMEM_PERFORMANCE ?
						WIDE_MODE : THIN_MODE;
}

inline int get_tz_id(int id)
{
	if (!check_id(id))
		return TZ_UNUSED;
	else
		return ocmem_client_table[id].tz_id;
}

/* Returns the address that can be used by a device core to access OCMEM */
static unsigned long device_address(int id, unsigned long addr)
{
	int hw_interconnect = ocmem_client_table[id].hw_interconnect;
	unsigned long ret_addr = 0x0;

	switch (hw_interconnect) {
	case OCMEM_PORT:
	case OCMEM_OCMEMNOC:
		ret_addr = phys_to_offset(addr);
		break;
	case OCMEM_SYSNOC:
		ret_addr = addr;
		break;
	case OCMEM_BLOCKED:
		ret_addr = 0x0;
		break;
	}
	return ret_addr;
}

/* Returns the address as viewed by the core */
static unsigned long core_address(int id, unsigned long addr)
{
	int hw_interconnect = ocmem_client_table[id].hw_interconnect;
	unsigned long ret_addr = 0x0;

	switch (hw_interconnect) {
	case OCMEM_PORT:
	case OCMEM_OCMEMNOC:
		ret_addr = offset_to_phys(addr);
		break;
	case OCMEM_SYSNOC:
		ret_addr = addr;
		break;
	case OCMEM_BLOCKED:
		ret_addr = 0x0;
		break;
	}
	return ret_addr;
}

static inline struct ocmem_zone *zone_of(struct ocmem_req *req)
{
	int owner;
	if (!req)
		return NULL;
	owner = req->owner;
	return get_zone(owner);
}

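/*
 * Insert a region into the scheduler interval tree, keyed by its
 * start address. Must be called with sched_mutex held.
 */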
static int insert_region(struct ocmem_region *region)
{
	struct rb_root *root = &sched_tree;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ocmem_region *tmp = NULL;
	unsigned long addr = region->r_start;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct ocmem_region, region_rb);

		if (tmp->r_end > addr) {
			if (tmp->r_start <= addr)
				break;
			p = &(*p)->rb_left;
		} else if (tmp->r_end <= addr)
			p = &(*p)->rb_right;
	}
	rb_link_node(&region->region_rb, parent, p);
	rb_insert_color(&region->region_rb, root);
	return 0;
}

static int remove_region(struct ocmem_region *region)
{
	struct rb_root *root = &sched_tree;
	rb_erase(&region->region_rb, root);
	return 0;
}

static struct ocmem_req *ocmem_create_req(void)
{
	struct ocmem_req *p = NULL;

	p = kzalloc(sizeof(struct ocmem_req), GFP_KERNEL);
	if (!p)
		return NULL;

	INIT_LIST_HEAD(&p->zone_list);
	INIT_LIST_HEAD(&p->sched_list);
	init_rwsem(&p->rw_sem);
	SET_STATE(p, R_FREE);
	pr_debug("request %p created\n", p);
	return p;
}

static int ocmem_destroy_req(struct ocmem_req *req)
{
	kfree(req);
	return 0;
}

static struct ocmem_region *create_region(void)
{
	struct ocmem_region *p = NULL;

	p = kzalloc(sizeof(struct ocmem_region), GFP_KERNEL);
	if (!p)
		return NULL;
	idr_init(&p->region_idr);
	INIT_LIST_HEAD(&p->eviction_list);
	p->r_start = p->r_end = p->r_sz = 0x0;
	p->max_prio = NO_PRIO;
	return p;
}

static int destroy_region(struct ocmem_region *region)
{
	kfree(region);
	return 0;
}

static int attach_req(struct ocmem_region *region, struct ocmem_req *req)
{
	int ret, id;

	while (1) {
		if (idr_pre_get(&region->region_idr, GFP_KERNEL) == 0)
			return -ENOMEM;

		ret = idr_get_new_above(&region->region_idr, req, 1, &id);

		if (ret != -EAGAIN)
			break;
	}

	if (!ret) {
		req->req_id = id;
		pr_debug("ocmem: request %p(id:%d) attached to region %p\n",
				req, id, region);
		return 0;
	}
	return -EINVAL;
}

static int detach_req(struct ocmem_region *region, struct ocmem_req *req)
{
	idr_remove(&region->region_idr, req->req_id);
	return 0;
}

static int populate_region(struct ocmem_region *region, struct ocmem_req *req)
{
	region->r_start = req->req_start;
	region->r_end = req->req_end;
	region->r_sz = req->req_end - req->req_start + 1;
	return 0;
}

static int region_req_count(int id, void *ptr, void *data)
{
	int *count = data;
	*count = *count + 1;
	return 0;
}

static int req_count(struct ocmem_region *region)
{
	int count = 0;
	idr_for_each(&region->region_idr, region_req_count, &count);
	return count;
}

static int compute_max_prio(int id, void *ptr, void *data)
{
	int *max = data;
	struct ocmem_req *req = ptr;

	if (req->prio > *max)
		*max = req->prio;
	return 0;
}

static int update_region_prio(struct ocmem_region *region)
{
	int max_prio = NO_PRIO;

	if (req_count(region) != 0) {
		idr_for_each(&region->region_idr, compute_max_prio, &max_prio);
		region->max_prio = max_prio;
	} else {
		region->max_prio = NO_PRIO;
	}
	pr_debug("ocmem: Updating prio of region %p as %d\n",
			region, max_prio);

	return 0;
}

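/*
 * Find the region containing 'addr', or failing that, the lowest region
 * that ends above 'addr'. Returns NULL when no such region exists.
 * Must be called with sched_mutex held.
 */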
static struct ocmem_region *find_region(unsigned long addr)
{
	struct ocmem_region *region = NULL;
	struct rb_node *rb_node = NULL;

	rb_node = sched_tree.rb_node;

	while (rb_node) {
		struct ocmem_region *tmp_region = NULL;
		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);

		if (tmp_region->r_end > addr) {
			region = tmp_region;
			if (tmp_region->r_start <= addr)
				break;
			rb_node = rb_node->rb_left;
		} else {
			rb_node = rb_node->rb_right;
		}
	}
	return region;
}

static struct ocmem_region *find_region_intersection(unsigned long start,
				unsigned long end)
{
	struct ocmem_region *region = NULL;
	region = find_region(start);
	if (region && end <= region->r_start)
		region = NULL;
	return region;
}

static struct ocmem_region *find_region_match(unsigned long start,
				unsigned long end)
{
	struct ocmem_region *region = NULL;
	region = find_region(start);
	if (region && start == region->r_start && end == region->r_end)
		return region;
	return NULL;
}

static struct ocmem_req *find_req_match(int owner, struct ocmem_region *region)
{
	struct ocmem_req *req = NULL;

	if (!region)
		return NULL;

	req = idr_find(&region->region_idr, owner);

	return req;
}

/* Must be called with req->rw_sem held */
static inline int is_mapped(struct ocmem_req *req)
{
	return TEST_STATE(req, R_MAPPED);
}

/* Must be called with sched_mutex held */
static int __sched_unmap(struct ocmem_req *req)
{
	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	if (!is_mapped(req)) {
		pr_err("Request is not currently mapped");
		goto invalid_op_error;
	}

	/* Update the request state */
	CLEAR_STATE(req, R_MAPPED);
	SET_STATE(req, R_MUST_MAP);

	return OP_COMPLETE;

invalid_op_error:
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_map(struct ocmem_req *req)
{
	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	/* Update the request state */
	CLEAR_STATE(req, R_MUST_MAP);
	SET_STATE(req, R_MAPPED);

	return OP_COMPLETE;

invalid_op_error:
	return OP_FAIL;
}

static int do_map(struct ocmem_req *req)
{
	int rc = 0;

	down_write(&req->rw_sem);

	mutex_lock(&sched_mutex);
	rc = __sched_map(req);
	mutex_unlock(&sched_mutex);

	up_write(&req->rw_sem);

	if (rc == OP_FAIL)
		return -EINVAL;

	return 0;
}

static int do_unmap(struct ocmem_req *req)
{
	int rc = 0;

	down_write(&req->rw_sem);

	mutex_lock(&sched_mutex);
	rc = __sched_unmap(req);
	mutex_unlock(&sched_mutex);

	up_write(&req->rw_sem);

	if (rc == OP_FAIL)
		return -EINVAL;

	return 0;
}

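/*
 * Prepare a request for use by its client: enable the core clock (and the
 * interface clock for OCMEM-NOC clients), secure the interval via
 * ocmem_lock(), and mark the request as mapped.
 */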
static int process_map(struct ocmem_req *req, unsigned long start,
				unsigned long end)
{
	int rc = 0;

	rc = ocmem_enable_core_clock();

	if (rc < 0)
		goto core_clock_fail;

	if (is_iface_access(req->owner)) {
		rc = ocmem_enable_iface_clock();

		if (rc < 0)
			goto iface_clock_fail;
	}

	rc = ocmem_lock(req->owner, phys_to_offset(req->req_start), req->req_sz,
							get_mode(req->owner));

	if (rc < 0) {
		pr_err("ocmem: Failed to secure request %p for %d\n", req,
				req->owner);
		goto lock_failed;
	}

	rc = do_map(req);

	if (rc < 0) {
		pr_err("ocmem: Failed to map request %p for %d\n",
						req, req->owner);
		goto process_map_fail;
	}
	pr_debug("ocmem: Mapped request %p\n", req);
	return 0;

process_map_fail:
	ocmem_unlock(req->owner, phys_to_offset(req->req_start), req->req_sz);
lock_failed:
	if (is_iface_access(req->owner))
		ocmem_disable_iface_clock();
iface_clock_fail:
	ocmem_disable_core_clock();
core_clock_fail:
	pr_err("ocmem: Failed to map ocmem request\n");
	return rc;
}

static int process_unmap(struct ocmem_req *req, unsigned long start,
				unsigned long end)
{
	int rc = 0;

	rc = do_unmap(req);

	if (rc < 0)
		goto process_unmap_fail;

	rc = ocmem_unlock(req->owner, phys_to_offset(req->req_start),
				req->req_sz);

	if (rc < 0) {
		pr_err("ocmem: Failed to un-secure request %p for %d\n", req,
				req->owner);
		goto unlock_failed;
	}

	if (is_iface_access(req->owner))
		ocmem_disable_iface_clock();
	ocmem_disable_core_clock();
	pr_debug("ocmem: Unmapped request %p\n", req);
	return 0;

unlock_failed:
process_unmap_fail:
	pr_err("ocmem: Failed to unmap ocmem request\n");
	return rc;
}

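/*
 * Grow an existing allocation towards its requested maximum, backing off
 * in 'step' sized chunks when a higher priority region is in the way.
 * Must be called with sched_mutex held.
 */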
static int __sched_grow(struct ocmem_req *req, bool can_block)
{
	unsigned long min = req->req_min;
	unsigned long max = req->req_max;
	unsigned long step = req->req_step;
	int owner = req->owner;
	unsigned long curr_sz = 0;
	unsigned long growth_sz = 0;
	unsigned long curr_start = 0;
	enum client_prio prio = req->prio;
	unsigned long alloc_addr = 0x0;
	bool retry;
	struct ocmem_region *spanned_r = NULL;
	struct ocmem_region *overlap_r = NULL;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	struct ocmem_zone *zone = get_zone(owner);
	struct ocmem_region *region = NULL;

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req) {
		pr_err("Could not find backing region for req");
		goto invalid_op_error;
	}

	if (matched_req != req) {
		pr_err("Request does not match backing req");
		goto invalid_op_error;
	}

	curr_sz = matched_req->req_sz;
	curr_start = matched_req->req_start;
	growth_sz = matched_req->req_max - matched_req->req_sz;

	pr_debug("Attempting to grow req %p from %lx to %lx\n",
			req, matched_req->req_sz, matched_req->req_max);

	retry = false;

	pr_debug("ocmem: GROW: growth size %lx\n", growth_sz);

retry_next_step:

	spanned_r = NULL;
	overlap_r = NULL;

	spanned_r = find_region(zone->z_head);
	overlap_r = find_region_intersection(zone->z_head,
				zone->z_head + growth_sz);

	if (overlap_r == NULL) {
		/* no conflicting regions, schedule this region */
		zone->z_ops->free(zone, curr_start, curr_sz);
		alloc_addr = zone->z_ops->allocate(zone, curr_sz + growth_sz);

		if (alloc_addr < 0) {
			pr_err("ocmem: zone allocation operation failed\n");
			goto internal_error;
		}

		curr_sz += growth_sz;
		/* Detach the region from the interval tree */
		/* This is to guarantee that any change in size
		 * causes the tree to be rebalanced if required */

		detach_req(matched_region, req);
		if (req_count(matched_region) == 0) {
			remove_region(matched_region);
			region = matched_region;
		} else {
			region = create_region();
			if (!region) {
				pr_err("ocmem: Unable to create region\n");
				goto region_error;
			}
		}

		/* update the request */
		req->req_start = alloc_addr;
		/* increment the size to reflect new length */
		req->req_sz = curr_sz;
		req->req_end = alloc_addr + req->req_sz - 1;

		/* update request state */
		CLEAR_STATE(req, R_MUST_GROW);
		SET_STATE(req, R_ALLOCATED);
		SET_STATE(req, R_MUST_MAP);
		req->op = SCHED_MAP;

		/* update the region with new req */
		attach_req(region, req);
		populate_region(region, req);
		update_region_prio(region);

		/* update the tree with new region */
		if (insert_region(region)) {
			pr_err("ocmem: Failed to insert the region\n");
			goto region_error;
		}

		if (retry) {
			SET_STATE(req, R_MUST_GROW);
			SET_STATE(req, R_PENDING);
			req->op = SCHED_GROW;
			return OP_PARTIAL;
		}
	} else if (spanned_r != NULL && overlap_r != NULL) {
		/* resolve conflicting regions based on priority */
		if (overlap_r->max_prio < prio) {
			/* Growth cannot be triggered unless a previous
			 * client of lower priority was evicted */
			pr_err("ocmem: Invalid growth scheduled\n");
			/* This is serious enough to fail */
			BUG();
			return OP_FAIL;
		} else if (overlap_r->max_prio > prio) {
			if (min == max) {
				/* Cannot grow at this time, try later */
				SET_STATE(req, R_PENDING);
				SET_STATE(req, R_MUST_GROW);
				return OP_RESCHED;
			} else {
				/* Try to grow in steps */
				growth_sz -= step;
				/* We are OOM at this point so need to retry */
				if (growth_sz <= curr_sz) {
					SET_STATE(req, R_PENDING);
					SET_STATE(req, R_MUST_GROW);
					return OP_RESCHED;
				}
				retry = true;
				pr_debug("ocmem: Attempting with reduced size %lx\n",
						growth_sz);
				goto retry_next_step;
			}
		} else {
			pr_err("ocmem: grow: New Region %p Existing %p\n",
					matched_region, overlap_r);
			pr_err("ocmem: Undetermined behavior\n");
			/* This is serious enough to fail */
			BUG();
		}
	} else if (spanned_r == NULL && overlap_r != NULL) {
		goto err_not_supported;
	}

	return OP_COMPLETE;

err_not_supported:
	pr_err("ocmem: Scheduled unsupported operation\n");
	return OP_FAIL;
region_error:
	zone->z_ops->free(zone, alloc_addr, curr_sz);
	detach_req(region, req);
	update_region_prio(region);
	/* req is going to be destroyed by the caller anyways */
internal_error:
	destroy_region(region);
invalid_op_error:
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_free(struct ocmem_req *req)
{
	int owner = req->owner;
	int ret = 0;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;

	struct ocmem_zone *zone = get_zone(owner);

	BUG_ON(!zone);

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req)
		goto invalid_op_error;
	if (matched_req != req)
		goto invalid_op_error;

	ret = zone->z_ops->free(zone,
		matched_req->req_start, matched_req->req_sz);

	if (ret < 0)
		goto err_op_fail;

	detach_req(matched_region, matched_req);
	update_region_prio(matched_region);
	if (req_count(matched_region) == 0) {
		remove_region(matched_region);
		destroy_region(matched_region);
	}

	/* Update the request */
	req->req_start = 0x0;
	req->req_sz = 0x0;
	req->req_end = 0x0;
	SET_STATE(req, R_FREE);
	return OP_COMPLETE;
invalid_op_error:
	pr_err("ocmem: free: Failed to find matching region\n");
err_op_fail:
	pr_err("ocmem: free: Failed\n");
	return OP_FAIL;
}

/* Must be called with sched_mutex held */
static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz)
{
	int owner = req->owner;
	int ret = 0;

	struct ocmem_req *matched_req = NULL;
	struct ocmem_region *matched_region = NULL;
	struct ocmem_region *region = NULL;
	unsigned long alloc_addr = 0x0;

	struct ocmem_zone *zone = get_zone(owner);

	BUG_ON(!zone);

	/* The shrink should not be called for zero size */
	BUG_ON(new_sz == 0);

	matched_region = find_region_match(req->req_start, req->req_end);
	matched_req = find_req_match(req->req_id, matched_region);

	if (!matched_region || !matched_req)
		goto invalid_op_error;
	if (matched_req != req)
		goto invalid_op_error;

	ret = zone->z_ops->free(zone,
		matched_req->req_start, matched_req->req_sz);

	if (ret < 0) {
		pr_err("Zone free operation failed\n");
		goto internal_error;
	}

	alloc_addr = zone->z_ops->allocate(zone, new_sz);

	if (alloc_addr < 0) {
		pr_err("Zone Allocation operation failed\n");
		goto internal_error;
	}

	/* Detach the region from the interval tree */
	/* This is to guarantee that the change in size
	 * causes the tree to be rebalanced if required */

	detach_req(matched_region, req);
	if (req_count(matched_region) == 0) {
		remove_region(matched_region);
		region = matched_region;
	} else {
		region = create_region();
		if (!region) {
			pr_err("ocmem: Unable to create region\n");
			goto internal_error;
		}
	}
	/* update the request */
	req->req_start = alloc_addr;
	req->req_sz = new_sz;
	req->req_end = alloc_addr + req->req_sz;

	if (req_count(region) == 0) {
		remove_region(matched_region);
		destroy_region(matched_region);
	}

	/* update request state */
	SET_STATE(req, R_MUST_GROW);
	SET_STATE(req, R_MUST_MAP);
	req->op = SCHED_MAP;

	/* attach the request to the region */
	attach_req(region, req);
	populate_region(region, req);
	update_region_prio(region);

	/* update the tree with new region */
	if (insert_region(region)) {
		pr_err("ocmem: Failed to insert the region\n");
		zone->z_ops->free(zone, alloc_addr, new_sz);
		detach_req(region, req);
		update_region_prio(region);
		/* req will be destroyed by the caller */
		goto region_error;
	}
	return OP_COMPLETE;

region_error:
	destroy_region(region);
internal_error:
	pr_err("ocmem: shrink: Failed\n");
	return OP_FAIL;
invalid_op_error:
	pr_err("ocmem: shrink: Failed to find matching region\n");
	return OP_FAIL;
}

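/*
 * Core allocation policy: start with the maximum requested size and step
 * down towards the minimum; lower priority conflicts trigger an eviction,
 * higher priority conflicts either reschedule the request (when blocking
 * is allowed) or retry with a smaller size.
 */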
/* Must be called with sched_mutex held */
static int __sched_allocate(struct ocmem_req *req, bool can_block,
				bool can_wait)
{
	unsigned long min = req->req_min;
	unsigned long max = req->req_max;
	unsigned long step = req->req_step;
	int owner = req->owner;
	unsigned long sz = max;
	enum client_prio prio = req->prio;
	unsigned long alloc_addr = 0x0;
	bool retry;

	struct ocmem_region *spanned_r = NULL;
	struct ocmem_region *overlap_r = NULL;

	struct ocmem_zone *zone = get_zone(owner);
	struct ocmem_region *region = NULL;

	BUG_ON(!zone);

	if (min > (zone->z_end - zone->z_start)) {
		pr_err("ocmem: requested minimum size exceeds quota\n");
		goto invalid_op_error;
	}

	if (max > (zone->z_end - zone->z_start)) {
		pr_err("ocmem: requested maximum size exceeds quota\n");
		goto invalid_op_error;
	}

	if (min > zone->z_free) {
		pr_err("ocmem: out of memory for zone %d\n", owner);
		goto invalid_op_error;
	}

	region = create_region();

	if (!region) {
		pr_err("ocmem: Unable to create region\n");
		goto invalid_op_error;
	}

	retry = false;

	pr_debug("ocmem: do_allocate: %s request size %lx\n",
					get_name(owner), sz);

retry_next_step:

	spanned_r = NULL;
	overlap_r = NULL;

	spanned_r = find_region(zone->z_head);
	overlap_r = find_region_intersection(zone->z_head, zone->z_head + sz);

	if (overlap_r == NULL) {
		/* no conflicting regions, schedule this region */
		alloc_addr = zone->z_ops->allocate(zone, sz);

		if (alloc_addr < 0) {
			pr_err("Zone Allocation operation failed\n");
			goto internal_error;
		}

		/* update the request */
		req->req_start = alloc_addr;
		req->req_end = alloc_addr + sz - 1;
		req->req_sz = sz;
		req->zone = zone;

		/* update request state */
		CLEAR_STATE(req, R_FREE);
		CLEAR_STATE(req, R_PENDING);
		SET_STATE(req, R_ALLOCATED);
		SET_STATE(req, R_MUST_MAP);
		req->op = SCHED_NOP;

		/* attach the request to the region */
		attach_req(region, req);
		populate_region(region, req);
		update_region_prio(region);

		/* update the tree with new region */
		if (insert_region(region)) {
			pr_err("ocmem: Failed to insert the region\n");
			zone->z_ops->free(zone, alloc_addr, sz);
			detach_req(region, req);
			update_region_prio(region);
			/* req will be destroyed by the caller */
			goto internal_error;
		}

		if (retry) {
			SET_STATE(req, R_MUST_GROW);
			SET_STATE(req, R_PENDING);
			req->op = SCHED_GROW;
			return OP_PARTIAL;
		}
	} else if (spanned_r != NULL && overlap_r != NULL) {
		/* resolve conflicting regions based on priority */
		if (overlap_r->max_prio < prio) {
			if (min == max) {
				req->req_start = zone->z_head;
				req->req_end = zone->z_head + sz - 1;
				req->req_sz = 0x0;
				req->edata = NULL;
				goto trigger_eviction;
			} else {
				/* Try to allocate at least 'min' immediately */
				sz -= step;
				if (sz < min)
					goto err_out_of_mem;
				retry = true;
				pr_debug("ocmem: Attempting with reduced size %lx\n",
						sz);
				goto retry_next_step;
			}
		} else if (overlap_r->max_prio > prio) {
			if (can_block == true) {
				SET_STATE(req, R_PENDING);
				SET_STATE(req, R_MUST_GROW);
				return OP_RESCHED;
			} else {
				if (min == max) {
					pr_err("Cannot allocate %lx synchronously\n",
							sz);
					goto err_out_of_mem;
				} else {
					sz -= step;
					if (sz < min)
						goto err_out_of_mem;
					retry = true;
					pr_debug("ocmem: Attempting reduced size %lx\n",
							sz);
					goto retry_next_step;
				}
			}
		} else {
			pr_err("ocmem: Undetermined behavior\n");
			pr_err("ocmem: New Region %p Existing %p\n", region,
					overlap_r);
			/* This is serious enough to fail */
			BUG();
		}
	} else if (spanned_r == NULL && overlap_r != NULL)
		goto err_not_supported;

	return OP_COMPLETE;

trigger_eviction:
	pr_debug("Trigger eviction of region %p\n", overlap_r);
	destroy_region(region);
	return OP_EVICT;

err_not_supported:
	pr_err("ocmem: Scheduled unsupported operation\n");
	return OP_FAIL;

err_out_of_mem:
	pr_err("ocmem: Out of memory during allocation\n");
internal_error:
	destroy_region(region);
invalid_op_error:
	return OP_FAIL;
}

static int sched_enqueue(struct ocmem_req *priv)
{
	struct ocmem_req *next = NULL;
	mutex_lock(&sched_queue_mutex);
	list_add_tail(&priv->sched_list, &sched_queue[priv->owner]);
	pr_debug("enqueued req %p\n", priv);
	list_for_each_entry(next, &sched_queue[priv->owner], sched_list) {
		pr_debug("pending requests for client %p\n", next);
	}
	mutex_unlock(&sched_queue_mutex);
	return 0;
}

static void sched_dequeue(struct ocmem_req *victim_req)
{
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;
	int id;

	if (!victim_req)
		return;

	id = victim_req->owner;

	mutex_lock(&sched_queue_mutex);

	if (list_empty(&sched_queue[id]))
		goto dequeue_done;

	list_for_each_entry_safe(req, next, &sched_queue[id], sched_list)
	{
		if (req == victim_req) {
			pr_debug("ocmem: Cancelling pending request %p\n",
					req);
			list_del(&req->sched_list);
			goto dequeue_done;
		}
	}

dequeue_done:
	mutex_unlock(&sched_queue_mutex);
	return;
}

static struct ocmem_req *ocmem_fetch_req(void)
{
	int i;
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;

	mutex_lock(&sched_queue_mutex);
	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
		if (list_empty(&sched_queue[i]))
			continue;
		list_for_each_entry_safe(req, next, &sched_queue[i], sched_list)
		{
			if (req) {
				pr_debug("ocmem: Fetched pending request %p\n",
						req);
				list_del(&req->sched_list);
				break;
			}
		}
	}
	mutex_unlock(&sched_queue_mutex);
	return req;
}

unsigned long process_quota(int id)
{
	struct ocmem_zone *zone = NULL;

	if (is_blocked(id))
		return 0;

	zone = get_zone(id);

	if (zone && zone->z_pool)
		return zone->z_end - zone->z_start;
	else
		return 0;
}

static int do_grow(struct ocmem_req *req)
{
	struct ocmem_buf *buffer = NULL;
	bool can_block = true;
	int rc = 0;

	down_write(&req->rw_sem);
	buffer = req->buffer;

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_grow(req, can_block);
	mutex_unlock(&sched_mutex);

	if (rc == OP_FAIL)
		goto err_op_fail;

	if (rc == OP_RESCHED) {
		pr_debug("ocmem: Enqueue this allocation");
		sched_enqueue(req);
	} else if (rc == OP_COMPLETE || rc == OP_PARTIAL) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_op_fail:
	up_write(&req->rw_sem);
	return -EINVAL;
}

static int process_grow(struct ocmem_req *req)
{
	int rc = 0;
	unsigned long offset = 0;

	/* Attempt to grow the region */
	rc = do_grow(req);

	if (rc < 0)
		return -EINVAL;

	rc = process_map(req, req->req_start, req->req_end);
	if (rc < 0)
		return -EINVAL;

	offset = phys_to_offset(req->req_start);

	rc = ocmem_memory_on(req->owner, offset, req->req_sz);

	if (rc < 0) {
		pr_err("Failed to switch ON memory macros\n");
		goto power_ctl_error;
	}

	/* Notify the client about the buffer growth */
	rc = dispatch_notification(req->owner, OCMEM_ALLOC_GROW, req->buffer);
	if (rc < 0) {
		pr_err("No notifier callback to cater for req %p event: %d\n",
				req, OCMEM_ALLOC_GROW);
		BUG();
	}
	return 0;
power_ctl_error:
	return -EINVAL;
}

static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
{
	int rc = 0;
	struct ocmem_buf *buffer = NULL;

	down_write(&req->rw_sem);
	buffer = req->buffer;

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_shrink(req, shrink_size);
	mutex_unlock(&sched_mutex);

	if (rc == OP_FAIL)
		goto err_op_fail;
	else if (rc == OP_COMPLETE) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_op_fail:
	up_write(&req->rw_sem);
	return -EINVAL;
}

static void ocmem_sched_wk_func(struct work_struct *work);
DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func);

static int ocmem_schedule_pending(void)
{
	bool need_sched = false;
	int i = 0;

	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
		if (!list_empty(&sched_queue[i])) {
			need_sched = true;
			break;
		}
	}

	if (need_sched == true) {
		cancel_delayed_work(&ocmem_sched_thread);
		schedule_delayed_work(&ocmem_sched_thread,
					msecs_to_jiffies(SCHED_DELAY));
		pr_debug("ocmem: Scheduled delayed work\n");
	}
	return 0;
}

static int do_free(struct ocmem_req *req)
{
	int rc = 0;
	struct ocmem_buf *buffer = req->buffer;

	down_write(&req->rw_sem);

	if (is_mapped(req)) {
		pr_err("ocmem: Buffer needs to be unmapped before free\n");
		goto err_free_fail;
	}

	pr_debug("ocmem: do_free: client %s req %p\n", get_name(req->owner),
			req);
	/* Grab the sched mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_free(req);
	mutex_unlock(&sched_mutex);

	switch (rc) {

	case OP_COMPLETE:
		buffer->addr = 0x0;
		buffer->len = 0x0;
		break;
	case OP_FAIL:
	default:
		goto err_free_fail;
		break;
	}

	up_write(&req->rw_sem);
	return 0;
err_free_fail:
	up_write(&req->rw_sem);
	pr_err("ocmem: freeing req %p failed\n", req);
	return -EINVAL;
}

int process_free(int id, struct ocmem_handle *handle)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	unsigned long offset = 0;
	int rc = 0;

	if (is_blocked(id)) {
		pr_err("Client %d cannot request free\n", id);
		return -EINVAL;
	}

	req = handle_to_req(handle);
	buffer = handle_to_buffer(handle);

	if (!req)
		return -EINVAL;

	if (req->req_start != core_address(id, buffer->addr)) {
		pr_err("Invalid buffer handle passed for free\n");
		return -EINVAL;
	}

	mutex_lock(&sched_mutex);
	sched_dequeue(req);
	mutex_unlock(&sched_mutex);

	if (TEST_STATE(req, R_MAPPED)) {
		/* unmap the interval and clear the memory */
		rc = process_unmap(req, req->req_start, req->req_end);
		if (rc < 0)
			return -EINVAL;
	}

	/* Turn off the memory */
	if (req->req_sz != 0) {

		offset = phys_to_offset(req->req_start);

		rc = ocmem_memory_off(req->owner, offset, req->req_sz);

		if (rc < 0) {
			pr_err("Failed to switch OFF memory macros\n");
			return -EINVAL;
		}

	}

	if (!TEST_STATE(req, R_FREE)) {
		/* free the allocation */
		rc = do_free(req);
		if (rc < 0)
			return -EINVAL;
	}

	inc_ocmem_stat(zone_of(req), NR_FREES);

	ocmem_destroy_req(req);
	handle->req = NULL;

	ocmem_schedule_pending();
	return 0;
}

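/*
 * Worker for queued RDM transfers: performs the data move between DDR and
 * OCMEM and then notifies the client whether the map/unmap succeeded.
 */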
static void ocmem_rdm_worker(struct work_struct *work)
{
	int offset = 0;
	int rc = 0;
	int event;
	struct ocmem_rdm_work *work_data = container_of(work,
				struct ocmem_rdm_work, work);
	int id = work_data->id;
	struct ocmem_map_list *list = work_data->list;
	int direction = work_data->direction;
	struct ocmem_handle *handle = work_data->handle;
	struct ocmem_req *req = handle_to_req(handle);
	struct ocmem_buf *buffer = handle_to_buffer(handle);

	down_write(&req->rw_sem);
	offset = phys_to_offset(req->req_start);
	rc = ocmem_rdm_transfer(id, list, offset, direction);
	if (work_data->direction == TO_OCMEM)
		event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
	else
		event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;
	up_write(&req->rw_sem);
	kfree(work_data);
	dispatch_notification(id, event, buffer);
}

int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle,
			struct ocmem_map_list *list, int direction)
{
	struct ocmem_rdm_work *work_data = NULL;

	down_write(&req->rw_sem);

	work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC);
	if (!work_data)
		BUG();

	work_data->handle = handle;
	work_data->list = list;
	work_data->id = req->owner;
	work_data->direction = direction;
	INIT_WORK(&work_data->work, ocmem_rdm_worker);
	up_write(&req->rw_sem);
	queue_work(ocmem_rdm_wq, &work_data->work);
	return 0;
}

int process_xfer_out(int id, struct ocmem_handle *handle,
			struct ocmem_map_list *list)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;

	if (!is_mapped(req)) {
		pr_err("Buffer is not currently mapped\n");
		goto transfer_out_error;
	}

	rc = queue_transfer(req, handle, list, TO_DDR);

	if (rc < 0) {
		pr_err("Failed to queue rdm transfer to DDR\n");
		inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
		goto transfer_out_error;
	}

	inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_DDR);
	return 0;

transfer_out_error:
	return -EINVAL;
}

int process_xfer_in(int id, struct ocmem_handle *handle,
			struct ocmem_map_list *list)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;

	if (!is_mapped(req)) {
		pr_err("Buffer is not already mapped for transfer\n");
		goto transfer_in_error;
	}

	inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_OCMEM);
	rc = queue_transfer(req, handle, list, TO_OCMEM);

	if (rc < 0) {
		pr_err("Failed to queue rdm transfer to OCMEM\n");
		inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS);
		goto transfer_in_error;
	}

	return 0;
transfer_in_error:
	return -EINVAL;
}

int process_shrink(int id, struct ocmem_handle *handle, unsigned long size)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	struct ocmem_eviction_data *edata = NULL;
	int rc = 0;

	if (is_blocked(id)) {
		pr_err("Client %d cannot request shrink\n", id);
		return -EINVAL;
	}

	req = handle_to_req(handle);
	buffer = handle_to_buffer(handle);

	if (!req)
		return -EINVAL;

	if (req->req_start != core_address(id, buffer->addr)) {
		pr_err("Invalid buffer handle passed for shrink\n");
		return -EINVAL;
	}

	edata = req->edata;

	if (!edata) {
		pr_err("Unable to find eviction data\n");
		return -EINVAL;
	}

	pr_debug("Found edata %p in request %p\n", edata, req);

	inc_ocmem_stat(zone_of(req), NR_SHRINKS);

	if (size == 0) {
		pr_debug("req %p being shrunk to zero\n", req);
		if (is_mapped(req))
			rc = process_unmap(req, req->req_start, req->req_end);
		if (rc < 0)
			return -EINVAL;
		rc = do_free(req);
		if (rc < 0)
			return -EINVAL;
	} else {
		rc = do_shrink(req, size);
		if (rc < 0)
			return -EINVAL;
	}

	req->edata = NULL;
	CLEAR_STATE(req, R_ALLOCATED);
	SET_STATE(req, R_FREE);

	if (atomic_dec_and_test(&edata->pending)) {
		pr_debug("ocmem: All conflicting allocations were shrunk\n");
		complete(&edata->completion);
	}

	return 0;
}

int process_xfer(int id, struct ocmem_handle *handle,
		struct ocmem_map_list *list, int direction)
{
	int rc = 0;

	if (is_tcm(id)) {
		WARN(1, "Mapping operation is invalid for client\n");
		return -EINVAL;
	}

	if (direction == TO_DDR)
		rc = process_xfer_out(id, handle, list);
	else if (direction == TO_OCMEM)
		rc = process_xfer_in(id, handle, list);
	return rc;
}

static struct ocmem_eviction_data *init_eviction(int id)
{
	struct ocmem_eviction_data *edata = NULL;
	int prio = ocmem_client_table[id].priority;

	edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC);

	if (!edata) {
		pr_err("ocmem: Could not allocate eviction data\n");
		return NULL;
	}

	INIT_LIST_HEAD(&edata->victim_list);
	INIT_LIST_HEAD(&edata->req_list);
	edata->prio = prio;
	atomic_set(&edata->pending, 0);
	return edata;
}

static void free_eviction(struct ocmem_eviction_data *edata)
{
	if (!edata)
		return;

	if (!list_empty(&edata->req_list))
		pr_err("ocmem: Eviction data %p not empty\n", edata);

	kfree(edata);
	edata = NULL;
}

static bool is_overlapping(struct ocmem_req *new, struct ocmem_req *old)
{
	if (!new || !old)
		return false;

	pr_debug("check overlap [%lx -- %lx] on [%lx -- %lx]\n",
			new->req_start, new->req_end,
			old->req_start, old->req_end);

	if ((new->req_start < old->req_start &&
		new->req_end >= old->req_start) ||
		(new->req_start >= old->req_start &&
		new->req_start <= old->req_end &&
		new->req_end >= old->req_end)) {
		pr_debug("request %p overlaps with existing req %p\n",
				new, old);
		return true;
	}
	return false;
}

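/*
 * Walk the scheduler tree and collect every lower priority request that
 * must make way for 'edata->prio': all of them for a passive eviction,
 * only the overlapping ones otherwise. Returns -EINVAL when nothing
 * needs to be evicted.
 */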
static int __evict_common(struct ocmem_eviction_data *edata,
				struct ocmem_req *req)
{
	struct rb_node *rb_node = NULL;
	struct ocmem_req *e_req = NULL;
	bool needs_eviction = false;
	int j = 0;

	for (rb_node = rb_first(&sched_tree); rb_node;
				rb_node = rb_next(rb_node)) {

		struct ocmem_region *tmp_region = NULL;

		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);

		if (tmp_region->max_prio < edata->prio) {
			for (j = edata->prio - 1; j > NO_PRIO; j--) {
				needs_eviction = false;
				e_req = find_req_match(j, tmp_region);
				if (!e_req)
					continue;
				if (edata->passive == true) {
					needs_eviction = true;
				} else {
					needs_eviction = is_overlapping(req,
								e_req);
				}

				if (needs_eviction) {
					pr_debug("adding %p in region %p to eviction list\n",
							e_req, tmp_region);
					list_add_tail(
						&e_req->eviction_list,
						&edata->req_list);
					atomic_inc(&edata->pending);
					e_req->edata = edata;
				}
			}
		} else {
			pr_debug("Skipped region %p\n", tmp_region);
		}
	}

	pr_debug("%d requests will be evicted\n", atomic_read(&edata->pending));

	if (!atomic_read(&edata->pending))
		return -EINVAL;
	return 0;
}

static void trigger_eviction(struct ocmem_eviction_data *edata)
{
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;
	struct ocmem_buf buffer;

	if (!edata)
		return;

	BUG_ON(atomic_read(&edata->pending) == 0);

	init_completion(&edata->completion);

	list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
	{
		if (req) {
			pr_debug("ocmem: Evicting request %p\n", req);
			buffer.addr = req->req_start;
			buffer.len = 0x0;
			dispatch_notification(req->owner, OCMEM_ALLOC_SHRINK,
						&buffer);
		}
	}
	return;
}

int process_evict(int id)
{
	struct ocmem_eviction_data *edata = NULL;
	int rc = 0;

	edata = init_eviction(id);

	if (!edata)
		return -EINVAL;

	edata->passive = true;

	mutex_lock(&sched_mutex);

	rc = __evict_common(edata, NULL);

	if (rc < 0)
		goto skip_eviction;

	trigger_eviction(edata);

	evictions[id] = edata;

	mutex_unlock(&sched_mutex);

	wait_for_completion(&edata->completion);

	return 0;

skip_eviction:
	evictions[id] = NULL;
	mutex_unlock(&sched_mutex);
	return 0;
}

static int run_evict(struct ocmem_req *req)
{
	struct ocmem_eviction_data *edata = NULL;
	int rc = 0;

	if (!req)
		return -EINVAL;

	edata = init_eviction(req->owner);

	if (!edata)
		return -EINVAL;

	edata->passive = false;

	rc = __evict_common(edata, req);

	if (rc < 0)
		goto skip_eviction;

	trigger_eviction(edata);

	pr_debug("ocmem: attaching eviction %p to request %p", edata, req);
	req->edata = edata;

	wait_for_completion(&edata->completion);

	pr_debug("ocmem: eviction completed successfully\n");
	return 0;

skip_eviction:
	pr_err("ocmem: Unable to run eviction\n");
	free_eviction(edata);
	return -EINVAL;
}

static int __restore_common(struct ocmem_eviction_data *edata)
{
	struct ocmem_req *req = NULL;
	struct ocmem_req *next = NULL;

	if (!edata)
		return -EINVAL;

	list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
	{
		if (req) {
			pr_debug("ocmem: restoring evicted request %p\n",
					req);
			list_del(&req->eviction_list);
			req->op = SCHED_ALLOCATE;
			sched_enqueue(req);
			inc_ocmem_stat(zone_of(req), NR_RESTORES);
		}
	}

	pr_debug("Scheduled all evicted regions\n");

	return 0;
}

static int sched_restore(struct ocmem_req *req)
{
	int rc = 0;

	if (!req)
		return -EINVAL;

	if (!req->edata)
		return 0;

	rc = __restore_common(req->edata);

	if (rc < 0)
		return -EINVAL;

	free_eviction(req->edata);
	return 0;
}

int process_restore(int id)
{
	struct ocmem_eviction_data *edata = evictions[id];
	int rc = 0;

	if (!edata)
		return -EINVAL;

	rc = __restore_common(edata);

	if (rc < 0) {
		pr_err("Failed to restore evicted requests\n");
		return -EINVAL;
	}

	free_eviction(edata);
	evictions[id] = NULL;
	ocmem_schedule_pending();
	return 0;
}

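/*
 * Top level allocation path. When the scheduler asks for an eviction the
 * eviction is run synchronously, the evicted requests are queued for
 * restoration, and the allocation is retried under allocation_mutex.
 */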
static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait)
{
	int rc = 0;
	int ret = 0;
	struct ocmem_buf *buffer = req->buffer;

	down_write(&req->rw_sem);

	mutex_lock(&allocation_mutex);
retry_allocate:

	/* Take the scheduler mutex */
	mutex_lock(&sched_mutex);
	rc = __sched_allocate(req, can_block, can_wait);
	mutex_unlock(&sched_mutex);

	if (rc == OP_EVICT) {

		ret = run_evict(req);

		if (ret == 0) {
			rc = sched_restore(req);
			if (rc < 0) {
				pr_err("Failed to restore for req %p\n", req);
				goto err_allocate_fail;
			}
			req->edata = NULL;

			pr_debug("Attempting to re-allocate req %p\n", req);
			req->req_start = 0x0;
			req->req_end = 0x0;
			goto retry_allocate;
		} else {
			goto err_allocate_fail;
		}
	}

	mutex_unlock(&allocation_mutex);

	if (rc == OP_FAIL) {
		inc_ocmem_stat(zone_of(req), NR_ALLOCATION_FAILS);
		/* allocation_mutex was already dropped above on this path */
		goto err_allocate_nolock;
	}

	if (rc == OP_RESCHED) {
		buffer->addr = 0x0;
		buffer->len = 0x0;
		pr_debug("ocmem: Enqueuing req %p\n", req);
		sched_enqueue(req);
	} else if (rc == OP_PARTIAL) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
		inc_ocmem_stat(zone_of(req), NR_RANGE_ALLOCATIONS);
		pr_debug("ocmem: Enqueuing req %p\n", req);
		sched_enqueue(req);
	} else if (rc == OP_COMPLETE) {
		buffer->addr = device_address(req->owner, req->req_start);
		buffer->len = req->req_sz;
	}

	up_write(&req->rw_sem);
	return 0;
err_allocate_fail:
	mutex_unlock(&allocation_mutex);
err_allocate_nolock:
	up_write(&req->rw_sem);
	return -EINVAL;
}
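/*
 * Copy the contents of a client's OCMEM region into the DDR buffer at
 * @addr. The region is temporarily opened for dumping via
 * ocmem_enable_dump() and re-secured with ocmem_disable_dump() once the
 * copy is done.
 */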
static int do_dump(struct ocmem_req *req, unsigned long addr)
{
	void __iomem *req_vaddr;
	unsigned long offset = 0x0;
	int rc = 0;

	down_write(&req->rw_sem);

	offset = phys_to_offset(req->req_start);

	req_vaddr = ocmem_vaddr + offset;

	if (!req_vaddr)
		goto err_do_dump;

	rc = ocmem_enable_dump(req->owner, offset, req->req_sz);

	if (rc < 0)
		goto err_do_dump;

	pr_debug("Dumping client %s buffer ocmem p: %lx (v: %p) to ddr %lx\n",
			get_name(req->owner), req->req_start,
			req_vaddr, addr);

	/* the source is an __iomem mapping, so use memcpy_fromio() */
	memcpy_fromio((void *)addr, req_vaddr, req->req_sz);

	rc = ocmem_disable_dump(req->owner, offset, req->req_sz);

	if (rc < 0)
		pr_err("Failed to secure request %p of %s after dump\n",
				req, get_name(req->owner));

	up_write(&req->rw_sem);
	return 0;
err_do_dump:
	up_write(&req->rw_sem);
	return -EINVAL;
}
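/*
 * Synchronous allocation entry point: build an ocmem_req for the client,
 * run the allocation, and for any non-empty result map the region and
 * power on the backing memory macros before returning the buffer to the
 * caller.
 */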
int process_allocate(int id, struct ocmem_handle *handle,
			unsigned long min, unsigned long max,
			unsigned long step, bool can_block, bool can_wait)
{
	struct ocmem_req *req = NULL;
	struct ocmem_buf *buffer = NULL;
	int rc = 0;
	unsigned long offset = 0;

	/* sanity checks */
	if (is_blocked(id)) {
		pr_err("Client %d cannot request allocation\n", id);
		return -EINVAL;
	}

	if (handle->req != NULL) {
		pr_err("Invalid handle passed in\n");
		return -EINVAL;
	}

	buffer = handle_to_buffer(handle);
	BUG_ON(buffer == NULL);

	/* prepare a request structure to represent this transaction */
	req = ocmem_create_req();
	if (!req)
		return -ENOMEM;

	req->owner = id;
	req->req_min = min;
	req->req_max = max;
	req->req_step = step;
	req->prio = ocmem_client_table[id].priority;
	req->op = SCHED_ALLOCATE;
	req->buffer = buffer;

	inc_ocmem_stat(zone_of(req), NR_REQUESTS);

	rc = do_allocate(req, can_block, can_wait);

	if (rc < 0)
		goto do_allocate_error;

	inc_ocmem_stat(zone_of(req), NR_SYNC_ALLOCATIONS);

	handle->req = req;

	if (req->req_sz != 0) {

		rc = process_map(req, req->req_start, req->req_end);
		if (rc < 0)
			goto map_error;

		offset = phys_to_offset(req->req_start);

		rc = ocmem_memory_on(req->owner, offset, req->req_sz);

		if (rc < 0) {
			pr_err("Failed to switch ON memory macros\n");
			goto power_ctl_error;
		}
	}

	return 0;

power_ctl_error:
	process_unmap(req, req->req_start, req->req_end);
map_error:
	handle->req = NULL;
	do_free(req);
do_allocate_error:
	ocmem_destroy_req(req);
	return -EINVAL;
}
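/*
 * Finish an allocation that was previously queued by the scheduler
 * worker. On success, map the region, power on the memory macros and
 * notify the client of the buffer growth via OCMEM_ALLOC_GROW.
 */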
int process_delayed_allocate(struct ocmem_req *req)
{
	struct ocmem_handle *handle = NULL;
	int rc = 0;
	int id = req->owner;
	unsigned long offset = 0;

	handle = req_to_handle(req);
	BUG_ON(handle == NULL);

	rc = do_allocate(req, true, false);

	if (rc < 0)
		goto do_allocate_error;

	/* The request can still be pending */
	if (TEST_STATE(req, R_PENDING))
		return 0;

	inc_ocmem_stat(zone_of(req), NR_ASYNC_ALLOCATIONS);

	if (req->req_sz != 0) {

		rc = process_map(req, req->req_start, req->req_end);
		if (rc < 0)
			goto map_error;

		offset = phys_to_offset(req->req_start);

		rc = ocmem_memory_on(req->owner, offset, req->req_sz);

		if (rc < 0) {
			pr_err("Failed to switch ON memory macros\n");
			goto power_ctl_error;
		}
	}

	/* Notify the client about the buffer growth */
	rc = dispatch_notification(id, OCMEM_ALLOC_GROW, req->buffer);
	if (rc < 0) {
		pr_err("No notifier callback to cater for req %p event: %d\n",
				req, OCMEM_ALLOC_GROW);
		BUG();
	}
	return 0;

power_ctl_error:
	process_unmap(req, req->req_start, req->req_end);
map_error:
	handle->req = NULL;
	do_free(req);
do_allocate_error:
	ocmem_destroy_req(req);
	return -EINVAL;
}
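/*
 * Validate that the client's buffer is mapped, then dump it to DDR under
 * sched_mutex, updating the zone's dump statistics on the way.
 */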
int process_dump(int id, struct ocmem_handle *handle, unsigned long addr)
{
	struct ocmem_req *req = NULL;
	int rc = 0;

	req = handle_to_req(handle);

	if (!req)
		return -EINVAL;

	if (!is_mapped(req)) {
		pr_err("Buffer is not mapped\n");
		goto dump_error;
	}

	inc_ocmem_stat(zone_of(req), NR_DUMP_REQUESTS);

	mutex_lock(&sched_mutex);
	rc = do_dump(req, addr);
	mutex_unlock(&sched_mutex);

	if (rc < 0)
		goto dump_error;

	inc_ocmem_stat(zone_of(req), NR_DUMP_COMPLETE);
	return 0;

dump_error:
	pr_err("Dumping OCMEM memory failed for client %d\n", id);
	return -EINVAL;
}
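/*
 * Scheduler work function: fetch the next pending request via
 * ocmem_fetch_req() and service it, either growing an existing allocation
 * or completing a delayed allocation.
 */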
static void ocmem_sched_wk_func(struct work_struct *work)
{
	struct ocmem_buf *buffer = NULL;
	struct ocmem_handle *handle = NULL;
	struct ocmem_req *req = ocmem_fetch_req();

	if (!req) {
		pr_debug("No Pending Requests found\n");
		return;
	}

	pr_debug("ocmem: sched_wk pending req %p\n", req);
	handle = req_to_handle(req);
	buffer = handle_to_buffer(handle);
	BUG_ON(req->op == SCHED_NOP);

	switch (req->op) {
	case SCHED_GROW:
		process_grow(req);
		break;
	case SCHED_ALLOCATE:
		process_delayed_allocate(req);
		break;
	default:
		pr_err("ocmem: Unknown operation encountered\n");
		break;
	}
}
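/*
 * debugfs "allocations" dump: walk the region rb-tree and print, per
 * priority level, the owner, address range, size and state of every
 * request currently known to the scheduler.
 */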
static int ocmem_allocations_show(struct seq_file *f, void *dummy)
{
	struct rb_node *rb_node = NULL;
	struct ocmem_req *req = NULL;
	unsigned int j;

	mutex_lock(&sched_mutex);
	for (rb_node = rb_first(&sched_tree); rb_node;
				rb_node = rb_next(rb_node)) {
		struct ocmem_region *tmp_region = NULL;

		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
		for (j = MAX_OCMEM_PRIO - 1; j > NO_PRIO; j--) {
			req = find_req_match(j, tmp_region);
			if (req) {
				seq_printf(f,
					"owner: %s 0x%lx -- 0x%lx size 0x%lx [state: %2lx]\n",
					get_name(req->owner),
					req->req_start, req->req_end,
					req->req_sz, req->state);
			}
		}
	}
	mutex_unlock(&sched_mutex);
	return 0;
}

static int ocmem_allocations_open(struct inode *inode, struct file *file)
{
	return single_open(file, ocmem_allocations_show, inode->i_private);
}

static const struct file_operations allocations_show_fops = {
	.open = ocmem_allocations_open,
	.read = seq_read,
	.llseek = seq_lseek,
	/* paired with single_open() above */
	.release = single_release,
};
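/*
 * Set up the scheduler state: the region rb-tree, the locks, the
 * per-priority pending queues, the RDM transfer and eviction workqueues,
 * and the debugfs node used to inspect live allocations.
 */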
int ocmem_sched_init(struct platform_device *pdev)
{
	int i = 0;
	struct ocmem_plat_data *pdata = NULL;
	struct device *dev = &pdev->dev;

	sched_tree = RB_ROOT;
	pdata = platform_get_drvdata(pdev);
	mutex_init(&allocation_mutex);
	mutex_init(&sched_mutex);
	mutex_init(&sched_queue_mutex);
	ocmem_vaddr = pdata->vbase;
	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++)
		INIT_LIST_HEAD(&sched_queue[i]);

	mutex_init(&rdm_mutex);
	INIT_LIST_HEAD(&rdm_queue);
	ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0);
	if (!ocmem_rdm_wq)
		return -ENOMEM;
	ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0);
	if (!ocmem_eviction_wq)
		return -ENOMEM;

	if (!debugfs_create_file("allocations", S_IRUGO, pdata->debug_node,
					NULL, &allocations_show_fops)) {
		dev_err(dev, "Unable to create debugfs node for scheduler\n");
		return -EBUSY;
	}
	return 0;
}