| /* Copyright (c) 2012, Code Aurora Forum. All rights reserved. |
| * |
| * This program is free software; you can redistribute it and/or modify |
| * it under the terms of the GNU General Public License version 2 and |
| * only version 2 as published by the Free Software Foundation. |
| * |
| * This program is distributed in the hope that it will be useful, |
| * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| * GNU General Public License for more details. |
| */ |
| |
| #include <linux/kernel.h> |
| #include <linux/module.h> |
| #include <linux/slab.h> |
#include <linux/mm.h>
#include <linux/err.h>
| #include <linux/rbtree.h> |
| #include <linux/idr.h> |
| #include <linux/genalloc.h> |
| #include <linux/of.h> |
| #include <linux/io.h> |
| #include <linux/platform_device.h> |
| #include <linux/debugfs.h> |
| #include <linux/seq_file.h> |
| #include <mach/ocmem_priv.h> |
| |
| enum request_states { |
| R_FREE = 0x0, /* request is not allocated */ |
| R_PENDING, /* request has a pending operation */ |
| R_ALLOCATED, /* request has been allocated */ |
| R_MUST_GROW, /* request must grow as a part of pending operation */ |
| R_MUST_SHRINK, /* request must shrink as a part of pending operation */ |
| R_MUST_MAP, /* request must be mapped before being used */ |
| R_MUST_UNMAP, /* request must be unmapped when not being used */ |
| R_MAPPED, /* request is mapped and actively used by client */ |
| R_UNMAPPED, /* request is not mapped, so it's not in active use */ |
| R_EVICTED, /* request is evicted and must be restored */ |
| }; |
| |
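/* Helpers to manipulate the request state bitmask held in ocmem_req->state */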
| #define SET_STATE(x, val) (set_bit((val), &(x)->state)) |
| #define CLEAR_STATE(x, val) (clear_bit((val), &(x)->state)) |
| #define TEST_STATE(x, val) (test_bit((val), &(x)->state)) |
| |
| enum op_res { |
| OP_COMPLETE = 0x0, |
| OP_RESCHED, |
| OP_PARTIAL, |
| OP_FAIL = ~0x0, |
| }; |
| |
| /* Represents various client priorities */ |
| /* Note: More than one client can share a priority level */ |
| enum client_prio { |
| MIN_PRIO = 0x0, |
| NO_PRIO = MIN_PRIO, |
| PRIO_SENSORS = 0x1, |
| PRIO_OTHER_OS = 0x1, |
| PRIO_LP_AUDIO = 0x1, |
| PRIO_HP_AUDIO = 0x2, |
| PRIO_VOICE = 0x3, |
| PRIO_GFX_GROWTH = 0x4, |
| PRIO_VIDEO = 0x5, |
| PRIO_GFX = 0x6, |
| PRIO_OCMEM = 0x7, |
| MAX_OCMEM_PRIO = PRIO_OCMEM + 1, |
| }; |
| |
| static struct list_head sched_queue[MAX_OCMEM_PRIO]; |
| static struct mutex sched_queue_mutex; |
| |
| /* The duration in msecs before a pending operation is scheduled |
| * This allows an idle window between use case boundaries where various |
| * hardware state changes can occur. The value will be tweaked on actual |
| * hardware. |
| */ |
| #define SCHED_DELAY 10 |
| |
| static struct list_head rdm_queue; |
| static struct mutex rdm_mutex; |
| static struct workqueue_struct *ocmem_rdm_wq; |
| static struct workqueue_struct *ocmem_eviction_wq; |
| |
| static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX]; |
| |
| struct ocmem_rdm_work { |
| int id; |
| struct ocmem_map_list *list; |
| struct ocmem_handle *handle; |
| int direction; |
| struct work_struct work; |
| }; |
| |
| /* OCMEM Operational modes */ |
| enum ocmem_client_modes { |
| OCMEM_PERFORMANCE = 1, |
| OCMEM_PASSIVE, |
| OCMEM_LOW_POWER, |
| OCMEM_MODE_MAX = OCMEM_LOW_POWER |
| }; |
| |
| /* OCMEM Addressing modes */ |
| enum ocmem_interconnects { |
| OCMEM_BLOCKED = 0, |
| OCMEM_PORT = 1, |
| OCMEM_OCMEMNOC = 2, |
| OCMEM_SYSNOC = 3, |
| }; |
| |
| /** |
| * Primary OCMEM Arbitration Table |
| **/ |
| struct ocmem_table { |
| int client_id; |
| int priority; |
| int mode; |
| int hw_interconnect; |
| } ocmem_client_table[OCMEM_CLIENT_MAX] = { |
| {OCMEM_GRAPHICS, PRIO_GFX, OCMEM_PERFORMANCE, OCMEM_PORT}, |
| {OCMEM_VIDEO, PRIO_VIDEO, OCMEM_PERFORMANCE, OCMEM_PORT}, |
| {OCMEM_CAMERA, NO_PRIO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC}, |
| {OCMEM_HP_AUDIO, PRIO_HP_AUDIO, OCMEM_PASSIVE, OCMEM_BLOCKED}, |
| {OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED}, |
| {OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, OCMEM_SYSNOC}, |
| {OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC}, |
| {OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC}, |
| }; |
| |
| static struct rb_root sched_tree; |
| static struct mutex sched_mutex; |
| |
| /* A region represents a continuous interval in OCMEM address space */ |
| struct ocmem_region { |
| /* Chain in Interval Tree */ |
| struct rb_node region_rb; |
| /* Hash map of requests */ |
| struct idr region_idr; |
| /* Chain in eviction list */ |
| struct list_head eviction_list; |
| unsigned long r_start; |
| unsigned long r_end; |
| unsigned long r_sz; |
| /* Highest priority of all requests served by this region */ |
| int max_prio; |
| }; |
| |
/* Is OCMEM tightly coupled to the client? */
| static inline int is_tcm(int id) |
| { |
| if (ocmem_client_table[id].hw_interconnect == OCMEM_PORT || |
| ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC) |
| return 1; |
| else |
| return 0; |
| } |
| |
| static inline int is_blocked(int id) |
| { |
| return ocmem_client_table[id].hw_interconnect == OCMEM_BLOCKED ? 1 : 0; |
| } |
| |
| inline struct ocmem_buf *handle_to_buffer(struct ocmem_handle *handle) |
| { |
| if (handle) |
| return &handle->buffer; |
| else |
| return NULL; |
| } |
| |
| inline struct ocmem_handle *buffer_to_handle(struct ocmem_buf *buffer) |
| { |
| if (buffer) |
| return container_of(buffer, struct ocmem_handle, buffer); |
| else |
| return NULL; |
| } |
| |
| inline struct ocmem_req *handle_to_req(struct ocmem_handle *handle) |
| { |
| if (handle) |
| return handle->req; |
| else |
| return NULL; |
| } |
| |
| inline struct ocmem_handle *req_to_handle(struct ocmem_req *req) |
| { |
| if (req && req->buffer) |
| return container_of(req->buffer, struct ocmem_handle, buffer); |
| else |
| return NULL; |
| } |
| |
| /* Simple wrappers which will have debug features added later */ |
| inline int ocmem_read(void *at) |
| { |
| return readl_relaxed(at); |
| } |
| |
| inline int ocmem_write(unsigned long val, void *at) |
| { |
| writel_relaxed(val, at); |
| return 0; |
| } |
| |
| inline int get_mode(int id) |
| { |
| if (!check_id(id)) |
| return MODE_NOT_SET; |
| else |
| return ocmem_client_table[id].mode == OCMEM_PERFORMANCE ? |
| WIDE_MODE : THIN_MODE; |
| } |
| |
| /* Returns the address that can be used by a device core to access OCMEM */ |
| static unsigned long device_address(int id, unsigned long addr) |
| { |
| int hw_interconnect = ocmem_client_table[id].hw_interconnect; |
| unsigned long ret_addr = 0x0; |
| |
| switch (hw_interconnect) { |
| case OCMEM_PORT: |
| ret_addr = phys_to_offset(addr); |
| break; |
| case OCMEM_OCMEMNOC: |
| case OCMEM_SYSNOC: |
| ret_addr = addr; |
| break; |
| case OCMEM_BLOCKED: |
| ret_addr = 0x0; |
| break; |
| } |
| return ret_addr; |
| } |
| |
| /* Returns the address as viewed by the core */ |
| static unsigned long core_address(int id, unsigned long addr) |
| { |
| int hw_interconnect = ocmem_client_table[id].hw_interconnect; |
| unsigned long ret_addr = 0x0; |
| |
| switch (hw_interconnect) { |
| case OCMEM_PORT: |
| ret_addr = offset_to_phys(addr); |
| break; |
| case OCMEM_OCMEMNOC: |
| case OCMEM_SYSNOC: |
| ret_addr = addr; |
| break; |
| case OCMEM_BLOCKED: |
| ret_addr = 0x0; |
| break; |
| } |
| return ret_addr; |
| } |
| |
| static inline struct ocmem_zone *zone_of(struct ocmem_req *req) |
| { |
| int owner; |
| if (!req) |
| return NULL; |
| owner = req->owner; |
| return get_zone(owner); |
| } |
| |
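/* Insert a region into the interval tree, keyed by its start address.
 * Must be called with sched_mutex held.
 */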
| static int insert_region(struct ocmem_region *region) |
| { |
| |
| struct rb_root *root = &sched_tree; |
| struct rb_node **p = &root->rb_node; |
| struct rb_node *parent = NULL; |
| struct ocmem_region *tmp = NULL; |
| unsigned long addr = region->r_start; |
| |
| while (*p) { |
| parent = *p; |
| tmp = rb_entry(parent, struct ocmem_region, region_rb); |
| |
| if (tmp->r_end > addr) { |
| if (tmp->r_start <= addr) |
| break; |
| p = &(*p)->rb_left; |
| } else if (tmp->r_end <= addr) |
| p = &(*p)->rb_right; |
| } |
| rb_link_node(®ion->region_rb, parent, p); |
| rb_insert_color(®ion->region_rb, root); |
| return 0; |
| } |
| |
| static int remove_region(struct ocmem_region *region) |
| { |
| struct rb_root *root = &sched_tree; |
| rb_erase(®ion->region_rb, root); |
| return 0; |
| } |
| |
| static struct ocmem_req *ocmem_create_req(void) |
| { |
| struct ocmem_req *p = NULL; |
| |
| p = kzalloc(sizeof(struct ocmem_req), GFP_KERNEL); |
| if (!p) |
| return NULL; |
| |
| INIT_LIST_HEAD(&p->zone_list); |
| INIT_LIST_HEAD(&p->sched_list); |
| init_rwsem(&p->rw_sem); |
| SET_STATE(p, R_FREE); |
| return p; |
| } |
| |
| static int ocmem_destroy_req(struct ocmem_req *req) |
| { |
| kfree(req); |
| return 0; |
| } |
| |
| static struct ocmem_region *create_region(void) |
| { |
| struct ocmem_region *p = NULL; |
| |
| p = kzalloc(sizeof(struct ocmem_region), GFP_KERNEL); |
| if (!p) |
| return NULL; |
| idr_init(&p->region_idr); |
| INIT_LIST_HEAD(&p->eviction_list); |
| p->r_start = p->r_end = p->r_sz = 0x0; |
| p->max_prio = NO_PRIO; |
| return p; |
| } |
| |
| static int destroy_region(struct ocmem_region *region) |
| { |
| kfree(region); |
| return 0; |
| } |
| |
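/* Add a request to the region's idr and record the assigned id in req_id */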
| static int attach_req(struct ocmem_region *region, struct ocmem_req *req) |
| { |
| int ret, id; |
| |
| while (1) { |
| if (idr_pre_get(®ion->region_idr, GFP_KERNEL) == 0) |
| return -ENOMEM; |
| |
| ret = idr_get_new_above(®ion->region_idr, req, 1, &id); |
| |
| if (ret != -EAGAIN) |
| break; |
| } |
| |
| if (!ret) { |
| req->req_id = id; |
| pr_debug("ocmem: request %p(id:%d) attached to region %p\n", |
| req, id, region); |
| return 0; |
| } |
| return -EINVAL; |
| } |
| |
| static int detach_req(struct ocmem_region *region, struct ocmem_req *req) |
| { |
| idr_remove(®ion->region_idr, req->req_id); |
| return 0; |
| } |
| |
| static int populate_region(struct ocmem_region *region, struct ocmem_req *req) |
| { |
| region->r_start = req->req_start; |
| region->r_end = req->req_end; |
| region->r_sz = req->req_end - req->req_start + 1; |
| return 0; |
| } |
| |
| static int region_req_count(int id, void *ptr, void *data) |
| { |
| int *count = data; |
| *count = *count + 1; |
| return 0; |
| } |
| |
| static int req_count(struct ocmem_region *region) |
| { |
| int count = 0; |
| idr_for_each(®ion->region_idr, region_req_count, &count); |
| return count; |
| } |
| |
| static int compute_max_prio(int id, void *ptr, void *data) |
| { |
| int *max = data; |
| struct ocmem_req *req = ptr; |
| |
| if (req->prio > *max) |
| *max = req->prio; |
| return 0; |
| } |
| |
| static int update_region_prio(struct ocmem_region *region) |
| { |
	int max_prio = NO_PRIO;

	if (req_count(region) != 0) {
		idr_for_each(&region->region_idr, compute_max_prio, &max_prio);
		region->max_prio = max_prio;
	} else {
		region->max_prio = NO_PRIO;
	}
	pr_debug("ocmem: Updating prio of region %p as %d\n",
			region, region->max_prio);
| |
| return 0; |
| } |
| |
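/* Return the region containing addr, or failing that the lowest region
 * that lies entirely above addr; NULL if neither exists.
 * Must be called with sched_mutex held.
 */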
| static struct ocmem_region *find_region(unsigned long addr) |
| { |
| struct ocmem_region *region = NULL; |
| struct rb_node *rb_node = NULL; |
| |
| rb_node = sched_tree.rb_node; |
| |
| while (rb_node) { |
| struct ocmem_region *tmp_region = NULL; |
| tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb); |
| |
| if (tmp_region->r_end > addr) { |
| region = tmp_region; |
| if (tmp_region->r_start <= addr) |
| break; |
| rb_node = rb_node->rb_left; |
| } else { |
| rb_node = rb_node->rb_right; |
| } |
| } |
| return region; |
| } |
| |
| static struct ocmem_region *find_region_intersection(unsigned long start, |
| unsigned long end) |
| { |
| |
| struct ocmem_region *region = NULL; |
| region = find_region(start); |
| if (region && end <= region->r_start) |
| region = NULL; |
| return region; |
| } |
| |
| static struct ocmem_region *find_region_match(unsigned long start, |
| unsigned long end) |
| { |
| |
| struct ocmem_region *region = NULL; |
| region = find_region(start); |
| if (region && start == region->r_start && end == region->r_end) |
| return region; |
| return NULL; |
| } |
| |
| static struct ocmem_req *find_req_match(int owner, struct ocmem_region *region) |
| { |
| struct ocmem_req *req = NULL; |
| |
| if (!region) |
| return NULL; |
| |
| req = idr_find(®ion->region_idr, owner); |
| |
| return req; |
| } |
| |
| /* Must be called with req->sem held */ |
| static inline int is_mapped(struct ocmem_req *req) |
| { |
| return TEST_STATE(req, R_MAPPED); |
| } |
| |
| /* Must be called with sched_mutex held */ |
| static int __sched_unmap(struct ocmem_req *req) |
| { |
| struct ocmem_req *matched_req = NULL; |
| struct ocmem_region *matched_region = NULL; |
| |
| matched_region = find_region_match(req->req_start, req->req_end); |
| matched_req = find_req_match(req->req_id, matched_region); |
| |
| if (!matched_region || !matched_req) { |
| pr_err("Could not find backing region for req"); |
| goto invalid_op_error; |
| } |
| |
| if (matched_req != req) { |
| pr_err("Request does not match backing req"); |
| goto invalid_op_error; |
| } |
| |
| if (!is_mapped(req)) { |
| pr_err("Request is not currently mapped"); |
| goto invalid_op_error; |
| } |
| |
| /* Update the request state */ |
| CLEAR_STATE(req, R_MAPPED); |
| SET_STATE(req, R_MUST_MAP); |
| |
| return OP_COMPLETE; |
| |
| invalid_op_error: |
| return OP_FAIL; |
| } |
| |
| /* Must be called with sched_mutex held */ |
| static int __sched_map(struct ocmem_req *req) |
| { |
| struct ocmem_req *matched_req = NULL; |
| struct ocmem_region *matched_region = NULL; |
| |
| matched_region = find_region_match(req->req_start, req->req_end); |
| matched_req = find_req_match(req->req_id, matched_region); |
| |
| if (!matched_region || !matched_req) { |
| pr_err("Could not find backing region for req"); |
| goto invalid_op_error; |
| } |
| |
| if (matched_req != req) { |
| pr_err("Request does not match backing req"); |
| goto invalid_op_error; |
| } |
| |
| /* Update the request state */ |
| CLEAR_STATE(req, R_MUST_MAP); |
| SET_STATE(req, R_MAPPED); |
| |
| return OP_COMPLETE; |
| |
| invalid_op_error: |
| return OP_FAIL; |
| } |
| |
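/* Update the mapping state of a request under req->rw_sem and sched_mutex */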
| static int do_map(struct ocmem_req *req) |
| { |
| int rc = 0; |
| |
| down_write(&req->rw_sem); |
| |
| mutex_lock(&sched_mutex); |
| rc = __sched_map(req); |
| mutex_unlock(&sched_mutex); |
| |
| up_write(&req->rw_sem); |
| |
| if (rc == OP_FAIL) |
| return -EINVAL; |
| |
| return 0; |
| } |
| |
| static int do_unmap(struct ocmem_req *req) |
| { |
| int rc = 0; |
| |
| down_write(&req->rw_sem); |
| |
| mutex_lock(&sched_mutex); |
| rc = __sched_unmap(req); |
| mutex_unlock(&sched_mutex); |
| |
| up_write(&req->rw_sem); |
| |
| if (rc == OP_FAIL) |
| return -EINVAL; |
| |
| return 0; |
| } |
| |
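/* Enable the OCMEM clocks, mark the request as mapped and lock down the
 * backing OCMEM region for the client.
 */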
| static int process_map(struct ocmem_req *req, unsigned long start, |
| unsigned long end) |
| { |
| int rc = 0; |
| |
| rc = ocmem_enable_core_clock(); |
| |
| if (rc < 0) |
| goto core_clock_fail; |
| |
| rc = ocmem_enable_iface_clock(); |
| |
| if (rc < 0) |
| goto iface_clock_fail; |
| |
| rc = ocmem_enable_br_clock(); |
| |
| if (rc < 0) |
| goto br_clock_fail; |
| |
| rc = do_map(req); |
| |
| if (rc < 0) { |
| pr_err("ocmem: Failed to map request %p for %d\n", |
| req, req->owner); |
| goto process_map_fail; |
| |
| } |
| |
| if (ocmem_lock(req->owner, phys_to_offset(req->req_start), req->req_sz, |
| get_mode(req->owner))) { |
| pr_err("ocmem: Failed to secure request %p for %d\n", req, |
| req->owner); |
| rc = -EINVAL; |
| goto lock_failed; |
| } |
| |
| return 0; |
| lock_failed: |
| do_unmap(req); |
| process_map_fail: |
| ocmem_disable_br_clock(); |
| br_clock_fail: |
| ocmem_disable_iface_clock(); |
| iface_clock_fail: |
| ocmem_disable_core_clock(); |
| core_clock_fail: |
| pr_err("ocmem: Failed to map ocmem request\n"); |
| return rc; |
| } |
| |
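/* Unlock the backing OCMEM region, mark the request as unmapped and
 * release the OCMEM clocks.
 */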
| static int process_unmap(struct ocmem_req *req, unsigned long start, |
| unsigned long end) |
| { |
| int rc = 0; |
| |
| if (ocmem_unlock(req->owner, phys_to_offset(req->req_start), |
| req->req_sz)) { |
| pr_err("ocmem: Failed to un-secure request %p for %d\n", req, |
| req->owner); |
| rc = -EINVAL; |
| goto unlock_failed; |
| } |
| |
| rc = do_unmap(req); |
| |
| if (rc < 0) |
| goto process_unmap_fail; |
| |
| ocmem_disable_br_clock(); |
| ocmem_disable_iface_clock(); |
| ocmem_disable_core_clock(); |
| return 0; |
| |
| unlock_failed: |
| process_unmap_fail: |
| pr_err("ocmem: Failed to unmap ocmem request\n"); |
| return rc; |
| } |
| |
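/* Must be called with sched_mutex held */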
| static int __sched_grow(struct ocmem_req *req, bool can_block) |
| { |
| unsigned long min = req->req_min; |
| unsigned long max = req->req_max; |
| unsigned long step = req->req_step; |
| int owner = req->owner; |
| unsigned long curr_sz = 0; |
| unsigned long growth_sz = 0; |
| unsigned long curr_start = 0; |
| enum client_prio prio = req->prio; |
| unsigned long alloc_addr = 0x0; |
| bool retry; |
| struct ocmem_region *spanned_r = NULL; |
| struct ocmem_region *overlap_r = NULL; |
| |
| struct ocmem_req *matched_req = NULL; |
| struct ocmem_region *matched_region = NULL; |
| |
| struct ocmem_zone *zone = get_zone(owner); |
| struct ocmem_region *region = NULL; |
| |
| matched_region = find_region_match(req->req_start, req->req_end); |
| matched_req = find_req_match(req->req_id, matched_region); |
| |
| if (!matched_region || !matched_req) { |
| pr_err("Could not find backing region for req"); |
| goto invalid_op_error; |
| } |
| |
| if (matched_req != req) { |
| pr_err("Request does not match backing req"); |
| goto invalid_op_error; |
| } |
| |
| curr_sz = matched_req->req_sz; |
| curr_start = matched_req->req_start; |
| growth_sz = matched_req->req_max - matched_req->req_sz; |
| |
| pr_debug("Attempting to grow req %p from %lx to %lx\n", |
| req, matched_req->req_sz, matched_req->req_max); |
| |
| retry = false; |
| |
| pr_debug("ocmem: GROW: growth size %lx\n", growth_sz); |
| |
| retry_next_step: |
| |
| spanned_r = NULL; |
| overlap_r = NULL; |
| |
| spanned_r = find_region(zone->z_head); |
| overlap_r = find_region_intersection(zone->z_head, |
| zone->z_head + growth_sz); |
| |
| if (overlap_r == NULL) { |
| /* no conflicting regions, schedule this region */ |
| zone->z_ops->free(zone, curr_start, curr_sz); |
| alloc_addr = zone->z_ops->allocate(zone, curr_sz + growth_sz); |
| |
		/* The zone allocator reports failure with a negative errno
		 * cast to an unsigned long
		 */
		if (IS_ERR_VALUE(alloc_addr)) {
			pr_err("ocmem: zone allocation operation failed\n");
			goto internal_error;
		}
| |
| curr_sz += growth_sz; |
| /* Detach the region from the interval tree */ |
| /* This is to guarantee that any change in size |
| * causes the tree to be rebalanced if required */ |
| |
| detach_req(matched_region, req); |
| if (req_count(matched_region) == 0) { |
| remove_region(matched_region); |
| region = matched_region; |
| } else { |
| region = create_region(); |
| if (!region) { |
| pr_err("ocmem: Unable to create region\n"); |
| goto region_error; |
| } |
| } |
| |
| /* update the request */ |
| req->req_start = alloc_addr; |
| /* increment the size to reflect new length */ |
| req->req_sz = curr_sz; |
| req->req_end = alloc_addr + req->req_sz - 1; |
| |
| /* update request state */ |
| CLEAR_STATE(req, R_MUST_GROW); |
| SET_STATE(req, R_ALLOCATED); |
| SET_STATE(req, R_MUST_MAP); |
| req->op = SCHED_MAP; |
| |
| /* update the region with new req */ |
| attach_req(region, req); |
| populate_region(region, req); |
| update_region_prio(region); |
| |
| /* update the tree with new region */ |
| if (insert_region(region)) { |
| pr_err("ocmem: Failed to insert the region\n"); |
| goto region_error; |
| } |
| |
| if (retry) { |
| SET_STATE(req, R_MUST_GROW); |
| SET_STATE(req, R_PENDING); |
| req->op = SCHED_GROW; |
| return OP_PARTIAL; |
| } |
| } else if (spanned_r != NULL && overlap_r != NULL) { |
| /* resolve conflicting regions based on priority */ |
| if (overlap_r->max_prio < prio) { |
| /* Growth cannot be triggered unless a previous |
| * client of lower priority was evicted */ |
| pr_err("ocmem: Invalid growth scheduled\n"); |
| /* This is serious enough to fail */ |
| BUG(); |
| return OP_FAIL; |
| } else if (overlap_r->max_prio > prio) { |
| if (min == max) { |
| /* Cannot grow at this time, try later */ |
| SET_STATE(req, R_PENDING); |
| SET_STATE(req, R_MUST_GROW); |
| return OP_RESCHED; |
| } else { |
| /* Try to grow in steps */ |
| growth_sz -= step; |
| /* We are OOM at this point so need to retry */ |
| if (growth_sz <= curr_sz) { |
| SET_STATE(req, R_PENDING); |
| SET_STATE(req, R_MUST_GROW); |
| return OP_RESCHED; |
| } |
| retry = true; |
| pr_debug("ocmem: Attempting with reduced size %lx\n", |
| growth_sz); |
| goto retry_next_step; |
| } |
| } else { |
| pr_err("ocmem: grow: New Region %p Existing %p\n", |
| matched_region, overlap_r); |
| pr_err("ocmem: Undetermined behavior\n"); |
| /* This is serious enough to fail */ |
| BUG(); |
| } |
| } else if (spanned_r == NULL && overlap_r != NULL) { |
| goto err_not_supported; |
| } |
| |
| return OP_COMPLETE; |
| |
| err_not_supported: |
| pr_err("ocmem: Scheduled unsupported operation\n"); |
| return OP_FAIL; |
| region_error: |
| zone->z_ops->free(zone, alloc_addr, curr_sz); |
| detach_req(region, req); |
| update_region_prio(region); |
| /* req is going to be destroyed by the caller anyways */ |
| internal_error: |
| destroy_region(region); |
| invalid_op_error: |
| return OP_FAIL; |
| } |
| |
| /* Must be called with sched_mutex held */ |
| static int __sched_free(struct ocmem_req *req) |
| { |
| int owner = req->owner; |
| int ret = 0; |
| |
| struct ocmem_req *matched_req = NULL; |
| struct ocmem_region *matched_region = NULL; |
| |
| struct ocmem_zone *zone = get_zone(owner); |
| |
| BUG_ON(!zone); |
| |
| matched_region = find_region_match(req->req_start, req->req_end); |
| matched_req = find_req_match(req->req_id, matched_region); |
| |
| if (!matched_region || !matched_req) |
| goto invalid_op_error; |
| if (matched_req != req) |
| goto invalid_op_error; |
| |
| ret = zone->z_ops->free(zone, |
| matched_req->req_start, matched_req->req_sz); |
| |
| if (ret < 0) |
| goto err_op_fail; |
| |
| detach_req(matched_region, matched_req); |
| update_region_prio(matched_region); |
| if (req_count(matched_region) == 0) { |
| remove_region(matched_region); |
| destroy_region(matched_region); |
| } |
| |
| /* Update the request */ |
| req->req_start = 0x0; |
| req->req_sz = 0x0; |
| req->req_end = 0x0; |
| SET_STATE(req, R_FREE); |
| return OP_COMPLETE; |
| invalid_op_error: |
| pr_err("ocmem: free: Failed to find matching region\n"); |
| err_op_fail: |
| pr_err("ocmem: free: Failed\n"); |
| return OP_FAIL; |
| } |
| |
| /* Must be called with sched_mutex held */ |
| static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz) |
| { |
| int owner = req->owner; |
| int ret = 0; |
| |
| struct ocmem_req *matched_req = NULL; |
| struct ocmem_region *matched_region = NULL; |
| struct ocmem_region *region = NULL; |
| unsigned long alloc_addr = 0x0; |
| |
| struct ocmem_zone *zone = get_zone(owner); |
| |
| BUG_ON(!zone); |
| |
| /* The shrink should not be called for zero size */ |
| BUG_ON(new_sz == 0); |
| |
| matched_region = find_region_match(req->req_start, req->req_end); |
| matched_req = find_req_match(req->req_id, matched_region); |
| |
| if (!matched_region || !matched_req) |
| goto invalid_op_error; |
| if (matched_req != req) |
| goto invalid_op_error; |
| |
| ret = zone->z_ops->free(zone, |
| matched_req->req_start, matched_req->req_sz); |
| |
| if (ret < 0) { |
| pr_err("Zone Allocation operation failed\n"); |
| goto internal_error; |
| } |
| |
| alloc_addr = zone->z_ops->allocate(zone, new_sz); |
| |
	if (IS_ERR_VALUE(alloc_addr)) {
		pr_err("Zone Allocation operation failed\n");
		goto internal_error;
	}
| |
| /* Detach the region from the interval tree */ |
| /* This is to guarantee that the change in size |
| * causes the tree to be rebalanced if required */ |
| |
| detach_req(matched_region, req); |
| if (req_count(matched_region) == 0) { |
| remove_region(matched_region); |
| region = matched_region; |
| } else { |
| region = create_region(); |
| if (!region) { |
| pr_err("ocmem: Unable to create region\n"); |
| goto internal_error; |
| } |
| } |
	/* update the request */
	req->req_start = alloc_addr;
	req->req_sz = new_sz;
	req->req_end = alloc_addr + req->req_sz - 1;
| |
| /* update request state */ |
| SET_STATE(req, R_MUST_GROW); |
| SET_STATE(req, R_MUST_MAP); |
| req->op = SCHED_MAP; |
| |
| /* attach the request to the region */ |
| attach_req(region, req); |
| populate_region(region, req); |
| update_region_prio(region); |
| |
| /* update the tree with new region */ |
| if (insert_region(region)) { |
| pr_err("ocmem: Failed to insert the region\n"); |
| zone->z_ops->free(zone, alloc_addr, new_sz); |
| detach_req(region, req); |
| update_region_prio(region); |
| /* req will be destroyed by the caller */ |
| goto region_error; |
| } |
| return OP_COMPLETE; |
| |
| region_error: |
| destroy_region(region); |
| internal_error: |
| pr_err("ocmem: shrink: Failed\n"); |
| return OP_FAIL; |
| invalid_op_error: |
| pr_err("ocmem: shrink: Failed to find matching region\n"); |
| return OP_FAIL; |
| } |
| |
| /* Must be called with sched_mutex held */ |
| static int __sched_allocate(struct ocmem_req *req, bool can_block, |
| bool can_wait) |
| { |
| unsigned long min = req->req_min; |
| unsigned long max = req->req_max; |
| unsigned long step = req->req_step; |
| int owner = req->owner; |
| unsigned long sz = max; |
| enum client_prio prio = req->prio; |
| unsigned long alloc_addr = 0x0; |
| bool retry; |
| |
| struct ocmem_region *spanned_r = NULL; |
| struct ocmem_region *overlap_r = NULL; |
| |
| struct ocmem_zone *zone = get_zone(owner); |
| struct ocmem_region *region = NULL; |
| |
| BUG_ON(!zone); |
| |
| if (min > (zone->z_end - zone->z_start)) { |
| pr_err("ocmem: requested minimum size exceeds quota\n"); |
| goto invalid_op_error; |
| } |
| |
| if (max > (zone->z_end - zone->z_start)) { |
| pr_err("ocmem: requested maximum size exceeds quota\n"); |
| goto invalid_op_error; |
| } |
| |
| if (min > zone->z_free) { |
| pr_err("ocmem: out of memory for zone %d\n", owner); |
| goto invalid_op_error; |
| } |
| |
| region = create_region(); |
| |
| if (!region) { |
| pr_err("ocmem: Unable to create region\n"); |
| goto invalid_op_error; |
| } |
| |
| retry = false; |
| |
| pr_debug("ocmem: ALLOCATE: request size %lx\n", sz); |
| |
| retry_next_step: |
| |
| spanned_r = NULL; |
| overlap_r = NULL; |
| |
| spanned_r = find_region(zone->z_head); |
| overlap_r = find_region_intersection(zone->z_head, zone->z_head + sz); |
| |
| if (overlap_r == NULL) { |
| /* no conflicting regions, schedule this region */ |
| alloc_addr = zone->z_ops->allocate(zone, sz); |
| |
		if (IS_ERR_VALUE(alloc_addr)) {
			pr_err("Zone Allocation operation failed\n");
			goto internal_error;
		}
| |
| /* update the request */ |
| req->req_start = alloc_addr; |
| req->req_end = alloc_addr + sz - 1; |
| req->req_sz = sz; |
| req->zone = zone; |
| |
| /* update request state */ |
| CLEAR_STATE(req, R_FREE); |
| SET_STATE(req, R_ALLOCATED); |
| SET_STATE(req, R_MUST_MAP); |
| req->op = SCHED_NOP; |
| |
| /* attach the request to the region */ |
| attach_req(region, req); |
| populate_region(region, req); |
| update_region_prio(region); |
| |
| /* update the tree with new region */ |
| if (insert_region(region)) { |
| pr_err("ocmem: Failed to insert the region\n"); |
| zone->z_ops->free(zone, alloc_addr, sz); |
| detach_req(region, req); |
| update_region_prio(region); |
| /* req will be destroyed by the caller */ |
| goto internal_error; |
| } |
| |
| if (retry) { |
| SET_STATE(req, R_MUST_GROW); |
| SET_STATE(req, R_PENDING); |
| req->op = SCHED_GROW; |
| return OP_PARTIAL; |
| } |
| } else if (spanned_r != NULL && overlap_r != NULL) { |
| /* resolve conflicting regions based on priority */ |
| if (overlap_r->max_prio < prio) { |
| if (min == max) { |
| pr_err("ocmem: Requires eviction support\n"); |
| goto err_not_supported; |
| } else { |
				/* Try to allocate at least 'min' immediately */
| sz -= step; |
| if (sz < min) |
| goto err_out_of_mem; |
| retry = true; |
| pr_debug("ocmem: Attempting with reduced size %lx\n", |
| sz); |
| goto retry_next_step; |
| } |
| } else if (overlap_r->max_prio > prio) { |
| if (can_block == true) { |
| SET_STATE(req, R_PENDING); |
| SET_STATE(req, R_MUST_GROW); |
| return OP_RESCHED; |
| } else { |
| if (min == max) { |
| pr_err("Cannot allocate %lx synchronously\n", |
| sz); |
| goto err_out_of_mem; |
| } else { |
| sz -= step; |
| if (sz < min) |
| goto err_out_of_mem; |
| retry = true; |
| pr_debug("ocmem: Attempting reduced size %lx\n", |
| sz); |
| goto retry_next_step; |
| } |
| } |
| } else { |
| pr_err("ocmem: Undetermined behavior\n"); |
| pr_err("ocmem: New Region %p Existing %p\n", region, |
| overlap_r); |
| /* This is serious enough to fail */ |
| BUG(); |
| } |
| } else if (spanned_r == NULL && overlap_r != NULL) |
| goto err_not_supported; |
| |
| return OP_COMPLETE; |
| |
| err_not_supported: |
| pr_err("ocmem: Scheduled unsupported operation\n"); |
| return OP_FAIL; |
| |
| err_out_of_mem: |
| pr_err("ocmem: Out of memory during allocation\n"); |
| internal_error: |
| destroy_region(region); |
| invalid_op_error: |
| return OP_FAIL; |
| } |
| |
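/* Queue a request on its owner's pending list for delayed scheduling */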
| static int sched_enqueue(struct ocmem_req *priv) |
| { |
| struct ocmem_req *next = NULL; |
| mutex_lock(&sched_queue_mutex); |
| list_add_tail(&priv->sched_list, &sched_queue[priv->owner]); |
| pr_debug("enqueued req %p\n", priv); |
| list_for_each_entry(next, &sched_queue[priv->owner], sched_list) { |
| pr_debug("pending requests for client %p\n", next); |
| } |
| mutex_unlock(&sched_queue_mutex); |
| return 0; |
| } |
| |
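/* Dequeue the next pending request, scanning the per-client queues in order */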
| static struct ocmem_req *ocmem_fetch_req(void) |
| { |
	int i;
	struct ocmem_req *req = NULL;

	mutex_lock(&sched_queue_mutex);
	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) {
		if (list_empty(&sched_queue[i]))
			continue;
		/* Dequeue a single pending request per invocation */
		req = list_first_entry(&sched_queue[i],
					struct ocmem_req, sched_list);
		pr_debug("ocmem: Fetched pending request %p\n", req);
		list_del(&req->sched_list);
		break;
	}
	mutex_unlock(&sched_queue_mutex);
	return req;
| } |
| |
| |
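/* Return the OCMEM quota (zone size) available to a client, or 0 if the
 * client is blocked or has no backing zone.
 */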
| unsigned long process_quota(int id) |
| { |
| struct ocmem_zone *zone = NULL; |
| |
| if (is_blocked(id)) |
| return 0; |
| |
| zone = get_zone(id); |
| |
| if (zone && zone->z_pool) |
| return zone->z_end - zone->z_start; |
| else |
| return 0; |
| } |
| |
| static int do_grow(struct ocmem_req *req) |
| { |
| struct ocmem_buf *buffer = NULL; |
| bool can_block = true; |
| int rc = 0; |
| |
| down_write(&req->rw_sem); |
| buffer = req->buffer; |
| |
| /* Take the scheduler mutex */ |
| mutex_lock(&sched_mutex); |
| rc = __sched_grow(req, can_block); |
| mutex_unlock(&sched_mutex); |
| |
| if (rc == OP_FAIL) |
| goto err_op_fail; |
| |
| if (rc == OP_RESCHED) { |
| pr_debug("ocmem: Enqueue this allocation"); |
| sched_enqueue(req); |
| } |
| |
| else if (rc == OP_COMPLETE || rc == OP_PARTIAL) { |
| buffer->addr = device_address(req->owner, req->req_start); |
| buffer->len = req->req_sz; |
| } |
| |
| up_write(&req->rw_sem); |
| return 0; |
| err_op_fail: |
| up_write(&req->rw_sem); |
| return -EINVAL; |
| } |
| |
| static int process_grow(struct ocmem_req *req) |
| { |
| int rc = 0; |
| unsigned long offset = 0; |
| |
| /* Attempt to grow the region */ |
| rc = do_grow(req); |
| |
| if (rc < 0) |
| return -EINVAL; |
| |
| /* Map the newly grown region */ |
| if (is_tcm(req->owner)) { |
| rc = process_map(req, req->req_start, req->req_end); |
| if (rc < 0) |
| return -EINVAL; |
| } |
| |
| offset = phys_to_offset(req->req_start); |
| |
| rc = ocmem_memory_on(req->owner, offset, req->req_sz); |
| |
| if (rc < 0) { |
| pr_err("Failed to switch ON memory macros\n"); |
| goto power_ctl_error; |
| } |
| |
| /* Notify the client about the buffer growth */ |
| rc = dispatch_notification(req->owner, OCMEM_ALLOC_GROW, req->buffer); |
| if (rc < 0) { |
| pr_err("No notifier callback to cater for req %p event: %d\n", |
| req, OCMEM_ALLOC_GROW); |
| BUG(); |
| } |
| return 0; |
| power_ctl_error: |
| return -EINVAL; |
| } |
| |
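/* Shrink an allocated request to shrink_size and update the client buffer */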
| static int do_shrink(struct ocmem_req *req, unsigned long shrink_size) |
| { |
| |
| int rc = 0; |
| struct ocmem_buf *buffer = NULL; |
| |
| down_write(&req->rw_sem); |
| buffer = req->buffer; |
| |
| /* Take the scheduler mutex */ |
| mutex_lock(&sched_mutex); |
| rc = __sched_shrink(req, shrink_size); |
| mutex_unlock(&sched_mutex); |
| |
| if (rc == OP_FAIL) |
| goto err_op_fail; |
| |
| else if (rc == OP_COMPLETE) { |
| buffer->addr = device_address(req->owner, req->req_start); |
| buffer->len = req->req_sz; |
| } |
| |
| up_write(&req->rw_sem); |
| return 0; |
| err_op_fail: |
| up_write(&req->rw_sem); |
| return -EINVAL; |
| } |
| |
| static void ocmem_sched_wk_func(struct work_struct *work); |
| DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func); |
| |
| static int ocmem_schedule_pending(void) |
| { |
| schedule_delayed_work(&ocmem_sched_thread, |
| msecs_to_jiffies(SCHED_DELAY)); |
| return 0; |
| } |
| |
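/* Release the OCMEM backing a request; the request must not be mapped */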
| static int do_free(struct ocmem_req *req) |
| { |
| int rc = 0; |
| struct ocmem_buf *buffer = req->buffer; |
| |
| down_write(&req->rw_sem); |
| |
| if (is_mapped(req)) { |
| pr_err("ocmem: Buffer needs to be unmapped before free\n"); |
| goto err_free_fail; |
| } |
| |
| /* Grab the sched mutex */ |
| mutex_lock(&sched_mutex); |
| rc = __sched_free(req); |
| mutex_unlock(&sched_mutex); |
| |
| switch (rc) { |
| |
| case OP_COMPLETE: |
| buffer->addr = 0x0; |
| buffer->len = 0x0; |
| break; |
| case OP_FAIL: |
| default: |
		goto err_free_fail;
| } |
| |
| up_write(&req->rw_sem); |
| return 0; |
| err_free_fail: |
| up_write(&req->rw_sem); |
| pr_err("ocmem: freeing req %p failed\n", req); |
| return -EINVAL; |
| } |
| |
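/* Free a client allocation: unmap it (for TCM clients), power down the
 * memory macros and release the backing region.
 */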
| int process_free(int id, struct ocmem_handle *handle) |
| { |
| struct ocmem_req *req = NULL; |
| struct ocmem_buf *buffer = NULL; |
| unsigned long offset = 0; |
| int rc = 0; |
| |
| if (is_blocked(id)) { |
| pr_err("Client %d cannot request free\n", id); |
| return -EINVAL; |
| } |
| |
| req = handle_to_req(handle); |
| buffer = handle_to_buffer(handle); |
| |
| if (!req) |
| return -EINVAL; |
| |
| if (req->req_start != core_address(id, buffer->addr)) { |
| pr_err("Invalid buffer handle passed for free\n"); |
| return -EINVAL; |
| } |
| |
| if (is_tcm(req->owner)) { |
| rc = process_unmap(req, req->req_start, req->req_end); |
| if (rc < 0) |
| return -EINVAL; |
| } |
| |
| if (req->req_sz != 0) { |
| |
| offset = phys_to_offset(req->req_start); |
| |
| rc = ocmem_memory_off(req->owner, offset, req->req_sz); |
| |
| if (rc < 0) { |
| pr_err("Failed to switch OFF memory macros\n"); |
| return -EINVAL; |
| } |
| |
| } |
| |
| rc = do_free(req); |
| if (rc < 0) |
| return -EINVAL; |
| |
| inc_ocmem_stat(zone_of(req), NR_FREES); |
| |
| ocmem_destroy_req(req); |
| handle->req = NULL; |
| |
| ocmem_schedule_pending(); |
| return 0; |
| } |
| |
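/* Work function that performs the queued RDM transfer and notifies the
 * client of the result.
 */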
| static void ocmem_rdm_worker(struct work_struct *work) |
| { |
| int offset = 0; |
| int rc = 0; |
| int event; |
| struct ocmem_rdm_work *work_data = container_of(work, |
| struct ocmem_rdm_work, work); |
| int id = work_data->id; |
| struct ocmem_map_list *list = work_data->list; |
| int direction = work_data->direction; |
| struct ocmem_handle *handle = work_data->handle; |
| struct ocmem_req *req = handle_to_req(handle); |
| struct ocmem_buf *buffer = handle_to_buffer(handle); |
| |
| down_write(&req->rw_sem); |
| offset = phys_to_offset(req->req_start); |
| rc = ocmem_rdm_transfer(id, list, offset, direction); |
| if (work_data->direction == TO_OCMEM) |
| event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL; |
| else |
| event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL; |
| up_write(&req->rw_sem); |
| kfree(work_data); |
| dispatch_notification(id, event, buffer); |
| } |
| |
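/* Package an RDM transfer and hand it off to the RDM workqueue */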
| int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle, |
| struct ocmem_map_list *list, int direction) |
| { |
| struct ocmem_rdm_work *work_data = NULL; |
| |
| down_write(&req->rw_sem); |
| |
| work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC); |
| if (!work_data) |
| BUG(); |
| |
| work_data->handle = handle; |
| work_data->list = list; |
| work_data->id = req->owner; |
| work_data->direction = direction; |
| INIT_WORK(&work_data->work, ocmem_rdm_worker); |
| up_write(&req->rw_sem); |
| queue_work(ocmem_rdm_wq, &work_data->work); |
| return 0; |
| } |
| |
| int process_xfer_out(int id, struct ocmem_handle *handle, |
| struct ocmem_map_list *list) |
| { |
| struct ocmem_req *req = NULL; |
| int rc = 0; |
| |
| req = handle_to_req(handle); |
| |
| if (!req) |
| return -EINVAL; |
| |
| if (!is_mapped(req)) { |
| pr_err("Buffer is not already mapped\n"); |
| goto transfer_out_error; |
| } |
| |
| rc = process_unmap(req, req->req_start, req->req_end); |
| if (rc < 0) { |
| pr_err("Unmapping the buffer failed\n"); |
| goto transfer_out_error; |
| } |
| |
| inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_DDR); |
| |
| rc = queue_transfer(req, handle, list, TO_DDR); |
| |
| if (rc < 0) { |
| pr_err("Failed to queue rdm transfer to DDR\n"); |
| inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS); |
| goto transfer_out_error; |
| } |
| |
| return 0; |
| |
| transfer_out_error: |
| return -EINVAL; |
| } |
| |
| int process_xfer_in(int id, struct ocmem_handle *handle, |
| struct ocmem_map_list *list) |
| { |
| struct ocmem_req *req = NULL; |
| int rc = 0; |
| |
| req = handle_to_req(handle); |
| |
| if (!req) |
| return -EINVAL; |
| |
| if (is_mapped(req)) { |
| pr_err("Buffer is already mapped\n"); |
| goto transfer_in_error; |
| } |
| |
| rc = process_map(req, req->req_start, req->req_end); |
| if (rc < 0) { |
| pr_err("Mapping the buffer failed\n"); |
| goto transfer_in_error; |
| } |
| |
| inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_OCMEM); |
| |
| rc = queue_transfer(req, handle, list, TO_OCMEM); |
| |
| if (rc < 0) { |
| pr_err("Failed to queue rdm transfer to OCMEM\n"); |
| inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS); |
| goto transfer_in_error; |
| } |
| |
| return 0; |
| transfer_in_error: |
| return -EINVAL; |
| } |
| |
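/* Shrink a client allocation (to zero or to 'size') in response to an
 * eviction request.
 */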
| int process_shrink(int id, struct ocmem_handle *handle, unsigned long size) |
| { |
| struct ocmem_req *req = NULL; |
| struct ocmem_buf *buffer = NULL; |
| struct ocmem_eviction_data *edata = NULL; |
| int rc = 0; |
| |
| if (is_blocked(id)) { |
| pr_err("Client %d cannot request free\n", id); |
| return -EINVAL; |
| } |
| |
| req = handle_to_req(handle); |
| buffer = handle_to_buffer(handle); |
| |
| if (!req) |
| return -EINVAL; |
| |
| if (req->req_start != core_address(id, buffer->addr)) { |
| pr_err("Invalid buffer handle passed for shrink\n"); |
| return -EINVAL; |
| } |
| |
| edata = req->edata; |
| |
| if (is_tcm(req->owner)) |
| do_unmap(req); |
| |
| inc_ocmem_stat(zone_of(req), NR_SHRINKS); |
| |
| if (size == 0) { |
| pr_info("req %p being shrunk to zero\n", req); |
| rc = do_free(req); |
| if (rc < 0) |
| return -EINVAL; |
| } else { |
| rc = do_shrink(req, size); |
| if (rc < 0) |
| return -EINVAL; |
| } |
| |
| edata->pending--; |
| if (edata->pending == 0) { |
| pr_debug("All regions evicted"); |
| complete(&edata->completion); |
| } |
| |
| return 0; |
| } |
| |
| int process_xfer(int id, struct ocmem_handle *handle, |
| struct ocmem_map_list *list, int direction) |
| { |
| int rc = 0; |
| |
| if (is_tcm(id)) { |
| WARN(1, "Mapping operation is invalid for client\n"); |
| return -EINVAL; |
| } |
| |
| if (direction == TO_DDR) |
| rc = process_xfer_out(id, handle, list); |
| else if (direction == TO_OCMEM) |
| rc = process_xfer_in(id, handle, list); |
| return rc; |
| } |
| |
| int ocmem_eviction_thread(struct work_struct *work) |
| { |
| return 0; |
| } |
| |
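/* Evict lower-priority regions that block client 'id' and wait for their
 * owners to shrink them.
 */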
| int process_evict(int id) |
| { |
| struct ocmem_eviction_data *edata = NULL; |
| int prio = ocmem_client_table[id].priority; |
| struct rb_node *rb_node = NULL; |
| struct ocmem_req *req = NULL; |
| struct ocmem_buf buffer; |
| int j = 0; |
| |
	edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC);

	if (!edata)
		return -ENOMEM;

	INIT_LIST_HEAD(&edata->victim_list);
| INIT_LIST_HEAD(&edata->req_list); |
| edata->prio = prio; |
| edata->pending = 0; |
| edata->passive = 1; |
| evictions[id] = edata; |
| |
| mutex_lock(&sched_mutex); |
| |
| for (rb_node = rb_first(&sched_tree); rb_node; |
| rb_node = rb_next(rb_node)) { |
| struct ocmem_region *tmp_region = NULL; |
| tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb); |
| if (tmp_region->max_prio < prio) { |
| for (j = id - 1; j > NO_PRIO; j--) { |
| req = find_req_match(j, tmp_region); |
| if (req) { |
| pr_info("adding %p to eviction list\n", |
| tmp_region); |
| list_add_tail( |
| &tmp_region->eviction_list, |
| &edata->victim_list); |
| list_add_tail( |
| &req->eviction_list, |
| &edata->req_list); |
| edata->pending++; |
| req->edata = edata; |
| buffer.addr = req->req_start; |
| buffer.len = 0x0; |
| inc_ocmem_stat(zone_of(req), |
| NR_EVICTIONS); |
| dispatch_notification(req->owner, |
| OCMEM_ALLOC_SHRINK, &buffer); |
| } |
| } |
| } else { |
| pr_info("skipping %p from eviction\n", tmp_region); |
| } |
| } |
| mutex_unlock(&sched_mutex); |
| pr_debug("Waiting for all regions to be shrunk\n"); |
| if (edata->pending > 0) { |
| init_completion(&edata->completion); |
| wait_for_completion(&edata->completion); |
| } |
| return 0; |
| } |
| |
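/* Allocate OCMEM for a request and fill in the client buffer; partial or
 * rescheduled allocations are queued for the delayed scheduler.
 */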
| static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait) |
| { |
| int rc = 0; |
| struct ocmem_buf *buffer = req->buffer; |
| |
| down_write(&req->rw_sem); |
| |
| /* Take the scheduler mutex */ |
| mutex_lock(&sched_mutex); |
| rc = __sched_allocate(req, can_block, can_wait); |
| mutex_unlock(&sched_mutex); |
| |
| if (rc == OP_FAIL) { |
| inc_ocmem_stat(zone_of(req), NR_ALLOCATION_FAILS); |
| goto err_allocate_fail; |
| } |
| |
| if (rc == OP_RESCHED) { |
| buffer->addr = 0x0; |
| buffer->len = 0x0; |
| pr_debug("ocmem: Enqueuing req %p\n", req); |
| sched_enqueue(req); |
| } else if (rc == OP_PARTIAL) { |
| buffer->addr = device_address(req->owner, req->req_start); |
| buffer->len = req->req_sz; |
| inc_ocmem_stat(zone_of(req), NR_RANGE_ALLOCATIONS); |
| pr_debug("ocmem: Enqueuing req %p\n", req); |
| sched_enqueue(req); |
| } else if (rc == OP_COMPLETE) { |
| buffer->addr = device_address(req->owner, req->req_start); |
| buffer->len = req->req_sz; |
| } |
| |
| up_write(&req->rw_sem); |
| return 0; |
| err_allocate_fail: |
| up_write(&req->rw_sem); |
| return -EINVAL; |
| } |
| |
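/* Re-queue all requests evicted on behalf of client 'id' for allocation */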
| int process_restore(int id) |
| { |
| struct ocmem_req *req = NULL; |
| struct ocmem_req *next = NULL; |
| struct ocmem_eviction_data *edata = evictions[id]; |
| |
| if (!edata) |
| return 0; |
| |
| list_for_each_entry_safe(req, next, &edata->req_list, eviction_list) |
| { |
| if (req) { |
| pr_debug("ocmem: Fetched evicted request %p\n", |
| req); |
			list_del(&req->eviction_list);
| req->op = SCHED_ALLOCATE; |
| sched_enqueue(req); |
| inc_ocmem_stat(zone_of(req), NR_RESTORES); |
| } |
| } |
| kfree(edata); |
| evictions[id] = NULL; |
| pr_debug("Restore all evicted regions\n"); |
| ocmem_schedule_pending(); |
| return 0; |
| } |
| |
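/* Handle a synchronous allocation request from a client */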
| int process_allocate(int id, struct ocmem_handle *handle, |
| unsigned long min, unsigned long max, |
| unsigned long step, bool can_block, bool can_wait) |
| { |
| |
| struct ocmem_req *req = NULL; |
| struct ocmem_buf *buffer = NULL; |
| int rc = 0; |
| unsigned long offset = 0; |
| |
| /* sanity checks */ |
| if (is_blocked(id)) { |
| pr_err("Client %d cannot request allocation\n", id); |
| return -EINVAL; |
| } |
| |
| if (handle->req != NULL) { |
| pr_err("Invalid handle passed in\n"); |
| return -EINVAL; |
| } |
| |
| buffer = handle_to_buffer(handle); |
| BUG_ON(buffer == NULL); |
| |
| /* prepare a request structure to represent this transaction */ |
| req = ocmem_create_req(); |
| if (!req) |
| return -ENOMEM; |
| |
| req->owner = id; |
| req->req_min = min; |
| req->req_max = max; |
| req->req_step = step; |
| req->prio = ocmem_client_table[id].priority; |
| req->op = SCHED_ALLOCATE; |
| req->buffer = buffer; |
| |
| inc_ocmem_stat(zone_of(req), NR_REQUESTS); |
| |
| rc = do_allocate(req, can_block, can_wait); |
| |
| if (rc < 0) |
| goto do_allocate_error; |
| |
| inc_ocmem_stat(zone_of(req), NR_SYNC_ALLOCATIONS); |
| |
| handle->req = req; |
| |
| if (is_tcm(id)) { |
| rc = process_map(req, req->req_start, req->req_end); |
| if (rc < 0) |
| goto map_error; |
| } |
| |
| if (req->req_sz != 0) { |
| |
| offset = phys_to_offset(req->req_start); |
| |
| rc = ocmem_memory_on(req->owner, offset, req->req_sz); |
| |
| if (rc < 0) { |
| pr_err("Failed to switch ON memory macros\n"); |
| goto power_ctl_error; |
| } |
| } |
| |
| return 0; |
| |
| power_ctl_error: |
| map_error: |
| handle->req = NULL; |
| do_free(req); |
| do_allocate_error: |
| ocmem_destroy_req(req); |
| return -EINVAL; |
| } |
| |
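/* Handle an allocation that was queued for delayed (asynchronous) scheduling */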
| int process_delayed_allocate(struct ocmem_req *req) |
| { |
| |
| struct ocmem_handle *handle = NULL; |
| int rc = 0; |
| int id = req->owner; |
| unsigned long offset = 0; |
| |
| handle = req_to_handle(req); |
| BUG_ON(handle == NULL); |
| |
| rc = do_allocate(req, true, false); |
| |
| if (rc < 0) |
| goto do_allocate_error; |
| |
| inc_ocmem_stat(zone_of(req), NR_ASYNC_ALLOCATIONS); |
| |
| if (is_tcm(id)) { |
| rc = process_map(req, req->req_start, req->req_end); |
| if (rc < 0) |
| goto map_error; |
| } |
| |
| if (req->req_sz != 0) { |
| |
| offset = phys_to_offset(req->req_start); |
| |
| rc = ocmem_memory_on(req->owner, offset, req->req_sz); |
| |
| if (rc < 0) { |
| pr_err("Failed to switch ON memory macros\n"); |
| goto power_ctl_error; |
| } |
| } |
| |
| /* Notify the client about the buffer growth */ |
| rc = dispatch_notification(id, OCMEM_ALLOC_GROW, req->buffer); |
| if (rc < 0) { |
| pr_err("No notifier callback to cater for req %p event: %d\n", |
| req, OCMEM_ALLOC_GROW); |
| BUG(); |
| } |
| return 0; |
| |
| power_ctl_error: |
| map_error: |
| handle->req = NULL; |
| do_free(req); |
| do_allocate_error: |
| ocmem_destroy_req(req); |
| return -EINVAL; |
| } |
| |
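/* Delayed work handler: processes a single pending request per invocation */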
| static void ocmem_sched_wk_func(struct work_struct *work) |
| { |
| |
| struct ocmem_buf *buffer = NULL; |
| struct ocmem_handle *handle = NULL; |
| struct ocmem_req *req = ocmem_fetch_req(); |
| |
| if (!req) { |
| pr_debug("No Pending Requests found\n"); |
| return; |
| } |
| |
| pr_debug("ocmem: sched_wk pending req %p\n", req); |
| handle = req_to_handle(req); |
| buffer = handle_to_buffer(handle); |
| BUG_ON(req->op == SCHED_NOP); |
| |
| switch (req->op) { |
| case SCHED_GROW: |
| process_grow(req); |
| break; |
| case SCHED_ALLOCATE: |
| process_delayed_allocate(req); |
| break; |
| default: |
| pr_err("ocmem: Unknown operation encountered\n"); |
| break; |
| } |
| return; |
| } |
| |
| static int ocmem_allocations_show(struct seq_file *f, void *dummy) |
| { |
| struct rb_node *rb_node = NULL; |
| struct ocmem_req *req = NULL; |
| unsigned j; |
| mutex_lock(&sched_mutex); |
| for (rb_node = rb_first(&sched_tree); rb_node; |
| rb_node = rb_next(rb_node)) { |
| struct ocmem_region *tmp_region = NULL; |
| tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb); |
| for (j = MAX_OCMEM_PRIO - 1; j > NO_PRIO; j--) { |
| req = find_req_match(j, tmp_region); |
| if (req) { |
| seq_printf(f, |
| "owner: %s 0x%lx -- 0x%lx size 0x%lx [state: %2lx]\n", |
| get_name(req->owner), |
| req->req_start, req->req_end, |
| req->req_sz, req->state); |
| } |
| } |
| } |
| mutex_unlock(&sched_mutex); |
| return 0; |
| } |
| |
| static int ocmem_allocations_open(struct inode *inode, struct file *file) |
| { |
| return single_open(file, ocmem_allocations_show, inode->i_private); |
| } |
| |
| static const struct file_operations allocations_show_fops = { |
| .open = ocmem_allocations_open, |
| .read = seq_read, |
| .llseek = seq_lseek, |
| .release = seq_release, |
| }; |
| |
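/* Initialize scheduler state, workqueues and the debugfs node */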
| int ocmem_sched_init(struct platform_device *pdev) |
| { |
| int i = 0; |
| struct ocmem_plat_data *pdata = NULL; |
| struct device *dev = &pdev->dev; |
| |
| sched_tree = RB_ROOT; |
| pdata = platform_get_drvdata(pdev); |
| mutex_init(&sched_mutex); |
| mutex_init(&sched_queue_mutex); |
| for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) |
| INIT_LIST_HEAD(&sched_queue[i]); |
| |
| mutex_init(&rdm_mutex); |
| INIT_LIST_HEAD(&rdm_queue); |
| ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0); |
| if (!ocmem_rdm_wq) |
| return -ENOMEM; |
| ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0); |
| if (!ocmem_eviction_wq) |
| return -ENOMEM; |
| |
| if (!debugfs_create_file("allocations", S_IRUGO, pdata->debug_node, |
| NULL, &allocations_show_fops)) { |
| dev_err(dev, "Unable to create debugfs node for scheduler\n"); |
| return -EBUSY; |
| } |
| return 0; |
| } |