/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "core_ctl: " fmt

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>

#include <trace/events/sched.h>

#define MAX_CPUS_PER_CLUSTER 6
#define MAX_CLUSTERS 2

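/*
 * Per-cluster tunables and state. One instance exists per CPU cluster;
 * the tunables are exposed through the sysfs attributes defined below,
 * and eval_need()/do_core_ctl() use them to decide how many CPUs of the
 * cluster to keep active (unisolated).
 */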
struct cluster_data {
	bool inited;
	unsigned int min_cpus;
	unsigned int max_cpus;
	unsigned int offline_delay_ms;
	unsigned int busy_up_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int active_cpus;
	unsigned int num_cpus;
	unsigned int nr_isolated_cpus;
	cpumask_t cpu_mask;
	unsigned int need_cpus;
	unsigned int task_thres;
	s64 need_ts;
	struct list_head lru;
	bool pending;
	spinlock_t pending_lock;
	bool is_big_cluster;
	int nrrun;
	struct task_struct *core_ctl_thread;
	unsigned int first_cpu;
	unsigned int boost;
	struct kobject kobj;
};

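/*
 * Per-CPU state. Each CPU sits on its cluster's LRU list via @sib;
 * @isolated_by_us marks CPUs that this driver isolated (as opposed to
 * other entities), so only those are ever unisolated here.
 */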
struct cpu_data {
	bool is_busy;
	unsigned int busy;
	unsigned int cpu;
	bool not_preferred;
	struct cluster_data *cluster;
	struct list_head sib;
	bool isolated_by_us;
};

static DEFINE_PER_CPU(struct cpu_data, cpu_state);
static struct cluster_data cluster_state[MAX_CLUSTERS];
static unsigned int num_clusters;

#define for_each_cluster(cluster, idx) \
	for ((cluster) = &cluster_state[idx]; (idx) < num_clusters;\
	     (idx)++, (cluster) = &cluster_state[idx])

static DEFINE_SPINLOCK(state_lock);
static void apply_need(struct cluster_data *state);
static void wake_up_core_ctl_thread(struct cluster_data *state);
static bool initialized;

static unsigned int get_active_cpu_count(const struct cluster_data *cluster);

/* ========================= sysfs interface =========================== */

static ssize_t store_min_cpus(struct cluster_data *state,
			      const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->min_cpus = min(val, state->max_cpus);
	wake_up_core_ctl_thread(state);

	return count;
}

static ssize_t show_min_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->min_cpus);
}

static ssize_t store_max_cpus(struct cluster_data *state,
			      const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	val = min(val, state->num_cpus);
	state->max_cpus = val;
	state->min_cpus = min(state->min_cpus, state->max_cpus);
	wake_up_core_ctl_thread(state);

	return count;
}

static ssize_t show_max_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->max_cpus);
}

static ssize_t store_offline_delay_ms(struct cluster_data *state,
				      const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->offline_delay_ms = val;
	apply_need(state);

	return count;
}

static ssize_t show_task_thres(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->task_thres);
}

static ssize_t store_task_thres(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	if (val < state->num_cpus)
		return -EINVAL;

	state->task_thres = val;
	apply_need(state);

	return count;
}

static ssize_t show_offline_delay_ms(const struct cluster_data *state,
				     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->offline_delay_ms);
}

static ssize_t store_busy_up_thres(struct cluster_data *state,
				   const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u %u %u\n",
		     &val[0], &val[1], &val[2], &val[3],
		     &val[4], &val[5]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}

static ssize_t show_busy_up_thres(const struct cluster_data *state, char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_up_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_busy_down_thres(struct cluster_data *state,
				     const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u %u %u\n",
		     &val[0], &val[1], &val[2], &val[3],
		     &val[4], &val[5]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}

static ssize_t show_busy_down_thres(const struct cluster_data *state, char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_down_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_is_big_cluster(struct cluster_data *state,
				    const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->is_big_cluster = val ? 1 : 0;
	return count;
}

static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
}

static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
}

static ssize_t show_active_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->active_cpus);
}

static ssize_t show_global_state(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	struct cluster_data *cluster;
	ssize_t count = 0;
	unsigned int cpu;

	spin_lock_irq(&state_lock);
	for_each_possible_cpu(cpu) {
		c = &per_cpu(cpu_state, cpu);
		cluster = c->cluster;
		if (!cluster || !cluster->inited)
			continue;

		count += snprintf(buf + count, PAGE_SIZE - count,
				  "CPU%u\n", cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tCPU: %u\n", c->cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tOnline: %u\n",
				  cpu_online(c->cpu));
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tIsolated: %u\n",
				  cpu_isolated(c->cpu));
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tFirst CPU: %u\n",
				  cluster->first_cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tBusy%%: %u\n", c->busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tIs busy: %u\n", c->is_busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tNot preferred: %u\n",
				  c->not_preferred);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tNr running: %u\n", cluster->nrrun);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tActive CPUs: %u\n", get_active_cpu_count(cluster));
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tNeed CPUs: %u\n", cluster->need_cpus);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tNr isolated CPUs: %u\n",
				  cluster->nr_isolated_cpus);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tBoost: %u\n", (unsigned int) cluster->boost);
	}
	spin_unlock_irq(&state_lock);

	return count;
}

static ssize_t store_not_preferred(struct cluster_data *state,
				   const char *buf, size_t count)
{
	struct cpu_data *c;
	unsigned int i;
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	unsigned long flags;
	int ret;

	ret = sscanf(buf, "%u %u %u %u %u %u\n",
		     &val[0], &val[1], &val[2], &val[3],
		     &val[4], &val[5]);
	if (ret != state->num_cpus)
		return -EINVAL;

	spin_lock_irqsave(&state_lock, flags);
	for (i = 0; i < state->num_cpus; i++) {
		c = &per_cpu(cpu_state, i + state->first_cpu);
		c->not_preferred = val[i];
	}
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}

static ssize_t show_not_preferred(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	ssize_t count = 0;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&state_lock, flags);
	for (i = 0; i < state->num_cpus; i++) {
		c = &per_cpu(cpu_state, i + state->first_cpu);
		count += scnprintf(buf + count, PAGE_SIZE - count,
				   "CPU#%d: %u\n", c->cpu, c->not_preferred);
	}
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}

struct core_ctl_attr {
	struct attribute attr;
	ssize_t (*show)(const struct cluster_data *, char *);
	ssize_t (*store)(struct cluster_data *, const char *, size_t count);
};

#define core_ctl_attr_ro(_name)		\
static struct core_ctl_attr _name =	\
__ATTR(_name, 0444, show_##_name, NULL)

#define core_ctl_attr_rw(_name)			\
static struct core_ctl_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)

core_ctl_attr_rw(min_cpus);
core_ctl_attr_rw(max_cpus);
core_ctl_attr_rw(offline_delay_ms);
core_ctl_attr_rw(busy_up_thres);
core_ctl_attr_rw(busy_down_thres);
core_ctl_attr_rw(task_thres);
core_ctl_attr_rw(is_big_cluster);
core_ctl_attr_ro(need_cpus);
core_ctl_attr_ro(active_cpus);
core_ctl_attr_ro(global_state);
core_ctl_attr_rw(not_preferred);

static struct attribute *default_attrs[] = {
	&min_cpus.attr,
	&max_cpus.attr,
	&offline_delay_ms.attr,
	&busy_up_thres.attr,
	&busy_down_thres.attr,
	&task_thres.attr,
	&is_big_cluster.attr,
	&need_cpus.attr,
	&active_cpus.attr,
	&global_state.attr,
	&not_preferred.attr,
	NULL
};

#define to_cluster_data(k) container_of(k, struct cluster_data, kobj)
#define to_attr(a) container_of(a, struct core_ctl_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->show)
		ret = cattr->show(data, buf);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->store)
		ret = cattr->store(data, buf, count);

	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_core_ctl = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};
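
/*
 * The attributes above appear as per-cluster sysfs files. Illustrative
 * usage, assuming the kobject is registered as "core_ctl" under the
 * cluster's first CPU device (the registration itself is not shown in
 * this excerpt):
 *
 *   echo 2 > /sys/devices/system/cpu/cpu0/core_ctl/min_cpus
 *   cat /sys/devices/system/cpu/cpu0/core_ctl/global_state
 */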

/* ==================== runqueue based core count =================== */

#define NR_RUNNING_TOLERANCE 5

static void update_running_avg(void)
{
	int avg, iowait_avg, big_avg;
	struct cluster_data *cluster;
	unsigned int index = 0;

	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);

	/*
	 * Round up to the next integer if the average number of running
	 * tasks is within NR_RUNNING_TOLERANCE/100 of the next integer.
	 * With plain round-up, a transient task could trigger an online
	 * event; by the time the core is onlined, the task has finished.
	 * Rounding to the closest integer suffers the same problem,
	 * because the scheduler might only provide running stats per
	 * jiffy and a transient task could skew the number for one
	 * jiffy. If core control samples every 2 jiffies, it will
	 * observe 0.5 additional running average, which rounds up to
	 * 1 task.
	 */
	avg = (avg + NR_RUNNING_TOLERANCE) / 100;
	big_avg = (big_avg + NR_RUNNING_TOLERANCE) / 100;
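	/*
	 * Worked example (values assumed): the averages are scaled by
	 * 100, as the division above implies, so avg = 196 (~1.96
	 * running tasks) is within the 0.05 tolerance and rounds up:
	 * (196 + 5) / 100 = 2. avg = 150 (1.5 tasks) does not:
	 * (150 + 5) / 100 = 1.
	 */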

	for_each_cluster(cluster, index) {
		if (!cluster->inited)
			continue;
		/*
		 * The big cluster only needs to take care of big tasks,
		 * but if there are not enough big cores, big tasks need
		 * to run on the little cluster as well. Thus the little
		 * cluster's runqueue stat has to use the overall
		 * runqueue average, or derive which big tasks would have
		 * to run on little. The latter approach is not easy to
		 * get right, given that core control reacts much more
		 * slowly than the scheduler and can't predict the
		 * scheduler's behavior.
		 */
		cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
	}
}

/* adjust needed CPUs based on current runqueue information */
static unsigned int apply_task_need(const struct cluster_data *cluster,
				    unsigned int new_need)
{
	/* unisolate all cores if there are enough tasks */
	if (cluster->nrrun >= cluster->task_thres)
		return cluster->num_cpus;

	/* only unisolate more cores if there are tasks to run */
	if (cluster->nrrun > new_need)
		return new_need + 1;

	return new_need;
}

/* ======================= load based core count ====================== */

static unsigned int apply_limits(const struct cluster_data *cluster,
				 unsigned int need_cpus)
{
	return min(max(cluster->min_cpus, need_cpus), cluster->max_cpus);
}

static unsigned int get_active_cpu_count(const struct cluster_data *cluster)
{
	return cluster->num_cpus -
		sched_isolate_count(&cluster->cpu_mask, true);
}

static bool is_active(const struct cpu_data *state)
{
	return cpu_online(state->cpu) && !cpu_isolated(state->cpu);
}

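/*
 * An adjustment is possible when we need fewer CPUs than are currently
 * active (some can be isolated), or when we need more and this driver
 * has isolated CPUs that it can bring back.
 */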
static bool adjustment_possible(const struct cluster_data *cluster,
				unsigned int need)
{
	return (need < cluster->active_cpus || (need > cluster->active_cpus &&
						cluster->nr_isolated_cpus));
}

static bool eval_need(struct cluster_data *cluster)
{
	unsigned long flags;
	struct cpu_data *c;
	unsigned int need_cpus = 0, last_need, thres_idx;
	int ret = 0;
	bool need_flag = false;
	unsigned int new_need;
	s64 now, elapsed;

	if (unlikely(!cluster->inited))
		return 0;

	spin_lock_irqsave(&state_lock, flags);

	if (cluster->boost) {
		need_cpus = cluster->max_cpus;
	} else {
		cluster->active_cpus = get_active_cpu_count(cluster);
		thres_idx = cluster->active_cpus ? cluster->active_cpus - 1 : 0;
		list_for_each_entry(c, &cluster->lru, sib) {
			if (c->busy >= cluster->busy_up_thres[thres_idx])
				c->is_busy = true;
			else if (c->busy < cluster->busy_down_thres[thres_idx])
				c->is_busy = false;
			need_cpus += c->is_busy;
		}
		need_cpus = apply_task_need(cluster, need_cpus);
	}
	new_need = apply_limits(cluster, need_cpus);
	need_flag = adjustment_possible(cluster, new_need);

	last_need = cluster->need_cpus;
	now = ktime_to_ms(ktime_get());

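	/*
	 * A need above the current active count is honored immediately;
	 * any other change must persist for offline_delay_ms before it
	 * is applied, so a momentary dip in load does not isolate CPUs
	 * that will be needed again right away.
	 */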
	if (new_need > cluster->active_cpus) {
		ret = 1;
	} else {
		if (new_need == last_need) {
			cluster->need_ts = now;
			spin_unlock_irqrestore(&state_lock, flags);
			return 0;
		}

		elapsed = now - cluster->need_ts;
		ret = elapsed >= cluster->offline_delay_ms;
	}

	if (ret) {
		cluster->need_ts = now;
		cluster->need_cpus = new_need;
	}
	trace_core_ctl_eval_need(cluster->first_cpu, last_need, new_need,
				 ret && need_flag);
	spin_unlock_irqrestore(&state_lock, flags);

	return ret && need_flag;
}

static void apply_need(struct cluster_data *cluster)
{
	if (eval_need(cluster))
		wake_up_core_ctl_thread(cluster);
}

static void core_ctl_set_busy(struct cpu_data *c, unsigned int busy)
{
	unsigned int old_is_busy = c->is_busy;

	if (c->busy == busy)
		return;

	c->busy = busy;
	trace_core_ctl_set_busy(c->cpu, busy, old_is_busy, c->is_busy);
}

/* ========================= core count enforcement ==================== */

static void wake_up_core_ctl_thread(struct cluster_data *cluster)
{
	unsigned long flags;

	spin_lock_irqsave(&cluster->pending_lock, flags);
	cluster->pending = true;
	spin_unlock_irqrestore(&cluster->pending_lock, flags);

	wake_up_process_no_notif(cluster->core_ctl_thread);
}

static u64 core_ctl_check_timestamp;

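/*
 * Boost is reference-counted on the big cluster: each boost=true call
 * increments the count, and while it is nonzero eval_need() pins
 * need_cpus at max_cpus. Callers must pair every boost=true with a
 * boost=false.
 */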
int core_ctl_set_boost(bool boost)
{
	unsigned int index = 0;
	struct cluster_data *cluster;
	unsigned long flags;
	int ret = 0;
	bool boost_state_changed = false;

	if (unlikely(!initialized))
		return 0;

	spin_lock_irqsave(&state_lock, flags);
	for_each_cluster(cluster, index) {
		if (cluster->is_big_cluster) {
			if (boost) {
				boost_state_changed = !cluster->boost;
				++cluster->boost;
			} else {
				if (!cluster->boost) {
					pr_err("Error turning off boost. Boost already turned off\n");
					ret = -EINVAL;
				} else {
					--cluster->boost;
					boost_state_changed = !cluster->boost;
				}
			}
			break;
		}
	}
	spin_unlock_irqrestore(&state_lock, flags);

	if (boost_state_changed)
		apply_need(cluster);

	trace_core_ctl_set_boost(cluster->boost, ret);

	return ret;
}
EXPORT_SYMBOL(core_ctl_set_boost);

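/*
 * Entry point from the scheduler, invoked with the current window start
 * timestamp; the timestamp comparison below makes the body run at most
 * once per scheduler window even when called from multiple CPUs.
 */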
void core_ctl_check(u64 window_start)
{
	int cpu;
	unsigned int busy;
	struct cpu_data *c;
	struct cluster_data *cluster;
	unsigned int index = 0;

	if (unlikely(!initialized))
		return;

	if (window_start == core_ctl_check_timestamp)
		return;

	core_ctl_check_timestamp = window_start;

	for_each_possible_cpu(cpu) {
		c = &per_cpu(cpu_state, cpu);
		cluster = c->cluster;

		if (!cluster || !cluster->inited)
			continue;

		busy = sched_get_cpu_util(cpu);
		core_ctl_set_busy(c, busy);
	}

	update_running_avg();

	for_each_cluster(cluster, index) {
		if (eval_need(cluster))
			wake_up_core_ctl_thread(cluster);
	}
}

static void move_cpu_lru(struct cpu_data *cpu_data)
{
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	list_del(&cpu_data->sib);
	list_add_tail(&cpu_data->sib, &cpu_data->cluster->lru);
	spin_unlock_irqrestore(&state_lock, flags);
}

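/*
 * Isolate CPUs from the head of the cluster's LRU until only @need
 * remain active. Two passes: the first skips busy CPUs; the second
 * force-isolates even busy CPUs if more than max_cpus are still active
 * (e.g. after max_cpus was lowered via sysfs).
 */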
static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
{
	struct cpu_data *c, *tmp;
	unsigned long flags;
	unsigned int num_cpus = cluster->num_cpus;
	unsigned int nr_isolated = 0;

	/*
	 * Protect against an entry being removed (and re-added at the
	 * tail) by another thread (hotplug).
	 */
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus == need)
			break;
		/* Don't isolate busy CPUs. */
		if (c->is_busy)
			continue;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
			nr_isolated++;
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	cluster->nr_isolated_cpus += nr_isolated;
	spin_unlock_irqrestore(&state_lock, flags);

	/*
	 * If the number of active CPUs is within the limits, then
	 * don't force isolation of any busy CPUs.
	 */
	if (cluster->active_cpus <= cluster->max_cpus)
		return;

	nr_isolated = 0;
	num_cpus = cluster->num_cpus;
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus <= cluster->max_cpus)
			break;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
			nr_isolated++;
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	cluster->nr_isolated_cpus += nr_isolated;
	spin_unlock_irqrestore(&state_lock, flags);
}

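/*
 * Bring isolated CPUs back until @need are active. Only CPUs isolated
 * by this driver are eligible; not_preferred CPUs are skipped unless
 * @force is set.
 */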
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 771 | static void __try_to_unisolate(struct cluster_data *cluster, |
| 772 | unsigned int need, bool force) |
| 773 | { |
| 774 | struct cpu_data *c, *tmp; |
Olav Haugan | 8597073 | 2016-11-08 13:45:01 -0800 | [diff] [blame] | 775 | unsigned long flags; |
| 776 | unsigned int num_cpus = cluster->num_cpus; |
Pavankumar Kondeti | 32b2dd0 | 2017-04-13 16:58:57 +0530 | [diff] [blame] | 777 | unsigned int nr_unisolated = 0; |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 778 | |
Olav Haugan | 8597073 | 2016-11-08 13:45:01 -0800 | [diff] [blame] | 779 | /* |
| 780 | * Protect against entry being removed (and added at tail) by other |
| 781 | * thread (hotplug). |
| 782 | */ |
| 783 | spin_lock_irqsave(&state_lock, flags); |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 784 | list_for_each_entry_safe(c, tmp, &cluster->lru, sib) { |
Olav Haugan | 8597073 | 2016-11-08 13:45:01 -0800 | [diff] [blame] | 785 | if (!num_cpus--) |
| 786 | break; |
| 787 | |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 788 | if (!c->isolated_by_us) |
| 789 | continue; |
Pavankumar Kondeti | 85dfbfa | 2017-03-17 15:29:13 +0530 | [diff] [blame] | 790 | if ((cpu_online(c->cpu) && !cpu_isolated(c->cpu)) || |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 791 | (!force && c->not_preferred)) |
| 792 | continue; |
| 793 | if (cluster->active_cpus == need) |
| 794 | break; |
| 795 | |
Olav Haugan | 8597073 | 2016-11-08 13:45:01 -0800 | [diff] [blame] | 796 | spin_unlock_irqrestore(&state_lock, flags); |
| 797 | |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 798 | pr_debug("Trying to unisolate CPU%u\n", c->cpu); |
| 799 | if (!sched_unisolate_cpu(c->cpu)) { |
| 800 | c->isolated_by_us = false; |
| 801 | move_cpu_lru(c); |
Pavankumar Kondeti | 32b2dd0 | 2017-04-13 16:58:57 +0530 | [diff] [blame] | 802 | nr_unisolated++; |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 803 | } else { |
| 804 | pr_debug("Unable to unisolate CPU%u\n", c->cpu); |
| 805 | } |
| 806 | cluster->active_cpus = get_active_cpu_count(cluster); |
Olav Haugan | 8597073 | 2016-11-08 13:45:01 -0800 | [diff] [blame] | 807 | spin_lock_irqsave(&state_lock, flags); |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 808 | } |
Pavankumar Kondeti | 32b2dd0 | 2017-04-13 16:58:57 +0530 | [diff] [blame] | 809 | cluster->nr_isolated_cpus -= nr_unisolated; |
Olav Haugan | 8597073 | 2016-11-08 13:45:01 -0800 | [diff] [blame] | 810 | spin_unlock_irqrestore(&state_lock, flags); |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 811 | } |
| 812 | |
| 813 | static void try_to_unisolate(struct cluster_data *cluster, unsigned int need) |
| 814 | { |
| 815 | bool force_use_non_preferred = false; |
| 816 | |
| 817 | __try_to_unisolate(cluster, need, force_use_non_preferred); |
| 818 | |
| 819 | if (cluster->active_cpus == need) |
| 820 | return; |
| 821 | |
| 822 | force_use_non_preferred = true; |
| 823 | __try_to_unisolate(cluster, need, force_use_non_preferred); |
| 824 | } |
| 825 | |
| 826 | static void __ref do_core_ctl(struct cluster_data *cluster) |
| 827 | { |
| 828 | unsigned int need; |
| 829 | |
| 830 | need = apply_limits(cluster, cluster->need_cpus); |
| 831 | |
| 832 | if (adjustment_possible(cluster, need)) { |
| 833 | pr_debug("Trying to adjust group %u from %u to %u\n", |
| 834 | cluster->first_cpu, cluster->active_cpus, need); |
| 835 | |
| 836 | if (cluster->active_cpus > need) |
| 837 | try_to_isolate(cluster, need); |
| 838 | else if (cluster->active_cpus < need) |
| 839 | try_to_unisolate(cluster, need); |
| 840 | } |
| 841 | } |
| 842 | |
| 843 | static int __ref try_core_ctl(void *data) |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 844 | { |
Olav Haugan | e3a3db9 | 2016-06-27 11:35:43 -0700 | [diff] [blame] | 845 | struct cluster_data *cluster = data; |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 846 | unsigned long flags; |
| 847 | |
| 848 | while (1) { |
| 849 | set_current_state(TASK_INTERRUPTIBLE); |
Olav Haugan | e3a3db9 | 2016-06-27 11:35:43 -0700 | [diff] [blame] | 850 | spin_lock_irqsave(&cluster->pending_lock, flags); |
| 851 | if (!cluster->pending) { |
| 852 | spin_unlock_irqrestore(&cluster->pending_lock, flags); |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 853 | schedule(); |
| 854 | if (kthread_should_stop()) |
| 855 | break; |
Olav Haugan | e3a3db9 | 2016-06-27 11:35:43 -0700 | [diff] [blame] | 856 | spin_lock_irqsave(&cluster->pending_lock, flags); |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 857 | } |
| 858 | set_current_state(TASK_RUNNING); |
Olav Haugan | e3a3db9 | 2016-06-27 11:35:43 -0700 | [diff] [blame] | 859 | cluster->pending = false; |
| 860 | spin_unlock_irqrestore(&cluster->pending_lock, flags); |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 861 | |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 862 | do_core_ctl(cluster); |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 863 | } |
| 864 | |
| 865 | return 0; |
| 866 | } |
| 867 | |
| 868 | static int __ref cpu_callback(struct notifier_block *nfb, |
| 869 | unsigned long action, void *hcpu) |
| 870 | { |
| 871 | uint32_t cpu = (uintptr_t)hcpu; |
| 872 | struct cpu_data *state = &per_cpu(cpu_state, cpu); |
Olav Haugan | e3a3db9 | 2016-06-27 11:35:43 -0700 | [diff] [blame] | 873 | struct cluster_data *cluster = state->cluster; |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 874 | unsigned int need; |
Pavankumar Kondeti | 32b2dd0 | 2017-04-13 16:58:57 +0530 | [diff] [blame] | 875 | bool do_wakeup, unisolated = false; |
| 876 | unsigned long flags; |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 877 | |
Olav Haugan | e3a3db9 | 2016-06-27 11:35:43 -0700 | [diff] [blame] | 878 | if (unlikely(!cluster || !cluster->inited)) |
Pavankumar Kondeti | 046f223 | 2017-04-13 18:58:56 +0530 | [diff] [blame] | 879 | return NOTIFY_DONE; |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 880 | |
Olav Haugan | 1e8a44c | 2016-11-17 18:31:33 -0800 | [diff] [blame] | 881 | switch (action & ~CPU_TASKS_FROZEN) { |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 882 | case CPU_ONLINE: |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 883 | cluster->active_cpus = get_active_cpu_count(cluster); |
| 884 | |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 885 | /* |
| 886 | * Moving to the end of the list should only happen in |
| 887 | * CPU_ONLINE and not on CPU_UP_PREPARE to prevent an |
| 888 | * infinite list traversal when thermal (or other entities) |
| 889 | * reject trying to online CPUs. |
| 890 | */ |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 891 | move_cpu_lru(state); |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 892 | break; |
| 893 | |
| 894 | case CPU_DEAD: |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 895 | /* |
| 896 | * We don't want to have a CPU both offline and isolated. |
| 897 | * So unisolate a CPU that went down if it was isolated by us. |
| 898 | */ |
| 899 | if (state->isolated_by_us) { |
| 900 | sched_unisolate_cpu_unlocked(cpu); |
| 901 | state->isolated_by_us = false; |
Pavankumar Kondeti | 32b2dd0 | 2017-04-13 16:58:57 +0530 | [diff] [blame] | 902 | unisolated = true; |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 903 | } |
| 904 | |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 905 | /* Move a CPU to the end of the LRU when it goes offline. */ |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 906 | move_cpu_lru(state); |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 907 | |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 908 | state->busy = 0; |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 909 | cluster->active_cpus = get_active_cpu_count(cluster); |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 910 | break; |
Pavankumar Kondeti | 046f223 | 2017-04-13 18:58:56 +0530 | [diff] [blame] | 911 | default: |
| 912 | return NOTIFY_DONE; |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 913 | } |
| 914 | |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 915 | need = apply_limits(cluster, cluster->need_cpus); |
Pavankumar Kondeti | 32b2dd0 | 2017-04-13 16:58:57 +0530 | [diff] [blame] | 916 | spin_lock_irqsave(&state_lock, flags); |
| 917 | if (unisolated) |
| 918 | cluster->nr_isolated_cpus--; |
| 919 | do_wakeup = adjustment_possible(cluster, need); |
| 920 | spin_unlock_irqrestore(&state_lock, flags); |
| 921 | if (do_wakeup) |
Olav Haugan | 833926cb | 2016-06-27 11:38:06 -0700 | [diff] [blame] | 922 | wake_up_core_ctl_thread(cluster); |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 923 | |
Pavankumar Kondeti | 046f223 | 2017-04-13 18:58:56 +0530 | [diff] [blame] | 924 | return NOTIFY_OK; |
Olav Haugan | 9306c80 | 2016-08-18 17:22:44 -0700 | [diff] [blame] | 925 | } |
| 926 | |
| 927 | static struct notifier_block __refdata cpu_notifier = { |
| 928 | .notifier_call = cpu_callback, |
| 929 | }; |

/* ============================ init code ============================== */

static cpumask_var_t core_ctl_disable_cpumask;
static bool core_ctl_disable_cpumask_present;

static int __init core_ctl_disable_setup(char *str)
{
	if (!*str)
		return -EINVAL;

	alloc_bootmem_cpumask_var(&core_ctl_disable_cpumask);

	if (cpulist_parse(str, core_ctl_disable_cpumask) < 0) {
		free_bootmem_cpumask_var(core_ctl_disable_cpumask);
		return -EINVAL;
	}

	core_ctl_disable_cpumask_present = true;
	pr_info("disable_cpumask=%*pbl\n",
		cpumask_pr_args(core_ctl_disable_cpumask));

	return 0;
}
early_param("core_ctl_disable_cpumask", core_ctl_disable_setup);
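/*
 * Illustrative usage (the CPU numbers are an example only): booting with
 * "core_ctl_disable_cpumask=4-7" on the kernel command line parses that
 * cpulist so that core control is skipped for any cluster whose CPUs all
 * fall within CPUs 4-7.
 */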

static bool should_skip(const struct cpumask *mask)
{
	if (!core_ctl_disable_cpumask_present)
		return false;

	/*
	 * We operate on a cluster basis. Disable core_ctl for a cluster
	 * only if all of its CPUs are specified in
	 * core_ctl_disable_cpumask.
	 */
	return cpumask_subset(mask, core_ctl_disable_cpumask);
}
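/*
 * Hypothetical example: with core_ctl_disable_cpumask=0-3, a cluster
 * spanning CPUs 0-3 is a subset of the mask and is skipped, while a
 * cluster spanning CPUs 4-7 is still managed.
 */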

static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu)
{
	unsigned int i;

	for (i = 0; i < num_clusters; ++i) {
		if (cluster_state[i].first_cpu == first_cpu)
			return &cluster_state[i];
	}

	return NULL;
}
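/*
 * A cluster is identified by its lowest-numbered CPU; cluster_init()
 * uses this lookup to avoid registering the same cluster twice.
 */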

static int cluster_init(const struct cpumask *mask)
{
	struct device *dev;
	unsigned int first_cpu = cpumask_first(mask);
	struct cluster_data *cluster;
	struct cpu_data *state;
	unsigned int cpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	if (should_skip(mask))
		return 0;

	if (find_cluster_by_first_cpu(first_cpu))
		return 0;

	dev = get_cpu_device(first_cpu);
	if (!dev)
		return -ENODEV;

	pr_info("Creating CPU group %u\n", first_cpu);

	if (num_clusters == MAX_CLUSTERS) {
		pr_err("Unsupported number of clusters. Only %u supported\n",
			MAX_CLUSTERS);
		return -EINVAL;
	}
	cluster = &cluster_state[num_clusters];
	++num_clusters;

	cpumask_copy(&cluster->cpu_mask, mask);
	cluster->num_cpus = cpumask_weight(mask);
	if (cluster->num_cpus > MAX_CPUS_PER_CLUSTER) {
		pr_err("HW configuration not supported\n");
		--num_clusters;	/* don't leak the half-initialized slot */
		return -EINVAL;
	}
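	/*
	 * Conservative defaults; these are presumably retuned at runtime
	 * through the core_ctl kobject registered at the end of this
	 * function. Keep at least one CPU, allow all of them, and wait
	 * offline_delay_ms (likely as hysteresis) before taking CPUs down.
	 */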
	cluster->first_cpu = first_cpu;
	cluster->min_cpus = 1;
	cluster->max_cpus = cluster->num_cpus;
	cluster->need_cpus = cluster->num_cpus;
	cluster->offline_delay_ms = 100;
	cluster->task_thres = UINT_MAX;
	cluster->nrrun = cluster->num_cpus;
	INIT_LIST_HEAD(&cluster->lru);
	spin_lock_init(&cluster->pending_lock);

	for_each_cpu(cpu, mask) {
		pr_info("Init CPU%u state\n", cpu);

		state = &per_cpu(cpu_state, cpu);
		state->cluster = cluster;
		state->cpu = cpu;
		list_add_tail(&state->sib, &cluster->lru);
	}
	cluster->active_cpus = get_active_cpu_count(cluster);

	cluster->core_ctl_thread = kthread_run(try_core_ctl, (void *) cluster,
					"core_ctl/%d", first_cpu);
	if (IS_ERR(cluster->core_ctl_thread))
		return PTR_ERR(cluster->core_ctl_thread);

	sched_setscheduler_nocheck(cluster->core_ctl_thread, SCHED_FIFO,
				   &param);

	cluster->inited = true;

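	/*
	 * Attach the cluster's kobject (and its sysfs attributes) under the
	 * first CPU's device, i.e.
	 * /sys/devices/system/cpu/cpu<first_cpu>/core_ctl.
	 */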
	kobject_init(&cluster->kobj, &ktype_core_ctl);
	return kobject_add(&cluster->kobj, &dev->kobj, "core_ctl");
}

static int __init core_ctl_init(void)
{
	unsigned int cpu;
	struct cpumask cpus = *cpu_possible_mask;

	if (should_skip(cpu_possible_mask))
		return 0;

	register_cpu_notifier(&cpu_notifier);

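	/*
	 * Discover clusters by walking the possible CPUs: each CPU's
	 * cpu_coregroup_mask() names its cluster, cluster_init() runs once
	 * per group, and the group's CPUs are then removed from the scan
	 * mask so every cluster is visited exactly once.
	 */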
	for_each_cpu(cpu, &cpus) {
		int ret;
		const struct cpumask *cluster_cpus = cpu_coregroup_mask(cpu);

		ret = cluster_init(cluster_cpus);
		if (ret)
			pr_warn("unable to create core ctl group: %d\n", ret);
		cpumask_andnot(&cpus, &cpus, cluster_cpus);
	}
	initialized = true;
	return 0;
}

late_initcall(core_ctl_init);