/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>

#include <trace/events/sched.h>

#define MAX_CPUS_PER_CLUSTER 4
#define MAX_CLUSTERS 2

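/*
 * Per-cluster state, one instance per frequency domain (cpufreq policy).
 * min_cpus/max_cpus and the threshold arrays are the sysfs tunables;
 * lru orders the cluster's CPUs by how recently their state changed, so
 * the isolation paths below consider the stalest CPU first; boost is a
 * refcount manipulated by core_ctl_set_boost().
 */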
struct cluster_data {
	bool inited;
	unsigned int min_cpus;
	unsigned int max_cpus;
	unsigned int offline_delay_ms;
	unsigned int busy_up_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int active_cpus;
	unsigned int num_cpus;
	cpumask_t cpu_mask;
	unsigned int need_cpus;
	unsigned int task_thres;
	s64 last_isolate_ts;
	struct list_head lru;
	bool pending;
	spinlock_t pending_lock;
	bool is_big_cluster;
	int nrrun;
	bool nrrun_changed;
	struct task_struct *core_ctl_thread;
	unsigned int first_cpu;
	unsigned int boost;
	struct kobject kobj;
};

struct cpu_data {
	bool online;
	bool is_busy;
	unsigned int busy;
	unsigned int cpu;
	bool not_preferred;
	struct cluster_data *cluster;
	struct list_head sib;
	bool isolated_by_us;
};

static DEFINE_PER_CPU(struct cpu_data, cpu_state);
static struct cluster_data cluster_state[MAX_CLUSTERS];
static unsigned int num_clusters;

#define for_each_cluster(cluster, idx) \
	for ((cluster) = &cluster_state[idx]; (idx) < num_clusters;\
	     (idx)++, (cluster) = &cluster_state[idx])
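
/*
 * Note: for_each_cluster() expects idx to be initialised by the caller
 * (typically to 0) and walks cluster_state[idx..num_clusters - 1], e.g.:
 *
 *	unsigned int index = 0;
 *	struct cluster_data *cluster;
 *
 *	for_each_cluster(cluster, index)
 *		eval_need(cluster);
 */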

static DEFINE_SPINLOCK(state_lock);
static void apply_need(struct cluster_data *state);
static void wake_up_core_ctl_thread(struct cluster_data *state);
static bool initialized;

static unsigned int get_active_cpu_count(const struct cluster_data *cluster);

/* ========================= sysfs interface =========================== */

static ssize_t store_min_cpus(struct cluster_data *state,
			      const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->min_cpus = min(val, state->max_cpus);
	wake_up_core_ctl_thread(state);

	return count;
}
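
/*
 * min_cpus is clamped to max_cpus on write and takes effect immediately:
 * the store wakes the cluster's core_ctl thread rather than waiting for
 * the next load or runqueue update. Example (the path assumes the
 * cluster kobject was added under its first CPU's device node, as
 * cluster_init() does):
 *
 *	echo 2 > /sys/devices/system/cpu/cpu0/core_ctl/min_cpus
 */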

static ssize_t show_min_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->min_cpus);
}

static ssize_t store_max_cpus(struct cluster_data *state,
			      const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	val = min(val, state->num_cpus);
	state->max_cpus = val;
	state->min_cpus = min(state->min_cpus, state->max_cpus);
	wake_up_core_ctl_thread(state);

	return count;
}

static ssize_t show_max_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->max_cpus);
}

static ssize_t store_offline_delay_ms(struct cluster_data *state,
				      const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->offline_delay_ms = val;
	apply_need(state);

	return count;
}

static ssize_t show_task_thres(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->task_thres);
}

static ssize_t store_task_thres(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	if (val < state->num_cpus)
		return -EINVAL;

	state->task_thres = val;
	apply_need(state);

	return count;
}

static ssize_t show_offline_delay_ms(const struct cluster_data *state,
				     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->offline_delay_ms);
}

static ssize_t store_busy_up_thres(struct cluster_data *state,
				   const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}
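
/*
 * The threshold stores accept either a single value, which is applied
 * to every entry, or one value per CPU in the cluster (the entries are
 * indexed by "number of active CPUs - 1" in eval_need()). Example
 * usage, with the same path assumption as above:
 *
 *	echo 60 > /sys/devices/system/cpu/cpu0/core_ctl/busy_up_thres
 *	echo 40 50 60 70 > /sys/devices/system/cpu/cpu0/core_ctl/busy_up_thres
 */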

static ssize_t show_busy_up_thres(const struct cluster_data *state, char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_up_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_busy_down_thres(struct cluster_data *state,
				     const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}

static ssize_t show_busy_down_thres(const struct cluster_data *state,
				    char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_down_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_is_big_cluster(struct cluster_data *state,
				    const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->is_big_cluster = val ? 1 : 0;
	return count;
}

static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
}

static ssize_t show_cpus(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	ssize_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry(c, &state->lru, sib) {
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "CPU%u (%s)\n", c->cpu,
				  c->online ? "Online" : "Offline");
	}
	spin_unlock_irqrestore(&state_lock, flags);
	return count;
}

static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
}

static ssize_t show_active_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->active_cpus);
}

static ssize_t show_global_state(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	struct cluster_data *cluster;
	ssize_t count = 0;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		c = &per_cpu(cpu_state, cpu);
		cluster = c->cluster;
		if (!cluster || !cluster->inited)
			continue;

		count += snprintf(buf + count, PAGE_SIZE - count,
				  "CPU%u\n", cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tCPU: %u\n", c->cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tOnline: %u\n", c->online);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tActive: %u\n",
				  !cpu_isolated(c->cpu));
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tFirst CPU: %u\n",
				  cluster->first_cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tBusy%%: %u\n", c->busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tIs busy: %u\n", c->is_busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tNr running: %u\n", cluster->nrrun);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tActive CPUs: %u\n",
				  get_active_cpu_count(cluster));
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tNeed CPUs: %u\n", cluster->need_cpus);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tBoost: %u\n",
				  (unsigned int)cluster->boost);
	}

	return count;
}

static ssize_t store_not_preferred(struct cluster_data *state,
				   const char *buf, size_t count)
{
	struct cpu_data *c;
	unsigned int i;
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	unsigned long flags;
	int ret;

	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	i = 0;
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry(c, &state->lru, sib) {
		/*
		 * A single written value applies to every CPU in the
		 * cluster; otherwise take one value per CPU. Indexing
		 * val[] unconditionally here would read entries sscanf
		 * never filled in when only one value was supplied.
		 */
		c->not_preferred = (ret == 1) ? val[0] : val[i++];
	}
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}

static ssize_t show_not_preferred(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	ssize_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry(c, &state->lru, sib)
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tCPU:%d %u\n", c->cpu, c->not_preferred);
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}

struct core_ctl_attr {
	struct attribute attr;
	ssize_t (*show)(const struct cluster_data *, char *);
	ssize_t (*store)(struct cluster_data *, const char *, size_t count);
};

#define core_ctl_attr_ro(_name)		\
static struct core_ctl_attr _name =	\
__ATTR(_name, 0444, show_##_name, NULL)

#define core_ctl_attr_rw(_name)			\
static struct core_ctl_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)

core_ctl_attr_rw(min_cpus);
core_ctl_attr_rw(max_cpus);
core_ctl_attr_rw(offline_delay_ms);
core_ctl_attr_rw(busy_up_thres);
core_ctl_attr_rw(busy_down_thres);
core_ctl_attr_rw(task_thres);
core_ctl_attr_rw(is_big_cluster);
core_ctl_attr_ro(cpus);
core_ctl_attr_ro(need_cpus);
core_ctl_attr_ro(active_cpus);
core_ctl_attr_ro(global_state);
core_ctl_attr_rw(not_preferred);

static struct attribute *default_attrs[] = {
	&min_cpus.attr,
	&max_cpus.attr,
	&offline_delay_ms.attr,
	&busy_up_thres.attr,
	&busy_down_thres.attr,
	&task_thres.attr,
	&is_big_cluster.attr,
	&cpus.attr,
	&need_cpus.attr,
	&active_cpus.attr,
	&global_state.attr,
	&not_preferred.attr,
	NULL
};

#define to_cluster_data(k) container_of(k, struct cluster_data, kobj)
#define to_attr(a) container_of(a, struct core_ctl_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->show)
		ret = cattr->show(data, buf);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->store)
		ret = cattr->store(data, buf, count);

	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_core_ctl = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

/* ==================== runqueue based core count =================== */

#define RQ_AVG_TOLERANCE 2
#define RQ_AVG_DEFAULT_MS 20
#define NR_RUNNING_TOLERANCE 5
static unsigned int rq_avg_period_ms = RQ_AVG_DEFAULT_MS;

static s64 rq_avg_timestamp_ms;

static void update_running_avg(bool trigger_update)
{
	int avg, iowait_avg, big_avg, old_nrrun;
	s64 now;
	unsigned long flags;
	struct cluster_data *cluster;
	unsigned int index = 0;

	spin_lock_irqsave(&state_lock, flags);

	now = ktime_to_ms(ktime_get());
	if (now - rq_avg_timestamp_ms < rq_avg_period_ms - RQ_AVG_TOLERANCE) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}
	rq_avg_timestamp_ms = now;
	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);

	spin_unlock_irqrestore(&state_lock, flags);

	/*
	 * Round up to the next integer if the average number of running
	 * tasks is within NR_RUNNING_TOLERANCE/100 of the next integer.
	 * With plain round-up, a transient task can trigger an online
	 * event, and by the time the core is onlined the task has
	 * finished. Rounding to the closest integer suffers from the
	 * same problem, because the scheduler might only provide running
	 * stats once per jiffy and a transient task could skew the number
	 * for one jiffy. If core control samples every 2 jiffies, it
	 * would observe 0.5 of additional running average, which rounds
	 * up to 1 task.
	 */
	avg = (avg + NR_RUNNING_TOLERANCE) / 100;
	big_avg = (big_avg + NR_RUNNING_TOLERANCE) / 100;

	for_each_cluster(cluster, index) {
		if (!cluster->inited)
			continue;
		old_nrrun = cluster->nrrun;
		/*
		 * The big cluster only needs to take care of big tasks,
		 * but if there are not enough big cores, big tasks need
		 * to run on the little cluster as well. Thus the little
		 * cluster's runqueue stat has to use the overall runqueue
		 * average, or derive how many big tasks would have to run
		 * on little. The latter is not easy to get, given that
		 * core control reacts much more slowly than the scheduler
		 * and can't predict the scheduler's behavior.
		 */
		cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
		if (cluster->nrrun != old_nrrun) {
			if (trigger_update)
				apply_need(cluster);
			else
				cluster->nrrun_changed = true;
		}
	}
}
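
/*
 * Worked example of the rounding above, assuming
 * sched_get_nr_running_avg() reports averages scaled by 100 (as the
 * divide by 100 implies): an average of 2.04 running tasks arrives as
 * avg = 204 and (204 + 5) / 100 = 2, while an average of 2.95 arrives
 * as avg = 295 and (295 + 5) / 100 = 3; anything within 0.05 of the
 * next integer rounds up.
 */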

/* adjust needed CPUs based on current runqueue information */
static unsigned int apply_task_need(const struct cluster_data *cluster,
				    unsigned int new_need)
{
	/* unisolate all cores if there are enough tasks */
	if (cluster->nrrun >= cluster->task_thres)
		return cluster->num_cpus;

	/* only unisolate more cores if there are tasks to run */
	if (cluster->nrrun > new_need)
		return new_need + 1;

	return new_need;
}
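
/*
 * For example, with num_cpus = 4 and a task_thres of 6: nrrun = 6
 * returns all 4 CPUs regardless of load; nrrun = 3 with a load-based
 * need of 2 returns 3 (one extra CPU for the waiting task); nrrun = 2
 * with a need of 2 leaves the need unchanged.
 */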

/* ======================= load based core count ====================== */

static unsigned int apply_limits(const struct cluster_data *cluster,
				 unsigned int need_cpus)
{
	return min(max(cluster->min_cpus, need_cpus), cluster->max_cpus);
}

static unsigned int get_active_cpu_count(const struct cluster_data *cluster)
{
	return cluster->num_cpus -
		sched_isolate_count(&cluster->cpu_mask, true);
}

static bool is_active(const struct cpu_data *state)
{
	return state->online && !cpu_isolated(state->cpu);
}

static bool adjustment_possible(const struct cluster_data *cluster,
				unsigned int need)
{
	return (need < cluster->active_cpus || (need > cluster->active_cpus &&
		sched_isolate_count(&cluster->cpu_mask, false)));
}
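
/*
 * An adjustment makes sense in exactly two cases: fewer CPUs are needed
 * than are currently active (some can be isolated), or more are needed
 * and at least one CPU in the cluster is currently isolated (so it can
 * be restored). Needing more CPUs than the cluster has is not
 * actionable here.
 */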

static bool eval_need(struct cluster_data *cluster)
{
	unsigned long flags;
	struct cpu_data *c;
	unsigned int need_cpus = 0, last_need, thres_idx;
	int ret = 0;
	bool need_flag = false;
	unsigned int active_cpus;
	unsigned int new_need;

	if (unlikely(!cluster->inited))
		return false;

	spin_lock_irqsave(&state_lock, flags);

	if (cluster->boost) {
		need_cpus = cluster->max_cpus;
	} else {
		active_cpus = get_active_cpu_count(cluster);
		thres_idx = active_cpus ? active_cpus - 1 : 0;
		list_for_each_entry(c, &cluster->lru, sib) {
			if (c->busy >= cluster->busy_up_thres[thres_idx])
				c->is_busy = true;
			else if (c->busy < cluster->busy_down_thres[thres_idx])
				c->is_busy = false;
			need_cpus += c->is_busy;
		}
		need_cpus = apply_task_need(cluster, need_cpus);
	}
	new_need = apply_limits(cluster, need_cpus);
	need_flag = adjustment_possible(cluster, new_need);

	last_need = cluster->need_cpus;
	cluster->need_cpus = new_need;

	if (!need_flag) {
		spin_unlock_irqrestore(&state_lock, flags);
		return false;
	}

	if (need_cpus > cluster->active_cpus) {
		ret = 1;
	} else if (need_cpus < cluster->active_cpus) {
		s64 now = ktime_to_ms(ktime_get());
		s64 elapsed = now - cluster->last_isolate_ts;

		ret = elapsed >= cluster->offline_delay_ms;
	}

	trace_core_ctl_eval_need(cluster->first_cpu, last_need, need_cpus,
				 ret && need_flag);
	spin_unlock_irqrestore(&state_lock, flags);

	return ret && need_flag;
}
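
/*
 * eval_need() is the single decision point: per-CPU busy percentages
 * drive the is_busy hysteresis (the up/down thresholds are indexed by
 * the number of currently active CPUs), the runqueue average can bump
 * the result up, min_cpus/max_cpus clamp it, and reducing the CPU count
 * is additionally rate-limited by offline_delay_ms. A true return means
 * the cluster's core_ctl thread has work to do.
 */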

static void apply_need(struct cluster_data *cluster)
{
	if (eval_need(cluster))
		wake_up_core_ctl_thread(cluster);
}

static int core_ctl_set_busy(unsigned int cpu, unsigned int busy)
{
	struct cpu_data *c = &per_cpu(cpu_state, cpu);
	struct cluster_data *cluster = c->cluster;
	unsigned int old_is_busy = c->is_busy;

	if (!cluster || !cluster->inited)
		return 0;

	update_running_avg(false);
	if (c->busy == busy && !cluster->nrrun_changed)
		return 0;
	c->busy = busy;
	cluster->nrrun_changed = false;

	apply_need(cluster);
	trace_core_ctl_set_busy(cpu, busy, old_is_busy, c->is_busy);
	return 0;
}

/* ========================= core count enforcement ==================== */

static void wake_up_core_ctl_thread(struct cluster_data *cluster)
{
	unsigned long flags;

	spin_lock_irqsave(&cluster->pending_lock, flags);
	cluster->pending = true;
	spin_unlock_irqrestore(&cluster->pending_lock, flags);

	wake_up_process_no_notif(cluster->core_ctl_thread);
}

static u64 core_ctl_check_timestamp;
static u64 core_ctl_check_interval;

static bool do_check(u64 wallclock)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	if ((wallclock - core_ctl_check_timestamp) >= core_ctl_check_interval) {
		core_ctl_check_timestamp = wallclock;
		ret = true;
	}
	spin_unlock_irqrestore(&state_lock, flags);
	return ret;
}
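
/*
 * do_check() rate-limits evaluation from the tick path: wallclock is a
 * caller-supplied timestamp (presumably in nanoseconds, given the
 * NSEC_PER_MSEC scaling at init), and core_ctl_check_interval is set to
 * (rq_avg_period_ms - RQ_AVG_TOLERANCE) in nanoseconds, so need
 * evaluation runs at most once per runqueue-averaging window no matter
 * how often core_ctl_check() is called.
 */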

int core_ctl_set_boost(bool boost)
{
	unsigned int index = 0;
	struct cluster_data *cluster;
	struct cluster_data *big_cluster = NULL;
	unsigned long flags;
	int ret = 0;
	bool boost_state_changed = false;

	spin_lock_irqsave(&state_lock, flags);
	for_each_cluster(cluster, index) {
		if (!cluster->is_big_cluster)
			continue;

		big_cluster = cluster;
		if (boost) {
			boost_state_changed = !cluster->boost;
			++cluster->boost;
		} else if (!cluster->boost) {
			pr_err("Error turning off boost. Boost already turned off\n");
			ret = -EINVAL;
		} else {
			--cluster->boost;
			boost_state_changed = !cluster->boost;
		}
		break;
	}
	spin_unlock_irqrestore(&state_lock, flags);

	/*
	 * If no cluster is marked as the big cluster there is nothing to
	 * boost; bail out before dereferencing the loop cursor, which
	 * points past the end of cluster_state[] after a full traversal.
	 */
	if (!big_cluster)
		return -EINVAL;

	if (boost_state_changed)
		apply_need(big_cluster);

	trace_core_ctl_set_boost(big_cluster->boost, ret);

	return ret;
}
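
/*
 * core_ctl_set_boost() is refcounted: each boost(true) must eventually
 * be paired with a boost(false), and only the zero <-> nonzero
 * transitions re-evaluate the cluster's need. Callers (for example an
 * input-event boost driver) can therefore nest boosts without tracking
 * global state.
 */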

void core_ctl_check(u64 wallclock)
{
	if (unlikely(!initialized))
		return;

	if (do_check(wallclock)) {
		unsigned int index = 0;
		struct cluster_data *cluster;

		update_running_avg(true);

		for_each_cluster(cluster, index) {
			if (eval_need(cluster))
				wake_up_core_ctl_thread(cluster);
		}
	}
}

static void move_cpu_lru(struct cpu_data *cpu_data)
{
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	list_del(&cpu_data->sib);
	list_add_tail(&cpu_data->sib, &cpu_data->cluster->lru);
	spin_unlock_irqrestore(&state_lock, flags);
}

static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
{
	struct cpu_data *c, *tmp;
	unsigned long flags;
	unsigned int num_cpus = cluster->num_cpus;

	/*
	 * Protect against an entry being removed (and re-added at the
	 * tail) by another thread (hotplug).
	 */
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus == need)
			break;
		/* Don't isolate busy CPUs. */
		if (c->is_busy)
			continue;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
			cluster->last_isolate_ts = ktime_to_ms(ktime_get());
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	spin_unlock_irqrestore(&state_lock, flags);

	/*
	 * If the number of active CPUs is within the limits, then
	 * don't force isolation of any busy CPUs.
	 */
	if (cluster->active_cpus <= cluster->max_cpus)
		return;

	num_cpus = cluster->num_cpus;
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus <= cluster->max_cpus)
			break;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
			cluster->last_isolate_ts = ktime_to_ms(ktime_get());
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	spin_unlock_irqrestore(&state_lock, flags);
}

static void __try_to_unisolate(struct cluster_data *cluster,
			       unsigned int need, bool force)
{
	struct cpu_data *c, *tmp;
	unsigned long flags;
	unsigned int num_cpus = cluster->num_cpus;

	/*
	 * Protect against an entry being removed (and re-added at the
	 * tail) by another thread (hotplug).
	 */
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!c->isolated_by_us)
			continue;
		if ((c->online && !cpu_isolated(c->cpu)) ||
		    (!force && c->not_preferred))
			continue;
		if (cluster->active_cpus == need)
			break;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to unisolate CPU%u\n", c->cpu);
		if (!sched_unisolate_cpu(c->cpu)) {
			c->isolated_by_us = false;
			move_cpu_lru(c);
		} else {
			pr_debug("Unable to unisolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	spin_unlock_irqrestore(&state_lock, flags);
}

static void try_to_unisolate(struct cluster_data *cluster, unsigned int need)
{
	bool force_use_non_preferred = false;

	__try_to_unisolate(cluster, need, force_use_non_preferred);

	if (cluster->active_cpus == need)
		return;

	force_use_non_preferred = true;
	__try_to_unisolate(cluster, need, force_use_non_preferred);
}
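
/*
 * Unisolation is two-pass: first bring back only CPUs the user has not
 * marked not_preferred; if that still leaves us short of the target,
 * fall back to the not_preferred ones. Only CPUs this driver isolated
 * itself (isolated_by_us) are ever brought back, so isolation requested
 * by other entities (e.g. thermal) is left alone.
 */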

static void __ref do_core_ctl(struct cluster_data *cluster)
{
	unsigned int need;

	need = apply_limits(cluster, cluster->need_cpus);

	if (adjustment_possible(cluster, need)) {
		pr_debug("Trying to adjust group %u from %u to %u\n",
			 cluster->first_cpu, cluster->active_cpus, need);

		if (cluster->active_cpus > need)
			try_to_isolate(cluster, need);
		else if (cluster->active_cpus < need)
			try_to_unisolate(cluster, need);
	}
}

static int __ref try_core_ctl(void *data)
{
	struct cluster_data *cluster = data;
	unsigned long flags;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&cluster->pending_lock, flags);
		if (!cluster->pending) {
			spin_unlock_irqrestore(&cluster->pending_lock, flags);
			schedule();
			if (kthread_should_stop())
				break;
			spin_lock_irqsave(&cluster->pending_lock, flags);
		}
		set_current_state(TASK_RUNNING);
		cluster->pending = false;
		spin_unlock_irqrestore(&cluster->pending_lock, flags);

		do_core_ctl(cluster);
	}

	return 0;
}

static int __ref cpu_callback(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	uint32_t cpu = (uintptr_t)hcpu;
	struct cpu_data *state = &per_cpu(cpu_state, cpu);
	struct cluster_data *cluster = state->cluster;
	unsigned int need;
	int ret = NOTIFY_OK;

	/* Don't affect suspend/resume */
	if (action & CPU_TASKS_FROZEN)
		return NOTIFY_OK;

	if (unlikely(!cluster || !cluster->inited))
		return NOTIFY_OK;

	switch (action) {
	case CPU_UP_PREPARE:

		/* If the CPU's online state somehow got out of sync, fix it. */
		if (state->online) {
			state->online = false;
			cluster->active_cpus = get_active_cpu_count(cluster);
			pr_warn("CPU%d offline when state is online\n", cpu);
		}
		break;

	case CPU_ONLINE:

		state->online = true;
		cluster->active_cpus = get_active_cpu_count(cluster);

		/*
		 * Moving to the end of the list should only happen in
		 * CPU_ONLINE and not on CPU_UP_PREPARE, to prevent an
		 * infinite list traversal when thermal (or other entities)
		 * reject trying to online CPUs.
		 */
		move_cpu_lru(state);
		break;

	case CPU_DEAD:
		/*
		 * We don't want a CPU that is both offline and isolated,
		 * so unisolate a CPU that went down if we isolated it.
		 */
		if (state->isolated_by_us) {
			sched_unisolate_cpu_unlocked(cpu);
			state->isolated_by_us = false;
		}

		/* Move a CPU to the end of the LRU when it goes offline. */
		move_cpu_lru(state);

		/* Fall through */

	case CPU_UP_CANCELED:

		/* If the CPU's online state somehow got out of sync, fix it. */
		if (!state->online)
			pr_warn("CPU%d online when state is offline\n", cpu);

		state->online = false;
		state->busy = 0;
		cluster->active_cpus = get_active_cpu_count(cluster);
		break;
	}

	need = apply_limits(cluster, cluster->need_cpus);
	if (adjustment_possible(cluster, need))
		wake_up_core_ctl_thread(cluster);

	return ret;
}

static struct notifier_block __refdata cpu_notifier = {
	.notifier_call = cpu_callback,
};

/* ============================ init code ============================== */

static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu)
{
	unsigned int i;

	for (i = 0; i < num_clusters; ++i) {
		if (cluster_state[i].first_cpu == first_cpu)
			return &cluster_state[i];
	}

	return NULL;
}

static int cluster_init(const struct cpumask *mask)
{
	struct device *dev;
	unsigned int first_cpu = cpumask_first(mask);
	struct cluster_data *cluster;
	struct cpu_data *state;
	unsigned int cpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	if (find_cluster_by_first_cpu(first_cpu))
		return 0;

	dev = get_cpu_device(first_cpu);
	if (!dev)
		return -ENODEV;

	pr_info("Creating CPU group %d\n", first_cpu);

	if (num_clusters == MAX_CLUSTERS) {
		pr_err("Unsupported number of clusters. Only %u supported\n",
		       MAX_CLUSTERS);
		return -EINVAL;
	}
	cluster = &cluster_state[num_clusters];
	++num_clusters;

	cpumask_copy(&cluster->cpu_mask, mask);
	cluster->num_cpus = cpumask_weight(mask);
	if (cluster->num_cpus > MAX_CPUS_PER_CLUSTER) {
		pr_err("HW configuration not supported\n");
		return -EINVAL;
	}
	cluster->first_cpu = first_cpu;
	cluster->min_cpus = 1;
	cluster->max_cpus = cluster->num_cpus;
	cluster->need_cpus = cluster->num_cpus;
	cluster->offline_delay_ms = 100;
	cluster->task_thres = UINT_MAX;
	cluster->nrrun = cluster->num_cpus;
	INIT_LIST_HEAD(&cluster->lru);
	spin_lock_init(&cluster->pending_lock);

	for_each_cpu(cpu, mask) {
		pr_info("Init CPU%u state\n", cpu);

		state = &per_cpu(cpu_state, cpu);
		state->cluster = cluster;
		state->cpu = cpu;
		if (cpu_online(cpu))
			state->online = true;
		list_add_tail(&state->sib, &cluster->lru);
	}
	cluster->active_cpus = get_active_cpu_count(cluster);

	cluster->core_ctl_thread = kthread_run(try_core_ctl, (void *) cluster,
					       "core_ctl/%d", first_cpu);
	if (IS_ERR(cluster->core_ctl_thread))
		return PTR_ERR(cluster->core_ctl_thread);

	sched_setscheduler_nocheck(cluster->core_ctl_thread, SCHED_FIFO,
				   &param);

	cluster->inited = true;

	kobject_init(&cluster->kobj, &ktype_core_ctl);
	return kobject_add(&cluster->kobj, &dev->kobj, "core_ctl");
}
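
/*
 * cluster_init() runs once per frequency domain: the first CPU of each
 * cpufreq policy gets a "core_ctl" kobject under its device node
 * (/sys/devices/system/cpu/cpuN/core_ctl), and a SCHED_FIFO kthread,
 * core_ctl/N, performs the actual isolation work so that it never runs
 * from the scheduler or cpufreq callback context that requested it.
 */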

static int cpufreq_policy_cb(struct notifier_block *nb, unsigned long val,
			     void *data)
{
	struct cpufreq_policy *policy = data;
	int ret;

	switch (val) {
	case CPUFREQ_CREATE_POLICY:
		ret = cluster_init(policy->related_cpus);
		if (ret)
			pr_warn("unable to create core ctl group: %d\n", ret);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block cpufreq_pol_nb = {
	.notifier_call = cpufreq_policy_cb,
};

static int cpufreq_gov_cb(struct notifier_block *nb, unsigned long val,
			  void *data)
{
	struct cpufreq_govinfo *info = data;

	switch (val) {
	case CPUFREQ_LOAD_CHANGE:
		core_ctl_set_busy(info->cpu, info->load);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block cpufreq_gov_nb = {
	.notifier_call = cpufreq_gov_cb,
};

static int __init core_ctl_init(void)
{
	unsigned int cpu;

	core_ctl_check_interval = (rq_avg_period_ms - RQ_AVG_TOLERANCE)
		* NSEC_PER_MSEC;

	register_cpu_notifier(&cpu_notifier);
	cpufreq_register_notifier(&cpufreq_pol_nb, CPUFREQ_POLICY_NOTIFIER);
	cpufreq_register_notifier(&cpufreq_gov_nb, CPUFREQ_GOVINFO_NOTIFIER);

	lock_device_hotplug();
	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		int ret;

		policy = cpufreq_cpu_get(cpu);
		if (policy) {
			ret = cluster_init(policy->related_cpus);
			if (ret)
				pr_warn("unable to create core ctl group: %d\n",
					ret);
			cpufreq_cpu_put(policy);
		}
	}
	unlock_device_hotplug();
	initialized = true;
	return 0;
}

late_initcall(core_ctl_init);