blob: 8bf4775ce03c7712e26355f878b7d740417a2b7d [file] [log] [blame]
/*
 * drivers/cpufreq/cpufreq_governor.h
 *
 * Header file for CPUFreq governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
16
Borislav Petkovbeb0ff32013-04-02 12:26:15 +000017#ifndef _CPUFREQ_GOVERNOR_H
18#define _CPUFREQ_GOVERNOR_H
Viresh Kumar4471a342012-10-26 00:47:42 +020019
Rafael J. Wysocki2dd3e722015-12-08 21:44:05 +010020#include <linux/atomic.h>
Rafael J. Wysocki9be4fd22016-02-10 16:53:50 +010021#include <linux/irq_work.h>
Viresh Kumar4471a342012-10-26 00:47:42 +020022#include <linux/cpufreq.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053023#include <linux/kernel_stat.h>
24#include <linux/module.h>
Viresh Kumar4471a342012-10-26 00:47:42 +020025#include <linux/mutex.h>
Viresh Kumar4471a342012-10-26 00:47:42 +020026
/*
 * The polling frequency depends on the capability of the processor. Default
 * polling frequency is 1000 times the transition latency of the processor. The
 * governor will work on any processor with transition latency <= 10ms, using
 * appropriate sampling rate.
 *
 * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work. All times here are in us (micro seconds).
 */
#define MIN_SAMPLING_RATE_RATIO		(2)
#define LATENCY_MULTIPLIER		(1000)	/* default rate = 1000 * latency */
#define MIN_LATENCY_MULTIPLIER		(20)	/* lower bound for the multiplier */
#define TRANSITION_LATENCY_LIMIT	(10 * 1000 * 1000)	/* 10 ms limit above */

/* Ondemand Sampling types */
enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
/*
 * create helper routines: for a per-CPU governor data variable @_dbs_info
 * (whose type embeds a struct cpu_dbs_info member named 'cdbs'), generate
 * the two accessors the common governor code uses to reach, respectively,
 * the common part (cdbs) and the governor-specific per-CPU data.
 */
#define define_get_cpu_dbs_routines(_dbs_info)			\
static struct cpu_dbs_info *get_cpu_cdbs(int cpu)		\
{								\
	return &per_cpu(_dbs_info, cpu).cdbs;			\
}								\
								\
static void *get_cpu_dbs_info_s(int cpu)			\
{								\
	return &per_cpu(_dbs_info, cpu);			\
}
55
/*
 * Abbreviations:
 * dbs: used as a shortform for demand based switching. It helps to keep
 *	variable names smaller, simpler
 * cdbs: common dbs
 * od_*: On-demand governor
 * cs_*: Conservative governor
 */
64
/* Governor demand based switching data (per-policy or global). */
struct dbs_data {
	int usage_count;		/* number of users of this object */
	void *tuners;			/* governor-specific tunables (od/cs) */
	unsigned int min_sampling_rate;
	unsigned int ignore_nice_load;
	unsigned int sampling_rate;	/* time between samples, in us */
	unsigned int sampling_down_factor;
	unsigned int up_threshold;

	struct kobject kobj;		/* anchors the tunables in sysfs */
	/* all policy_dbs_info objects sharing this dbs_data */
	struct list_head policy_dbs_list;
	/*
	 * Protect concurrent updates to governor tunables from sysfs,
	 * policy_dbs_list and usage_count.
	 */
	struct mutex mutex;
};
83
/* Governor's specific attributes */
struct dbs_data;
/*
 * sysfs attribute for a governor tunable: show/store operate on the
 * dbs_data object the attribute's kobject belongs to.
 */
struct governor_attr {
	struct attribute attr;
	ssize_t (*show)(struct dbs_data *dbs_data, char *buf);
	ssize_t (*store)(struct dbs_data *dbs_data, const char *buf,
			 size_t count);
};
92
/* Generate a show() routine for a tunable kept in dbs_data->tuners. */
#define gov_show_one(_gov, file_name)					\
static ssize_t show_##file_name						\
(struct dbs_data *dbs_data, char *buf)					\
{									\
	struct _gov##_dbs_tuners *tuners = dbs_data->tuners;		\
	return sprintf(buf, "%u\n", tuners->file_name);			\
}

/* Generate a show() routine for a tunable kept directly in dbs_data. */
#define gov_show_one_common(file_name)					\
static ssize_t show_##file_name						\
(struct dbs_data *dbs_data, char *buf)					\
{									\
	return sprintf(buf, "%u\n", dbs_data->file_name);		\
}

/* Define a read-only governor sysfs attribute. */
#define gov_attr_ro(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0444, show_##_name, NULL)

/* Define a read-write governor sysfs attribute. */
#define gov_attr_rw(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0644, show_##_name, store_##_name)
115
/* Common to all CPUs of a policy */
struct policy_dbs_info {
	struct cpufreq_policy *policy;
	/*
	 * Per policy mutex that serializes load evaluation from limit-change
	 * and work-handler.
	 */
	struct mutex timer_mutex;

	u64 last_sample_time;	/* time of the previous sample */
	s64 sample_delay_ns;	/* time between samples, in ns */
	atomic_t work_count;	/* guards against queuing 'work' twice */
	struct irq_work irq_work;
	struct work_struct work;
	/* dbs_data may be shared between multiple policy objects */
	struct dbs_data *dbs_data;
	struct list_head list;	/* node in dbs_data->policy_dbs_list */
};
134
Rafael J. Wysockie40e7b22016-02-10 17:07:44 +0100135static inline void gov_update_sample_delay(struct policy_dbs_info *policy_dbs,
Rafael J. Wysocki9be4fd22016-02-10 16:53:50 +0100136 unsigned int delay_us)
137{
Rafael J. Wysockie40e7b22016-02-10 17:07:44 +0100138 policy_dbs->sample_delay_ns = delay_us * NSEC_PER_USEC;
Rafael J. Wysocki9be4fd22016-02-10 16:53:50 +0100139}
140
/* Per cpu structures */
struct cpu_dbs_info {
	u64 prev_cpu_idle;	/* idle time seen at the previous sample */
	u64 prev_cpu_wall;	/* wall time seen at the previous sample */
	u64 prev_cpu_nice;	/* nice time seen at the previous sample */
	/*
	 * Used to keep track of load in the previous interval. However, when
	 * explicitly set to zero, it is used as a flag to ensure that we copy
	 * the previous load to the current interval only once, upon the first
	 * wake-up from idle.
	 */
	unsigned int prev_load;
	/* NOTE(review): presumably registered with the scheduler's
	 * utilization-update hook — confirm in the governor core. */
	struct update_util_data update_util;
	struct policy_dbs_info *policy_dbs;	/* back-pointer to policy data */
};
156
/* Per-CPU data of the ondemand governor. */
struct od_cpu_dbs_info_s {
	struct cpu_dbs_info cdbs;	/* common per-CPU part */
	struct cpufreq_frequency_table *freq_table;
	/* freq_lo/freq_hi: presumably the powersave_bias pair — see od_ops */
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	unsigned int rate_mult;		/* sampling-rate multiplier */
	unsigned int sample_type:1;	/* OD_NORMAL_SAMPLE or OD_SUB_SAMPLE */
};
166
/* Per-CPU data of the conservative governor. */
struct cs_cpu_dbs_info_s {
	struct cpu_dbs_info cdbs;	/* common per-CPU part */
	unsigned int down_skip;
	unsigned int requested_freq;
};
172
/* Per policy Governors sysfs tunables */
struct od_dbs_tuners {
	unsigned int powersave_bias;
	unsigned int io_is_busy;	/* count iowait time as busy if set */
};

struct cs_dbs_tuners {
	unsigned int down_threshold;
	unsigned int freq_step;
};
183
/* Common Governor data across policies */
struct dbs_governor {
	struct cpufreq_governor gov;	/* embedded; see dbs_governor_of() */

	#define GOV_ONDEMAND 0
	#define GOV_CONSERVATIVE 1
	int governor;			/* GOV_ONDEMAND or GOV_CONSERVATIVE */
	struct kobj_type kobj_type;	/* ktype used for dbs_data->kobj */

	/*
	 * Common data for platforms that don't set
	 * CPUFREQ_HAVE_GOVERNOR_PER_POLICY
	 */
	struct dbs_data *gdbs_data;

	/* look up a CPU's common / governor-specific per-CPU data */
	struct cpu_dbs_info *(*get_cpu_cdbs)(int cpu);
	void *(*get_cpu_dbs_info_s)(int cpu);
	unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy);
	void (*gov_check_cpu)(int cpu, unsigned int load);
	/* allocate/release governor-specific tunables in dbs_data */
	int (*init)(struct dbs_data *dbs_data, bool notify);
	void (*exit)(struct dbs_data *dbs_data, bool notify);

	/* Governor specific ops, see below */
	void *gov_ops;
};
209
Rafael J. Wysockiea59ee0d2016-02-07 16:09:51 +0100210static inline struct dbs_governor *dbs_governor_of(struct cpufreq_policy *policy)
211{
212 return container_of(policy->governor, struct dbs_governor, gov);
213}
214
/* Ondemand specific ops, stored in struct dbs_governor's gov_ops pointer */
struct od_ops {
	void (*powersave_bias_init_cpu)(int cpu);
	/* pick a (possibly biased) target around @freq_next */
	unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
			unsigned int freq_next, unsigned int relation);
	void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
};
222
Viresh Kumar4471a342012-10-26 00:47:42 +0200223static inline int delay_for_sampling_rate(unsigned int sampling_rate)
224{
225 int delay = usecs_to_jiffies(sampling_rate);
226
227 /* We want all CPUs to do sampling nearly on same jiffy */
228 if (num_online_cpus() > 1)
229 delay -= jiffies % delay;
230
231 return delay;
232}
233
/* Global locks shared by the governor core — defined in cpufreq_governor.c */
extern struct mutex dbs_data_mutex;
extern struct mutex cpufreq_governor_lock;
/* Evaluate load for all CPUs of @policy and let the governor react. */
void dbs_check_cpu(struct cpufreq_policy *policy);
/* Common governor entry point dispatching on the cpufreq governor @event. */
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event);
/* Install/remove an alternative ondemand powersave_bias target function. */
void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias);
void od_unregister_powersave_bias_handler(void);
#endif /* _CPUFREQ_GOVERNOR_H */