/*
 * drivers/cpufreq/cpufreq_governor.h
 *
 * Header file for CPUFreq governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _CPUFREQ_GOVERNOR_H
#define _CPUFREQ_GOVERNOR_H

#include <linux/atomic.h>
#include <linux/irq_work.h>
#include <linux/cpufreq.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>

/*
 * The polling frequency depends on the capability of the processor. The
 * default polling frequency is 1000 times the transition latency of the
 * processor. The governor will work on any processor with a transition
 * latency <= 10 ms, using an appropriate sampling rate.
 *
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL) this governor will not work. All times here are in us
 * (microseconds).
 */
#define MIN_SAMPLING_RATE_RATIO		(2)
#define LATENCY_MULTIPLIER		(1000)
#define MIN_LATENCY_MULTIPLIER		(20)
#define TRANSITION_LATENCY_LIMIT	(10 * 1000 * 1000)

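/*
 * Illustrative sketch (not part of this header): the governor core typically
 * combines the constants above with the driver's transition latency to pick
 * the effective sampling rate, roughly along these lines (field names refer
 * to struct dbs_data below; the real code lives in cpufreq_governor.c):
 *
 *	unsigned int latency = policy->cpuinfo.transition_latency / 1000;
 *
 *	if (latency == 0)
 *		latency = 1;
 *	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
 *					  MIN_LATENCY_MULTIPLIER * latency);
 *	dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
 *				      LATENCY_MULTIPLIER * latency);
 */
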
/* Ondemand Sampling types */
enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};

/* create helper routines */
#define define_get_cpu_dbs_routines(_dbs_info)				\
static struct cpu_dbs_info *get_cpu_cdbs(int cpu)			\
{									\
	return &per_cpu(_dbs_info, cpu).cdbs;				\
}									\
									\
static void *get_cpu_dbs_info_s(int cpu)				\
{									\
	return &per_cpu(_dbs_info, cpu);				\
}

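/*
 * Example (illustrative): a governor instantiates these helpers against its
 * own per-CPU data; the ondemand governor, for instance, does roughly:
 *
 *	static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
 *	define_get_cpu_dbs_routines(od_cpu_dbs_info);
 */
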
/*
 * Abbreviations:
 * dbs: used as a short form for demand based switching. It helps to keep
 *	variable names smaller and simpler.
 * cdbs: common dbs
 * od_*: On-demand governor
 * cs_*: Conservative governor
 */

/* Governor demand based switching data (per-policy or global). */
struct dbs_data {
	int usage_count;
	void *tuners;
	unsigned int min_sampling_rate;
	unsigned int ignore_nice_load;
	unsigned int sampling_rate;
	unsigned int sampling_down_factor;
	unsigned int up_threshold;

	struct kobject kobj;
	struct list_head policy_dbs_list;
	/*
	 * Protect concurrent updates to governor tunables from sysfs,
	 * policy_dbs_list and usage_count.
	 */
	struct mutex mutex;
};

/* Governor's specific attributes */
struct dbs_data;
struct governor_attr {
	struct attribute attr;
	ssize_t (*show)(struct dbs_data *dbs_data, char *buf);
	ssize_t (*store)(struct dbs_data *dbs_data, const char *buf,
			 size_t count);
};

#define gov_show_one(_gov, file_name)					\
static ssize_t show_##file_name						\
(struct dbs_data *dbs_data, char *buf)					\
{									\
	struct _gov##_dbs_tuners *tuners = dbs_data->tuners;		\
	return sprintf(buf, "%u\n", tuners->file_name);			\
}

#define gov_show_one_common(file_name)					\
static ssize_t show_##file_name						\
(struct dbs_data *dbs_data, char *buf)					\
{									\
	return sprintf(buf, "%u\n", dbs_data->file_name);		\
}

#define gov_attr_ro(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0444, show_##_name, NULL)

#define gov_attr_rw(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0644, show_##_name, store_##_name)

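/*
 * Example (illustrative sketch): a common tunable such as sampling_rate is
 * typically exposed by pairing a show helper with the attribute macros:
 *
 *	gov_show_one_common(sampling_rate);
 *	gov_attr_rw(sampling_rate);
 *
 * while a governor-private tunable (here io_is_busy from struct
 * od_dbs_tuners below) would use:
 *
 *	gov_show_one(od, io_is_busy);
 */
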
/* Common to all CPUs of a policy */
struct policy_dbs_info {
	struct cpufreq_policy *policy;
	/*
	 * Per policy mutex that serializes load evaluation from limit-change
	 * and work-handler.
	 */
	struct mutex timer_mutex;

	u64 last_sample_time;
	s64 sample_delay_ns;
	atomic_t work_count;
	struct irq_work irq_work;
	struct work_struct work;
	/* dbs_data may be shared between multiple policy objects */
	struct dbs_data *dbs_data;
	struct list_head list;
	/* Status indicators */
	bool is_shared;		/* This object is used by multiple CPUs */
	bool work_in_progress;	/* Work is being queued up or in progress */
};

static inline void gov_update_sample_delay(struct policy_dbs_info *policy_dbs,
					   unsigned int delay_us)
{
	policy_dbs->sample_delay_ns = delay_us * NSEC_PER_USEC;
}

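/*
 * Illustrative use (caller is assumed, not defined in this header): after
 * completing a sample, a governor's timer callback would typically re-arm
 * the next sample relative to the sampling_rate tunable, e.g.:
 *
 *	gov_update_sample_delay(policy_dbs, dbs_data->sampling_rate);
 */
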
/* Per cpu structures */
struct cpu_dbs_info {
	u64 prev_cpu_idle;
	u64 prev_cpu_wall;
	u64 prev_cpu_nice;
	/*
	 * Used to keep track of load in the previous interval. However, when
	 * explicitly set to zero, it is used as a flag to ensure that we copy
	 * the previous load to the current interval only once, upon the first
	 * wake-up from idle.
	 */
	unsigned int prev_load;
	struct update_util_data update_util;
	struct policy_dbs_info *policy_dbs;
};

struct od_cpu_dbs_info_s {
	struct cpu_dbs_info cdbs;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	unsigned int rate_mult;
	unsigned int sample_type:1;
};

struct cs_cpu_dbs_info_s {
	struct cpu_dbs_info cdbs;
	unsigned int down_skip;
	unsigned int requested_freq;
};

/* Per-policy governor sysfs tunables */
struct od_dbs_tuners {
	unsigned int powersave_bias;
	unsigned int io_is_busy;
};

struct cs_dbs_tuners {
	unsigned int down_threshold;
	unsigned int freq_step;
};

/* Common Governor data across policies */
struct dbs_governor {
	struct cpufreq_governor gov;

	#define GOV_ONDEMAND		0
	#define GOV_CONSERVATIVE	1
	int governor;
	struct kobj_type kobj_type;

	/*
	 * Common data for platforms that don't set
	 * CPUFREQ_HAVE_GOVERNOR_PER_POLICY
	 */
	struct dbs_data *gdbs_data;

	struct cpu_dbs_info *(*get_cpu_cdbs)(int cpu);
	void *(*get_cpu_dbs_info_s)(int cpu);
	unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy);
	void (*gov_check_cpu)(int cpu, unsigned int load);
	int (*init)(struct dbs_data *dbs_data, bool notify);
	void (*exit)(struct dbs_data *dbs_data, bool notify);

	/* Governor specific ops, see below */
	void *gov_ops;
};

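/*
 * Illustrative sketch of how a governor is expected to fill this structure
 * in (callback names such as od_dbs_timer/od_check_cpu/od_init/od_exit are
 * examples only; see the ondemand/conservative governors for the real
 * definitions):
 *
 *	static struct dbs_governor od_dbs_gov = {
 *		.gov = {
 *			.name = "ondemand",
 *			.governor = cpufreq_governor_dbs,
 *			.max_transition_latency = TRANSITION_LATENCY_LIMIT,
 *			.owner = THIS_MODULE,
 *		},
 *		.governor = GOV_ONDEMAND,
 *		.get_cpu_cdbs = get_cpu_cdbs,
 *		.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
 *		.gov_dbs_timer = od_dbs_timer,
 *		.gov_check_cpu = od_check_cpu,
 *		.init = od_init,
 *		.exit = od_exit,
 *		.gov_ops = &od_ops,
 *	};
 */
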
static inline struct dbs_governor *dbs_governor_of(struct cpufreq_policy *policy)
{
	return container_of(policy->governor, struct dbs_governor, gov);
}

/* Governor specific ops, will be passed to dbs_governor->gov_ops */
struct od_ops {
	void (*powersave_bias_init_cpu)(int cpu);
	unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
			unsigned int freq_next, unsigned int relation);
	void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
};

static inline int delay_for_sampling_rate(unsigned int sampling_rate)
{
	int delay = usecs_to_jiffies(sampling_rate);

	/* We want all CPUs to do sampling nearly on the same jiffy */
	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	return delay;
}

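/*
 * Worked example (illustrative, assuming HZ == 250): a 10000 us sampling
 * rate converts to 3 jiffies (one jiffy is 4000 us, usecs_to_jiffies()
 * rounds up). With more than one CPU online, subtracting "jiffies % delay"
 * aligns the expiry to a multiple of the delay, so all policies sample
 * close to the same jiffy boundary instead of drifting apart.
 */
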
extern struct mutex dbs_data_mutex;
void dbs_check_cpu(struct cpufreq_policy *policy);
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event);
void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias);
void od_unregister_powersave_bias_handler(void);
ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
			    size_t count);
#endif /* _CPUFREQ_GOVERNOR_H */
Borislav Petkovbeb0ff32013-04-02 12:26:15 +0000246#endif /* _CPUFREQ_GOVERNOR_H */