/*
 * drivers/cpufreq/cpufreq_governor.h
 *
 * Header file for CPUFreq governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _CPUFREQ_GOVERNOR_H
#define _CPUFREQ_GOVERNOR_H

#include <linux/atomic.h>
#include <linux/irq_work.h>
#include <linux/cpufreq.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>

/*
 * The polling frequency depends on the capability of the processor. Default
 * polling frequency is 1000 times the transition latency of the processor. The
 * governor will work on any processor with transition latency <= 10ms, using
 * an appropriate sampling rate.
 *
 * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work. All times here are in us (microseconds).
 */
#define MIN_SAMPLING_RATE_RATIO		(2)
#define LATENCY_MULTIPLIER		(1000)
#define MIN_LATENCY_MULTIPLIER		(20)
#define TRANSITION_LATENCY_LIMIT	(10 * 1000 * 1000)
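
/*
 * Worked example (illustrative only, not from the original header): following
 * the "1000 times the transition latency" rule stated above, a CPU with a
 * 50 us transition latency gets a default sampling period of
 * 50 * LATENCY_MULTIPLIER = 50000 us (50 ms), while
 * 50 * MIN_LATENCY_MULTIPLIER = 1000 us would be the assumed lower bound for
 * that tunable. A transition latency above TRANSITION_LATENCY_LIMIT
 * (10,000,000, i.e. 10 ms assuming the nanosecond-resolution value from
 * cpuinfo.transition_latency) rules this family of governors out.
 */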

/* Ondemand Sampling types */
enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};

/* Create helper routines for accessing a governor's per-CPU data */
#define define_get_cpu_dbs_routines(_dbs_info)				\
static struct cpu_dbs_info *get_cpu_cdbs(int cpu)			\
{									\
	return &per_cpu(_dbs_info, cpu).cdbs;				\
}									\
									\
static void *get_cpu_dbs_info_s(int cpu)				\
{									\
	return &per_cpu(_dbs_info, cpu);				\
}
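
/*
 * Illustrative usage (a sketch with a hypothetical per-CPU variable name, not
 * from the original header): a governor declares a per-CPU info structure
 * embedding struct cpu_dbs_info as a member named 'cdbs' and then
 * instantiates the accessors, roughly:
 *
 *	static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, my_cpu_dbs_info);
 *	define_get_cpu_dbs_routines(my_cpu_dbs_info);
 *
 * which expands to get_cpu_cdbs() and get_cpu_dbs_info_s(), suitable for the
 * matching callbacks in struct dbs_governor further below.
 */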

/*
 * Abbreviations:
 * dbs: used as a shorthand for demand based switching. It helps to keep
 *	variable names smaller and simpler
 * cdbs: common dbs
 * od_*: On-demand governor
 * cs_*: Conservative governor
 */

/* Governor demand based switching data (per-policy or global). */
struct dbs_data {
	int usage_count;
	void *tuners;
	unsigned int min_sampling_rate;
	unsigned int ignore_nice_load;
	unsigned int sampling_rate;
	unsigned int sampling_down_factor;
	unsigned int up_threshold;
	unsigned int io_is_busy;

	struct kobject kobj;
	struct list_head policy_dbs_list;
	/*
	 * Protect concurrent updates to governor tunables from sysfs,
	 * policy_dbs_list and usage_count.
	 */
	struct mutex mutex;
};

/* Governor's specific attributes */
struct dbs_data;
struct governor_attr {
	struct attribute attr;
	ssize_t (*show)(struct dbs_data *dbs_data, char *buf);
	ssize_t (*store)(struct dbs_data *dbs_data, const char *buf,
			 size_t count);
};

#define gov_show_one(_gov, file_name)					\
static ssize_t show_##file_name						\
(struct dbs_data *dbs_data, char *buf)					\
{									\
	struct _gov##_dbs_tuners *tuners = dbs_data->tuners;		\
	return sprintf(buf, "%u\n", tuners->file_name);			\
}

#define gov_show_one_common(file_name)					\
static ssize_t show_##file_name						\
(struct dbs_data *dbs_data, char *buf)					\
{									\
	return sprintf(buf, "%u\n", dbs_data->file_name);		\
}

#define gov_attr_ro(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0444, show_##_name, NULL)

#define gov_attr_rw(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0644, show_##_name, store_##_name)

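/*
 * Illustrative usage (a sketch with a hypothetical store helper, not from the
 * original header): exposing the common up_threshold tunable read-write via
 * sysfs would pair a generated show routine with a governor-supplied store
 * routine, roughly:
 *
 *	gov_show_one_common(up_threshold);
 *	static ssize_t store_up_threshold(struct dbs_data *dbs_data,
 *					  const char *buf, size_t count);
 *	gov_attr_rw(up_threshold);
 *
 * gov_show_one() plays the same role for governor-specific tuners reached
 * through dbs_data->tuners, and gov_attr_ro() declares read-only attributes.
 */
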
/* Common to all CPUs of a policy */
struct policy_dbs_info {
	struct cpufreq_policy *policy;
	/*
	 * Per-policy mutex that serializes load evaluation from the
	 * limit-change and work handlers.
	 */
	struct mutex timer_mutex;

	u64 last_sample_time;
	s64 sample_delay_ns;
	atomic_t work_count;
	struct irq_work irq_work;
	struct work_struct work;
	/* dbs_data may be shared between multiple policy objects */
	struct dbs_data *dbs_data;
	struct list_head list;
	/* Multiplier for increasing sample delay temporarily. */
	unsigned int rate_mult;
	/* Status indicators */
	bool is_shared;		/* This object is used by multiple CPUs */
	bool work_in_progress;	/* Work is being queued up or in progress */
};

static inline void gov_update_sample_delay(struct policy_dbs_info *policy_dbs,
					   unsigned int delay_us)
{
	policy_dbs->sample_delay_ns = delay_us * NSEC_PER_USEC;
}
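
/*
 * Illustrative usage (a sketch, assuming the governor derives the delay from
 * its sampling_rate tunable; not from the original header): a sampling
 * routine would typically set the next delay as
 *
 *	gov_update_sample_delay(policy_dbs,
 *				dbs_data->sampling_rate * policy_dbs->rate_mult);
 *
 * while a delay of 0 makes the very next utilization update eligible to
 * trigger a sample.
 */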

/* Per cpu structures */
struct cpu_dbs_info {
	u64 prev_cpu_idle;
	u64 prev_cpu_wall;
	u64 prev_cpu_nice;
	/*
	 * Used to keep track of load in the previous interval. However, when
	 * explicitly set to zero, it is used as a flag to ensure that we copy
	 * the previous load to the current interval only once, upon the first
	 * wake-up from idle.
	 */
	unsigned int prev_load;
	struct update_util_data update_util;
	struct policy_dbs_info *policy_dbs;
};

struct od_cpu_dbs_info_s {
	struct cpu_dbs_info cdbs;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_delay_us;
	unsigned int freq_hi_delay_us;
	unsigned int sample_type:1;
};

struct cs_cpu_dbs_info_s {
	struct cpu_dbs_info cdbs;
	unsigned int down_skip;
	unsigned int requested_freq;
};

/* Per-policy governor sysfs tunables */
struct od_dbs_tuners {
	unsigned int powersave_bias;
};

struct cs_dbs_tuners {
	unsigned int down_threshold;
	unsigned int freq_step;
};

/* Common governor data across policies */
struct dbs_governor {
	struct cpufreq_governor gov;

	#define GOV_ONDEMAND		0
	#define GOV_CONSERVATIVE	1
	int governor;
	struct kobj_type kobj_type;

	/*
	 * Common data for platforms that don't set
	 * CPUFREQ_HAVE_GOVERNOR_PER_POLICY
	 */
	struct dbs_data *gdbs_data;

	struct cpu_dbs_info *(*get_cpu_cdbs)(int cpu);
	void *(*get_cpu_dbs_info_s)(int cpu);
	unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy);
	int (*init)(struct dbs_data *dbs_data, bool notify);
	void (*exit)(struct dbs_data *dbs_data, bool notify);
	void (*start)(struct cpufreq_policy *policy);

	/* Governor specific ops, see below */
	void *gov_ops;
};
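
/*
 * Illustrative instantiation (a sketch with hypothetical my_* names, not from
 * the original header): a governor built on this framework would typically
 * fill the structure with its generated accessors and its own callbacks,
 * roughly:
 *
 *	static struct dbs_governor my_dbs_gov = {
 *		.gov = {
 *			.name		= "my_gov",
 *			.governor	= cpufreq_governor_dbs,
 *			.owner		= THIS_MODULE,
 *		},
 *		.governor		= GOV_ONDEMAND,
 *		.kobj_type		= { .default_attrs = my_attributes },
 *		.get_cpu_cdbs		= get_cpu_cdbs,
 *		.get_cpu_dbs_info_s	= get_cpu_dbs_info_s,
 *		.gov_dbs_timer		= my_dbs_timer,
 *		.init			= my_init,
 *		.exit			= my_exit,
 *		.start			= my_start,
 *		.gov_ops		= &my_od_ops,
 *	};
 *
 * where get_cpu_cdbs()/get_cpu_dbs_info_s() come from
 * define_get_cpu_dbs_routines() and the my_* entries are governor-specific.
 */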

static inline struct dbs_governor *dbs_governor_of(struct cpufreq_policy *policy)
{
	return container_of(policy->governor, struct dbs_governor, gov);
}

/* Governor specific ops, stored in dbs_governor->gov_ops */
struct od_ops {
	void (*powersave_bias_init_cpu)(int cpu);
	unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
			unsigned int freq_next, unsigned int relation);
};

extern struct mutex dbs_data_mutex;
unsigned int dbs_update(struct cpufreq_policy *policy);
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event);
void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias);
void od_unregister_powersave_bias_handler(void);
ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
			    size_t count);
#endif /* _CPUFREQ_GOVERNOR_H */