blob: 73513ece2b2edb175a83a468c4177cb7ef2fbbb7 [file] [log] [blame]
Ram Chandrasekar9b41fcb22014-01-14 10:56:15 -07001/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/module.h>
Jennifer Liu4ff40942013-07-30 15:25:46 -070017#include <linux/kthread.h>
Praveen Chidambaramf248bb72012-01-20 11:38:44 -070018#include <linux/mutex.h>
19#include <linux/msm_tsens.h>
20#include <linux/workqueue.h>
Jennifer Liu4ff40942013-07-30 15:25:46 -070021#include <linux/completion.h>
Eugene Seah7d6d2732012-03-09 17:48:42 -070022#include <linux/cpu.h>
Praveen Chidambaram91814362012-05-25 17:36:07 -060023#include <linux/cpufreq.h>
24#include <linux/msm_tsens.h>
25#include <linux/msm_thermal.h>
Eugene Seahb77b0c42012-07-02 19:28:50 -060026#include <linux/platform_device.h>
27#include <linux/of.h>
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -070028#include <linux/err.h>
29#include <linux/slab.h>
30#include <linux/of.h>
31#include <linux/sysfs.h>
32#include <linux/types.h>
Archana Sathyakumar143b0b52013-04-09 14:24:32 -060033#include <linux/android_alarm.h>
Jennifer Liu4ff40942013-07-30 15:25:46 -070034#include <linux/thermal.h>
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -070035#include <mach/rpm-regulator.h>
36#include <mach/rpm-regulator-smd.h>
37#include <linux/regulator/consumer.h>
Ram Chandrasekar64603922013-11-08 16:33:58 -070038#include <linux/msm_thermal_ioctl.h>
Praveen Chidambaramf248bb72012-01-20 11:38:44 -070039
/* Max optimum-current request in uA made on behalf of thermal mitigation */
#define MAX_CURRENT_UA 1000000
/* Upper bound on voltage-restriction rails parsed from DT */
#define MAX_RAILS 5
/* Per-sensor thresholds kept per mitigation type (one high, one low) */
#define MAX_THRESHOLD 2

/* Platform data copied in at probe time */
static struct msm_thermal_data msm_thermal_info;
/* Periodic polling work for temperature checks */
static struct delayed_work check_temp_work;
/* True when thermal core-control (hotplug) mitigation is active */
static bool core_control_enabled;
/* Bitmask of CPUs taken offline by thermal mitigation */
static uint32_t cpus_offlined;
static DEFINE_MUTEX(core_control_mutex);
/* RTC-alarm based wakeup for mitigation while suspended */
static uint32_t wakeup_ms;
static struct alarm thermal_rtc;
/* sysfs directories: thermal timer and core control */
static struct kobject *tt_kobj;
static struct kobject *cc_kobj;
static struct work_struct timer_work;
/* Mitigation kthreads; NULL until their init paths run */
static struct task_struct *hotplug_task;
static struct task_struct *freq_mitigation_task;
static struct completion hotplug_notify_complete;
static struct completion freq_mitigation_complete;

static int enabled;
/* Counts of rails discovered from DT for each mitigation type */
static int rails_cnt;
static int psm_rails_cnt;
static int ocr_rail_cnt;
/* Indices into the CPU frequency table used for freq clamping */
static int limit_idx;
static int limit_idx_low;
static int limit_idx_high;
static int max_tsens_num;
static struct cpufreq_frequency_table *table;
static uint32_t usefreq;
/* Non-zero once a cpufreq table has been seen (see check_freq_table()) */
static int freq_table_get;
/* Enable/called/probed tri-state flags for each optional feature */
static bool vdd_rstr_enabled;
static bool vdd_rstr_nodes_called;
static bool vdd_rstr_probed;
static bool psm_enabled;
static bool psm_nodes_called;
static bool psm_probed;
static bool hotplug_enabled;
static bool freq_mitigation_enabled;
static bool ocr_enabled;
static bool ocr_nodes_called;
static bool ocr_probed;
/* TSENS software-id -> hardware-id map (allocated at init) */
static int *tsens_id_map;
static DEFINE_MUTEX(vdd_rstr_mutex);
static DEFINE_MUTEX(psm_mutex);
static DEFINE_MUTEX(ocr_mutex);
/* Minimum frequency request handed to the freq mitigation task */
static uint32_t min_freq_limit;

/* Slots in cpu_info.threshold[]; hotplug and freq each get a hi/low pair */
enum thermal_threshold {
	HOTPLUG_THRESHOLD_HIGH,
	HOTPLUG_THRESHOLD_LOW,
	FREQ_THRESHOLD_HIGH,
	FREQ_THRESHOLD_LOW,
	THRESHOLD_MAX_NR,
};
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -070094
/* Per-CPU mitigation bookkeeping (thresholds, user overrides, limits) */
struct cpu_info {
	uint32_t cpu;
	const char *sensor_type;
	uint32_t sensor_id;
	bool offline;			/* thermal wants this CPU offline */
	bool user_offline;		/* userspace requested offline */
	bool hotplug_thresh_clear;	/* re-arm hotplug threshold */
	struct sensor_threshold threshold[THRESHOLD_MAX_NR];
	bool max_freq;
	uint32_t user_max_freq;
	uint32_t user_min_freq;
	uint32_t limited_max_freq;	/* clamp applied via cpufreq notifier */
	uint32_t limited_min_freq;
	bool freq_thresh_clear;		/* re-arm freq threshold */
};

/* A voltage-restriction rail; curr_level == -1 means restriction off */
struct rail {
	const char *name;
	uint32_t freq_req;		/* 1: express level as min CPU freq */
	uint32_t min_level;
	uint32_t num_levels;
	int32_t curr_level;
	uint32_t levels[3];
	struct kobj_attribute value_attr;
	struct kobj_attribute level_attr;
	struct regulator *reg;
	struct attribute_group attr_gp;
};

/* PMIC rail used for PSM mode switching and/or optimum-current requests */
struct psm_rail {
	const char *name;
	uint8_t init;
	uint8_t mode;
	struct kobj_attribute mode_attr;
	struct rpm_regulator *reg;
	struct regulator *phase_reg;
	struct attribute_group attr_gp;
};

static struct psm_rail *psm_rails;
static struct psm_rail *ocr_rails;
static struct rail *rails;
static struct cpu_info cpus[NR_CPUS];

/* Backing object for the vdd restriction "enabled" sysfs node */
struct vdd_rstr_enable {
	struct kobj_attribute ko_attr;
	uint32_t enabled;
};

/* For SMPS only */
enum PMIC_SW_MODE {
	PMIC_AUTO_MODE  = RPM_REGULATOR_MODE_AUTO,
	PMIC_IPEAK_MODE = RPM_REGULATOR_MODE_IPEAK,
	PMIC_PWM_MODE   = RPM_REGULATOR_MODE_HPM,
};

/* Optimum current request levels; _NR doubles as "not yet initialized" */
enum ocr_request {
	OPTIMUM_CURRENT_MIN,
	OPTIMUM_CURRENT_MAX,
	OPTIMUM_CURRENT_NR,
};

/* Fill slot j of _rail's attribute group with a read-only vdd node */
#define VDD_RES_RO_ATTRIB(_rail, ko_attr, j, _name) \
	ko_attr.attr.name = __stringify(_name); \
	ko_attr.attr.mode = 0444; \
	ko_attr.show = vdd_rstr_reg_##_name##_show; \
	ko_attr.store = NULL; \
	sysfs_attr_init(&ko_attr.attr); \
	_rail.attr_gp.attrs[j] = &ko_attr.attr;

/* Fill slot j of _rail's attribute group with a read-write vdd node */
#define VDD_RES_RW_ATTRIB(_rail, ko_attr, j, _name) \
	ko_attr.attr.name = __stringify(_name); \
	ko_attr.attr.mode = 0644; \
	ko_attr.show = vdd_rstr_reg_##_name##_show; \
	ko_attr.store = vdd_rstr_reg_##_name##_store; \
	sysfs_attr_init(&ko_attr.attr); \
	_rail.attr_gp.attrs[j] = &ko_attr.attr;

#define VDD_RSTR_ENABLE_FROM_ATTRIBS(attr) \
	(container_of(attr, struct vdd_rstr_enable, ko_attr));

#define VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr) \
	(container_of(attr, struct rail, value_attr));

#define VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr) \
	(container_of(attr, struct rail, level_attr));

/* Fill slot j of _rail's attribute group with a read-write OCR node */
#define OCR_RW_ATTRIB(_rail, ko_attr, j, _name) \
	ko_attr.attr.name = __stringify(_name); \
	ko_attr.attr.mode = 0644; \
	ko_attr.show = ocr_reg_##_name##_show; \
	ko_attr.store = ocr_reg_##_name##_store; \
	sysfs_attr_init(&ko_attr.attr); \
	_rail.attr_gp.attrs[j] = &ko_attr.attr;

/* Fill slot j of _rail's attribute group with a read-write PSM node */
#define PSM_RW_ATTRIB(_rail, ko_attr, j, _name) \
	ko_attr.attr.name = __stringify(_name); \
	ko_attr.attr.mode = 0644; \
	ko_attr.show = psm_reg_##_name##_show; \
	ko_attr.store = psm_reg_##_name##_store; \
	sysfs_attr_init(&ko_attr.attr); \
	_rail.attr_gp.attrs[j] = &ko_attr.attr;

#define PSM_REG_MODE_FROM_ATTRIBS(attr) \
	(container_of(attr, struct psm_rail, mode_attr));
Jennifer Liud8b1e1e2013-08-27 20:54:22 -0700200
/*
 * cpufreq policy notifier: on CPUFREQ_INCOMPATIBLE, clamp the policy to
 * the thermal limits recorded for this CPU in cpus[].  The max<min check
 * is diagnostic only; the clamp has already been applied at that point.
 */
static int msm_thermal_cpufreq_callback(struct notifier_block *nfb,
		unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	uint32_t max_freq_req = cpus[policy->cpu].limited_max_freq;
	uint32_t min_freq_req = cpus[policy->cpu].limited_min_freq;

	switch (event) {
	case CPUFREQ_INCOMPATIBLE:
		pr_debug("%s: mitigating cpu %d to freq max: %u min: %u\n",
		KBUILD_MODNAME, policy->cpu, max_freq_req, min_freq_req);

		cpufreq_verify_within_limits(policy, min_freq_req,
			max_freq_req);

		if (max_freq_req < min_freq_req)
			pr_err("Invalid frequency request Max:%u Min:%u\n",
				max_freq_req, min_freq_req);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block msm_thermal_cpufreq_notifier = {
	.notifier_call = msm_thermal_cpufreq_callback,
};
227
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -0700228/* If freq table exists, then we can send freq request */
229static int check_freq_table(void)
230{
231 int ret = 0;
232 struct cpufreq_frequency_table *table = NULL;
233
234 table = cpufreq_frequency_get_table(0);
235 if (!table) {
236 pr_debug("%s: error reading cpufreq table\n", __func__);
237 return -EINVAL;
238 }
239 freq_table_get = 1;
240
241 return ret;
242}
243
/*
 * Ask cpufreq to re-evaluate @cpu's policy so updated thermal limits
 * take effect; offline CPUs are skipped.
 */
static void update_cpu_freq(int cpu)
{
	if (!cpu_online(cpu))
		return;

	if (cpufreq_update_policy(cpu))
		pr_err("Unable to update policy for cpu:%d\n", cpu);
}
251
/*
 * Apply a minimum-frequency floor to all CPUs, either by handing it to
 * the freq mitigation task (if running) or by updating each CPU's
 * cpufreq policy directly.
 */
static int update_cpu_min_freq_all(uint32_t min)
{
	uint32_t cpu = 0;
	int ret = 0;

	if (!freq_table_get) {
		ret = check_freq_table();
		if (ret) {
			pr_err("%s:Fail to get freq table\n", KBUILD_MODNAME);
			return ret;
		}
	}
	/* If min is larger than allowed max */
	/*
	 * NOTE(review): this reads the file-scope `table`, which is set by
	 * msm_thermal_get_freq_table(); check_freq_table() above only sets
	 * the freq_table_get flag.  Presumably init order guarantees `table`
	 * is populated before this runs — TODO confirm.
	 */
	min = min(min, table[limit_idx_high].frequency);

	if (freq_mitigation_task) {
		/* Hand off to the mitigation kthread */
		min_freq_limit = min;
		complete(&freq_mitigation_complete);
	} else {
		get_online_cpus();
		for_each_possible_cpu(cpu) {
			cpus[cpu].limited_min_freq = min;
			update_cpu_freq(cpu);
		}
		put_online_cpus();
	}

	return ret;
}
281
282static int vdd_restriction_apply_freq(struct rail *r, int level)
283{
284 int ret = 0;
285
Jennifer Liu273d2962013-04-19 11:43:04 -0700286 if (level == r->curr_level)
287 return ret;
288
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -0700289 /* level = -1: disable, level = 0,1,2..n: enable */
290 if (level == -1) {
291 ret = update_cpu_min_freq_all(r->min_level);
292 if (ret)
293 return ret;
294 else
295 r->curr_level = -1;
296 } else if (level >= 0 && level < (r->num_levels)) {
297 ret = update_cpu_min_freq_all(r->levels[level]);
298 if (ret)
299 return ret;
300 else
301 r->curr_level = level;
302 } else {
303 pr_err("level input:%d is not within range\n", level);
304 return -EINVAL;
305 }
306
307 return ret;
308}
309
/*
 * Apply a vdd restriction by raising the rail's voltage floor.
 * level == -1 disables (floor = r->min_level); 0..num_levels-1 selects
 * a voltage from the rail's level table.  The ceiling is always the
 * highest table entry.  r->curr_level tracks the applied level.
 */
static int vdd_restriction_apply_voltage(struct rail *r, int level)
{
	int ret = 0;

	if (r->reg == NULL) {
		pr_info("Do not have regulator handle:%s, can't apply vdd\n",
				r->name);
		return -EFAULT;
	}
	if (level == r->curr_level)
		return ret;

	/* level = -1: disable, level = 0,1,2..n: enable */
	if (level == -1) {
		ret = regulator_set_voltage(r->reg, r->min_level,
			r->levels[r->num_levels - 1]);
		if (!ret)
			r->curr_level = -1;
	} else if (level >= 0 && level < (r->num_levels)) {
		ret = regulator_set_voltage(r->reg, r->levels[level],
			r->levels[r->num_levels - 1]);
		if (!ret)
			r->curr_level = level;
	} else {
		pr_err("level input:%d is not within range\n", level);
		return -EINVAL;
	}

	return ret;
}
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -0700340
Jennifer Liu907922c2013-03-26 11:18:00 -0700341/* Setting all rails the same mode */
342static int psm_set_mode_all(int mode)
343{
344 int i = 0;
345 int fail_cnt = 0;
346 int ret = 0;
347
348 for (i = 0; i < psm_rails_cnt; i++) {
349 if (psm_rails[i].mode != mode) {
350 ret = rpm_regulator_set_mode(psm_rails[i].reg, mode);
351 if (ret) {
352 pr_err("Cannot set mode:%d for %s",
353 mode, psm_rails[i].name);
354 fail_cnt++;
355 } else
356 psm_rails[i].mode = mode;
357 }
358 }
359
360 return fail_cnt ? (-EFAULT) : ret;
361}
362
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -0700363static int vdd_rstr_en_show(
364 struct kobject *kobj, struct kobj_attribute *attr, char *buf)
365{
366 struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr);
367
368 return snprintf(buf, PAGE_SIZE, "%d\n", en->enabled);
369}
370
/*
 * sysfs store for the vdd restriction "enabled" node.  Parses a boolean
 * and enables/disables the restriction on every rail; failure on one
 * rail does not stop the others.  en->enabled flips to 1 as soon as one
 * rail is enabled, and to 0 only when every rail was disabled.
 */
static ssize_t vdd_rstr_en_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = 0;
	int i = 0;
	uint8_t en_cnt = 0;
	uint8_t dis_cnt = 0;
	uint32_t val = 0;
	struct kernel_param kp;
	struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr);

	mutex_lock(&vdd_rstr_mutex);
	/*
	 * NOTE(review): param_set_bool() is pointed at a uint32_t here;
	 * presumably fine on this kernel's param_set_bool ABI — confirm
	 * it writes no more than sizeof(val) bytes.
	 */
	kp.arg = &val;
	ret = param_set_bool(buf, &kp);
	if (ret) {
		pr_err("Invalid input %s for enabled\n", buf);
		goto done_vdd_rstr_en;
	}

	/* Already disabled: nothing to do */
	if ((val == 0) && (en->enabled == 0))
		goto done_vdd_rstr_en;

	for (i = 0; i < rails_cnt; i++) {
		if (rails[i].freq_req == 1 && freq_table_get)
			ret = vdd_restriction_apply_freq(&rails[i],
					(val) ? 0 : -1);
		else
			ret = vdd_restriction_apply_voltage(&rails[i],
					(val) ? 0 : -1);

		/*
		 * Even if fail to set one rail, still try to set the
		 * others. Continue the loop
		 */
		if (ret)
			pr_err("Set vdd restriction for %s failed\n",
					rails[i].name);
		else {
			if (val)
				en_cnt++;
			else
				dis_cnt++;
		}
	}
	/* As long as one rail is enabled, vdd rstr is enabled */
	if (val && en_cnt)
		en->enabled = 1;
	else if (!val && (dis_cnt == rails_cnt))
		en->enabled = 0;

done_vdd_rstr_en:
	mutex_unlock(&vdd_rstr_mutex);
	return count;
}
425
/* vdd restriction "enabled" sysfs knob; restriction is on by default */
static struct vdd_rstr_enable vdd_rstr_en = {
	.ko_attr.attr.name = __stringify(enabled),
	.ko_attr.attr.mode = 0644,
	.ko_attr.show = vdd_rstr_en_show,
	.ko_attr.store = vdd_rstr_en_store,
	.enabled = 1,
};

static struct attribute *vdd_rstr_en_attribs[] = {
	&vdd_rstr_en.ko_attr.attr,
	NULL,
};

static struct attribute_group vdd_rstr_en_attribs_gp = {
	.attrs = vdd_rstr_en_attribs,
};
442
443static int vdd_rstr_reg_value_show(
444 struct kobject *kobj, struct kobj_attribute *attr, char *buf)
445{
446 int val = 0;
447 struct rail *reg = VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr);
448 /* -1:disabled, -2:fail to get regualtor handle */
449 if (reg->curr_level < 0)
450 val = reg->curr_level;
451 else
452 val = reg->levels[reg->curr_level];
453
Jennifer Liu273d2962013-04-19 11:43:04 -0700454 return snprintf(buf, PAGE_SIZE, "%d\n", val);
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -0700455}
456
457static int vdd_rstr_reg_level_show(
458 struct kobject *kobj, struct kobj_attribute *attr, char *buf)
459{
460 struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr);
461 return snprintf(buf, PAGE_SIZE, "%d\n", reg->curr_level);
462}
463
464static ssize_t vdd_rstr_reg_level_store(struct kobject *kobj,
465 struct kobj_attribute *attr, const char *buf, size_t count)
466{
467 int ret = 0;
468 int val = 0;
469
470 struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr);
471
472 mutex_lock(&vdd_rstr_mutex);
473 if (vdd_rstr_en.enabled == 0)
474 goto done_store_level;
475
Archana Sathyakumar143b0b52013-04-09 14:24:32 -0600476 ret = kstrtouint(buf, 10, &val);
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -0700477 if (ret) {
478 pr_err("Invalid input %s for level\n", buf);
479 goto done_store_level;
480 }
481
482 if (val < 0 || val > reg->num_levels - 1) {
483 pr_err(" Invalid number %d for level\n", val);
484 goto done_store_level;
485 }
486
487 if (val != reg->curr_level) {
488 if (reg->freq_req == 1 && freq_table_get)
489 update_cpu_min_freq_all(reg->levels[val]);
490 else {
491 ret = vdd_restriction_apply_voltage(reg, val);
492 if (ret) {
493 pr_err( \
494 "Set vdd restriction for regulator %s failed\n",
495 reg->name);
496 goto done_store_level;
497 }
498 }
499 reg->curr_level = val;
500 }
501
502done_store_level:
503 mutex_unlock(&vdd_rstr_mutex);
504 return count;
505}
Eugene Seah2ee4a5d2012-06-25 18:16:41 -0600506
/*
 * Vote the rail's phase regulator for either the maximum current
 * (MAX_CURRENT_UA) or zero, depending on @req.  Returns 0 on success.
 */
static int request_optimum_current(struct psm_rail *rail, enum ocr_request req)
{
	int ret = 0;

	if ((!rail) || (req >= OPTIMUM_CURRENT_NR) ||
		(req < 0)) {
		pr_err("%s:%s Invalid input\n", KBUILD_MODNAME, __func__);
		ret = -EINVAL;
		goto request_ocr_exit;
	}

	ret = regulator_set_optimum_mode(rail->phase_reg,
		(req == OPTIMUM_CURRENT_MAX) ? MAX_CURRENT_UA : 0);
	if (ret < 0) {
		pr_err("%s: Optimum current request failed\n", KBUILD_MODNAME);
		goto request_ocr_exit;
	}
	ret = 0; /*regulator_set_optimum_mode returns the mode on success*/
	pr_debug("%s: Requested optimum current mode: %d\n",
		KBUILD_MODNAME, req);

request_ocr_exit:
	return ret;
}
531
532static int ocr_set_mode_all(enum ocr_request req)
533{
534 int ret = 0, i;
535
536 for (i = 0; i < ocr_rail_cnt; i++) {
537 if (ocr_rails[i].mode == req)
538 continue;
539 ret = request_optimum_current(&ocr_rails[i], req);
540 if (ret)
541 goto ocr_set_mode_exit;
542 ocr_rails[i].mode = req;
543 }
544
545ocr_set_mode_exit:
546 return ret;
547}
548
549static int ocr_reg_mode_show(struct kobject *kobj,
550 struct kobj_attribute *attr, char *buf)
551{
552 struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
553 return snprintf(buf, PAGE_SIZE, "%d\n", reg->mode);
554}
555
/*
 * sysfs store for an OCR rail's "mode" node.  Accepts only
 * OPTIMUM_CURRENT_MIN or OPTIMUM_CURRENT_MAX; silently ignored when OCR
 * mitigation is disabled.  Always returns count (write never "fails"
 * from userspace's point of view).
 */
static ssize_t ocr_reg_mode_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = 0;
	int val = 0;
	struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);

	if (!ocr_enabled)
		return count;

	mutex_lock(&ocr_mutex);
	ret = kstrtoint(buf, 10, &val);
	if (ret) {
		pr_err("%s: Invalid input %s for mode\n",
			KBUILD_MODNAME, buf);
		goto done_ocr_store;
	}

	if ((val != OPTIMUM_CURRENT_MAX) &&
		(val != OPTIMUM_CURRENT_MIN)) {
		pr_err("%s: Invalid value %d for mode\n",
			KBUILD_MODNAME, val);
		goto done_ocr_store;
	}

	/* Only touch the regulator when the mode actually changes */
	if (val != reg->mode) {
		ret = request_optimum_current(reg, val);
		if (ret)
			goto done_ocr_store;
		reg->mode = val;
	}

done_ocr_store:
	mutex_unlock(&ocr_mutex);
	return count;
}
592
Jennifer Liu907922c2013-03-26 11:18:00 -0700593static int psm_reg_mode_show(
594 struct kobject *kobj, struct kobj_attribute *attr, char *buf)
595{
596 struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
597 return snprintf(buf, PAGE_SIZE, "%d\n", reg->mode);
598}
599
/*
 * sysfs store for a PSM rail's "mode" node.  Accepts PMIC_PWM_MODE or
 * PMIC_AUTO_MODE and pushes the change to the RPM regulator.  Always
 * returns count; errors are logged only.
 */
static ssize_t psm_reg_mode_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = 0;
	int val = 0;
	struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);

	mutex_lock(&psm_mutex);
	ret = kstrtoint(buf, 10, &val);
	if (ret) {
		pr_err("Invalid input %s for mode\n", buf);
		goto done_psm_store;
	}

	if ((val != PMIC_PWM_MODE) && (val != PMIC_AUTO_MODE)) {
		pr_err(" Invalid number %d for mode\n", val);
		goto done_psm_store;
	}

	/* Only touch the regulator when the mode actually changes */
	if (val != reg->mode) {
		ret = rpm_regulator_set_mode(reg->reg, val);
		if (ret) {
			pr_err( \
			"Fail to set PMIC SW Mode:%d for %s\n",
			val, reg->name);
			goto done_psm_store;
		}
		reg->mode = val;
	}

done_psm_store:
	mutex_unlock(&psm_mutex);
	return count;
}
634
Jennifer Liu5a3518c2013-04-17 11:53:51 -0700635static int check_sensor_id(int sensor_id)
636{
637 int i = 0;
638 bool hw_id_found;
639 int ret = 0;
640
641 for (i = 0; i < max_tsens_num; i++) {
642 if (sensor_id == tsens_id_map[i]) {
643 hw_id_found = true;
644 break;
645 }
646 }
647 if (!hw_id_found) {
648 pr_err("%s: Invalid sensor hw id :%d\n", __func__, sensor_id);
649 return -EINVAL;
650 }
651
652 return ret;
653}
654
/*
 * Allocate and fill tsens_id_map, translating each TSENS software id to
 * its hardware id.  -ENXIO from the translation means the mapping is
 * the identity for that sensor.  On any other failure the map is freed.
 */
static int create_sensor_id_map(void)
{
	int i = 0;
	int ret = 0;

	tsens_id_map = kzalloc(sizeof(int) * max_tsens_num,
			GFP_KERNEL);
	if (!tsens_id_map) {
		pr_err("%s: Cannot allocate memory for tsens_id_map\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < max_tsens_num; i++) {
		ret = tsens_get_hw_id_mapping(i, &tsens_id_map[i]);
		/* If return -ENXIO, hw_id is default in sequence */
		if (ret) {
			if (ret == -ENXIO) {
				tsens_id_map[i] = i;
				ret = 0;
			} else {
				pr_err( \
				"%s: Failed to get hw id for sw id %d\n",
				__func__, i);
				goto fail;
			}
		}
	}

	return ret;
fail:
	kfree(tsens_id_map);
	return ret;
}
689
/*
 * Enable (en=1) or disable (en=0) the vdd restriction on every rail.
 * All rails are attempted; vdd_rstr_en.enabled flips to 1 when at least
 * one rail is enabled and to 0 only when all were disabled.  Returns
 * -EFAULT if any rail failed, else 0.
 */
static int vdd_restriction_apply_all(int en)
{
	int i = 0;
	int en_cnt = 0;
	int dis_cnt = 0;
	int fail_cnt = 0;
	int ret = 0;

	for (i = 0; i < rails_cnt; i++) {
		if (rails[i].freq_req == 1 && freq_table_get)
			ret = vdd_restriction_apply_freq(&rails[i],
					en ? 0 : -1);
		else
			ret = vdd_restriction_apply_voltage(&rails[i],
					en ? 0 : -1);
		if (ret) {
			pr_err("Cannot set voltage for %s", rails[i].name);
			fail_cnt++;
		} else {
			if (en)
				en_cnt++;
			else
				dis_cnt++;
		}
	}

	/* As long as one rail is enabled, vdd rstr is enabled */
	if (en && en_cnt)
		vdd_rstr_en.enabled = 1;
	else if (!en && (dis_cnt == rails_cnt))
		vdd_rstr_en.enabled = 0;

	/*
	 * Check fail_cnt again to make sure all of the rails are applied
	 * restriction successfully or not
	 */
	if (fail_cnt)
		return -EFAULT;
	return ret;
}
731
/*
 * Cache the CPU0 cpufreq table in the file-scope `table` and compute
 * the index bounds used for frequency mitigation.  BUG_ON guards a
 * degenerate table (fewer than two entries), which would make the
 * clamping logic meaningless.
 */
static int msm_thermal_get_freq_table(void)
{
	int ret = 0;
	int i = 0;

	table = cpufreq_frequency_get_table(0);
	if (table == NULL) {
		pr_debug("%s: error reading cpufreq table\n", KBUILD_MODNAME);
		ret = -EINVAL;
		goto fail;
	}

	/* Count entries; the table is CPUFREQ_TABLE_END terminated */
	while (table[i].frequency != CPUFREQ_TABLE_END)
		i++;

	limit_idx_low = 0;
	limit_idx_high = limit_idx = i - 1;
	BUG_ON(limit_idx_high <= 0 || limit_idx_high <= limit_idx_low);
fail:
	return ret;
}
753
Ram Chandrasekarb2f16712013-10-02 11:06:42 -0600754static int set_and_activate_threshold(uint32_t sensor_id,
755 struct sensor_threshold *threshold)
756{
757 int ret = 0;
758
759 ret = sensor_set_trip(sensor_id, threshold);
760 if (ret != 0) {
761 pr_err("%s: Error in setting trip %d\n",
762 KBUILD_MODNAME, threshold->trip);
763 goto set_done;
764 }
765
766 ret = sensor_activate_trip(sensor_id, threshold, true);
767 if (ret != 0) {
768 pr_err("%s: Error in enabling trip %d\n",
769 KBUILD_MODNAME, threshold->trip);
770 goto set_done;
771 }
772
773set_done:
774 return ret;
775}
776
777static int set_threshold(uint32_t sensor_id,
778 struct sensor_threshold *threshold)
779{
780 struct tsens_device tsens_dev;
781 int i = 0, ret = 0;
782 long temp;
783
784 if ((!threshold) || check_sensor_id(sensor_id)) {
785 pr_err("%s: Invalid input\n", KBUILD_MODNAME);
786 ret = -EINVAL;
787 goto set_threshold_exit;
788 }
789
790 tsens_dev.sensor_num = sensor_id;
791 ret = tsens_get_temp(&tsens_dev, &temp);
792 if (ret) {
793 pr_err("%s: Unable to read TSENS sensor %d\n",
794 KBUILD_MODNAME, tsens_dev.sensor_num);
795 goto set_threshold_exit;
796 }
797 while (i < MAX_THRESHOLD) {
798 switch (threshold[i].trip) {
799 case THERMAL_TRIP_CONFIGURABLE_HI:
800 if (threshold[i].temp >= temp) {
801 ret = set_and_activate_threshold(sensor_id,
802 &threshold[i]);
803 if (ret)
804 goto set_threshold_exit;
805 }
806 break;
807 case THERMAL_TRIP_CONFIGURABLE_LOW:
808 if (threshold[i].temp <= temp) {
809 ret = set_and_activate_threshold(sensor_id,
810 &threshold[i]);
811 if (ret)
812 goto set_threshold_exit;
813 }
814 break;
815 default:
816 break;
817 }
818 i++;
819 }
820set_threshold_exit:
821 return ret;
822}
823
#ifdef CONFIG_SMP
/*
 * Polling-mode core control: above core_limit_temp_degC take one
 * eligible CPU offline per call; once temperature falls below the
 * hysteresis point, bring one back per call.
 */
static void __ref do_core_control(long temp)
{
	int i = 0;
	int ret = 0;

	if (!core_control_enabled)
		return;

	mutex_lock(&core_control_mutex);
	if (msm_thermal_info.core_control_mask &&
		temp >= msm_thermal_info.core_limit_temp_degC) {
		/*
		 * NOTE(review): the loop runs from num_possible_cpus()
		 * down to 1, so CPU0 is never offlined and the first
		 * index is one past the highest valid CPU — presumably
		 * relying on core_control_mask filtering; confirm.
		 */
		for (i = num_possible_cpus(); i > 0; i--) {
			if (!(msm_thermal_info.core_control_mask & BIT(i)))
				continue;
			/* Skip CPUs we already took offline */
			if (cpus_offlined & BIT(i) && !cpu_online(i))
				continue;
			pr_info("%s: Set Offline: CPU%d Temp: %ld\n",
					KBUILD_MODNAME, i, temp);
			ret = cpu_down(i);
			if (ret)
				pr_err("%s: Error %d offline core %d\n",
					KBUILD_MODNAME, ret, i);
			cpus_offlined |= BIT(i);
			break;
		}
	} else if (msm_thermal_info.core_control_mask && cpus_offlined &&
		temp <= (msm_thermal_info.core_limit_temp_degC -
			msm_thermal_info.core_temp_hysteresis_degC)) {
		for (i = 0; i < num_possible_cpus(); i++) {
			if (!(cpus_offlined & BIT(i)))
				continue;
			cpus_offlined &= ~BIT(i);
			pr_info("%s: Allow Online CPU%d Temp: %ld\n",
					KBUILD_MODNAME, i, temp);
			/*
			 * If this core is already online, then bring up the
			 * next offlined core.
			 */
			if (cpu_online(i))
				continue;
			ret = cpu_up(i);
			if (ret)
				pr_err("%s: Error %d online core %d\n",
					KBUILD_MODNAME, ret, i);
			break;
		}
	}
	mutex_unlock(&core_control_mutex);
}
Jennifer Liu4ff40942013-07-30 15:25:46 -0700874/* Call with core_control_mutex locked */
875static int __ref update_offline_cores(int val)
876{
Ram Chandrasekarb2f16712013-10-02 11:06:42 -0600877 uint32_t cpu = 0;
Jennifer Liu4ff40942013-07-30 15:25:46 -0700878 int ret = 0;
879
880 if (!core_control_enabled)
881 return 0;
882
883 cpus_offlined = msm_thermal_info.core_control_mask & val;
884
885 for_each_possible_cpu(cpu) {
886 if (!(cpus_offlined & BIT(cpu)))
887 continue;
888 if (!cpu_online(cpu))
889 continue;
890 ret = cpu_down(cpu);
891 if (ret)
892 pr_err("%s: Unable to offline cpu%d\n",
893 KBUILD_MODNAME, cpu);
894 }
895 return ret;
896}
897
/*
 * Hotplug mitigation kthread: woken via hotplug_notify_complete, it
 * re-arms cleared hotplug thresholds, rebuilds the offline mask from
 * thermal and user requests, applies it, and notifies sysfs readers.
 */
static __ref int do_hotplug(void *data)
{
	int ret = 0;
	uint32_t cpu = 0, mask = 0;

	if (!core_control_enabled)
		return -EINVAL;

	while (!kthread_should_stop()) {
		wait_for_completion(&hotplug_notify_complete);
		INIT_COMPLETION(hotplug_notify_complete);
		mask = 0;

		mutex_lock(&core_control_mutex);
		for_each_possible_cpu(cpu) {
			/* Re-arm thresholds that fired since last pass */
			if (hotplug_enabled &&
				cpus[cpu].hotplug_thresh_clear) {
				set_threshold(cpus[cpu].sensor_id,
				&cpus[cpu].threshold[HOTPLUG_THRESHOLD_HIGH]);

				cpus[cpu].hotplug_thresh_clear = false;
			}
			if (cpus[cpu].offline || cpus[cpu].user_offline)
				mask |= BIT(cpu);
		}
		if (mask != cpus_offlined)
			update_offline_cores(mask);
		mutex_unlock(&core_control_mutex);
		sysfs_notify(cc_kobj, NULL, "cpus_offlined");
	}

	return ret;
}
Anji Jonnala822b5c42013-05-21 20:09:24 +0530931#else
/* !CONFIG_SMP stubs: core control and hotplug are no-ops on UP builds */
static void do_core_control(long temp)
{
	return;
}

static __ref int do_hotplug(void *data)
{
	return 0;
}
Anji Jonnala822b5c42013-05-21 20:09:24 +0530941#endif
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -0700942
/*
 * Polling-mode optimum current request: if any sensor is at or above
 * ocr_temp_degC, vote all OCR rails to max current; once every sensor
 * is below the hysteresis point (unreadable sensors count as "cool"),
 * drop the vote back to min.
 */
static int do_ocr(void)
{
	struct tsens_device tsens_dev;
	long temp = 0;
	int ret = 0;
	int i = 0, j = 0;
	int auto_cnt = 0;

	if (!ocr_enabled)
		return ret;

	mutex_lock(&ocr_mutex);
	for (i = 0; i < max_tsens_num; i++) {
		tsens_dev.sensor_num = tsens_id_map[i];
		ret = tsens_get_temp(&tsens_dev, &temp);
		if (ret) {
			pr_debug("%s: Unable to read TSENS sensor %d\n",
				__func__, tsens_dev.sensor_num);
			auto_cnt++;
			continue;
		}

		if (temp > msm_thermal_info.ocr_temp_degC) {
			if (ocr_rails[0].init != OPTIMUM_CURRENT_NR)
				for (j = 0; j < ocr_rail_cnt; j++)
					ocr_rails[j].init = OPTIMUM_CURRENT_NR;
			ret = ocr_set_mode_all(OPTIMUM_CURRENT_MAX);
			if (ret)
				pr_err("Error setting max optimum current\n");
			goto do_ocr_exit;
		} else if (temp <= (msm_thermal_info.ocr_temp_degC -
			msm_thermal_info.ocr_temp_hyst_degC))
			auto_cnt++;
	}

	if (auto_cnt == max_tsens_num ||
		ocr_rails[0].init != OPTIMUM_CURRENT_NR) {
		/* 'init' not equal to OPTIMUM_CURRENT_NR means this is the
		** first polling iteration after device probe. During first
		** iteration, if temperature is less than the set point, clear
		** the max current request made and reset the 'init'.
		*/
		if (ocr_rails[0].init != OPTIMUM_CURRENT_NR)
			for (j = 0; j < ocr_rail_cnt; j++)
				ocr_rails[j].init = OPTIMUM_CURRENT_NR;
		ret = ocr_set_mode_all(OPTIMUM_CURRENT_MIN);
		if (ret) {
			pr_err("Error setting min optimum current\n");
			goto do_ocr_exit;
		}
	}

do_ocr_exit:
	mutex_unlock(&ocr_mutex);
	return ret;
}
999
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07001000static int do_vdd_restriction(void)
1001{
1002 struct tsens_device tsens_dev;
1003 long temp = 0;
1004 int ret = 0;
1005 int i = 0;
1006 int dis_cnt = 0;
1007
1008 if (!vdd_rstr_enabled)
1009 return ret;
1010
1011 if (usefreq && !freq_table_get) {
1012 if (check_freq_table())
1013 return ret;
1014 }
1015
1016 mutex_lock(&vdd_rstr_mutex);
1017 for (i = 0; i < max_tsens_num; i++) {
Jennifer Liu5a3518c2013-04-17 11:53:51 -07001018 tsens_dev.sensor_num = tsens_id_map[i];
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07001019 ret = tsens_get_temp(&tsens_dev, &temp);
1020 if (ret) {
1021 pr_debug("%s: Unable to read TSENS sensor %d\n",
1022 __func__, tsens_dev.sensor_num);
1023 dis_cnt++;
1024 continue;
1025 }
Jennifer Liu273d2962013-04-19 11:43:04 -07001026 if (temp <= msm_thermal_info.vdd_rstr_temp_degC) {
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07001027 ret = vdd_restriction_apply_all(1);
1028 if (ret) {
1029 pr_err( \
1030 "Enable vdd rstr votlage for all failed\n");
1031 goto exit;
1032 }
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07001033 goto exit;
Jennifer Liu273d2962013-04-19 11:43:04 -07001034 } else if (temp > msm_thermal_info.vdd_rstr_temp_hyst_degC)
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07001035 dis_cnt++;
1036 }
1037 if (dis_cnt == max_tsens_num) {
1038 ret = vdd_restriction_apply_all(0);
1039 if (ret) {
1040 pr_err("Disable vdd rstr votlage for all failed\n");
1041 goto exit;
1042 }
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07001043 }
1044exit:
1045 mutex_unlock(&vdd_rstr_mutex);
1046 return ret;
1047}
1048
Jennifer Liu907922c2013-03-26 11:18:00 -07001049static int do_psm(void)
1050{
1051 struct tsens_device tsens_dev;
1052 long temp = 0;
1053 int ret = 0;
1054 int i = 0;
1055 int auto_cnt = 0;
1056
1057 mutex_lock(&psm_mutex);
1058 for (i = 0; i < max_tsens_num; i++) {
Jennifer Liu5a3518c2013-04-17 11:53:51 -07001059 tsens_dev.sensor_num = tsens_id_map[i];
Jennifer Liu907922c2013-03-26 11:18:00 -07001060 ret = tsens_get_temp(&tsens_dev, &temp);
1061 if (ret) {
1062 pr_debug("%s: Unable to read TSENS sensor %d\n",
1063 __func__, tsens_dev.sensor_num);
1064 auto_cnt++;
1065 continue;
1066 }
1067
Jennifer Liu273d2962013-04-19 11:43:04 -07001068 /*
1069 * As long as one sensor is above the threshold, set PWM mode
Jennifer Liu907922c2013-03-26 11:18:00 -07001070 * on all rails, and loop stops. Set auto mode when all rails
Jennifer Liu273d2962013-04-19 11:43:04 -07001071 * are below thershold
1072 */
Jennifer Liu907922c2013-03-26 11:18:00 -07001073 if (temp > msm_thermal_info.psm_temp_degC) {
1074 ret = psm_set_mode_all(PMIC_PWM_MODE);
1075 if (ret) {
1076 pr_err("Set pwm mode for all failed\n");
1077 goto exit;
1078 }
1079 break;
1080 } else if (temp <= msm_thermal_info.psm_temp_hyst_degC)
1081 auto_cnt++;
1082 }
1083
1084 if (auto_cnt == max_tsens_num) {
1085 ret = psm_set_mode_all(PMIC_AUTO_MODE);
1086 if (ret) {
1087 pr_err("Set auto mode for all failed\n");
1088 goto exit;
1089 }
1090 }
1091
1092exit:
1093 mutex_unlock(&psm_mutex);
1094 return ret;
1095}
1096
Anji Jonnala7e3c5082013-05-02 00:46:12 +05301097static void __ref do_freq_control(long temp)
Praveen Chidambarama7435ce2013-05-03 12:52:42 -06001098{
Ram Chandrasekar89961312013-11-07 12:03:54 -07001099 uint32_t cpu = 0;
1100 uint32_t max_freq = cpus[cpu].limited_max_freq;
Praveen Chidambarama7435ce2013-05-03 12:52:42 -06001101
1102 if (temp >= msm_thermal_info.limit_temp_degC) {
1103 if (limit_idx == limit_idx_low)
1104 return;
1105
Ram Chandrasekar89961312013-11-07 12:03:54 -07001106 limit_idx -= msm_thermal_info.bootup_freq_step;
Praveen Chidambarama7435ce2013-05-03 12:52:42 -06001107 if (limit_idx < limit_idx_low)
1108 limit_idx = limit_idx_low;
1109 max_freq = table[limit_idx].frequency;
1110 } else if (temp < msm_thermal_info.limit_temp_degC -
1111 msm_thermal_info.temp_hysteresis_degC) {
1112 if (limit_idx == limit_idx_high)
1113 return;
1114
Ram Chandrasekar89961312013-11-07 12:03:54 -07001115 limit_idx += msm_thermal_info.bootup_freq_step;
Praveen Chidambarama7435ce2013-05-03 12:52:42 -06001116 if (limit_idx >= limit_idx_high) {
1117 limit_idx = limit_idx_high;
Jennifer Liud8b1e1e2013-08-27 20:54:22 -07001118 max_freq = UINT_MAX;
Praveen Chidambarama7435ce2013-05-03 12:52:42 -06001119 } else
1120 max_freq = table[limit_idx].frequency;
1121 }
1122
Ram Chandrasekar89961312013-11-07 12:03:54 -07001123 if (max_freq == cpus[cpu].limited_max_freq)
Praveen Chidambarama7435ce2013-05-03 12:52:42 -06001124 return;
1125
1126 /* Update new limits */
Ram Chandrasekar89961312013-11-07 12:03:54 -07001127 get_online_cpus();
Praveen Chidambarama7435ce2013-05-03 12:52:42 -06001128 for_each_possible_cpu(cpu) {
Ram Chandrasekar89961312013-11-07 12:03:54 -07001129 if (!(msm_thermal_info.bootup_freq_control_mask & BIT(cpu)))
Praveen Chidambarama7435ce2013-05-03 12:52:42 -06001130 continue;
Ram Chandrasekar89961312013-11-07 12:03:54 -07001131 cpus[cpu].limited_max_freq = max_freq;
1132 update_cpu_freq(cpu);
Praveen Chidambarama7435ce2013-05-03 12:52:42 -06001133 }
Ram Chandrasekar89961312013-11-07 12:03:54 -07001134 put_online_cpus();
Praveen Chidambarama7435ce2013-05-03 12:52:42 -06001135}
1136
Anji Jonnala7e3c5082013-05-02 00:46:12 +05301137static void __ref check_temp(struct work_struct *work)
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001138{
Eugene Seah2ee4a5d2012-06-25 18:16:41 -06001139 static int limit_init;
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001140 struct tsens_device tsens_dev;
Praveen Chidambaram0c6ab952013-02-07 17:47:16 -07001141 long temp = 0;
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001142 int ret = 0;
Praveen Chidambarama7435ce2013-05-03 12:52:42 -06001143
Praveen Chidambaram91814362012-05-25 17:36:07 -06001144 tsens_dev.sensor_num = msm_thermal_info.sensor_id;
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001145 ret = tsens_get_temp(&tsens_dev, &temp);
1146 if (ret) {
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001147 pr_debug("%s: Unable to read TSENS sensor %d\n",
1148 KBUILD_MODNAME, tsens_dev.sensor_num);
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001149 goto reschedule;
1150 }
1151
Eugene Seah2ee4a5d2012-06-25 18:16:41 -06001152 if (!limit_init) {
1153 ret = msm_thermal_get_freq_table();
1154 if (ret)
1155 goto reschedule;
1156 else
1157 limit_init = 1;
1158 }
Praveen Chidambaram91814362012-05-25 17:36:07 -06001159
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001160 do_core_control(temp);
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07001161 do_vdd_restriction();
Jennifer Liu907922c2013-03-26 11:18:00 -07001162 do_psm();
Ram Chandrasekar37716582013-12-10 16:09:30 -07001163 do_ocr();
Praveen Chidambarama7435ce2013-05-03 12:52:42 -06001164 do_freq_control(temp);
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001165
1166reschedule:
1167 if (enabled)
1168 schedule_delayed_work(&check_temp_work,
Praveen Chidambaram91814362012-05-25 17:36:07 -06001169 msecs_to_jiffies(msm_thermal_info.poll_ms));
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001170}
1171
Anji Jonnala7e3c5082013-05-02 00:46:12 +05301172static int __ref msm_thermal_cpu_callback(struct notifier_block *nfb,
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001173 unsigned long action, void *hcpu)
1174{
Ram Chandrasekar89961312013-11-07 12:03:54 -07001175 uint32_t cpu = (uint32_t)hcpu;
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001176
1177 if (action == CPU_UP_PREPARE || action == CPU_UP_PREPARE_FROZEN) {
1178 if (core_control_enabled &&
1179 (msm_thermal_info.core_control_mask & BIT(cpu)) &&
1180 (cpus_offlined & BIT(cpu))) {
Jennifer Liu4ff40942013-07-30 15:25:46 -07001181 pr_debug(
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001182 "%s: Preventing cpu%d from coming online.\n",
1183 KBUILD_MODNAME, cpu);
1184 return NOTIFY_BAD;
1185 }
1186 }
1187
1188
1189 return NOTIFY_OK;
1190}
1191
/* Registered while core control is active; see msm_thermal_cpu_callback */
static struct notifier_block __refdata msm_thermal_cpu_notifier = {
	.notifier_call = msm_thermal_cpu_callback,
};
1195
Archana Sathyakumar143b0b52013-04-09 14:24:32 -06001196static void thermal_rtc_setup(void)
1197{
1198 ktime_t wakeup_time;
1199 ktime_t curr_time;
1200
1201 curr_time = alarm_get_elapsed_realtime();
1202 wakeup_time = ktime_add_us(curr_time,
1203 (wakeup_ms * USEC_PER_MSEC));
1204 alarm_start_range(&thermal_rtc, wakeup_time,
1205 wakeup_time);
1206 pr_debug("%s: Current Time: %ld %ld, Alarm set to: %ld %ld\n",
1207 KBUILD_MODNAME,
1208 ktime_to_timeval(curr_time).tv_sec,
1209 ktime_to_timeval(curr_time).tv_usec,
1210 ktime_to_timeval(wakeup_time).tv_sec,
1211 ktime_to_timeval(wakeup_time).tv_usec);
1212
1213}
1214
/* Work item scheduled from alarm context to notify sysfs pollers. */
static void timer_work_fn(struct work_struct *work)
{
	sysfs_notify(tt_kobj, NULL, "wakeup_ms");
}
1219
1220static void thermal_rtc_callback(struct alarm *al)
1221{
1222 struct timeval ts;
1223 ts = ktime_to_timeval(alarm_get_elapsed_realtime());
1224 schedule_work(&timer_work);
1225 pr_debug("%s: Time on alarm expiry: %ld %ld\n", KBUILD_MODNAME,
1226 ts.tv_sec, ts.tv_usec);
1227}
1228
Jennifer Liu4ff40942013-07-30 15:25:46 -07001229static int hotplug_notify(enum thermal_trip_type type, int temp, void *data)
1230{
1231 struct cpu_info *cpu_node = (struct cpu_info *)data;
1232
1233 pr_info("%s: %s reach temp threshold: %d\n", KBUILD_MODNAME,
1234 cpu_node->sensor_type, temp);
1235
1236 if (!(msm_thermal_info.core_control_mask & BIT(cpu_node->cpu)))
1237 return 0;
1238 switch (type) {
1239 case THERMAL_TRIP_CONFIGURABLE_HI:
1240 if (!(cpu_node->offline))
1241 cpu_node->offline = 1;
1242 break;
1243 case THERMAL_TRIP_CONFIGURABLE_LOW:
1244 if (cpu_node->offline)
1245 cpu_node->offline = 0;
1246 break;
1247 default:
1248 break;
1249 }
Ram Chandrasekarb2f16712013-10-02 11:06:42 -06001250 if (hotplug_task) {
Ram Chandrasekar89961312013-11-07 12:03:54 -07001251 cpu_node->hotplug_thresh_clear = true;
Jennifer Liu4ff40942013-07-30 15:25:46 -07001252 complete(&hotplug_notify_complete);
Ram Chandrasekarb2f16712013-10-02 11:06:42 -06001253 } else {
Jennifer Liu4ff40942013-07-30 15:25:46 -07001254 pr_err("%s: Hotplug task is not initialized\n", KBUILD_MODNAME);
Ram Chandrasekarb2f16712013-10-02 11:06:42 -06001255 }
Jennifer Liu4ff40942013-07-30 15:25:46 -07001256 return 0;
1257}
/*
 * Adjust cpus offlined bit based on temperature reading.
 * Called once at hotplug_init() so the initial offline state matches
 * the configured threshold range.  Returns 0 on success, -EINVAL if a
 * sensor read fails or the hotplug thread is missing.
 */
static int hotplug_init_cpu_offlined(void)
{
	struct tsens_device tsens_dev;
	long temp = 0;
	uint32_t cpu = 0;

	if (!hotplug_enabled)
		return 0;

	mutex_lock(&core_control_mutex);
	for_each_possible_cpu(cpu) {
		if (!(msm_thermal_info.core_control_mask & BIT(cpus[cpu].cpu)))
			continue;
		tsens_dev.sensor_num = cpus[cpu].sensor_id;
		if (tsens_get_temp(&tsens_dev, &temp)) {
			pr_err("%s: Unable to read TSENS sensor %d\n",
				KBUILD_MODNAME, tsens_dev.sensor_num);
			mutex_unlock(&core_control_mutex);
			return -EINVAL;
		}

		if (temp >= msm_thermal_info.hotplug_temp_degC)
			cpus[cpu].offline = 1;
		else if (temp <= (msm_thermal_info.hotplug_temp_degC -
			msm_thermal_info.hotplug_temp_hysteresis_degC))
			cpus[cpu].offline = 0;
		/* inside the hysteresis band the previous state is kept */
	}
	mutex_unlock(&core_control_mutex);

	if (hotplug_task)
		complete(&hotplug_notify_complete);
	else {
		pr_err("%s: Hotplug task is not initialized\n",
				KBUILD_MODNAME);
		return -EINVAL;
	}
	return 0;
}
1297
/*
 * hotplug_init() - Arm per-cpu hotplug temperature thresholds and start
 * the hotplug worker thread.  Idempotent: returns immediately if the
 * thread already exists.  When hotplug mitigation is disabled only the
 * thread is created (it still serves user_offline requests).
 */
static void hotplug_init(void)
{
	uint32_t cpu = 0;
	struct sensor_threshold *hi_thresh = NULL, *low_thresh = NULL;

	if (hotplug_task)
		return;

	if (!hotplug_enabled)
		goto init_kthread;

	for_each_possible_cpu(cpu) {
		cpus[cpu].sensor_id =
			sensor_get_id((char *)cpus[cpu].sensor_type);
		if (!(msm_thermal_info.core_control_mask & BIT(cpus[cpu].cpu)))
			continue;

		/* high trip -> offline the cpu; low trip -> allow online */
		hi_thresh = &cpus[cpu].threshold[HOTPLUG_THRESHOLD_HIGH];
		low_thresh = &cpus[cpu].threshold[HOTPLUG_THRESHOLD_LOW];
		hi_thresh->temp = msm_thermal_info.hotplug_temp_degC;
		hi_thresh->trip = THERMAL_TRIP_CONFIGURABLE_HI;
		low_thresh->temp = msm_thermal_info.hotplug_temp_degC -
				msm_thermal_info.hotplug_temp_hysteresis_degC;
		low_thresh->trip = THERMAL_TRIP_CONFIGURABLE_LOW;
		hi_thresh->notify = low_thresh->notify = hotplug_notify;
		hi_thresh->data = low_thresh->data = (void *)&cpus[cpu];

		set_threshold(cpus[cpu].sensor_id, hi_thresh);
	}
init_kthread:
	init_completion(&hotplug_notify_complete);
	hotplug_task = kthread_run(do_hotplug, NULL, "msm_thermal:hotplug");
	if (IS_ERR(hotplug_task)) {
		pr_err("%s: Failed to create do_hotplug thread\n",
				KBUILD_MODNAME);
		return;
	}
	/*
	 * Adjust cpus offlined bit when hotplug initializes so that the new
	 * cpus offlined state is based on hotplug threshold range
	 */
	if (hotplug_init_cpu_offlined())
		kthread_stop(hotplug_task);
}
1342
Ram Chandrasekar89961312013-11-07 12:03:54 -07001343static __ref int do_freq_mitigation(void *data)
1344{
1345 int ret = 0;
Ram Chandrasekar64603922013-11-08 16:33:58 -07001346 uint32_t cpu = 0, max_freq_req = 0, min_freq_req = 0;
Ram Chandrasekar89961312013-11-07 12:03:54 -07001347
1348 while (!kthread_should_stop()) {
1349 wait_for_completion(&freq_mitigation_complete);
1350 INIT_COMPLETION(freq_mitigation_complete);
1351
1352 get_online_cpus();
1353 for_each_possible_cpu(cpu) {
1354 max_freq_req = (cpus[cpu].max_freq) ?
1355 msm_thermal_info.freq_limit :
1356 UINT_MAX;
Ram Chandrasekar64603922013-11-08 16:33:58 -07001357 max_freq_req = min(max_freq_req,
1358 cpus[cpu].user_max_freq);
1359
1360 min_freq_req = max(min_freq_limit,
1361 cpus[cpu].user_min_freq);
Ram Chandrasekar89961312013-11-07 12:03:54 -07001362
1363 if ((max_freq_req == cpus[cpu].limited_max_freq)
Ram Chandrasekar64603922013-11-08 16:33:58 -07001364 && (min_freq_req ==
Ram Chandrasekar89961312013-11-07 12:03:54 -07001365 cpus[cpu].limited_min_freq))
1366 goto reset_threshold;
1367
1368 cpus[cpu].limited_max_freq = max_freq_req;
Ram Chandrasekar64603922013-11-08 16:33:58 -07001369 cpus[cpu].limited_min_freq = min_freq_req;
Ram Chandrasekar89961312013-11-07 12:03:54 -07001370 update_cpu_freq(cpu);
1371reset_threshold:
Ram Chandrasekar64603922013-11-08 16:33:58 -07001372 if (freq_mitigation_enabled &&
1373 cpus[cpu].freq_thresh_clear) {
Ram Chandrasekar89961312013-11-07 12:03:54 -07001374 set_threshold(cpus[cpu].sensor_id,
1375 &cpus[cpu].threshold[FREQ_THRESHOLD_HIGH]);
1376
1377 cpus[cpu].freq_thresh_clear = false;
1378 }
1379 }
1380 put_online_cpus();
1381 }
1382 return ret;
1383}
1384
/*
 * freq_mitigation_notify() - TSENS threshold callback for per-cpu
 * frequency mitigation.  Sets (high trip) or clears (low trip) the
 * cpu's max_freq flag and wakes the mitigation thread to apply it.
 */
static int freq_mitigation_notify(enum thermal_trip_type type,
	int temp, void *data)
{
	struct cpu_info *cpu_node = (struct cpu_info *) data;

	pr_debug("%s: %s reached temp threshold: %d\n", KBUILD_MODNAME,
		cpu_node->sensor_type, temp);

	if (!(msm_thermal_info.freq_mitig_control_mask &
		BIT(cpu_node->cpu)))
		return 0;

	switch (type) {
	case THERMAL_TRIP_CONFIGURABLE_HI:
		if (!cpu_node->max_freq) {
			pr_info("%s: Mitigating cpu %d frequency to %d\n",
				KBUILD_MODNAME, cpu_node->cpu,
				msm_thermal_info.freq_limit);

			cpu_node->max_freq = true;
		}
		break;
	case THERMAL_TRIP_CONFIGURABLE_LOW:
		if (cpu_node->max_freq) {
			pr_info("%s: Removing frequency mitigation for cpu%d\n",
				KBUILD_MODNAME, cpu_node->cpu);

			cpu_node->max_freq = false;
		}
		break;
	default:
		break;
	}

	if (freq_mitigation_task) {
		/* request the thread to re-arm this cpu's threshold */
		cpu_node->freq_thresh_clear = true;
		complete(&freq_mitigation_complete);
	} else {
		pr_err("%s: Frequency mitigation task is not initialized\n",
			KBUILD_MODNAME);
	}

	return 0;
}
1429
/*
 * freq_mitigation_init() - Arm per-cpu frequency thresholds and start
 * the mitigation thread.  Idempotent; when frequency mitigation is
 * disabled only the thread is created (it still serves user requests
 * made through msm_thermal_set_frequency()).
 */
static void freq_mitigation_init(void)
{
	uint32_t cpu = 0;
	struct sensor_threshold *hi_thresh = NULL, *low_thresh = NULL;

	if (freq_mitigation_task)
		return;
	if (!freq_mitigation_enabled)
		goto init_freq_thread;

	for_each_possible_cpu(cpu) {
		if (!(msm_thermal_info.freq_mitig_control_mask & BIT(cpu)))
			continue;
		hi_thresh = &cpus[cpu].threshold[FREQ_THRESHOLD_HIGH];
		low_thresh = &cpus[cpu].threshold[FREQ_THRESHOLD_LOW];

		hi_thresh->temp = msm_thermal_info.freq_mitig_temp_degc;
		hi_thresh->trip = THERMAL_TRIP_CONFIGURABLE_HI;
		low_thresh->temp = msm_thermal_info.freq_mitig_temp_degc -
			msm_thermal_info.freq_mitig_temp_hysteresis_degc;
		low_thresh->trip = THERMAL_TRIP_CONFIGURABLE_LOW;
		hi_thresh->notify = low_thresh->notify =
			freq_mitigation_notify;
		hi_thresh->data = low_thresh->data = (void *)&cpus[cpu];

		set_threshold(cpus[cpu].sensor_id, hi_thresh);
	}
init_freq_thread:
	init_completion(&freq_mitigation_complete);
	freq_mitigation_task = kthread_run(do_freq_mitigation, NULL,
		"msm_thermal:freq_mitig");

	if (IS_ERR(freq_mitigation_task)) {
		pr_err("%s: Failed to create frequency mitigation thread\n",
				KBUILD_MODNAME);
		return;
	}
}
1468
Ram Chandrasekar64603922013-11-08 16:33:58 -07001469int msm_thermal_set_frequency(uint32_t cpu, uint32_t freq, bool is_max)
1470{
1471 int ret = 0;
1472
1473 if (cpu >= num_possible_cpus()) {
1474 pr_err("%s: Invalid input\n", KBUILD_MODNAME);
1475 ret = -EINVAL;
1476 goto set_freq_exit;
1477 }
1478
1479 if (is_max) {
1480 if (cpus[cpu].user_max_freq == freq)
1481 goto set_freq_exit;
1482
1483 cpus[cpu].user_max_freq = freq;
1484 } else {
1485 if (cpus[cpu].user_min_freq == freq)
1486 goto set_freq_exit;
1487
1488 cpus[cpu].user_min_freq = freq;
1489 }
1490
1491 if (freq_mitigation_task) {
1492 complete(&freq_mitigation_complete);
1493 } else {
1494 pr_err("%s: Frequency mitigation task is not initialized\n",
1495 KBUILD_MODNAME);
1496 ret = -ESRCH;
1497 goto set_freq_exit;
1498 }
1499
1500set_freq_exit:
1501 return ret;
1502}
1503
Jennifer Liu273d2962013-04-19 11:43:04 -07001504/*
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001505 * We will reset the cpu frequencies limits here. The core online/offline
1506 * status will be carried over to the process stopping the msm_thermal, as
1507 * we dont want to online a core and bring in the thermal issues.
1508 */
Anji Jonnala7e3c5082013-05-02 00:46:12 +05301509static void __ref disable_msm_thermal(void)
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001510{
Ram Chandrasekar89961312013-11-07 12:03:54 -07001511 uint32_t cpu = 0;
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001512
Eugene Seahcbc07532012-04-11 19:32:27 -06001513 /* make sure check_temp is no longer running */
1514 cancel_delayed_work(&check_temp_work);
1515 flush_scheduled_work();
1516
Jennifer Liud8b1e1e2013-08-27 20:54:22 -07001517 get_online_cpus();
Ram Chandrasekar89961312013-11-07 12:03:54 -07001518 for_each_possible_cpu(cpu) {
1519 if (cpus[cpu].limited_max_freq == UINT_MAX &&
1520 cpus[cpu].limited_min_freq == 0)
1521 continue;
1522 cpus[cpu].limited_max_freq = UINT_MAX;
1523 cpus[cpu].limited_min_freq = 0;
1524 update_cpu_freq(cpu);
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001525 }
Jennifer Liud8b1e1e2013-08-27 20:54:22 -07001526 put_online_cpus();
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001527}
1528
Anji Jonnala7e3c5082013-05-02 00:46:12 +05301529static int __ref set_enabled(const char *val, const struct kernel_param *kp)
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001530{
1531 int ret = 0;
1532
1533 ret = param_set_bool(val, kp);
Jennifer Liu4ff40942013-07-30 15:25:46 -07001534 if (!enabled) {
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001535 disable_msm_thermal();
Jennifer Liu4ff40942013-07-30 15:25:46 -07001536 hotplug_init();
Ram Chandrasekar89961312013-11-07 12:03:54 -07001537 freq_mitigation_init();
Jennifer Liu4ff40942013-07-30 15:25:46 -07001538 } else
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001539 pr_info("%s: no action for enabled = %d\n",
Jennifer Liud8b1e1e2013-08-27 20:54:22 -07001540 KBUILD_MODNAME, enabled);
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001541
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001542 pr_info("%s: enabled = %d\n", KBUILD_MODNAME, enabled);
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001543
1544 return ret;
1545}
1546
static struct kernel_param_ops module_ops = {
	.set = set_enabled,
	.get = param_get_bool,
};

/* Exposes /sys/module/msm_thermal/parameters/enabled (see set_enabled) */
module_param_cb(enabled, &module_ops, &enabled, 0644);
MODULE_PARM_DESC(enabled, "enforce thermal limit on cpu");
1554
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001555static ssize_t show_cc_enabled(struct kobject *kobj,
1556 struct kobj_attribute *attr, char *buf)
1557{
1558 return snprintf(buf, PAGE_SIZE, "%d\n", core_control_enabled);
1559}
1560
Anji Jonnala7e3c5082013-05-02 00:46:12 +05301561static ssize_t __ref store_cc_enabled(struct kobject *kobj,
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001562 struct kobj_attribute *attr, const char *buf, size_t count)
1563{
1564 int ret = 0;
1565 int val = 0;
1566
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001567 ret = kstrtoint(buf, 10, &val);
1568 if (ret) {
1569 pr_err("%s: Invalid input %s\n", KBUILD_MODNAME, buf);
1570 goto done_store_cc;
1571 }
1572
1573 if (core_control_enabled == !!val)
1574 goto done_store_cc;
1575
1576 core_control_enabled = !!val;
1577 if (core_control_enabled) {
1578 pr_info("%s: Core control enabled\n", KBUILD_MODNAME);
1579 register_cpu_notifier(&msm_thermal_cpu_notifier);
Jennifer Liu4ff40942013-07-30 15:25:46 -07001580 if (hotplug_task)
1581 complete(&hotplug_notify_complete);
1582 else
1583 pr_err("%s: Hotplug task is not initialized\n",
1584 KBUILD_MODNAME);
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001585 } else {
1586 pr_info("%s: Core control disabled\n", KBUILD_MODNAME);
1587 unregister_cpu_notifier(&msm_thermal_cpu_notifier);
1588 }
1589
1590done_store_cc:
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001591 return count;
1592}
1593
1594static ssize_t show_cpus_offlined(struct kobject *kobj,
1595 struct kobj_attribute *attr, char *buf)
1596{
1597 return snprintf(buf, PAGE_SIZE, "%d\n", cpus_offlined);
1598}
1599
Anji Jonnala7e3c5082013-05-02 00:46:12 +05301600static ssize_t __ref store_cpus_offlined(struct kobject *kobj,
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001601 struct kobj_attribute *attr, const char *buf, size_t count)
1602{
1603 int ret = 0;
1604 uint32_t val = 0;
Ram Chandrasekar89961312013-11-07 12:03:54 -07001605 uint32_t cpu;
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001606
1607 mutex_lock(&core_control_mutex);
1608 ret = kstrtouint(buf, 10, &val);
1609 if (ret) {
1610 pr_err("%s: Invalid input %s\n", KBUILD_MODNAME, buf);
1611 goto done_cc;
1612 }
1613
1614 if (enabled) {
1615 pr_err("%s: Ignoring request; polling thread is enabled.\n",
1616 KBUILD_MODNAME);
1617 goto done_cc;
1618 }
1619
Jennifer Liu4ff40942013-07-30 15:25:46 -07001620 for_each_possible_cpu(cpu) {
1621 if (!(msm_thermal_info.core_control_mask & BIT(cpu)))
1622 continue;
1623 cpus[cpu].user_offline = !!(val & BIT(cpu));
1624 }
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001625
Jennifer Liu4ff40942013-07-30 15:25:46 -07001626 if (hotplug_task)
1627 complete(&hotplug_notify_complete);
1628 else
1629 pr_err("%s: Hotplug task is not initialized\n", KBUILD_MODNAME);
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001630done_cc:
1631 mutex_unlock(&core_control_mutex);
1632 return count;
1633}
1634
Anji Jonnala7e3c5082013-05-02 00:46:12 +05301635static __refdata struct kobj_attribute cc_enabled_attr =
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001636__ATTR(enabled, 0644, show_cc_enabled, store_cc_enabled);
1637
Anji Jonnala7e3c5082013-05-02 00:46:12 +05301638static __refdata struct kobj_attribute cpus_offlined_attr =
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001639__ATTR(cpus_offlined, 0644, show_cpus_offlined, store_cpus_offlined);
1640
Anji Jonnala7e3c5082013-05-02 00:46:12 +05301641static __refdata struct attribute *cc_attrs[] = {
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001642 &cc_enabled_attr.attr,
1643 &cpus_offlined_attr.attr,
1644 NULL,
1645};
1646
Anji Jonnala7e3c5082013-05-02 00:46:12 +05301647static __refdata struct attribute_group cc_attr_group = {
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001648 .attrs = cc_attrs,
1649};
1650
Archana Sathyakumar143b0b52013-04-09 14:24:32 -06001651static ssize_t show_wakeup_ms(struct kobject *kobj,
1652 struct kobj_attribute *attr, char *buf)
1653{
1654 return snprintf(buf, PAGE_SIZE, "%d\n", wakeup_ms);
1655}
1656
/*
 * sysfs write: set the RTC wakeup interval.  A positive value (re)arms
 * the alarm; zero cancels any pending alarm.  Returns the parse error
 * on bad input, otherwise @count.
 */
static ssize_t store_wakeup_ms(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret;
	ret = kstrtouint(buf, 10, &wakeup_ms);

	if (ret) {
		pr_err("%s: Trying to set invalid wakeup timer\n",
				KBUILD_MODNAME);
		return ret;
	}

	if (wakeup_ms > 0) {
		thermal_rtc_setup();
		pr_debug("%s: Timer started for %ums\n", KBUILD_MODNAME,
				wakeup_ms);
	} else {
		ret = alarm_cancel(&thermal_rtc);
		if (ret)
			pr_debug("%s: Timer canceled\n", KBUILD_MODNAME);
		else
			pr_debug("%s: No active timer present to cancel\n",
					KBUILD_MODNAME);

	}
	return count;
}
1684
Anji Jonnala7e3c5082013-05-02 00:46:12 +05301685static __refdata struct kobj_attribute timer_attr =
Archana Sathyakumar143b0b52013-04-09 14:24:32 -06001686__ATTR(wakeup_ms, 0644, show_wakeup_ms, store_wakeup_ms);
1687
Anji Jonnala7e3c5082013-05-02 00:46:12 +05301688static __refdata struct attribute *tt_attrs[] = {
Archana Sathyakumar143b0b52013-04-09 14:24:32 -06001689 &timer_attr.attr,
1690 NULL,
1691};
1692
Anji Jonnala7e3c5082013-05-02 00:46:12 +05301693static __refdata struct attribute_group tt_attr_group = {
Archana Sathyakumar143b0b52013-04-09 14:24:32 -06001694 .attrs = tt_attrs,
1695};
1696
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001697static __init int msm_thermal_add_cc_nodes(void)
1698{
1699 struct kobject *module_kobj = NULL;
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001700 int ret = 0;
1701
1702 module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
1703 if (!module_kobj) {
1704 pr_err("%s: cannot find kobject for module\n",
1705 KBUILD_MODNAME);
1706 ret = -ENOENT;
1707 goto done_cc_nodes;
1708 }
1709
1710 cc_kobj = kobject_create_and_add("core_control", module_kobj);
1711 if (!cc_kobj) {
1712 pr_err("%s: cannot create core control kobj\n",
1713 KBUILD_MODNAME);
1714 ret = -ENOMEM;
1715 goto done_cc_nodes;
1716 }
1717
1718 ret = sysfs_create_group(cc_kobj, &cc_attr_group);
1719 if (ret) {
1720 pr_err("%s: cannot create group\n", KBUILD_MODNAME);
1721 goto done_cc_nodes;
1722 }
1723
1724 return 0;
1725
1726done_cc_nodes:
1727 if (cc_kobj)
1728 kobject_del(cc_kobj);
1729 return ret;
1730}
1731
Archana Sathyakumar143b0b52013-04-09 14:24:32 -06001732static __init int msm_thermal_add_timer_nodes(void)
1733{
1734 struct kobject *module_kobj = NULL;
1735 int ret = 0;
1736
1737 module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
1738 if (!module_kobj) {
1739 pr_err("%s: cannot find kobject for module\n",
1740 KBUILD_MODNAME);
1741 ret = -ENOENT;
1742 goto failed;
1743 }
1744
1745 tt_kobj = kobject_create_and_add("thermal_timer", module_kobj);
1746 if (!tt_kobj) {
1747 pr_err("%s: cannot create timer kobj\n",
1748 KBUILD_MODNAME);
1749 ret = -ENOMEM;
1750 goto failed;
1751 }
1752
1753 ret = sysfs_create_group(tt_kobj, &tt_attr_group);
1754 if (ret) {
1755 pr_err("%s: cannot create group\n", KBUILD_MODNAME);
1756 goto failed;
1757 }
1758
1759 return 0;
1760
1761failed:
1762 if (tt_kobj)
1763 kobject_del(tt_kobj);
1764 return ret;
1765}
1766
Eugene Seahb77b0c42012-07-02 19:28:50 -06001767int __devinit msm_thermal_init(struct msm_thermal_data *pdata)
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001768{
1769 int ret = 0;
Ram Chandrasekar89961312013-11-07 12:03:54 -07001770 uint32_t cpu;
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001771
Ram Chandrasekar52eac782014-02-03 18:37:11 -07001772 for_each_possible_cpu(cpu) {
1773 cpus[cpu].cpu = cpu;
1774 cpus[cpu].offline = 0;
1775 cpus[cpu].user_offline = 0;
1776 cpus[cpu].hotplug_thresh_clear = false;
1777 cpus[cpu].max_freq = false;
1778 cpus[cpu].user_max_freq = UINT_MAX;
1779 cpus[cpu].user_min_freq = 0;
1780 cpus[cpu].limited_max_freq = UINT_MAX;
1781 cpus[cpu].limited_min_freq = 0;
1782 cpus[cpu].freq_thresh_clear = false;
1783 }
Praveen Chidambaram91814362012-05-25 17:36:07 -06001784 BUG_ON(!pdata);
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07001785 tsens_get_max_sensor_num(&max_tsens_num);
Praveen Chidambaram91814362012-05-25 17:36:07 -06001786 memcpy(&msm_thermal_info, pdata, sizeof(struct msm_thermal_data));
1787
Jennifer Liu5a3518c2013-04-17 11:53:51 -07001788 if (create_sensor_id_map())
1789 return -EINVAL;
1790 if (check_sensor_id(msm_thermal_info.sensor_id))
1791 return -EINVAL;
1792
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001793 enabled = 1;
Jennifer Liud8b1e1e2013-08-27 20:54:22 -07001794 ret = cpufreq_register_notifier(&msm_thermal_cpufreq_notifier,
1795 CPUFREQ_POLICY_NOTIFIER);
1796 if (ret)
1797 pr_err("%s: cannot register cpufreq notifier\n",
1798 KBUILD_MODNAME);
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001799 INIT_DELAYED_WORK(&check_temp_work, check_temp);
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001800 schedule_delayed_work(&check_temp_work, 0);
1801
Anji Jonnala822b5c42013-05-21 20:09:24 +05301802 if (num_possible_cpus() > 1)
1803 register_cpu_notifier(&msm_thermal_cpu_notifier);
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07001804
Praveen Chidambaramf248bb72012-01-20 11:38:44 -07001805 return ret;
1806}
Eugene Seahb77b0c42012-07-02 19:28:50 -06001807
Ram Chandrasekar37716582013-12-10 16:09:30 -07001808static int ocr_reg_init(struct platform_device *pdev)
1809{
1810 int ret = 0;
1811 int i, j;
1812
1813 for (i = 0; i < ocr_rail_cnt; i++) {
1814 /* Check if vdd_restriction has already initialized any
1815 * regualtor handle. If so use the same handle.*/
1816 for (j = 0; j < rails_cnt; j++) {
1817 if (!strcmp(ocr_rails[i].name, rails[j].name)) {
1818 if (rails[j].reg == NULL)
1819 break;
1820 ocr_rails[i].phase_reg = rails[j].reg;
1821 goto reg_init;
1822 }
1823
1824 }
1825 ocr_rails[i].phase_reg = devm_regulator_get(&pdev->dev,
1826 ocr_rails[i].name);
1827 if (IS_ERR_OR_NULL(ocr_rails[i].phase_reg)) {
1828 ret = PTR_ERR(ocr_rails[i].phase_reg);
1829 if (ret != -EPROBE_DEFER) {
1830 pr_err("%s, could not get regulator: %s\n",
1831 __func__, ocr_rails[i].name);
1832 ocr_rails[i].phase_reg = NULL;
1833 ocr_rails[i].mode = 0;
1834 ocr_rails[i].init = 0;
1835 }
1836 return ret;
1837 }
1838reg_init:
1839 ocr_rails[i].mode = OPTIMUM_CURRENT_MIN;
1840 }
1841 return ret;
1842}
1843
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07001844static int vdd_restriction_reg_init(struct platform_device *pdev)
1845{
1846 int ret = 0;
1847 int i;
1848
1849 for (i = 0; i < rails_cnt; i++) {
1850 if (rails[i].freq_req == 1) {
1851 usefreq |= BIT(i);
1852 check_freq_table();
Jennifer Liu273d2962013-04-19 11:43:04 -07001853 /*
1854 * Restrict frequency by default until we have made
1855 * our first temp reading
1856 */
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07001857 if (freq_table_get)
1858 ret = vdd_restriction_apply_freq(&rails[i], 0);
1859 else
1860 pr_info("%s:Defer vdd rstr freq init\n",
1861 __func__);
1862 } else {
1863 rails[i].reg = devm_regulator_get(&pdev->dev,
1864 rails[i].name);
1865 if (IS_ERR_OR_NULL(rails[i].reg)) {
1866 ret = PTR_ERR(rails[i].reg);
1867 if (ret != -EPROBE_DEFER) {
1868 pr_err( \
1869 "%s, could not get regulator: %s\n",
1870 rails[i].name, __func__);
1871 rails[i].reg = NULL;
1872 rails[i].curr_level = -2;
1873 return ret;
1874 }
1875 return ret;
1876 }
Jennifer Liu273d2962013-04-19 11:43:04 -07001877 /*
1878 * Restrict votlage by default until we have made
1879 * our first temp reading
1880 */
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07001881 ret = vdd_restriction_apply_voltage(&rails[i], 0);
1882 }
1883 }
1884
1885 return ret;
1886}
1887
Jennifer Liu907922c2013-03-26 11:18:00 -07001888static int psm_reg_init(struct platform_device *pdev)
1889{
1890 int ret = 0;
1891 int i = 0;
1892 int j = 0;
1893
1894 for (i = 0; i < psm_rails_cnt; i++) {
1895 psm_rails[i].reg = rpm_regulator_get(&pdev->dev,
1896 psm_rails[i].name);
1897 if (IS_ERR_OR_NULL(psm_rails[i].reg)) {
1898 ret = PTR_ERR(psm_rails[i].reg);
1899 if (ret != -EPROBE_DEFER) {
1900 pr_err("%s, could not get rpm regulator: %s\n",
1901 psm_rails[i].name, __func__);
1902 psm_rails[i].reg = NULL;
1903 goto psm_reg_exit;
1904 }
1905 return ret;
1906 }
1907 /* Apps default vote for PWM mode */
1908 psm_rails[i].init = PMIC_PWM_MODE;
1909 ret = rpm_regulator_set_mode(psm_rails[i].reg,
1910 psm_rails[i].init);
1911 if (ret) {
1912 pr_err("%s: Cannot set PMIC PWM mode\n", __func__);
1913 return ret;
1914 } else
1915 psm_rails[i].mode = PMIC_PWM_MODE;
1916 }
1917
1918 return ret;
1919
1920psm_reg_exit:
1921 if (ret) {
1922 for (j = 0; j < i; j++) {
1923 if (psm_rails[j].reg != NULL)
1924 rpm_regulator_put(psm_rails[j].reg);
1925 }
1926 }
1927
1928 return ret;
1929}
1930
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07001931static int msm_thermal_add_vdd_rstr_nodes(void)
1932{
1933 struct kobject *module_kobj = NULL;
1934 struct kobject *vdd_rstr_kobj = NULL;
1935 struct kobject *vdd_rstr_reg_kobj[MAX_RAILS] = {0};
1936 int rc = 0;
1937 int i = 0;
1938
1939 if (!vdd_rstr_probed) {
1940 vdd_rstr_nodes_called = true;
1941 return rc;
1942 }
1943
1944 if (vdd_rstr_probed && rails_cnt == 0)
1945 return rc;
1946
1947 module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
1948 if (!module_kobj) {
1949 pr_err("%s: cannot find kobject for module %s\n",
1950 __func__, KBUILD_MODNAME);
1951 rc = -ENOENT;
1952 goto thermal_sysfs_add_exit;
1953 }
1954
1955 vdd_rstr_kobj = kobject_create_and_add("vdd_restriction", module_kobj);
1956 if (!vdd_rstr_kobj) {
1957 pr_err("%s: cannot create vdd_restriction kobject\n", __func__);
1958 rc = -ENOMEM;
1959 goto thermal_sysfs_add_exit;
1960 }
1961
1962 rc = sysfs_create_group(vdd_rstr_kobj, &vdd_rstr_en_attribs_gp);
1963 if (rc) {
1964 pr_err("%s: cannot create kobject attribute group\n", __func__);
1965 rc = -ENOMEM;
1966 goto thermal_sysfs_add_exit;
1967 }
1968
1969 for (i = 0; i < rails_cnt; i++) {
1970 vdd_rstr_reg_kobj[i] = kobject_create_and_add(rails[i].name,
1971 vdd_rstr_kobj);
1972 if (!vdd_rstr_reg_kobj[i]) {
1973 pr_err("%s: cannot create for kobject for %s\n",
1974 __func__, rails[i].name);
1975 rc = -ENOMEM;
1976 goto thermal_sysfs_add_exit;
1977 }
1978
1979 rails[i].attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 3,
1980 GFP_KERNEL);
1981 if (!rails[i].attr_gp.attrs) {
1982 rc = -ENOMEM;
1983 goto thermal_sysfs_add_exit;
1984 }
1985
1986 VDD_RES_RW_ATTRIB(rails[i], rails[i].level_attr, 0, level);
1987 VDD_RES_RO_ATTRIB(rails[i], rails[i].value_attr, 1, value);
1988 rails[i].attr_gp.attrs[2] = NULL;
1989
1990 rc = sysfs_create_group(vdd_rstr_reg_kobj[i],
1991 &rails[i].attr_gp);
1992 if (rc) {
1993 pr_err("%s: cannot create attribute group for %s\n",
1994 __func__, rails[i].name);
1995 goto thermal_sysfs_add_exit;
1996 }
1997 }
1998
1999 return rc;
2000
2001thermal_sysfs_add_exit:
2002 if (rc) {
2003 for (i = 0; i < rails_cnt; i++) {
2004 kobject_del(vdd_rstr_reg_kobj[i]);
2005 kfree(rails[i].attr_gp.attrs);
2006 }
2007 if (vdd_rstr_kobj)
2008 kobject_del(vdd_rstr_kobj);
2009 }
2010 return rc;
2011}
2012
Ram Chandrasekar37716582013-12-10 16:09:30 -07002013static int msm_thermal_add_ocr_nodes(void)
2014{
2015 struct kobject *module_kobj = NULL;
2016 struct kobject *ocr_kobj = NULL;
2017 struct kobject *ocr_reg_kobj[MAX_RAILS] = {0};
2018 int rc = 0;
2019 int i = 0;
2020
2021 if (!ocr_probed) {
2022 ocr_nodes_called = true;
2023 return rc;
2024 }
2025
2026 if (ocr_probed && ocr_rail_cnt == 0)
2027 return rc;
2028
2029 module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
2030 if (!module_kobj) {
2031 pr_err("%s: cannot find kobject for module %s\n",
2032 __func__, KBUILD_MODNAME);
2033 rc = -ENOENT;
2034 goto ocr_node_exit;
2035 }
2036
2037 ocr_kobj = kobject_create_and_add("opt_curr_req", module_kobj);
2038 if (!ocr_kobj) {
2039 pr_err("%s: cannot create ocr kobject\n", KBUILD_MODNAME);
2040 rc = -ENOMEM;
2041 goto ocr_node_exit;
2042 }
2043
2044 for (i = 0; i < ocr_rail_cnt; i++) {
2045 ocr_reg_kobj[i] = kobject_create_and_add(ocr_rails[i].name,
2046 ocr_kobj);
2047 if (!ocr_reg_kobj[i]) {
2048 pr_err("%s: cannot create for kobject for %s\n",
2049 KBUILD_MODNAME, ocr_rails[i].name);
2050 rc = -ENOMEM;
2051 goto ocr_node_exit;
2052 }
2053 ocr_rails[i].attr_gp.attrs = kzalloc( \
2054 sizeof(struct attribute *) * 2, GFP_KERNEL);
2055 if (!ocr_rails[i].attr_gp.attrs) {
2056 rc = -ENOMEM;
2057 goto ocr_node_exit;
2058 }
2059
2060 OCR_RW_ATTRIB(ocr_rails[i], ocr_rails[i].mode_attr, 0, mode);
2061 ocr_rails[i].attr_gp.attrs[1] = NULL;
2062
2063 rc = sysfs_create_group(ocr_reg_kobj[i], &ocr_rails[i].attr_gp);
2064 if (rc) {
2065 pr_err("%s: cannot create attribute group for %s\n",
2066 KBUILD_MODNAME, ocr_rails[i].name);
2067 goto ocr_node_exit;
2068 }
2069 }
2070
2071ocr_node_exit:
2072 if (rc) {
2073 for (i = 0; i < ocr_rail_cnt; i++) {
2074 if (ocr_reg_kobj[i])
2075 kobject_del(ocr_reg_kobj[i]);
2076 if (ocr_rails[i].attr_gp.attrs) {
2077 kfree(ocr_rails[i].attr_gp.attrs);
2078 ocr_rails[i].attr_gp.attrs = NULL;
2079 }
2080 }
2081 if (ocr_kobj)
2082 kobject_del(ocr_kobj);
2083 }
2084 return rc;
2085}
2086
Jennifer Liu907922c2013-03-26 11:18:00 -07002087static int msm_thermal_add_psm_nodes(void)
2088{
2089 struct kobject *module_kobj = NULL;
2090 struct kobject *psm_kobj = NULL;
2091 struct kobject *psm_reg_kobj[MAX_RAILS] = {0};
2092 int rc = 0;
2093 int i = 0;
2094
2095 if (!psm_probed) {
2096 psm_nodes_called = true;
2097 return rc;
2098 }
2099
2100 if (psm_probed && psm_rails_cnt == 0)
2101 return rc;
2102
2103 module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
2104 if (!module_kobj) {
2105 pr_err("%s: cannot find kobject for module %s\n",
2106 __func__, KBUILD_MODNAME);
2107 rc = -ENOENT;
2108 goto psm_node_exit;
2109 }
2110
2111 psm_kobj = kobject_create_and_add("pmic_sw_mode", module_kobj);
2112 if (!psm_kobj) {
2113 pr_err("%s: cannot create psm kobject\n", KBUILD_MODNAME);
2114 rc = -ENOMEM;
2115 goto psm_node_exit;
2116 }
2117
2118 for (i = 0; i < psm_rails_cnt; i++) {
2119 psm_reg_kobj[i] = kobject_create_and_add(psm_rails[i].name,
2120 psm_kobj);
2121 if (!psm_reg_kobj[i]) {
2122 pr_err("%s: cannot create for kobject for %s\n",
2123 KBUILD_MODNAME, psm_rails[i].name);
2124 rc = -ENOMEM;
2125 goto psm_node_exit;
2126 }
2127 psm_rails[i].attr_gp.attrs = kzalloc( \
2128 sizeof(struct attribute *) * 2, GFP_KERNEL);
2129 if (!psm_rails[i].attr_gp.attrs) {
2130 rc = -ENOMEM;
2131 goto psm_node_exit;
2132 }
2133
2134 PSM_RW_ATTRIB(psm_rails[i], psm_rails[i].mode_attr, 0, mode);
2135 psm_rails[i].attr_gp.attrs[1] = NULL;
2136
2137 rc = sysfs_create_group(psm_reg_kobj[i], &psm_rails[i].attr_gp);
2138 if (rc) {
2139 pr_err("%s: cannot create attribute group for %s\n",
2140 KBUILD_MODNAME, psm_rails[i].name);
2141 goto psm_node_exit;
2142 }
2143 }
2144
2145 return rc;
2146
2147psm_node_exit:
2148 if (rc) {
2149 for (i = 0; i < psm_rails_cnt; i++) {
2150 kobject_del(psm_reg_kobj[i]);
2151 kfree(psm_rails[i].attr_gp.attrs);
2152 }
2153 if (psm_kobj)
2154 kobject_del(psm_kobj);
2155 }
2156 return rc;
2157}
2158
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07002159static int probe_vdd_rstr(struct device_node *node,
2160 struct msm_thermal_data *data, struct platform_device *pdev)
2161{
2162 int ret = 0;
2163 int i = 0;
2164 int arr_size;
2165 char *key = NULL;
2166 struct device_node *child_node = NULL;
2167
Jennifer Liuf4e76492013-05-24 13:23:53 -07002168 rails = NULL;
2169
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07002170 key = "qcom,vdd-restriction-temp";
2171 ret = of_property_read_u32(node, key, &data->vdd_rstr_temp_degC);
2172 if (ret)
2173 goto read_node_fail;
2174
2175 key = "qcom,vdd-restriction-temp-hysteresis";
2176 ret = of_property_read_u32(node, key, &data->vdd_rstr_temp_hyst_degC);
2177 if (ret)
2178 goto read_node_fail;
2179
2180 for_each_child_of_node(node, child_node) {
2181 rails_cnt++;
2182 }
2183
2184 if (rails_cnt == 0)
2185 goto read_node_fail;
2186 if (rails_cnt >= MAX_RAILS) {
2187 pr_err("%s: Too many rails.\n", __func__);
2188 return -EFAULT;
2189 }
2190
2191 rails = kzalloc(sizeof(struct rail) * rails_cnt,
2192 GFP_KERNEL);
2193 if (!rails) {
2194 pr_err("%s: Fail to allocate memory for rails.\n", __func__);
2195 return -ENOMEM;
2196 }
2197
2198 i = 0;
2199 for_each_child_of_node(node, child_node) {
2200 key = "qcom,vdd-rstr-reg";
2201 ret = of_property_read_string(child_node, key, &rails[i].name);
2202 if (ret)
2203 goto read_node_fail;
2204
2205 key = "qcom,levels";
2206 if (!of_get_property(child_node, key, &arr_size))
2207 goto read_node_fail;
2208 rails[i].num_levels = arr_size/sizeof(__be32);
2209 if (rails[i].num_levels >
2210 sizeof(rails[i].levels)/sizeof(uint32_t)) {
2211 pr_err("%s: Array size too large\n", __func__);
2212 return -EFAULT;
2213 }
2214 ret = of_property_read_u32_array(child_node, key,
2215 rails[i].levels, rails[i].num_levels);
2216 if (ret)
2217 goto read_node_fail;
2218
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07002219 key = "qcom,freq-req";
2220 rails[i].freq_req = of_property_read_bool(child_node, key);
Jennifer Liu95c4a9a2013-05-03 16:57:23 -07002221 if (rails[i].freq_req)
Jennifer Liud8b1e1e2013-08-27 20:54:22 -07002222 rails[i].min_level = 0;
Jennifer Liu95c4a9a2013-05-03 16:57:23 -07002223 else {
2224 key = "qcom,min-level";
2225 ret = of_property_read_u32(child_node, key,
2226 &rails[i].min_level);
2227 if (ret)
2228 goto read_node_fail;
2229 }
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07002230
Jennifer Liu4bc738d2013-07-16 16:16:37 -07002231 rails[i].curr_level = -1;
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07002232 rails[i].reg = NULL;
2233 i++;
2234 }
2235
2236 if (rails_cnt) {
2237 ret = vdd_restriction_reg_init(pdev);
2238 if (ret) {
2239 pr_info("%s:Failed to get regulators. KTM continues.\n",
2240 __func__);
2241 goto read_node_fail;
2242 }
2243 vdd_rstr_enabled = true;
2244 }
2245read_node_fail:
2246 vdd_rstr_probed = true;
2247 if (ret) {
2248 dev_info(&pdev->dev,
2249 "%s:Failed reading node=%s, key=%s. KTM continues\n",
2250 __func__, node->full_name, key);
2251 kfree(rails);
2252 rails_cnt = 0;
2253 }
2254 if (ret == -EPROBE_DEFER)
2255 vdd_rstr_probed = false;
2256 return ret;
2257}
2258
Ram Chandrasekar37716582013-12-10 16:09:30 -07002259static int probe_ocr(struct device_node *node, struct msm_thermal_data *data,
2260 struct platform_device *pdev)
2261{
2262 int ret = 0;
2263 int j = 0;
2264 char *key = NULL;
2265
2266 if (ocr_probed) {
2267 pr_info("%s: Nodes already probed\n",
2268 __func__);
2269 goto read_ocr_exit;
2270 }
2271 ocr_rails = NULL;
2272
2273 key = "qti,pmic-opt-curr-temp";
2274 ret = of_property_read_u32(node, key, &data->ocr_temp_degC);
2275 if (ret)
2276 goto read_ocr_fail;
2277
2278 key = "qti,pmic-opt-curr-temp-hysteresis";
2279 ret = of_property_read_u32(node, key, &data->ocr_temp_hyst_degC);
2280 if (ret)
2281 goto read_ocr_fail;
2282
2283 key = "qti,pmic-opt-curr-regs";
2284 ocr_rail_cnt = of_property_count_strings(node, key);
2285 ocr_rails = kzalloc(sizeof(struct psm_rail) * ocr_rail_cnt,
2286 GFP_KERNEL);
2287 if (!ocr_rails) {
2288 pr_err("%s: Fail to allocate memory for ocr rails\n", __func__);
2289 ocr_rail_cnt = 0;
2290 return -ENOMEM;
2291 }
2292
2293 for (j = 0; j < ocr_rail_cnt; j++) {
2294 ret = of_property_read_string_index(node, key, j,
2295 &ocr_rails[j].name);
2296 if (ret)
2297 goto read_ocr_fail;
2298 ocr_rails[j].phase_reg = NULL;
2299 ocr_rails[j].init = OPTIMUM_CURRENT_MAX;
2300 }
2301
2302 if (ocr_rail_cnt) {
2303 ret = ocr_reg_init(pdev);
2304 if (ret) {
2305 pr_info("%s:Failed to get regulators. KTM continues.\n",
2306 __func__);
2307 goto read_ocr_fail;
2308 }
2309 ocr_enabled = true;
2310 ocr_nodes_called = false;
2311 /*
2312 * Vote for max optimum current by default until we have made
2313 * our first temp reading
2314 */
2315 if (ocr_set_mode_all(OPTIMUM_CURRENT_MAX))
2316 pr_err("Set max optimum current failed\n");
2317 }
2318
2319read_ocr_fail:
2320 ocr_probed = true;
2321 if (ret) {
2322 dev_info(&pdev->dev,
2323 "%s:Failed reading node=%s, key=%s. KTM continues\n",
2324 __func__, node->full_name, key);
2325 if (ocr_rails)
2326 kfree(ocr_rails);
2327 ocr_rails = NULL;
2328 ocr_rail_cnt = 0;
2329 }
2330 if (ret == -EPROBE_DEFER)
2331 ocr_probed = false;
2332read_ocr_exit:
2333 return ret;
2334}
2335
Jennifer Liu907922c2013-03-26 11:18:00 -07002336static int probe_psm(struct device_node *node, struct msm_thermal_data *data,
2337 struct platform_device *pdev)
2338{
2339 int ret = 0;
2340 int j = 0;
2341 char *key = NULL;
2342
Jennifer Liuf4e76492013-05-24 13:23:53 -07002343 psm_rails = NULL;
2344
Jennifer Liu907922c2013-03-26 11:18:00 -07002345 key = "qcom,pmic-sw-mode-temp";
2346 ret = of_property_read_u32(node, key, &data->psm_temp_degC);
2347 if (ret)
2348 goto read_node_fail;
2349
2350 key = "qcom,pmic-sw-mode-temp-hysteresis";
2351 ret = of_property_read_u32(node, key, &data->psm_temp_hyst_degC);
2352 if (ret)
2353 goto read_node_fail;
2354
2355 key = "qcom,pmic-sw-mode-regs";
2356 psm_rails_cnt = of_property_count_strings(node, key);
2357 psm_rails = kzalloc(sizeof(struct psm_rail) * psm_rails_cnt,
2358 GFP_KERNEL);
2359 if (!psm_rails) {
2360 pr_err("%s: Fail to allocate memory for psm rails\n", __func__);
2361 psm_rails_cnt = 0;
2362 return -ENOMEM;
2363 }
2364
2365 for (j = 0; j < psm_rails_cnt; j++) {
2366 ret = of_property_read_string_index(node, key, j,
2367 &psm_rails[j].name);
2368 if (ret)
2369 goto read_node_fail;
2370 }
2371
2372 if (psm_rails_cnt) {
2373 ret = psm_reg_init(pdev);
2374 if (ret) {
2375 pr_info("%s:Failed to get regulators. KTM continues.\n",
2376 __func__);
2377 goto read_node_fail;
2378 }
2379 psm_enabled = true;
2380 }
2381
2382read_node_fail:
2383 psm_probed = true;
2384 if (ret) {
2385 dev_info(&pdev->dev,
2386 "%s:Failed reading node=%s, key=%s. KTM continues\n",
2387 __func__, node->full_name, key);
2388 kfree(psm_rails);
2389 psm_rails_cnt = 0;
2390 }
2391 if (ret == -EPROBE_DEFER)
2392 psm_probed = false;
2393 return ret;
2394}
2395
Jennifer Liu4ff40942013-07-30 15:25:46 -07002396static int probe_cc(struct device_node *node, struct msm_thermal_data *data,
2397 struct platform_device *pdev)
2398{
2399 char *key = NULL;
Ram Chandrasekar89961312013-11-07 12:03:54 -07002400 uint32_t cpu_cnt = 0;
Jennifer Liu4ff40942013-07-30 15:25:46 -07002401 int ret = 0;
Ram Chandrasekar89961312013-11-07 12:03:54 -07002402 uint32_t cpu = 0;
Jennifer Liu4ff40942013-07-30 15:25:46 -07002403
Ram Chandrasekar81c1b0b2013-11-21 15:13:33 -07002404 if (num_possible_cpus() > 1) {
2405 core_control_enabled = 1;
2406 hotplug_enabled = 1;
2407 }
2408
Jennifer Liu4ff40942013-07-30 15:25:46 -07002409 key = "qcom,core-limit-temp";
2410 ret = of_property_read_u32(node, key, &data->core_limit_temp_degC);
2411 if (ret)
2412 goto read_node_fail;
2413
2414 key = "qcom,core-temp-hysteresis";
2415 ret = of_property_read_u32(node, key, &data->core_temp_hysteresis_degC);
2416 if (ret)
2417 goto read_node_fail;
2418
2419 key = "qcom,core-control-mask";
2420 ret = of_property_read_u32(node, key, &data->core_control_mask);
2421 if (ret)
2422 goto read_node_fail;
2423
2424 key = "qcom,hotplug-temp";
2425 ret = of_property_read_u32(node, key, &data->hotplug_temp_degC);
2426 if (ret)
Ram Chandrasekar81c1b0b2013-11-21 15:13:33 -07002427 goto hotplug_node_fail;
Jennifer Liu4ff40942013-07-30 15:25:46 -07002428
2429 key = "qcom,hotplug-temp-hysteresis";
2430 ret = of_property_read_u32(node, key,
2431 &data->hotplug_temp_hysteresis_degC);
2432 if (ret)
Ram Chandrasekar81c1b0b2013-11-21 15:13:33 -07002433 goto hotplug_node_fail;
Jennifer Liu4ff40942013-07-30 15:25:46 -07002434
2435 key = "qcom,cpu-sensors";
2436 cpu_cnt = of_property_count_strings(node, key);
Anji Jonnala273ba4e2013-12-09 18:25:50 +05302437 if (cpu_cnt < num_possible_cpus()) {
2438 pr_err("%s: Wrong number of cpu sensors\n", KBUILD_MODNAME);
Ram Chandrasekar81c1b0b2013-11-21 15:13:33 -07002439 ret = -EINVAL;
2440 goto hotplug_node_fail;
Jennifer Liu4ff40942013-07-30 15:25:46 -07002441 }
2442
2443 for_each_possible_cpu(cpu) {
Jennifer Liu4ff40942013-07-30 15:25:46 -07002444 ret = of_property_read_string_index(node, key, cpu,
2445 &cpus[cpu].sensor_type);
2446 if (ret)
Ram Chandrasekar81c1b0b2013-11-21 15:13:33 -07002447 goto hotplug_node_fail;
Jennifer Liu4ff40942013-07-30 15:25:46 -07002448 }
2449
Jennifer Liu4ff40942013-07-30 15:25:46 -07002450read_node_fail:
2451 if (ret) {
2452 dev_info(&pdev->dev,
2453 "%s:Failed reading node=%s, key=%s. KTM continues\n",
2454 KBUILD_MODNAME, node->full_name, key);
2455 core_control_enabled = 0;
2456 }
2457
2458 return ret;
Ram Chandrasekar81c1b0b2013-11-21 15:13:33 -07002459
2460hotplug_node_fail:
2461 if (ret) {
2462 dev_info(&pdev->dev,
2463 "%s:Failed reading node=%s, key=%s. KTM continues\n",
2464 KBUILD_MODNAME, node->full_name, key);
2465 hotplug_enabled = 0;
2466 }
2467
2468 return ret;
Jennifer Liu4ff40942013-07-30 15:25:46 -07002469}
2470
Ram Chandrasekar89961312013-11-07 12:03:54 -07002471static int probe_freq_mitigation(struct device_node *node,
2472 struct msm_thermal_data *data,
2473 struct platform_device *pdev)
2474{
2475 char *key = NULL;
2476 int ret = 0;
Ram Chandrasekar89961312013-11-07 12:03:54 -07002477
2478 key = "qcom,freq-mitigation-temp";
2479 ret = of_property_read_u32(node, key, &data->freq_mitig_temp_degc);
2480 if (ret)
2481 goto PROBE_FREQ_EXIT;
2482
2483 key = "qcom,freq-mitigation-temp-hysteresis";
2484 ret = of_property_read_u32(node, key,
2485 &data->freq_mitig_temp_hysteresis_degc);
2486 if (ret)
2487 goto PROBE_FREQ_EXIT;
2488
2489 key = "qcom,freq-mitigation-value";
2490 ret = of_property_read_u32(node, key, &data->freq_limit);
2491 if (ret)
2492 goto PROBE_FREQ_EXIT;
2493
2494 key = "qcom,freq-mitigation-control-mask";
2495 ret = of_property_read_u32(node, key, &data->freq_mitig_control_mask);
2496 if (ret)
2497 goto PROBE_FREQ_EXIT;
2498
2499 freq_mitigation_enabled = 1;
Ram Chandrasekar89961312013-11-07 12:03:54 -07002500
2501PROBE_FREQ_EXIT:
2502 if (ret) {
2503 dev_info(&pdev->dev,
2504 "%s:Failed reading node=%s, key=%s. KTM continues\n",
2505 __func__, node->full_name, key);
2506 freq_mitigation_enabled = 0;
2507 }
2508 return ret;
2509}
2510
/*
 * msm_thermal_dev_probe() - platform-driver probe for the KTM device.
 *
 * Reads the mandatory base properties (sensor id, poll interval, limit
 * temperature/hysteresis, freq step) and then probes each optional
 * mitigation feature. Only -EPROBE_DEFER from the regulator-backed
 * features (psm/vdd_rstr/ocr) aborts the probe; every other feature
 * failure is absorbed so KTM can start with whatever is available.
 */
static int __devinit msm_thermal_dev_probe(struct platform_device *pdev)
{
	int ret = 0;
	char *key = NULL;
	struct device_node *node = pdev->dev.of_node;
	struct msm_thermal_data data;

	memset(&data, 0, sizeof(struct msm_thermal_data));

	/* Mandatory properties: any failure here aborts the probe. */
	key = "qcom,sensor-id";
	ret = of_property_read_u32(node, key, &data.sensor_id);
	if (ret)
		goto fail;

	key = "qcom,poll-ms";
	ret = of_property_read_u32(node, key, &data.poll_ms);
	if (ret)
		goto fail;

	key = "qcom,limit-temp";
	ret = of_property_read_u32(node, key, &data.limit_temp_degC);
	if (ret)
		goto fail;

	key = "qcom,temp-hysteresis";
	ret = of_property_read_u32(node, key, &data.temp_hysteresis_degC);
	if (ret)
		goto fail;

	key = "qcom,freq-step";
	ret = of_property_read_u32(node, key, &data.bootup_freq_step);
	if (ret)
		goto fail;

	/*
	 * Optional from here on: return values of the mask read, probe_cc()
	 * and probe_freq_mitigation() are deliberately overwritten — those
	 * features disable themselves on failure and KTM continues.
	 */
	key = "qcom,freq-control-mask";
	ret = of_property_read_u32(node, key, &data.bootup_freq_control_mask);

	ret = probe_cc(node, &data, pdev);

	ret = probe_freq_mitigation(node, &data, pdev);
	/*
	 * Probe optional properties below. Call probe_psm before
	 * probe_vdd_rstr because rpm_regulator_get has to be called
	 * before devm_regulator_get
	 * probe_ocr should be called after probe_vdd_rstr to reuse the
	 * regulator handle. calling devm_regulator_get more than once
	 * will fail.
	 */
	ret = probe_psm(node, &data, pdev);
	if (ret == -EPROBE_DEFER)
		goto fail;
	ret = probe_vdd_rstr(node, &data, pdev);
	if (ret == -EPROBE_DEFER)
		goto fail;
	ret = probe_ocr(node, &data, pdev);
	if (ret == -EPROBE_DEFER)
		goto fail;

	/*
	 * In case sysfs add nodes get called before probe function.
	 * Need to make sure sysfs node is created again
	 */
	if (psm_nodes_called) {
		msm_thermal_add_psm_nodes();
		psm_nodes_called = false;
	}
	if (vdd_rstr_nodes_called) {
		msm_thermal_add_vdd_rstr_nodes();
		vdd_rstr_nodes_called = false;
	}
	if (ocr_nodes_called) {
		msm_thermal_add_ocr_nodes();
		ocr_nodes_called = false;
	}
	msm_thermal_ioctl_init();
	ret = msm_thermal_init(&data);

	return ret;
fail:
	if (ret)
		pr_err("%s: Failed reading node=%s, key=%s\n",
			__func__, node->full_name, key);

	return ret;
}
2596
/* Platform-driver .remove callback: release the thermal ioctl interface. */
static int msm_thermal_dev_exit(struct platform_device *inp_dev)
{
	msm_thermal_ioctl_cleanup();

	return 0;
}
Jennifer Liu907922c2013-03-26 11:18:00 -07002602
/* Devicetree match table: binds this driver to "qcom,msm-thermal" nodes. */
static struct of_device_id msm_thermal_match_table[] = {
	{.compatible = "qcom,msm-thermal"},
	{},
};
2607
/* Platform driver descriptor for the "msm-thermal" device. */
static struct platform_driver msm_thermal_device_driver = {
	.probe = msm_thermal_dev_probe,
	.driver = {
		.name = "msm-thermal",
		.owner = THIS_MODULE,
		.of_match_table = msm_thermal_match_table,
	},
	.remove = msm_thermal_dev_exit,
};
2617
/*
 * Register the msm-thermal platform driver.
 * Returns the result of platform_driver_register().
 */
int __init msm_thermal_device_init(void)
{
	return platform_driver_register(&msm_thermal_device_driver);
}
Praveen Chidambaram5abb7db2013-02-20 17:42:17 -07002622
/*
 * Late-init hook: create the module's sysfs mitigation nodes (core
 * control only on multi-core targets) and set up the Android alarm and
 * workqueue item that drive thermal_rtc_callback/timer_work_fn.
 * Runs at late_initcall time, after the platform driver has had a
 * chance to probe.
 */
int __init msm_thermal_late_init(void)
{
	if (num_possible_cpus() > 1)
		msm_thermal_add_cc_nodes();
	msm_thermal_add_psm_nodes();
	msm_thermal_add_vdd_rstr_nodes();
	msm_thermal_add_ocr_nodes();
	/*
	 * Wakeup-capable alarm that fires thermal_rtc_callback; it is armed
	 * elsewhere (not visible in this file section).
	 */
	alarm_init(&thermal_rtc, ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
			thermal_rtc_callback);
	INIT_WORK(&timer_work, timer_work_fn);
	msm_thermal_add_timer_nodes();

	return 0;
}
late_initcall(msm_thermal_late_init);
Jennifer Liu4b4f4cc2013-04-05 15:26:33 -07002638