/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/msm_tsens.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/msm_thermal.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/android_alarm.h>
#include <mach/cpufreq.h>
#include <mach/rpm-regulator.h>
#include <mach/rpm-regulator-smd.h>
#include <linux/regulator/consumer.h>

#define MAX_RAILS 5

static struct msm_thermal_data msm_thermal_info;
static uint32_t limited_max_freq = MSM_CPUFREQ_NO_LIMIT;
static struct delayed_work check_temp_work;
static bool core_control_enabled;
static uint32_t cpus_offlined;
static DEFINE_MUTEX(core_control_mutex);
static uint32_t wakeup_ms;
static struct alarm thermal_rtc;
static struct kobject *tt_kobj;
static struct work_struct timer_work;

static int enabled;
static int rails_cnt;
static int psm_rails_cnt;
static int limit_idx;
static int limit_idx_low;
static int limit_idx_high;
static int max_tsens_num;
static struct cpufreq_frequency_table *table;
static uint32_t usefreq;
static int freq_table_get;
static bool vdd_rstr_enabled;
static bool vdd_rstr_nodes_called;
static bool vdd_rstr_probed;
static bool psm_enabled;
static bool psm_nodes_called;
static bool psm_probed;
static int *tsens_id_map;
static DEFINE_MUTEX(vdd_rstr_mutex);
static DEFINE_MUTEX(psm_mutex);

struct rail {
	const char *name;
	uint32_t freq_req;
	uint32_t min_level;
	uint32_t num_levels;
	int32_t curr_level;
	uint32_t levels[3];
	struct kobj_attribute value_attr;
	struct kobj_attribute level_attr;
	struct regulator *reg;
	struct attribute_group attr_gp;
};

struct psm_rail {
	const char *name;
	uint8_t init;
	uint8_t mode;
	struct kobj_attribute mode_attr;
	struct rpm_regulator *reg;
	struct attribute_group attr_gp;
};

static struct psm_rail *psm_rails;
static struct rail *rails;

struct vdd_rstr_enable {
	struct kobj_attribute ko_attr;
	uint32_t enabled;
};

/* For SMPS only */
enum PMIC_SW_MODE {
	PMIC_AUTO_MODE = RPM_REGULATOR_MODE_AUTO,
	PMIC_IPEAK_MODE = RPM_REGULATOR_MODE_IPEAK,
	PMIC_PWM_MODE = RPM_REGULATOR_MODE_HPM,
};

#define VDD_RES_RO_ATTRIB(_rail, ko_attr, j, _name) \
	ko_attr.attr.name = __stringify(_name); \
	ko_attr.attr.mode = 0444; \
	ko_attr.show = vdd_rstr_reg_##_name##_show; \
	ko_attr.store = NULL; \
	sysfs_attr_init(&ko_attr.attr); \
	_rail.attr_gp.attrs[j] = &ko_attr.attr;

#define VDD_RES_RW_ATTRIB(_rail, ko_attr, j, _name) \
	ko_attr.attr.name = __stringify(_name); \
	ko_attr.attr.mode = 0644; \
	ko_attr.show = vdd_rstr_reg_##_name##_show; \
	ko_attr.store = vdd_rstr_reg_##_name##_store; \
	sysfs_attr_init(&ko_attr.attr); \
	_rail.attr_gp.attrs[j] = &ko_attr.attr;

#define VDD_RSTR_ENABLE_FROM_ATTRIBS(attr) \
	(container_of(attr, struct vdd_rstr_enable, ko_attr));

#define VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr) \
	(container_of(attr, struct rail, value_attr));

#define VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr) \
	(container_of(attr, struct rail, level_attr));

#define PSM_RW_ATTRIB(_rail, ko_attr, j, _name) \
	ko_attr.attr.name = __stringify(_name); \
	ko_attr.attr.mode = 0644; \
	ko_attr.show = psm_reg_##_name##_show; \
	ko_attr.store = psm_reg_##_name##_store; \
	sysfs_attr_init(&ko_attr.attr); \
	_rail.attr_gp.attrs[j] = &ko_attr.attr;

#define PSM_REG_MODE_FROM_ATTRIBS(attr) \
	(container_of(attr, struct psm_rail, mode_attr));
/* If freq table exists, then we can send freq request */
static int check_freq_table(void)
{
	int ret = 0;
	struct cpufreq_frequency_table *table = NULL;

	table = cpufreq_frequency_get_table(0);
	if (!table) {
		pr_debug("%s: error reading cpufreq table\n", __func__);
		return -EINVAL;
	}
	freq_table_get = 1;

	return ret;
}

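/*
 * Push a new minimum frequency to every possible CPU (clamped to the
 * current maximum limit) and poke cpufreq on the online CPUs so the new
 * floor takes effect immediately.
 */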
static int update_cpu_min_freq_all(uint32_t min)
{
	int cpu = 0;
	int ret = 0;
	struct cpufreq_policy *policy = NULL;

	if (!freq_table_get) {
		ret = check_freq_table();
		if (ret) {
			pr_err("%s:Fail to get freq table\n", __func__);
			return ret;
		}
	}
	/* If min is larger than allowed max */
	if (min != MSM_CPUFREQ_NO_LIMIT &&
			min > table[limit_idx_high].frequency)
		min = table[limit_idx_high].frequency;

	for_each_possible_cpu(cpu) {
		ret = msm_cpufreq_set_freq_limits(cpu, min, limited_max_freq);
		if (ret) {
			pr_err("%s:Fail to set limits for cpu%d\n",
					__func__, cpu);
			return ret;
		}

		if (cpu_online(cpu)) {
			policy = cpufreq_cpu_get(cpu);
			if (!policy)
				continue;
			cpufreq_driver_target(policy, policy->cur,
					CPUFREQ_RELATION_L);
			cpufreq_cpu_put(policy);
		}
	}

	return ret;
}

static int vdd_restriction_apply_freq(struct rail *r, int level)
{
	int ret = 0;

	if (level == r->curr_level)
		return ret;

	/* level = -1: disable, level = 0,1,2..n: enable */
	if (level == -1) {
		ret = update_cpu_min_freq_all(r->min_level);
		if (ret)
			return ret;
		else
			r->curr_level = -1;
	} else if (level >= 0 && level < (r->num_levels)) {
		ret = update_cpu_min_freq_all(r->levels[level]);
		if (ret)
			return ret;
		else
			r->curr_level = level;
	} else {
		pr_err("level input:%d is not within range\n", level);
		return -EINVAL;
	}

	return ret;
}

static int vdd_restriction_apply_voltage(struct rail *r, int level)
{
	int ret = 0;

	if (r->reg == NULL) {
		pr_info("Do not have regulator handle:%s, can't apply vdd\n",
				r->name);
		return -EFAULT;
	}
	if (level == r->curr_level)
		return ret;

	/* level = -1: disable, level = 0,1,2..n: enable */
	if (level == -1) {
		ret = regulator_set_voltage(r->reg, r->min_level,
				r->levels[r->num_levels - 1]);
		if (!ret)
			r->curr_level = -1;
	} else if (level >= 0 && level < (r->num_levels)) {
		ret = regulator_set_voltage(r->reg, r->levels[level],
				r->levels[r->num_levels - 1]);
		if (!ret)
			r->curr_level = level;
	} else {
		pr_err("level input:%d is not within range\n", level);
		return -EINVAL;
	}

	return ret;
}

/* Setting all rails the same mode */
static int psm_set_mode_all(int mode)
{
	int i = 0;
	int fail_cnt = 0;
	int ret = 0;

	for (i = 0; i < psm_rails_cnt; i++) {
		if (psm_rails[i].mode != mode) {
			ret = rpm_regulator_set_mode(psm_rails[i].reg, mode);
			if (ret) {
				pr_err("Cannot set mode:%d for %s",
					mode, psm_rails[i].name);
				fail_cnt++;
			} else
				psm_rails[i].mode = mode;
		}
	}

	return fail_cnt ? (-EFAULT) : ret;
}

static ssize_t vdd_rstr_en_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr);

	return snprintf(buf, PAGE_SIZE, "%d\n", en->enabled);
}

static ssize_t vdd_rstr_en_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = 0;
	int i = 0;
	uint8_t en_cnt = 0;
	uint8_t dis_cnt = 0;
	uint32_t val = 0;
	struct kernel_param kp;
	struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr);

	mutex_lock(&vdd_rstr_mutex);
	kp.arg = &val;
	ret = param_set_bool(buf, &kp);
	if (ret) {
		pr_err("Invalid input %s for enabled\n", buf);
		goto done_vdd_rstr_en;
	}

	if ((val == 0) && (en->enabled == 0))
		goto done_vdd_rstr_en;

	for (i = 0; i < rails_cnt; i++) {
		if (rails[i].freq_req == 1 && freq_table_get)
			ret = vdd_restriction_apply_freq(&rails[i],
					(val) ? 0 : -1);
		else
			ret = vdd_restriction_apply_voltage(&rails[i],
					(val) ? 0 : -1);

		/*
		 * Even if one rail fails, still try to set the others.
		 * Continue the loop.
		 */
		if (ret)
			pr_err("Set vdd restriction for %s failed\n",
					rails[i].name);
		else {
			if (val)
				en_cnt++;
			else
				dis_cnt++;
		}
	}
	/* As long as one rail is enabled, vdd rstr is enabled */
	if (val && en_cnt)
		en->enabled = 1;
	else if (!val && (dis_cnt == rails_cnt))
		en->enabled = 0;

done_vdd_rstr_en:
	mutex_unlock(&vdd_rstr_mutex);
	return count;
}

static struct vdd_rstr_enable vdd_rstr_en = {
	.ko_attr.attr.name = __stringify(enabled),
	.ko_attr.attr.mode = 0644,
	.ko_attr.show = vdd_rstr_en_show,
	.ko_attr.store = vdd_rstr_en_store,
	.enabled = 1,
};

static struct attribute *vdd_rstr_en_attribs[] = {
	&vdd_rstr_en.ko_attr.attr,
	NULL,
};

static struct attribute_group vdd_rstr_en_attribs_gp = {
	.attrs = vdd_rstr_en_attribs,
};

static ssize_t vdd_rstr_reg_value_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	int val = 0;
	struct rail *reg = VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr);
	/* -1:disabled, -2:fail to get regulator handle */
	if (reg->curr_level < 0)
		val = reg->curr_level;
	else
		val = reg->levels[reg->curr_level];

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t vdd_rstr_reg_level_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr);
	return snprintf(buf, PAGE_SIZE, "%d\n", reg->curr_level);
}

static ssize_t vdd_rstr_reg_level_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = 0;
	int val = 0;

	struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr);

	mutex_lock(&vdd_rstr_mutex);
	if (vdd_rstr_en.enabled == 0)
		goto done_store_level;

	ret = kstrtouint(buf, 10, &val);
	if (ret) {
		pr_err("Invalid input %s for level\n", buf);
		goto done_store_level;
	}

	if (val < 0 || val > reg->num_levels - 1) {
		pr_err("Invalid number %d for level\n", val);
		goto done_store_level;
	}

	if (val != reg->curr_level) {
		if (reg->freq_req == 1 && freq_table_get)
			update_cpu_min_freq_all(reg->levels[val]);
		else {
			ret = vdd_restriction_apply_voltage(reg, val);
			if (ret) {
				pr_err(
				"Set vdd restriction for regulator %s failed\n",
					reg->name);
				goto done_store_level;
			}
		}
		reg->curr_level = val;
	}

done_store_level:
	mutex_unlock(&vdd_rstr_mutex);
	return count;
}

static ssize_t psm_reg_mode_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
	return snprintf(buf, PAGE_SIZE, "%d\n", reg->mode);
}

static ssize_t psm_reg_mode_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = 0;
	int val = 0;
	struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);

	mutex_lock(&psm_mutex);
	ret = kstrtoint(buf, 10, &val);
	if (ret) {
		pr_err("Invalid input %s for mode\n", buf);
		goto done_psm_store;
	}

	if ((val != PMIC_PWM_MODE) && (val != PMIC_AUTO_MODE)) {
		pr_err("Invalid number %d for mode\n", val);
		goto done_psm_store;
	}

	if (val != reg->mode) {
		ret = rpm_regulator_set_mode(reg->reg, val);
		if (ret) {
			pr_err("Fail to set PMIC SW Mode:%d for %s\n",
				val, reg->name);
			goto done_psm_store;
		}
		reg->mode = val;
	}

done_psm_store:
	mutex_unlock(&psm_mutex);
	return count;
}

static int check_sensor_id(int sensor_id)
{
	int i = 0;
	bool hw_id_found = false;
	int ret = 0;

	for (i = 0; i < max_tsens_num; i++) {
		if (sensor_id == tsens_id_map[i]) {
			hw_id_found = true;
			break;
		}
	}
	if (!hw_id_found) {
		pr_err("%s: Invalid sensor hw id:%d\n", __func__, sensor_id);
		return -EINVAL;
	}

	return ret;
}

static int create_sensor_id_map(void)
{
	int i = 0;
	int ret = 0;

	tsens_id_map = kzalloc(sizeof(int) * max_tsens_num,
			GFP_KERNEL);
	if (!tsens_id_map) {
		pr_err("%s: Cannot allocate memory for tsens_id_map\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < max_tsens_num; i++) {
		ret = tsens_get_hw_id_mapping(i, &tsens_id_map[i]);
		/* If return -ENXIO, hw_id is default in sequence */
		if (ret) {
			if (ret == -ENXIO) {
				tsens_id_map[i] = i;
				ret = 0;
			} else {
				pr_err("%s: Failed to get hw id for sw id %d\n",
						__func__, i);
				goto fail;
			}
		}
	}

	return ret;
fail:
	kfree(tsens_id_map);
	return ret;
}

/* 1:enable, 0:disable */
static int vdd_restriction_apply_all(int en)
{
	int i = 0;
	int en_cnt = 0;
	int dis_cnt = 0;
	int fail_cnt = 0;
	int ret = 0;

	for (i = 0; i < rails_cnt; i++) {
		if (rails[i].freq_req == 1 && freq_table_get)
			ret = vdd_restriction_apply_freq(&rails[i],
					en ? 0 : -1);
		else
			ret = vdd_restriction_apply_voltage(&rails[i],
					en ? 0 : -1);
		if (ret) {
			pr_err("Cannot set voltage for %s", rails[i].name);
			fail_cnt++;
		} else {
			if (en)
				en_cnt++;
			else
				dis_cnt++;
		}
	}

	/* As long as one rail is enabled, vdd rstr is enabled */
	if (en && en_cnt)
		vdd_rstr_en.enabled = 1;
	else if (!en && (dis_cnt == rails_cnt))
		vdd_rstr_en.enabled = 0;

	/*
	 * Check fail_cnt again to make sure the restriction was applied
	 * to all of the rails successfully.
	 */
	if (fail_cnt)
		return -EFAULT;
	return ret;
}

static int msm_thermal_get_freq_table(void)
{
	int ret = 0;
	int i = 0;

	table = cpufreq_frequency_get_table(0);
	if (table == NULL) {
		pr_debug("%s: error reading cpufreq table\n", KBUILD_MODNAME);
		ret = -EINVAL;
		goto fail;
	}

	while (table[i].frequency != CPUFREQ_TABLE_END)
		i++;

	limit_idx_low = 0;
	limit_idx_high = limit_idx = i - 1;
	BUG_ON(limit_idx_high <= 0 || limit_idx_high <= limit_idx_low);
fail:
	return ret;
}

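/*
 * Apply a new maximum frequency limit to one CPU and, if it is online,
 * nudge cpufreq so the limit takes effect immediately.
 */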
static int update_cpu_max_freq(int cpu, uint32_t max_freq)
{
	int ret = 0;

	ret = msm_cpufreq_set_freq_limits(cpu, MSM_CPUFREQ_NO_LIMIT, max_freq);
	if (ret)
		return ret;

	limited_max_freq = max_freq;
	if (max_freq != MSM_CPUFREQ_NO_LIMIT)
		pr_info("%s: Limiting cpu%d max frequency to %d\n",
				KBUILD_MODNAME, cpu, max_freq);
	else
		pr_info("%s: Max frequency reset for cpu%d\n",
				KBUILD_MODNAME, cpu);

	if (cpu_online(cpu)) {
		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
		if (!policy)
			return ret;
		ret = cpufreq_driver_target(policy, policy->cur,
				CPUFREQ_RELATION_H);
		cpufreq_cpu_put(policy);
	}

	return ret;
}

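/*
 * Core control: when the core-limit temperature is crossed, take the CPUs
 * named in core_control_mask offline one at a time; bring them back online
 * once the temperature drops below the hysteresis threshold.
 */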
#ifdef CONFIG_SMP
static void __ref do_core_control(long temp)
{
	int i = 0;
	int ret = 0;

	if (!core_control_enabled)
		return;

	mutex_lock(&core_control_mutex);
	if (msm_thermal_info.core_control_mask &&
		temp >= msm_thermal_info.core_limit_temp_degC) {
		for (i = num_possible_cpus(); i > 0; i--) {
			if (!(msm_thermal_info.core_control_mask & BIT(i)))
				continue;
			if (cpus_offlined & BIT(i) && !cpu_online(i))
				continue;
			pr_info("%s: Set Offline: CPU%d Temp: %ld\n",
					KBUILD_MODNAME, i, temp);
			ret = cpu_down(i);
			if (ret)
				pr_err("%s: Error %d offline core %d\n",
					KBUILD_MODNAME, ret, i);
			cpus_offlined |= BIT(i);
			break;
		}
	} else if (msm_thermal_info.core_control_mask && cpus_offlined &&
		temp <= (msm_thermal_info.core_limit_temp_degC -
				msm_thermal_info.core_temp_hysteresis_degC)) {
		for (i = 0; i < num_possible_cpus(); i++) {
			if (!(cpus_offlined & BIT(i)))
				continue;
			cpus_offlined &= ~BIT(i);
			pr_info("%s: Allow Online CPU%d Temp: %ld\n",
					KBUILD_MODNAME, i, temp);
			/*
			 * If this core is already online, then bring up the
			 * next offlined core.
			 */
			if (cpu_online(i))
				continue;
			ret = cpu_up(i);
			if (ret)
				pr_err("%s: Error %d online core %d\n",
						KBUILD_MODNAME, ret, i);
			break;
		}
	}
	mutex_unlock(&core_control_mutex);
}
#else
static void do_core_control(long temp)
{
	return;
}
#endif

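/*
 * Vdd restriction: apply the restriction (voltage floor or cpufreq floor)
 * whenever any TSENS sensor reads at or below the restriction threshold;
 * remove it once every sensor is above the hysteresis temperature.
 */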
static int do_vdd_restriction(void)
{
	struct tsens_device tsens_dev;
	long temp = 0;
	int ret = 0;
	int i = 0;
	int dis_cnt = 0;

	if (!vdd_rstr_enabled)
		return ret;

	if (usefreq && !freq_table_get) {
		if (check_freq_table())
			return ret;
	}

	mutex_lock(&vdd_rstr_mutex);
	for (i = 0; i < max_tsens_num; i++) {
		tsens_dev.sensor_num = tsens_id_map[i];
		ret = tsens_get_temp(&tsens_dev, &temp);
		if (ret) {
			pr_debug("%s: Unable to read TSENS sensor %d\n",
					__func__, tsens_dev.sensor_num);
			dis_cnt++;
			continue;
		}
		if (temp <= msm_thermal_info.vdd_rstr_temp_degC) {
			ret = vdd_restriction_apply_all(1);
			if (ret) {
				pr_err("Enable vdd rstr voltage for all failed\n");
				goto exit;
			}
			goto exit;
		} else if (temp > msm_thermal_info.vdd_rstr_temp_hyst_degC)
			dis_cnt++;
	}
	if (dis_cnt == max_tsens_num) {
		ret = vdd_restriction_apply_all(0);
		if (ret) {
			pr_err("Disable vdd rstr voltage for all failed\n");
			goto exit;
		}
	}
exit:
	mutex_unlock(&vdd_rstr_mutex);
	return ret;
}

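/*
 * PMIC software mode: force every PSM rail into PWM mode as soon as one
 * TSENS sensor exceeds the PSM threshold, and return all rails to AUTO
 * mode once every sensor has dropped below the hysteresis temperature.
 */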
static int do_psm(void)
{
	struct tsens_device tsens_dev;
	long temp = 0;
	int ret = 0;
	int i = 0;
	int auto_cnt = 0;

	mutex_lock(&psm_mutex);
	for (i = 0; i < max_tsens_num; i++) {
		tsens_dev.sensor_num = tsens_id_map[i];
		ret = tsens_get_temp(&tsens_dev, &temp);
		if (ret) {
			pr_debug("%s: Unable to read TSENS sensor %d\n",
					__func__, tsens_dev.sensor_num);
			auto_cnt++;
			continue;
		}

		/*
		 * As long as one sensor is above the threshold, set PWM mode
		 * on all rails and stop the loop. Set AUTO mode once every
		 * sensor is below the hysteresis threshold.
		 */
		if (temp > msm_thermal_info.psm_temp_degC) {
			ret = psm_set_mode_all(PMIC_PWM_MODE);
			if (ret) {
				pr_err("Set pwm mode for all failed\n");
				goto exit;
			}
			break;
		} else if (temp <= msm_thermal_info.psm_temp_hyst_degC)
			auto_cnt++;
	}

	if (auto_cnt == max_tsens_num) {
		ret = psm_set_mode_all(PMIC_AUTO_MODE);
		if (ret) {
			pr_err("Set auto mode for all failed\n");
			goto exit;
		}
	}

exit:
	mutex_unlock(&psm_mutex);
	return ret;
}

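/*
 * Frequency mitigation: step the CPU maximum frequency down the cpufreq
 * table while the limit temperature is exceeded, and step it back up
 * (eventually removing the limit) once the temperature falls below the
 * hysteresis point.
 */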
static void __ref do_freq_control(long temp)
{
	int ret = 0;
	int cpu = 0;
	uint32_t max_freq = limited_max_freq;

	if (temp >= msm_thermal_info.limit_temp_degC) {
		if (limit_idx == limit_idx_low)
			return;

		limit_idx -= msm_thermal_info.freq_step;
		if (limit_idx < limit_idx_low)
			limit_idx = limit_idx_low;
		max_freq = table[limit_idx].frequency;
	} else if (temp < msm_thermal_info.limit_temp_degC -
			msm_thermal_info.temp_hysteresis_degC) {
		if (limit_idx == limit_idx_high)
			return;

		limit_idx += msm_thermal_info.freq_step;
		if (limit_idx >= limit_idx_high) {
			limit_idx = limit_idx_high;
			max_freq = MSM_CPUFREQ_NO_LIMIT;
		} else
			max_freq = table[limit_idx].frequency;
	}

	if (max_freq == limited_max_freq)
		return;

	/* Update new limits */
	for_each_possible_cpu(cpu) {
		if (!(msm_thermal_info.freq_control_mask & BIT(cpu)))
			continue;
		ret = update_cpu_max_freq(cpu, max_freq);
		if (ret)
			pr_debug("%s: Unable to limit cpu%d max freq to %d\n",
					KBUILD_MODNAME, cpu, max_freq);
	}
}

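/*
 * Periodic worker: read the primary TSENS sensor and run each mitigation
 * (core control, vdd restriction, PMIC software mode, frequency control),
 * then reschedule itself at the configured polling interval.
 */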
static void __ref check_temp(struct work_struct *work)
{
	static int limit_init;
	struct tsens_device tsens_dev;
	long temp = 0;
	int ret = 0;

	tsens_dev.sensor_num = msm_thermal_info.sensor_id;
	ret = tsens_get_temp(&tsens_dev, &temp);
	if (ret) {
		pr_debug("%s: Unable to read TSENS sensor %d\n",
				KBUILD_MODNAME, tsens_dev.sensor_num);
		goto reschedule;
	}

	if (!limit_init) {
		ret = msm_thermal_get_freq_table();
		if (ret)
			goto reschedule;
		else
			limit_init = 1;
	}

	do_core_control(temp);
	do_vdd_restriction();
	do_psm();
	do_freq_control(temp);

reschedule:
	if (enabled)
		schedule_delayed_work(&check_temp_work,
				msecs_to_jiffies(msm_thermal_info.poll_ms));
}

static int __ref msm_thermal_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if (action == CPU_UP_PREPARE || action == CPU_UP_PREPARE_FROZEN) {
		if (core_control_enabled &&
			(msm_thermal_info.core_control_mask & BIT(cpu)) &&
			(cpus_offlined & BIT(cpu))) {
			pr_info("%s: Preventing cpu%d from coming online.\n",
					KBUILD_MODNAME, cpu);
			return NOTIFY_BAD;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block __refdata msm_thermal_cpu_notifier = {
	.notifier_call = msm_thermal_cpu_callback,
};

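/*
 * Thermal timer: an Android alarm that fires after the user-programmed
 * wakeup_ms interval and notifies userspace through the wakeup_ms sysfs
 * node from workqueue context.
 */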
static void thermal_rtc_setup(void)
{
	ktime_t wakeup_time;
	ktime_t curr_time;

	curr_time = alarm_get_elapsed_realtime();
	wakeup_time = ktime_add_us(curr_time,
			(wakeup_ms * USEC_PER_MSEC));
	alarm_start_range(&thermal_rtc, wakeup_time,
			wakeup_time);
	pr_debug("%s: Current Time: %ld %ld, Alarm set to: %ld %ld\n",
			KBUILD_MODNAME,
			ktime_to_timeval(curr_time).tv_sec,
			ktime_to_timeval(curr_time).tv_usec,
			ktime_to_timeval(wakeup_time).tv_sec,
			ktime_to_timeval(wakeup_time).tv_usec);
}

static void timer_work_fn(struct work_struct *work)
{
	sysfs_notify(tt_kobj, NULL, "wakeup_ms");
}

static void thermal_rtc_callback(struct alarm *al)
{
	struct timeval ts;
	ts = ktime_to_timeval(alarm_get_elapsed_realtime());
	schedule_work(&timer_work);
	pr_debug("%s: Time on alarm expiry: %ld %ld\n", KBUILD_MODNAME,
			ts.tv_sec, ts.tv_usec);
}

/*
 * We will reset the cpu frequency limits here. The core online/offline
 * status will be carried over to the process stopping msm_thermal, as
 * we do not want to online a core and bring back the thermal issues.
 */
static void __ref disable_msm_thermal(void)
{
	int cpu = 0;

	/* make sure check_temp is no longer running */
	cancel_delayed_work(&check_temp_work);
	flush_scheduled_work();

	if (limited_max_freq == MSM_CPUFREQ_NO_LIMIT)
		return;

	for_each_possible_cpu(cpu) {
		update_cpu_max_freq(cpu, MSM_CPUFREQ_NO_LIMIT);
	}
}

static int __ref set_enabled(const char *val, const struct kernel_param *kp)
{
	int ret = 0;

	ret = param_set_bool(val, kp);
	if (!enabled)
		disable_msm_thermal();
	else
		pr_info("%s: no action for enabled = %d\n",
				KBUILD_MODNAME, enabled);

	pr_info("%s: enabled = %d\n", KBUILD_MODNAME, enabled);

	return ret;
}

static struct kernel_param_ops module_ops = {
	.set = set_enabled,
	.get = param_get_bool,
};

module_param_cb(enabled, &module_ops, &enabled, 0644);
MODULE_PARM_DESC(enabled, "enforce thermal limit on cpu");

#ifdef CONFIG_SMP
/* Call with core_control_mutex locked */
static int __ref update_offline_cores(int val)
{
	int cpu = 0;
	int ret = 0;

	cpus_offlined = msm_thermal_info.core_control_mask & val;
	if (!core_control_enabled)
		return 0;

	for_each_possible_cpu(cpu) {
		if (!(cpus_offlined & BIT(cpu)))
			continue;
		if (!cpu_online(cpu))
			continue;
		ret = cpu_down(cpu);
		if (ret)
			pr_err("%s: Unable to offline cpu%d\n",
				KBUILD_MODNAME, cpu);
	}
	return ret;
}
#else
static int update_offline_cores(int val)
{
	return 0;
}
#endif

static ssize_t show_cc_enabled(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", core_control_enabled);
}

static ssize_t __ref store_cc_enabled(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = 0;
	int val = 0;

	mutex_lock(&core_control_mutex);
	ret = kstrtoint(buf, 10, &val);
	if (ret) {
		pr_err("%s: Invalid input %s\n", KBUILD_MODNAME, buf);
		goto done_store_cc;
	}

	if (core_control_enabled == !!val)
		goto done_store_cc;

	core_control_enabled = !!val;
	if (core_control_enabled) {
		pr_info("%s: Core control enabled\n", KBUILD_MODNAME);
		register_cpu_notifier(&msm_thermal_cpu_notifier);
		update_offline_cores(cpus_offlined);
	} else {
		pr_info("%s: Core control disabled\n", KBUILD_MODNAME);
		unregister_cpu_notifier(&msm_thermal_cpu_notifier);
	}

done_store_cc:
	mutex_unlock(&core_control_mutex);
	return count;
}

static ssize_t show_cpus_offlined(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", cpus_offlined);
}

static ssize_t __ref store_cpus_offlined(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = 0;
	uint32_t val = 0;

	mutex_lock(&core_control_mutex);
	ret = kstrtouint(buf, 10, &val);
	if (ret) {
		pr_err("%s: Invalid input %s\n", KBUILD_MODNAME, buf);
		goto done_cc;
	}

	if (enabled) {
		pr_err("%s: Ignoring request; polling thread is enabled.\n",
				KBUILD_MODNAME);
		goto done_cc;
	}

	if (cpus_offlined == val)
		goto done_cc;

	update_offline_cores(val);
done_cc:
	mutex_unlock(&core_control_mutex);
	return count;
}

static __refdata struct kobj_attribute cc_enabled_attr =
__ATTR(enabled, 0644, show_cc_enabled, store_cc_enabled);

static __refdata struct kobj_attribute cpus_offlined_attr =
__ATTR(cpus_offlined, 0644, show_cpus_offlined, store_cpus_offlined);

static __refdata struct attribute *cc_attrs[] = {
	&cc_enabled_attr.attr,
	&cpus_offlined_attr.attr,
	NULL,
};

static __refdata struct attribute_group cc_attr_group = {
	.attrs = cc_attrs,
};

static ssize_t show_wakeup_ms(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", wakeup_ms);
}

static ssize_t store_wakeup_ms(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret;
	ret = kstrtouint(buf, 10, &wakeup_ms);

	if (ret) {
		pr_err("%s: Trying to set invalid wakeup timer\n",
				KBUILD_MODNAME);
		return ret;
	}

	if (wakeup_ms > 0) {
		thermal_rtc_setup();
		pr_debug("%s: Timer started for %ums\n", KBUILD_MODNAME,
				wakeup_ms);
	} else {
		ret = alarm_cancel(&thermal_rtc);
		if (ret)
			pr_debug("%s: Timer canceled\n", KBUILD_MODNAME);
		else
			pr_debug("%s: No active timer present to cancel\n",
					KBUILD_MODNAME);
	}
	return count;
}

static __refdata struct kobj_attribute timer_attr =
__ATTR(wakeup_ms, 0644, show_wakeup_ms, store_wakeup_ms);

static __refdata struct attribute *tt_attrs[] = {
	&timer_attr.attr,
	NULL,
};

static __refdata struct attribute_group tt_attr_group = {
	.attrs = tt_attrs,
};

static __init int msm_thermal_add_cc_nodes(void)
{
	struct kobject *module_kobj = NULL;
	struct kobject *cc_kobj = NULL;
	int ret = 0;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("%s: cannot find kobject for module\n",
				KBUILD_MODNAME);
		ret = -ENOENT;
		goto done_cc_nodes;
	}

	cc_kobj = kobject_create_and_add("core_control", module_kobj);
	if (!cc_kobj) {
		pr_err("%s: cannot create core control kobj\n",
				KBUILD_MODNAME);
		ret = -ENOMEM;
		goto done_cc_nodes;
	}

	ret = sysfs_create_group(cc_kobj, &cc_attr_group);
	if (ret) {
		pr_err("%s: cannot create group\n", KBUILD_MODNAME);
		goto done_cc_nodes;
	}

	return 0;

done_cc_nodes:
	if (cc_kobj)
		kobject_del(cc_kobj);
	return ret;
}

static __init int msm_thermal_add_timer_nodes(void)
{
	struct kobject *module_kobj = NULL;
	int ret = 0;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("%s: cannot find kobject for module\n",
				KBUILD_MODNAME);
		ret = -ENOENT;
		goto failed;
	}

	tt_kobj = kobject_create_and_add("thermal_timer", module_kobj);
	if (!tt_kobj) {
		pr_err("%s: cannot create timer kobj\n",
				KBUILD_MODNAME);
		ret = -ENOMEM;
		goto failed;
	}

	ret = sysfs_create_group(tt_kobj, &tt_attr_group);
	if (ret) {
		pr_err("%s: cannot create group\n", KBUILD_MODNAME);
		goto failed;
	}

	return 0;

failed:
	if (tt_kobj)
		kobject_del(tt_kobj);
	return ret;
}

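/*
 * Driver entry point called from the platform probe: validate the sensor
 * configuration, start the polling worker and, on multi-core targets,
 * enable core control and register the CPU hotplug notifier.
 */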
int __devinit msm_thermal_init(struct msm_thermal_data *pdata)
{
	int ret = 0;

	BUG_ON(!pdata);
	tsens_get_max_sensor_num(&max_tsens_num);
	memcpy(&msm_thermal_info, pdata, sizeof(struct msm_thermal_data));

	if (create_sensor_id_map())
		return -EINVAL;
	if (check_sensor_id(msm_thermal_info.sensor_id))
		return -EINVAL;

	enabled = 1;
	if (num_possible_cpus() > 1)
		core_control_enabled = 1;
	INIT_DELAYED_WORK(&check_temp_work, check_temp);
	schedule_delayed_work(&check_temp_work, 0);

	if (num_possible_cpus() > 1)
		register_cpu_notifier(&msm_thermal_cpu_notifier);

	return ret;
}

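/*
 * Grab regulator handles for the vdd-restriction rails (or fall back to a
 * cpufreq floor for rails marked freq-req) and apply the default
 * restriction until the first temperature reading is taken.
 */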
static int vdd_restriction_reg_init(struct platform_device *pdev)
{
	int ret = 0;
	int i;

	for (i = 0; i < rails_cnt; i++) {
		if (rails[i].freq_req == 1) {
			usefreq |= BIT(i);
			check_freq_table();
			/*
			 * Restrict frequency by default until we have made
			 * our first temp reading
			 */
			if (freq_table_get)
				ret = vdd_restriction_apply_freq(&rails[i], 0);
			else
				pr_info("%s:Defer vdd rstr freq init\n",
						__func__);
		} else {
			rails[i].reg = devm_regulator_get(&pdev->dev,
					rails[i].name);
			if (IS_ERR_OR_NULL(rails[i].reg)) {
				ret = PTR_ERR(rails[i].reg);
				if (ret != -EPROBE_DEFER) {
					pr_err("%s, could not get regulator: %s\n",
						rails[i].name, __func__);
					rails[i].reg = NULL;
					rails[i].curr_level = -2;
					return ret;
				}
				return ret;
			}
			/*
			 * Restrict voltage by default until we have made
			 * our first temp reading
			 */
			ret = vdd_restriction_apply_voltage(&rails[i], 0);
		}
	}

	return ret;
}

static int psm_reg_init(struct platform_device *pdev)
{
	int ret = 0;
	int i = 0;
	int j = 0;

	for (i = 0; i < psm_rails_cnt; i++) {
		psm_rails[i].reg = rpm_regulator_get(&pdev->dev,
				psm_rails[i].name);
		if (IS_ERR_OR_NULL(psm_rails[i].reg)) {
			ret = PTR_ERR(psm_rails[i].reg);
			if (ret != -EPROBE_DEFER) {
				pr_err("%s, could not get rpm regulator: %s\n",
					psm_rails[i].name, __func__);
				psm_rails[i].reg = NULL;
				goto psm_reg_exit;
			}
			return ret;
		}
		/* Apps default vote for PWM mode */
		psm_rails[i].init = PMIC_PWM_MODE;
		ret = rpm_regulator_set_mode(psm_rails[i].reg,
				psm_rails[i].init);
		if (ret) {
			pr_err("%s: Cannot set PMIC PWM mode\n", __func__);
			return ret;
		} else
			psm_rails[i].mode = PMIC_PWM_MODE;
	}

	return ret;

psm_reg_exit:
	if (ret) {
		for (j = 0; j < i; j++) {
			if (psm_rails[j].reg != NULL)
				rpm_regulator_put(psm_rails[j].reg);
		}
	}

	return ret;
}

static int msm_thermal_add_vdd_rstr_nodes(void)
{
	struct kobject *module_kobj = NULL;
	struct kobject *vdd_rstr_kobj = NULL;
	struct kobject *vdd_rstr_reg_kobj[MAX_RAILS] = {0};
	int rc = 0;
	int i = 0;

	if (!vdd_rstr_probed) {
		vdd_rstr_nodes_called = true;
		return rc;
	}

	if (vdd_rstr_probed && rails_cnt == 0)
		return rc;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("%s: cannot find kobject for module %s\n",
			__func__, KBUILD_MODNAME);
		rc = -ENOENT;
		goto thermal_sysfs_add_exit;
	}

	vdd_rstr_kobj = kobject_create_and_add("vdd_restriction", module_kobj);
	if (!vdd_rstr_kobj) {
		pr_err("%s: cannot create vdd_restriction kobject\n", __func__);
		rc = -ENOMEM;
		goto thermal_sysfs_add_exit;
	}

	rc = sysfs_create_group(vdd_rstr_kobj, &vdd_rstr_en_attribs_gp);
	if (rc) {
		pr_err("%s: cannot create kobject attribute group\n", __func__);
		rc = -ENOMEM;
		goto thermal_sysfs_add_exit;
	}

	for (i = 0; i < rails_cnt; i++) {
		vdd_rstr_reg_kobj[i] = kobject_create_and_add(rails[i].name,
				vdd_rstr_kobj);
		if (!vdd_rstr_reg_kobj[i]) {
			pr_err("%s: cannot create kobject for %s\n",
				__func__, rails[i].name);
			rc = -ENOMEM;
			goto thermal_sysfs_add_exit;
		}

		rails[i].attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 3,
				GFP_KERNEL);
		if (!rails[i].attr_gp.attrs) {
			rc = -ENOMEM;
			goto thermal_sysfs_add_exit;
		}

		VDD_RES_RW_ATTRIB(rails[i], rails[i].level_attr, 0, level);
		VDD_RES_RO_ATTRIB(rails[i], rails[i].value_attr, 1, value);
		rails[i].attr_gp.attrs[2] = NULL;

		rc = sysfs_create_group(vdd_rstr_reg_kobj[i],
				&rails[i].attr_gp);
		if (rc) {
			pr_err("%s: cannot create attribute group for %s\n",
				__func__, rails[i].name);
			goto thermal_sysfs_add_exit;
		}
	}

	return rc;

thermal_sysfs_add_exit:
	if (rc) {
		for (i = 0; i < rails_cnt; i++) {
			kobject_del(vdd_rstr_reg_kobj[i]);
			kfree(rails[i].attr_gp.attrs);
		}
		if (vdd_rstr_kobj)
			kobject_del(vdd_rstr_kobj);
	}
	return rc;
}

static int msm_thermal_add_psm_nodes(void)
{
	struct kobject *module_kobj = NULL;
	struct kobject *psm_kobj = NULL;
	struct kobject *psm_reg_kobj[MAX_RAILS] = {0};
	int rc = 0;
	int i = 0;

	if (!psm_probed) {
		psm_nodes_called = true;
		return rc;
	}

	if (psm_probed && psm_rails_cnt == 0)
		return rc;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("%s: cannot find kobject for module %s\n",
			__func__, KBUILD_MODNAME);
		rc = -ENOENT;
		goto psm_node_exit;
	}

	psm_kobj = kobject_create_and_add("pmic_sw_mode", module_kobj);
	if (!psm_kobj) {
		pr_err("%s: cannot create psm kobject\n", KBUILD_MODNAME);
		rc = -ENOMEM;
		goto psm_node_exit;
	}

	for (i = 0; i < psm_rails_cnt; i++) {
		psm_reg_kobj[i] = kobject_create_and_add(psm_rails[i].name,
				psm_kobj);
		if (!psm_reg_kobj[i]) {
			pr_err("%s: cannot create kobject for %s\n",
				KBUILD_MODNAME, psm_rails[i].name);
			rc = -ENOMEM;
			goto psm_node_exit;
		}
		psm_rails[i].attr_gp.attrs = kzalloc(
				sizeof(struct attribute *) * 2, GFP_KERNEL);
		if (!psm_rails[i].attr_gp.attrs) {
			rc = -ENOMEM;
			goto psm_node_exit;
		}

		PSM_RW_ATTRIB(psm_rails[i], psm_rails[i].mode_attr, 0, mode);
		psm_rails[i].attr_gp.attrs[1] = NULL;

		rc = sysfs_create_group(psm_reg_kobj[i], &psm_rails[i].attr_gp);
		if (rc) {
			pr_err("%s: cannot create attribute group for %s\n",
				KBUILD_MODNAME, psm_rails[i].name);
			goto psm_node_exit;
		}
	}

	return rc;

psm_node_exit:
	if (rc) {
		for (i = 0; i < psm_rails_cnt; i++) {
			kobject_del(psm_reg_kobj[i]);
			kfree(psm_rails[i].attr_gp.attrs);
		}
		if (psm_kobj)
			kobject_del(psm_kobj);
	}
	return rc;
}

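/*
 * Parse the optional vdd-restriction properties and child rail nodes from
 * the device tree, allocate the rail table and acquire regulator handles.
 * Any failure other than -EPROBE_DEFER leaves the feature disabled while
 * the rest of the driver continues.
 */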
static int probe_vdd_rstr(struct device_node *node,
		struct msm_thermal_data *data, struct platform_device *pdev)
{
	int ret = 0;
	int i = 0;
	int arr_size;
	char *key = NULL;
	struct device_node *child_node = NULL;

	rails = NULL;

	key = "qcom,vdd-restriction-temp";
	ret = of_property_read_u32(node, key, &data->vdd_rstr_temp_degC);
	if (ret)
		goto read_node_fail;

	key = "qcom,vdd-restriction-temp-hysteresis";
	ret = of_property_read_u32(node, key, &data->vdd_rstr_temp_hyst_degC);
	if (ret)
		goto read_node_fail;

	for_each_child_of_node(node, child_node) {
		rails_cnt++;
	}

	if (rails_cnt == 0)
		goto read_node_fail;
	if (rails_cnt >= MAX_RAILS) {
		pr_err("%s: Too many rails.\n", __func__);
		return -EFAULT;
	}

	rails = kzalloc(sizeof(struct rail) * rails_cnt,
			GFP_KERNEL);
	if (!rails) {
		pr_err("%s: Fail to allocate memory for rails.\n", __func__);
		return -ENOMEM;
	}

	i = 0;
	for_each_child_of_node(node, child_node) {
		key = "qcom,vdd-rstr-reg";
		ret = of_property_read_string(child_node, key, &rails[i].name);
		if (ret)
			goto read_node_fail;

		key = "qcom,levels";
		if (!of_get_property(child_node, key, &arr_size))
			goto read_node_fail;
		rails[i].num_levels = arr_size/sizeof(__be32);
		if (rails[i].num_levels >
			sizeof(rails[i].levels)/sizeof(uint32_t)) {
			pr_err("%s: Array size too large\n", __func__);
			return -EFAULT;
		}
		ret = of_property_read_u32_array(child_node, key,
				rails[i].levels, rails[i].num_levels);
		if (ret)
			goto read_node_fail;

		key = "qcom,freq-req";
		rails[i].freq_req = of_property_read_bool(child_node, key);
		if (rails[i].freq_req)
			rails[i].min_level = MSM_CPUFREQ_NO_LIMIT;
		else {
			key = "qcom,min-level";
			ret = of_property_read_u32(child_node, key,
					&rails[i].min_level);
			if (ret)
				goto read_node_fail;
		}

		rails[i].curr_level = -1;
		rails[i].reg = NULL;
		i++;
	}

	if (rails_cnt) {
		ret = vdd_restriction_reg_init(pdev);
		if (ret) {
			pr_info("%s:Failed to get regulators. KTM continues.\n",
					__func__);
			goto read_node_fail;
		}
		vdd_rstr_enabled = true;
	}
read_node_fail:
	vdd_rstr_probed = true;
	if (ret) {
		dev_info(&pdev->dev,
			"%s:Failed reading node=%s, key=%s. KTM continues\n",
			__func__, node->full_name, key);
		kfree(rails);
		rails_cnt = 0;
	}
	if (ret == -EPROBE_DEFER)
		vdd_rstr_probed = false;
	return ret;
}

static int probe_psm(struct device_node *node, struct msm_thermal_data *data,
		struct platform_device *pdev)
{
	int ret = 0;
	int j = 0;
	char *key = NULL;

	psm_rails = NULL;

	key = "qcom,pmic-sw-mode-temp";
	ret = of_property_read_u32(node, key, &data->psm_temp_degC);
	if (ret)
		goto read_node_fail;

	key = "qcom,pmic-sw-mode-temp-hysteresis";
	ret = of_property_read_u32(node, key, &data->psm_temp_hyst_degC);
	if (ret)
		goto read_node_fail;

	key = "qcom,pmic-sw-mode-regs";
	psm_rails_cnt = of_property_count_strings(node, key);
	psm_rails = kzalloc(sizeof(struct psm_rail) * psm_rails_cnt,
			GFP_KERNEL);
	if (!psm_rails) {
		pr_err("%s: Fail to allocate memory for psm rails\n", __func__);
		psm_rails_cnt = 0;
		return -ENOMEM;
	}

	for (j = 0; j < psm_rails_cnt; j++) {
		ret = of_property_read_string_index(node, key, j,
				&psm_rails[j].name);
		if (ret)
			goto read_node_fail;
	}

	if (psm_rails_cnt) {
		ret = psm_reg_init(pdev);
		if (ret) {
			pr_info("%s:Failed to get regulators. KTM continues.\n",
					__func__);
			goto read_node_fail;
		}
		psm_enabled = true;
	}

read_node_fail:
	psm_probed = true;
	if (ret) {
		dev_info(&pdev->dev,
			"%s:Failed reading node=%s, key=%s. KTM continues\n",
			__func__, node->full_name, key);
		kfree(psm_rails);
		psm_rails_cnt = 0;
	}
	if (ret == -EPROBE_DEFER)
		psm_probed = false;
	return ret;
}

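/*
 * Platform probe: read the mandatory qcom,* thermal properties from the
 * device tree, probe the optional PSM and vdd-restriction features, and
 * start the thermal monitor via msm_thermal_init().
 */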
static int __devinit msm_thermal_dev_probe(struct platform_device *pdev)
{
	int ret = 0;
	char *key = NULL;
	struct device_node *node = pdev->dev.of_node;
	struct msm_thermal_data data;

	memset(&data, 0, sizeof(struct msm_thermal_data));

	key = "qcom,sensor-id";
	ret = of_property_read_u32(node, key, &data.sensor_id);
	if (ret)
		goto fail;

	key = "qcom,poll-ms";
	ret = of_property_read_u32(node, key, &data.poll_ms);
	if (ret)
		goto fail;

	key = "qcom,limit-temp";
	ret = of_property_read_u32(node, key, &data.limit_temp_degC);
	if (ret)
		goto fail;

	key = "qcom,temp-hysteresis";
	ret = of_property_read_u32(node, key, &data.temp_hysteresis_degC);
	if (ret)
		goto fail;

	key = "qcom,freq-step";
	ret = of_property_read_u32(node, key, &data.freq_step);
	if (ret)
		goto fail;

	key = "qcom,freq-control-mask";
	ret = of_property_read_u32(node, key, &data.freq_control_mask);

	key = "qcom,core-limit-temp";
	ret = of_property_read_u32(node, key, &data.core_limit_temp_degC);

	key = "qcom,core-temp-hysteresis";
	ret = of_property_read_u32(node, key, &data.core_temp_hysteresis_degC);

	key = "qcom,core-control-mask";
	ret = of_property_read_u32(node, key, &data.core_control_mask);

	/*
	 * Probe optional properties below. Call probe_psm before
	 * probe_vdd_rstr because rpm_regulator_get has to be called
	 * before devm_regulator_get.
	 */
	ret = probe_psm(node, &data, pdev);
	if (ret == -EPROBE_DEFER)
		goto fail;
	ret = probe_vdd_rstr(node, &data, pdev);
	if (ret == -EPROBE_DEFER)
		goto fail;

	/*
	 * In case the sysfs add-node calls ran before this probe function,
	 * make sure the sysfs nodes are created again.
	 */
	if (psm_nodes_called) {
		msm_thermal_add_psm_nodes();
		psm_nodes_called = false;
	}
	if (vdd_rstr_nodes_called) {
		msm_thermal_add_vdd_rstr_nodes();
		vdd_rstr_nodes_called = false;
	}
	ret = msm_thermal_init(&data);

	return ret;
fail:
	if (ret)
		pr_err("%s: Failed reading node=%s, key=%s\n",
			__func__, node->full_name, key);

	return ret;
}

static struct of_device_id msm_thermal_match_table[] = {
	{.compatible = "qcom,msm-thermal"},
	{},
};

static struct platform_driver msm_thermal_device_driver = {
	.probe = msm_thermal_dev_probe,
	.driver = {
		.name = "msm-thermal",
		.owner = THIS_MODULE,
		.of_match_table = msm_thermal_match_table,
	},
};

int __init msm_thermal_device_init(void)
{
	return platform_driver_register(&msm_thermal_device_driver);
}

int __init msm_thermal_late_init(void)
{
	if (num_possible_cpus() > 1)
		msm_thermal_add_cc_nodes();
	msm_thermal_add_psm_nodes();
	msm_thermal_add_vdd_rstr_nodes();
	alarm_init(&thermal_rtc, ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
			thermal_rtc_callback);
	INIT_WORK(&timer_work, timer_work_fn);
	msm_thermal_add_timer_nodes();

	return 0;
}
late_initcall(msm_thermal_late_init);