1/*
2 * amlogic_thermal.c - Samsung amlogic thermal (Thermal Management Unit)
3 *
4 * Copyright (C) 2011 Samsung Electronics
5 * Donggeun Kim <dg77.kim@samsung.com>
6 * Amit Daniel Kachhap <amit.kachhap@linaro.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/err.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/platform_device.h>
29#include <linux/interrupt.h>
30#include <linux/clk.h>
31#include <linux/workqueue.h>
32#include <linux/sysfs.h>
33#include <linux/kobject.h>
34#include <linux/io.h>
35#include <linux/mutex.h>
36#include <linux/thermal.h>
37#include <linux/cpufreq.h>
38#include <linux/cpu_cooling.h>
39#include <linux/of.h>
40#include <linux/amlogic/saradc.h>
41#include <linux/random.h>
42#include <linux/gpu_cooling.h>
43#include <linux/cpucore_cooling.h>
44#include <linux/gpucore_cooling.h>
45#include <linux/thermal_core.h>
46#include <linux/version.h>
47#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 10, 33)
48#include <linux/amlogic/aml_thermal_hw.h>
49#else
50#include <mach/thermal.h>
51#endif
53#include "amlogic_thermal.h"
54
55#define DBG_VIRTUAL 0
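/*
 * Module-wide controls: thermal_debug_enable gates THERMAL_DBG() output,
 * high_temp_protect arms the second trip point (see high_temp_protect_store()),
 * and freq_update_flag signals aml_cal_virtual_temp() that pdata->monitor
 * holds fresh average cpu/gpu frequencies.
 */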
56int thermal_debug_enable = 0;
57int high_temp_protect = 0;
58atomic_t freq_update_flag;
59EXPORT_SYMBOL(thermal_debug_enable);
60EXPORT_SYMBOL(high_temp_protect);
61EXPORT_SYMBOL(freq_update_flag);
62
#define THERMAL_DBG(format, args...) \
	do { \
		if (thermal_debug_enable) \
			printk("[THERMAL]" format, ##args); \
	} while (0)
67
68static struct aml_virtual_thermal_device cpu_virtual_thermal = {};
69static struct aml_virtual_thermal_device gpu_virtual_thermal = {};
70static unsigned int report_interval[4] = {};
71
72/* CPU Zone information */
73#define PANIC_ZONE 4
74#define WARN_ZONE 3
75#define MONITOR_ZONE 2
76#define SAFE_ZONE 1
77
78#define GET_ZONE(trip) (trip + 2)
79#define GET_TRIP(zone) (zone - 2)
80
81static void amlogic_unregister_thermal(struct amlogic_thermal_platform_data *pdata);
82static int amlogic_register_thermal(struct amlogic_thermal_platform_data *pdata, struct platform_device *pdev);
83
84void thermal_lock(struct mutex *lock)
85{
86 mutex_lock(lock);
87}
88EXPORT_SYMBOL(thermal_lock);
89
90void thermal_unlock(struct mutex *lock)
91{
92 mutex_unlock(lock);
93}
94EXPORT_SYMBOL(thermal_unlock);
95
96/* Get mode callback functions for thermal zone */
97static int amlogic_get_mode(struct thermal_zone_device *thermal,
98 enum thermal_device_mode *mode)
99{
100 struct amlogic_thermal_platform_data *pdata= thermal->devdata;
101
102 if (pdata)
103 *mode = pdata->mode;
104 return 0;
105}
106
107/* Set mode callback functions for thermal zone */
108static int amlogic_set_mode(struct thermal_zone_device *thermal,
109 enum thermal_device_mode mode)
110{
111 struct amlogic_thermal_platform_data *pdata= thermal->devdata;
112 struct cpucore_cooling_device *cpucore_device =NULL;
113 struct gpucore_cooling_device *gpucore_device = NULL;
114 if(!pdata)
115 return -EINVAL;
116
117 //mutex_lock(&pdata->therm_dev->lock);
118
119 if (mode == THERMAL_DEVICE_ENABLED){
120 pdata->therm_dev->polling_delay = pdata->idle_interval;
121 if(pdata->cpucore_cool_dev){
122 cpucore_device=pdata->cpucore_cool_dev->devdata;
123 cpucore_device->stop_flag=0;
124 }
125 if(pdata->gpucore_cool_dev){
126 gpucore_device=pdata->gpucore_cool_dev->devdata;
127 gpucore_device->stop_flag=0;
128 }
129 if (pdata->keep_mode) { // start work
130 schedule_delayed_work(&pdata->thermal_work, msecs_to_jiffies(100));
131 }
132 }
133 else{
134 pdata->therm_dev->polling_delay = 0;
135 if (pdata->keep_mode) {
136 cancel_delayed_work_sync(&pdata->thermal_work);
137 keep_mode_set_mode(pdata);
138 }
139 if(pdata->cpucore_cool_dev)
140 pdata->cpucore_cool_dev->ops->set_cur_state(pdata->cpucore_cool_dev,(0|CPU_STOP));
141 if(pdata->gpucore_cool_dev)
142 pdata->gpucore_cool_dev->ops->set_cur_state(pdata->gpucore_cool_dev,(0|GPU_STOP));
143 }
144
145 //mutex_unlock(&pdata->therm_dev->lock);
146
147 pdata->mode = mode;
148 thermal_zone_device_update(pdata->therm_dev);
149 pr_info("thermal polling set for duration=%d msec\n",
150 pdata->therm_dev->polling_delay);
151 return 0;
152}
153
154/* Get trip type callback functions for thermal zone */
155static int amlogic_get_trip_type(struct thermal_zone_device *thermal, int trip,
156 enum thermal_trip_type *type)
157{
158 if(trip < thermal->trips-1)
159 *type = THERMAL_TRIP_ACTIVE;
160 else if(trip == thermal->trips-1)
161 *type = THERMAL_TRIP_CRITICAL;
162 else
163 return -EINVAL;
164 return 0;
165}
166
167/* Get trip temperature callback functions for thermal zone */
static int amlogic_get_trip_temp(struct thermal_zone_device *thermal, int trip,
				unsigned long *temp)
{
	struct amlogic_thermal_platform_data *pdata = thermal->devdata;

	if (trip >= pdata->temp_trip_count || trip < 0)
		return -EINVAL;
	mutex_lock(&pdata->lock);
	/* temperatures are stored and reported as plain degrees, no millicelsius conversion */
	*temp = pdata->tmp_trip[trip].temperature;
	mutex_unlock(&pdata->lock);

	return 0;
}

static int amlogic_set_trip_temp(struct thermal_zone_device *thermal, int trip,
				unsigned long temp)
{
	struct amlogic_thermal_platform_data *pdata = thermal->devdata;

	if (trip >= pdata->temp_trip_count || trip < 0)
		return -EINVAL;
	mutex_lock(&pdata->lock);
	pdata->tmp_trip[trip].temperature = temp;
	mutex_unlock(&pdata->lock);
	return 0;
}
196
197/* Get critical temperature callback functions for thermal zone */
198static int amlogic_get_crit_temp(struct thermal_zone_device *thermal,
199 unsigned long *temp)
200{
201 int ret;
202 /* Panic zone */
203 ret =amlogic_get_trip_temp(thermal, thermal->trips-1, temp);
204
205 return ret;
206}
207
208
209/* Bind callback functions for thermal zone */
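/*
 * Cooling device names follow the "thermal-<type>-<id>" pattern; <type>
 * selects one of the cpufreq/gpufreq/cpucore/gpucore branches below, and
 * every trip point is bound with the upper/lower cooling states taken
 * from the device tree (or computed from the cpu/gpu core tables).
 */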
210static int amlogic_bind(struct thermal_zone_device *thermal,
211 struct thermal_cooling_device *cdev)
212{
213 int ret = 0, i;
214 struct amlogic_thermal_platform_data *pdata= thermal->devdata;
215 int id;
216 char type[THERMAL_NAME_LENGTH];
217 unsigned long max;
218
	if (sscanf(cdev->type, "thermal-%7s-%d", type, &id) != 2)
		return -EINVAL;
221 if(!strcmp(type,"cpufreq")){
222 /* Bind the thermal zone to the cpufreq cooling device */
223 for (i = 0; i < pdata->temp_trip_count; i++) {
224 if(pdata->tmp_trip[0].cpu_upper_level==THERMAL_CSTATE_INVALID)
225 {
226 printk("disable cpu cooling device by dtd\n");
227 ret = -EINVAL;
228 goto out;
229 }
230 if (thermal_zone_bind_cooling_device(thermal, i, cdev,
231 pdata->tmp_trip[i].cpu_upper_level,
232 pdata->tmp_trip[i].cpu_lower_level)) {
233 pr_err("error binding cdev inst %d\n", i);
234 ret = -EINVAL;
235 goto out;
236 }
237 }
238 pr_info("%s bind %s okay !\n",thermal->type,cdev->type);
239 if (pdata->keep_mode) {
240 cdev->ops->get_max_state(cdev, &max);
241 keep_mode_bind(pdata, max, 0);
242 }
243 }
244
245 if(!strcmp(type,"gpufreq")){
246 struct gpufreq_cooling_device *gpufreq_dev=
247 (struct gpufreq_cooling_device *)cdev->devdata;
		/* Bind the thermal zone to the gpufreq cooling device */
249 for (i = 0; i < pdata->temp_trip_count; i++) {
250 if(!gpufreq_dev->get_gpu_freq_level){
251 ret = -EINVAL;
252 pr_info("invalidate pointer %p\n",gpufreq_dev->get_gpu_freq_level);
253 goto out;
254 }
255 pdata->tmp_trip[i].gpu_lower_level=gpufreq_dev->get_gpu_freq_level(pdata->tmp_trip[i].gpu_upper_freq);
256 pdata->tmp_trip[i].gpu_upper_level=gpufreq_dev->get_gpu_freq_level(pdata->tmp_trip[i].gpu_lower_freq);
257 printk("pdata->tmp_trip[%d].gpu_lower_level=%d\n",i,pdata->tmp_trip[i].gpu_lower_level);
258 printk("pdata->tmp_trip[%d].gpu_upper_level=%d\n",i,pdata->tmp_trip[i].gpu_upper_level);
259 if(pdata->tmp_trip[0].gpu_lower_level==THERMAL_CSTATE_INVALID)
260 {
261 printk("disable gpu cooling device by dtd\n");
262 ret = -EINVAL;
263 goto out;
264 }
265 if (thermal_zone_bind_cooling_device(thermal, i, cdev,
266 pdata->tmp_trip[i].gpu_upper_level,
267 pdata->tmp_trip[i].gpu_lower_level)) {
268 pr_err("error binding cdev inst %d\n", i);
269 ret = -EINVAL;
270 goto out;
271 }
272 }
273 pdata->gpu_cool_dev=cdev;
274 pr_info("%s bind %s okay !\n",thermal->type,cdev->type);
275 if (pdata->keep_mode) {
276 cdev->ops->get_max_state(cdev, &max);
277 keep_mode_bind(pdata, max, 1);
278 }
279 }
280
281 if(!strcmp(type,"cpucore")){
		/* Bind the thermal zone to the cpucore cooling device */
283 struct cpucore_cooling_device *cpucore_dev=
284 (struct cpucore_cooling_device *)cdev->devdata;
285 for (i = 0; i < pdata->temp_trip_count; i++) {
286 if(pdata->tmp_trip[0].cpu_core_num==THERMAL_CSTATE_INVALID)
287 {
288 printk("disable cpucore cooling device by dtd\n");
289 ret = -EINVAL;
290 goto out;
291 }
292 if(pdata->tmp_trip[i].cpu_core_num !=-1)
293 pdata->tmp_trip[i].cpu_core_upper=cpucore_dev->max_cpu_core_num-pdata->tmp_trip[i].cpu_core_num;
294 else
295 pdata->tmp_trip[i].cpu_core_upper=pdata->tmp_trip[i].cpu_core_num;
296 printk("tmp_trip[%d].cpu_core_upper=%d\n",i,pdata->tmp_trip[i].cpu_core_upper);
297 if (thermal_zone_bind_cooling_device(thermal, i, cdev,
298 pdata->tmp_trip[i].cpu_core_upper,
299 pdata->tmp_trip[i].cpu_core_upper)) {
300 pr_err("error binding cdev inst %d\n", i);
301 ret = -EINVAL;
302 goto out;
303 }
304 }
305 pr_info("%s bind %s okay !\n",thermal->type,cdev->type);
306 if (pdata->keep_mode) {
307 cdev->ops->get_max_state(cdev, &max);
308 keep_mode_bind(pdata, max, 2);
309 }
310 }
311
312 if(!strcmp(type,"gpucore")){
		/* Bind the thermal zone to the gpucore cooling device */
314 struct gpucore_cooling_device *gpucore_dev=
315 (struct gpucore_cooling_device *)cdev->devdata;
316 for (i = 0; i < pdata->temp_trip_count; i++) {
317 if(pdata->tmp_trip[0].cpu_core_num==THERMAL_CSTATE_INVALID)
318 {
319 printk("disable gpucore cooling device by dtd\n");
320 ret = -EINVAL;
321 goto out;
322 }
323 if(pdata->tmp_trip[i].gpu_core_num != -1)
324 pdata->tmp_trip[i].gpu_core_upper=gpucore_dev->max_gpu_core_num-pdata->tmp_trip[i].gpu_core_num;
325 else
326 pdata->tmp_trip[i].gpu_core_upper=pdata->tmp_trip[i].gpu_core_num;
327
328 printk("tmp_trip[%d].gpu_core_upper=%d\n",i,pdata->tmp_trip[i].gpu_core_upper);
329 if (thermal_zone_bind_cooling_device(thermal, i, cdev,
330 pdata->tmp_trip[i].gpu_core_upper,
331 pdata->tmp_trip[i].gpu_core_upper)) {
332 pr_err("error binding cdev inst %d\n", i);
333 ret = -EINVAL;
334 goto out;
335 }
336 }
337 pdata->gpucore_cool_dev=cdev;
338 pr_info("%s bind %s okay !\n",thermal->type,cdev->type);
339 if (pdata->keep_mode) {
340 cdev->ops->get_max_state(cdev, &max);
341 keep_mode_bind(pdata, max, 3);
342 }
343 }
344 return ret;
345out:
346 return ret;
347}
348
349/* Unbind callback functions for thermal zone */
static int amlogic_unbind(struct thermal_zone_device *thermal,
			struct thermal_cooling_device *cdev)
{
	struct amlogic_thermal_platform_data *pdata;
	int i;

	if (!thermal || !cdev)
		return -EINVAL;

	pdata = thermal->devdata;
	/* unbind every trip point, not just the first one */
	for (i = 0; i < pdata->temp_trip_count; i++) {
		pr_info("\n%s unbinding %s ", thermal->type, cdev->type);
		if (thermal_zone_unbind_cooling_device(thermal, i, cdev)) {
			pr_err(" error %d\n", i);
			return -EINVAL;
		}
		pr_info(" okay\n");
	}
	return 0;
}
370#define ABS(a) ((a) > 0 ? (a) : -(a))
371
372void *thermal_alloc(size_t len)
373{
374 return kzalloc(len, GFP_KERNEL);
375}
376EXPORT_SYMBOL(thermal_alloc);
377
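/*
 * Periodic keep-mode worker: samples the current cpu frequency and feeds
 * it to keep_mode_work(), rescheduling itself every 100 ms while the
 * thermal zone is enabled.
 */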
static void thermal_work(struct work_struct *work)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(0);
	struct amlogic_thermal_platform_data *pdata;
	int cpu_freq = 0;

	pdata = container_of((struct delayed_work *)work, struct amlogic_thermal_platform_data, thermal_work);
	if (policy) {
		cpu_freq = policy->cur;
		cpufreq_cpu_put(policy);	/* drop the reference taken by cpufreq_cpu_get() */
	}
	keep_mode_work(pdata, cpu_freq);
	if (pdata->mode == THERMAL_DEVICE_ENABLED) { // no need to do this work again if thermal disabled
		schedule_delayed_work(&pdata->thermal_work, msecs_to_jiffies(100));
	}
}
393
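/*
 * Parse the optional virtual-thermal properties from the device tree:
 * "use_virtual_thermal" enables the feature, "freq_sample_period" and
 * "report_time" tune the sampling, and "cpu_virtual"/"gpu_virtual" carry
 * the frequency-to-temperature tables used when the chip has no trimmed
 * temperature sensor.
 */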
static int aml_virtual_thermal_probe(struct platform_device *pdev, struct amlogic_thermal_platform_data *pdata)
395{
396 int ret, len, cells;
397 struct property *prop;
398 void *buf;
399
400 if (!of_property_read_bool(pdev->dev.of_node, "use_virtual_thermal")) {
401 printk("%s, virtual thermal is not enabled\n", __func__);
402 pdata->virtual_thermal_en = 0;
403 return 0;
404 } else {
405 printk("%s, virtual thermal enabled\n", __func__);
406 }
407
408 ret = of_property_read_u32(pdev->dev.of_node,
409 "freq_sample_period",
410 &pdata->freq_sample_period);
411 if (ret) {
412 printk("%s, get freq_sample_period failed, us 30 as default\n", __func__);
413 pdata->freq_sample_period = 30;
414 } else {
415 printk("%s, get freq_sample_period with value:%d\n", __func__, pdata->freq_sample_period);
416 }
417 ret = of_property_read_u32_array(pdev->dev.of_node,
418 "report_time",
419 report_interval, sizeof(report_interval) / sizeof(u32));
420 if (ret) {
421 printk("%s, get report_time failed\n", __func__);
422 goto error;
423 } else {
424 printk("[virtual_thermal] report interval:%4d, %4d, %4d, %4d\n",
425 report_interval[0], report_interval[1], report_interval[2], report_interval[3]);
426 }
	/*
	 * read cpu_virtual
	 */
430 prop = of_find_property(pdev->dev.of_node, "cpu_virtual", &len);
431 if (!prop) {
432 printk("%s, cpu virtual not found\n", __func__);
433 goto error;
434 }
435 cells = len / sizeof(struct aml_virtual_thermal);
436 buf = kzalloc(len, GFP_KERNEL);
437 if (!buf) {
438 printk("%s, no memory\n", __func__);
439 return -ENOMEM;
440 }
441 ret = of_property_read_u32_array(pdev->dev.of_node,
442 "cpu_virtual",
443 buf, len/sizeof(u32));
444 if (ret) {
445 printk("%s, read cpu_virtual failed\n", __func__);
446 kfree(buf);
447 goto error;
448 }
449 cpu_virtual_thermal.count = cells;
450 cpu_virtual_thermal.thermal = buf;
451
	/*
	 * read gpu_virtual
	 */
455 prop = of_find_property(pdev->dev.of_node, "gpu_virtual", &len);
456 if (!prop) {
457 printk("%s, gpu virtual not found\n", __func__);
458 goto error;
459 }
460 cells = len / sizeof(struct aml_virtual_thermal);
461 buf = kzalloc(len, GFP_KERNEL);
462 if (!buf) {
463 printk("%s, no memory\n", __func__);
464 return -ENOMEM;
465 }
466 ret = of_property_read_u32_array(pdev->dev.of_node,
467 "gpu_virtual",
468 buf, len/sizeof(u32));
469 if (ret) {
470 printk("%s, read gpu_virtual failed\n", __func__);
471 kfree(buf);
472 goto error;
473 }
474 gpu_virtual_thermal.count = cells;
475 gpu_virtual_thermal.thermal = buf;
476
477#if DBG_VIRTUAL
478 printk("cpu_virtal cells:%d, table:\n", cpu_virtual_thermal.count);
479 for (len = 0; len < cpu_virtual_thermal.count; len++) {
480 printk("%2d, %8d, %4d, %4d, %4d, %4d\n",
481 len,
482 cpu_virtual_thermal.thermal[len].freq,
483 cpu_virtual_thermal.thermal[len].temp_time[0],
484 cpu_virtual_thermal.thermal[len].temp_time[1],
485 cpu_virtual_thermal.thermal[len].temp_time[2],
486 cpu_virtual_thermal.thermal[len].temp_time[3]);
487 }
488 printk("gpu_virtal cells:%d, table:\n", gpu_virtual_thermal.count);
489 for (len = 0; len < gpu_virtual_thermal.count; len++) {
490 printk("%2d, %8d, %4d, %4d, %4d, %4d\n",
491 len,
492 gpu_virtual_thermal.thermal[len].freq,
493 gpu_virtual_thermal.thermal[len].temp_time[0],
494 gpu_virtual_thermal.thermal[len].temp_time[1],
495 gpu_virtual_thermal.thermal[len].temp_time[2],
496 gpu_virtual_thermal.thermal[len].temp_time[3]);
497 }
498#endif
499
500 pdata->virtual_thermal_en = 1;
501 return 0;
502
503error:
504 pdata->virtual_thermal_en = 0;
505 return -1;
506}
507
508static void aml_virtual_thermal_remove(struct amlogic_thermal_platform_data *pdata)
509{
510 kfree(cpu_virtual_thermal.thermal);
511 kfree(gpu_virtual_thermal.thermal);
512 pdata->virtual_thermal_en = 0;
513}
514
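/*
 * Map an average frequency onto an index of the virtual thermal table;
 * check_freq_level_cnt() below does the same for the number of samples
 * spent at (roughly) the same level, using report_interval[] as bounds.
 */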
515static int check_freq_level(struct aml_virtual_thermal_device *dev, unsigned int freq)
516{
517 int i = 0;
518
519 if (freq >= dev->thermal[dev->count-1].freq) {
520 return dev->count - 1;
521 }
522 for (i = 0; i < dev->count - 1; i++) {
523 if (freq > dev->thermal[i].freq && freq <= dev->thermal[i + 1].freq) {
524 return i + 1;
525 }
526 }
527 return 0;
528}
529
530static int check_freq_level_cnt(unsigned int cnt)
531{
532 int i;
533
534 if (cnt >= report_interval[3]) {
535 return 3;
536 }
537 for (i = 0; i < 3; i++) {
538 if (cnt >= report_interval[i] && cnt < report_interval[i + 1]) {
539 return i;
540 }
541 }
542 return 0;
543}
544
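/*
 * Estimate a "virtual" temperature for untrimmed chips: pick a value from
 * the cpu and gpu tables according to the current average frequency level
 * and how long the load has stayed near that level, then report the
 * larger of the two. Defaults to 40 when no update has arrived yet.
 */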
545static unsigned long aml_cal_virtual_temp(struct amlogic_thermal_platform_data *pdata)
546{
547 static unsigned int cpu_freq_level_cnt = 0, gpu_freq_level_cnt = 0;
548 static unsigned int last_cpu_freq_level = 0, last_gpu_freq_level = 0;
549 static unsigned int cpu_temp = 40, gpu_temp = 40; // default set to 40 when at homescreen
550 unsigned int curr_cpu_avg_freq, curr_gpu_avg_freq;
551 int curr_cpu_freq_level, curr_gpu_freq_level;
552 int cnt_level, level_diff;
553 int temp_update = 0, final_temp;
554
555 /*
556 * CPU temp
557 */
558 if (atomic_read(&freq_update_flag)) {
559 curr_cpu_avg_freq = pdata->monitor.avg_cpu_freq;
560 curr_cpu_freq_level = check_freq_level(&cpu_virtual_thermal, curr_cpu_avg_freq);
561 level_diff = curr_cpu_freq_level - last_cpu_freq_level;
562 if (ABS(level_diff) <= 1) { // freq change is not large
563 cpu_freq_level_cnt++;
564 cnt_level = check_freq_level_cnt(cpu_freq_level_cnt);
565 cpu_temp = cpu_virtual_thermal.thermal[curr_cpu_freq_level].temp_time[cnt_level];
566#if DBG_VIRTUAL
567 printk("%s, cur_freq:%7d, freq_level:%d, cnt_level:%d, cnt:%d, cpu_temp:%d\n",
568 __func__, curr_cpu_avg_freq, curr_cpu_freq_level, cnt_level, cpu_freq_level_cnt, cpu_temp);
569#endif
570 } else { // level not match
571 cpu_temp = cpu_virtual_thermal.thermal[curr_cpu_freq_level].temp_time[0];
572#if DBG_VIRTUAL
573 printk("%s, cur_freq:%7d, cur_level:%d, last_level:%d, last_cnt_level:%d, cpu_temp:%d\n",
574 __func__, curr_cpu_avg_freq, curr_cpu_freq_level, last_cpu_freq_level, cpu_freq_level_cnt, cpu_temp);
575#endif
576 cpu_freq_level_cnt = 0;
577 }
578 last_cpu_freq_level = curr_cpu_freq_level;
579
580 curr_gpu_avg_freq = pdata->monitor.avg_gpu_freq;
581 curr_gpu_freq_level = check_freq_level(&gpu_virtual_thermal, curr_gpu_avg_freq);
582 level_diff = curr_gpu_freq_level - last_gpu_freq_level;
583 if (ABS(level_diff) <= 1) { // freq change is not large
584 gpu_freq_level_cnt++;
585 cnt_level = check_freq_level_cnt(gpu_freq_level_cnt);
586 gpu_temp = gpu_virtual_thermal.thermal[curr_gpu_freq_level].temp_time[cnt_level];
587#if DBG_VIRTUAL
588 printk("%s, cur_freq:%7d, freq_level:%d, cnt_level:%d, cnt:%d, gpu_temp:%d\n",
589 __func__, curr_gpu_avg_freq, curr_gpu_freq_level, cnt_level, gpu_freq_level_cnt, gpu_temp);
590#endif
591 } else { // level not match
592 gpu_temp = gpu_virtual_thermal.thermal[curr_gpu_freq_level].temp_time[0];
593 gpu_freq_level_cnt = 0;
594#if DBG_VIRTUAL
595 printk("%s, cur_freq:%7d, cur_level:%d, last_level:%d, gpu_temp:%d\n",
596 __func__, curr_gpu_avg_freq, curr_gpu_freq_level, last_gpu_freq_level, gpu_temp);
597#endif
598 }
599 last_gpu_freq_level = curr_gpu_freq_level;
600
601 atomic_set(&freq_update_flag, 0);
602 temp_update = 1;
603 }
604
	if (cpu_temp == 0 && gpu_temp == 0) {
		/* both tables returned 0: fall back to a sane default instead of reporting 0 */
		printk("%s, Bug here, cpu & gpu temp can't be 0, cpu_temp:%d, gpu_temp:%d\n", __func__, cpu_temp, gpu_temp);
		final_temp = 40;
	} else {
		final_temp = (cpu_temp >= gpu_temp ? cpu_temp : gpu_temp);
	}
610 if (temp_update) {
611#if DBG_VIRTUAL
612 printk("final temp:%d\n", final_temp);
613#endif
614 }
615 return final_temp;
616}
617
618/* Get temperature callback functions for thermal zone */
619static int amlogic_get_temp(struct thermal_zone_device *thermal,
620 unsigned long *temp)
621{
622 struct amlogic_thermal_platform_data *pdata = thermal->devdata;
623
624 if (pdata->trim_flag) {
625 *temp = (unsigned long)get_cpu_temp();
626 pdata->current_temp = *temp;
627 } else if (pdata->virtual_thermal_en) {
628 *temp = aml_cal_virtual_temp(pdata);
629 } else {
630 *temp = 45; // fix cpu temperature to 45 if not trimed && disable virtual thermal
631 }
632 return 0;
633}
634
635/* Get the temperature trend */
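/*
 * Returning a non-zero value here makes the thermal core fall back to
 * computing the trend from the temperature history itself.
 */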
636static int amlogic_get_trend(struct thermal_zone_device *thermal,
637 int trip, enum thermal_trend *trend)
638{
639 return 1;
640}
641/* Operation callback functions for thermal zone */
642static struct thermal_zone_device_ops amlogic_dev_ops = {
643 .bind = amlogic_bind,
644 .unbind = amlogic_unbind,
645 .get_temp = amlogic_get_temp,
646 .get_trend = amlogic_get_trend,
647 .get_mode = amlogic_get_mode,
648 .set_mode = amlogic_set_mode,
649 .get_trip_type = amlogic_get_trip_type,
650 .get_trip_temp = amlogic_get_trip_temp,
651 .set_trip_temp = amlogic_set_trip_temp,
652 .get_crit_temp = amlogic_get_crit_temp,
653};
654
655/*
656 * sysfs for keep_mode
657 */
658#ifdef CONFIG_CPU_FREQ_GOV_HOTPLUG // for DEBUG
659extern unsigned int max_cpu_num;
660static ssize_t max_cpu_num_show(struct device *dev, struct device_attribute *attr, char *buf)
661{
662 return sprintf(buf, "%d\n", max_cpu_num);
663}
664#endif
665
666static ssize_t thermal_debug_show(struct device *dev, struct device_attribute *attr, char *buf)
667{
668 return sprintf(buf, "%d\n", thermal_debug_enable);
669}
670
671static ssize_t thermal_debug_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
672{
673 int32_t data = simple_strtol(buf, NULL, 10);
674
675 if (data) {
676 thermal_debug_enable = 1;
677 } else {
678 thermal_debug_enable = 0;
679 }
680 return count;
681}
682
683static ssize_t keep_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
684{
685 struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device);
686 struct amlogic_thermal_platform_data *pdata = tz->devdata;
687
688 return sprintf(buf, "%s\n", pdata->keep_mode ? "enabled": "disabled");
689}
690
691static ssize_t keep_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
692{
693 struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device);
694 struct amlogic_thermal_platform_data *pdata = tz->devdata;
695 if (!strncmp(buf, "enabled", sizeof("enabled") - 1)) {
696 pdata->keep_mode = 1;
697 } else if (!strncmp(buf, "disabled", sizeof("disabled") - 1)) {
698 pdata->keep_mode = 0;
699 }
700 return count;
701}
702
703static ssize_t keep_mode_threshold_show(struct device *dev, struct device_attribute *attr, char *buf)
704{
705 struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device);
706 struct amlogic_thermal_platform_data *pdata = tz->devdata;
707
708 return sprintf(buf, "%d\n", pdata->keep_mode_threshold);
709}
710
711static ssize_t keep_mode_threshold_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
712{
713 struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device);
714 struct amlogic_thermal_platform_data *pdata = tz->devdata;
715 int32_t data = simple_strtol(buf, NULL, 10);
716
	if (data > 200) {
		printk("input is %d, seems too large, invalid\n", data);
		return count;	/* reject values that are clearly out of range */
	}
720 keep_mode_update_threshold(pdata, data);
721 printk("set keep_mode_threshold to %d\n", data);
722 return count;
723}
724
725static ssize_t high_temp_protect_show(struct device *dev, struct device_attribute *attr, char *buf)
726{
727 return sprintf(buf, "%d\n", high_temp_protect);
728}
729
730static ssize_t high_temp_protect_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
731{
732 struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device);
733 struct amlogic_thermal_platform_data *pdata = tz->devdata;
734 int32_t data = simple_strtol(buf, NULL, 10);
735
736 high_temp_protect = data ? 1 : 0;
737 if (high_temp_protect) {
738 pdata->tmp_trip[1].temperature = pdata->keep_mode_threshold + 25;
739 } else {
740 pdata->tmp_trip[1].temperature = 260;
741 }
742 printk("high temperature protect %s\n", high_temp_protect ? "enabled" : "disabled");
743 return count;
744}
745
746static struct device_attribute amlogic_thermal_attr[] = {
747#ifdef CONFIG_CPU_FREQ_GOV_HOTPLUG
748 __ATTR(max_cpu_num, 0444, max_cpu_num_show, NULL),
749#endif
750 __ATTR(thermal_debug, 0644, thermal_debug_show, thermal_debug_store),
751 __ATTR(keep_mode, 0644, keep_mode_show, keep_mode_store),
752 __ATTR(keep_mode_threshold, 0644, keep_mode_threshold_show, keep_mode_threshold_store),
753 __ATTR(high_temp_protect, 0644, high_temp_protect_show, high_temp_protect_store)
754};
755
756/* Register with the in-kernel thermal management */
757static int amlogic_register_thermal(struct amlogic_thermal_platform_data *pdata, struct platform_device *pdev)
758{
759 int ret=0, j;
760 struct cpumask mask_val;
761
762 memset(&mask_val,0,sizeof(struct cpumask));
763 cpumask_set_cpu(0, &mask_val);
	pdata->cpu_cool_dev = cpufreq_cooling_register(&mask_val);
	if (IS_ERR(pdata->cpu_cool_dev)) {
		pr_err("Failed to register cpufreq cooling device\n");
		pdata->cpu_cool_dev = NULL;	/* avoid unregistering an ERR_PTR in the error path */
		ret = -EINVAL;
		goto err_unregister;
	}
	pdata->cpucore_cool_dev = cpucore_cooling_register();
	if (IS_ERR(pdata->cpucore_cool_dev)) {
		pr_err("Failed to register cpucore cooling device\n");
		pdata->cpucore_cool_dev = NULL;
		ret = -EINVAL;
		goto err_unregister;
	}
776
777 pdata->therm_dev = thermal_zone_device_register(pdata->name,
778 pdata->temp_trip_count,
779 ((1 << pdata->temp_trip_count) - 1),
780 pdata,
781 &amlogic_dev_ops,
782 NULL,
783 0,
784 pdata->idle_interval);
785
786 if (IS_ERR(pdata->therm_dev)) {
787 pr_err("Failed to register thermal zone device, err:%p\n", pdata->therm_dev);
788 ret = -EINVAL;
789 goto err_unregister;
790 }
791
792 if (pdata->keep_mode) { // create sysfs for keep_mode
793 for (j = 0; j < ARRAY_SIZE(amlogic_thermal_attr); j++) {
794 device_create_file(&pdata->therm_dev->device, &amlogic_thermal_attr[j]);
795 }
796 }
797 pr_info("amlogic: Kernel Thermal management registered\n");
798
799 return 0;
800
801err_unregister:
802 amlogic_unregister_thermal(pdata);
803 return ret;
804}
805
806/* Un-Register with the in-kernel thermal management */
807static void amlogic_unregister_thermal(struct amlogic_thermal_platform_data *pdata)
808{
809 if (pdata->therm_dev)
810 thermal_zone_device_unregister(pdata->therm_dev);
811 if (pdata->cpu_cool_dev)
812 cpufreq_cooling_unregister(pdata->cpu_cool_dev);
813
814 pr_info("amlogic: Kernel Thermal management unregistered\n");
815}
816
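/*
 * Determine whether cpufreq table 0 is sorted in descending (1) or
 * ascending (0) order; returns -EINVAL if the table is missing and -1 if
 * the order cannot be determined.
 */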
817int get_desend(void)
818{
819 int i;
820 unsigned int freq = CPUFREQ_ENTRY_INVALID;
821 int descend = -1;
822 struct cpufreq_frequency_table *table =
823 cpufreq_frequency_get_table(0);
824
825 if (!table)
826 return -EINVAL;
827
828 for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
829 /* ignore invalid entries */
830 if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
831 continue;
832
833 /* ignore duplicate entry */
834 if (freq == table[i].frequency)
835 continue;
836
837 /* get the frequency order */
838 if (freq != CPUFREQ_ENTRY_INVALID && descend == -1){
839 descend = !!(freq > table[i].frequency);
840 break;
841 }
842
843 freq = table[i].frequency;
844 }
845 return descend;
846}
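/*
 * Snap a frequency read from the device tree onto an entry of the cpufreq
 * table, honouring the table ordering reported by get_desend(); returns
 * -EINVAL when no matching entry is found.
 */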
847int fix_to_freq(int freqold,int descend)
848{
849 int i;
850 unsigned int freq = CPUFREQ_ENTRY_INVALID;
851 struct cpufreq_frequency_table *table =
852 cpufreq_frequency_get_table(0);
853
854 if (!table)
855 return -EINVAL;
856
857 for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
858 /* ignore invalid entry */
859 if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
860 continue;
861
862 /* ignore duplicate entry */
863 if (freq == table[i].frequency)
864 continue;
865 freq = table[i].frequency;
866 if(descend){
867 if(freqold>=table[i+1].frequency && freqold<=table[i].frequency)
868 return table[i+1].frequency;
869 }
870 else{
871 if(freqold>=table[i].frequency && freqold<=table[i+1].frequency)
872 return table[i].frequency;
873 }
874 }
875 return -EINVAL;
876}
877
void thermal_atomic_set(atomic_t *a, int value)
{
	/* store the caller-supplied value */
	atomic_set(a, value);
}
882EXPORT_SYMBOL(thermal_atomic_set);
883
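/*
 * Build the platform data from the device tree. Properties consumed here:
 * "trip_point" (one entry of #thermal-cells u32s per trip), "#thermal-cells",
 * "idle_interval", "dev_name", plus the keep-mode group ("keep_mode",
 * "keep_mode_threshold", "keep_mode_max_range") and, for untrimmed chips,
 * the virtual-thermal properties handled in aml_virtual_thermal_probe().
 */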
884static struct amlogic_thermal_platform_data * amlogic_thermal_init_from_dts(struct platform_device *pdev, int trim_flag)
885{
886 int i = 0, ret = -1, val = 0, cells, descend, error = 0;
887 struct property *prop;
888 struct temp_level *tmp_level = NULL;
889 struct amlogic_thermal_platform_data *pdata = NULL;
890
891 if(!of_property_read_u32(pdev->dev.of_node, "trip_point", &val)){
892 //INIT FROM DTS
893 pdata=kzalloc(sizeof(*pdata),GFP_KERNEL);
894 if(!pdata){
895 goto err;
896 }
897 memset((void* )pdata,0,sizeof(*pdata));
898 ret=of_property_read_u32(pdev->dev.of_node, "#thermal-cells", &val);
899 if(ret){
900 dev_err(&pdev->dev, "dt probe #thermal-cells failed: %d\n", ret);
901 goto err;
902 }
903 printk("#thermal-cells=%d\n",val);
904 cells=val;
905
906 /*
907 * process for KEEP_MODE and virtual thermal
908 * Logic: If virtual thermal is enabled, then ignore keep_mode
909 *
910 */
911 pdata->trim_flag = trim_flag;
912 if (!pdata->trim_flag) { // chip is not trimmed, use virtual thermal
			aml_virtual_thermal_probe(pdev, pdata);
914 } else if (of_property_read_bool(pdev->dev.of_node, "keep_mode")) {
915 if (of_property_read_u32(pdev->dev.of_node, "keep_mode_threshold", &pdata->keep_mode_threshold)) {
916 printk("ERROR:keep_mode is set but not found 'keep_mode_threshold'\n");
917 error = 1;
918 }
919 if (of_property_read_u32_array(pdev->dev.of_node,
920 "keep_mode_max_range",
921 pdata->keep_mode_max_range,
922 sizeof(pdata->keep_mode_max_range)/sizeof(u32))) {
923 printk("ERROR:keep_mode is set but not found 'keep_mode_max_range'\n");
924 error = 1;
925 }
			if (!error && pdata->trim_flag) { // keep mode should not be used with virtual thermal right now
927 printk("keep_mode enabled\n");
928 printk("keep_mode_max_range: [%7d, %3d, %d, %d]\n",
929 pdata->keep_mode_max_range[0], pdata->keep_mode_max_range[1],
930 pdata->keep_mode_max_range[2], pdata->keep_mode_max_range[3]);
931 pdata->keep_mode = 1;
932 pdata->freq_sample_period = 5;
933 }
934 } else {
935 printk("keep_mode is disabled\n");
936 }
937 if(pdata->keep_mode || !pdata->trim_flag){
938 INIT_DELAYED_WORK(&pdata->thermal_work, thermal_work);
939 schedule_delayed_work(&pdata->thermal_work, msecs_to_jiffies(100));
940 atomic_set(&freq_update_flag, 0);
941 }
942
943 prop = of_find_property(pdev->dev.of_node, "trip_point", &val);
944 if (!prop){
945 dev_err(&pdev->dev, "read %s length error\n","trip_point");
946 goto err;
947 }
948 if (pdata->keep_mode) {
949 pdata->temp_trip_count = 2;
950 } else {
951 pdata->temp_trip_count=val/cells/sizeof(u32);
952 }
953 printk("pdata->temp_trip_count=%d\n",pdata->temp_trip_count);
		tmp_level = kzalloc(sizeof(*tmp_level) * pdata->temp_trip_count, GFP_KERNEL);
		pdata->tmp_trip = kzalloc(sizeof(struct temp_trip) * pdata->temp_trip_count, GFP_KERNEL);
		if (!tmp_level || !pdata->tmp_trip) {	/* check both allocations */
			goto err;
		}
959
960 if (pdata->keep_mode) { // keep mode only need one point
961 keep_mode_temp_level_init(pdata, tmp_level);
962 } else {
963 ret=of_property_read_u32_array(pdev->dev.of_node,"trip_point",(u32 *)tmp_level,val/sizeof(u32));
964 if (ret){
965 dev_err(&pdev->dev, "read %s data error\n","trip_point");
966 goto err;
967 }
968 }
969 descend=get_desend();
970 for (i = 0; i < pdata->temp_trip_count; i++) {
971 printk("temperature=%d on trip point=%d\n",tmp_level[i].temperature,i);
972 pdata->tmp_trip[i].temperature=tmp_level[i].temperature;
973 printk("fixing high_freq=%d to ",tmp_level[i].cpu_high_freq);
974 tmp_level[i].cpu_high_freq=fix_to_freq(tmp_level[i].cpu_high_freq,descend);
975 pdata->tmp_trip[i].cpu_lower_level=cpufreq_cooling_get_level(0,tmp_level[i].cpu_high_freq);
976 printk("%d at trip point %d,level=%d\n",tmp_level[i].cpu_high_freq,i,pdata->tmp_trip[i].cpu_lower_level);
977
978 printk("fixing low_freq=%d to ",tmp_level[i].cpu_low_freq);
979 tmp_level[i].cpu_low_freq=fix_to_freq(tmp_level[i].cpu_low_freq,descend);
980 pdata->tmp_trip[i].cpu_upper_level=cpufreq_cooling_get_level(0,tmp_level[i].cpu_low_freq);
981 printk("%d at trip point %d,level=%d\n",tmp_level[i].cpu_low_freq,i,pdata->tmp_trip[i].cpu_upper_level);
982 pdata->tmp_trip[i].gpu_lower_freq=tmp_level[i].gpu_low_freq;
983 pdata->tmp_trip[i].gpu_upper_freq=tmp_level[i].gpu_high_freq;
984 printk("gpu[%d].gpu_high_freq=%d,tmp_level[%d].gpu_high_freq=%d\n",i,tmp_level[i].gpu_high_freq,i,tmp_level[i].gpu_low_freq);
985
986 pdata->tmp_trip[i].cpu_core_num=tmp_level[i].cpu_core_num;
987 printk("cpu[%d] core num==%d\n",i,pdata->tmp_trip[i].cpu_core_num);
988 pdata->tmp_trip[i].gpu_core_num=tmp_level[i].gpu_core_num;
989 printk("gpu[%d] core num==%d\n",i,pdata->tmp_trip[i].gpu_core_num);
990 }
991
992 ret= of_property_read_u32(pdev->dev.of_node, "idle_interval", &val);
993 if (ret){
994 dev_err(&pdev->dev, "read %s error\n","idle_interval");
995 goto err;
996 }
997 pdata->idle_interval=val;
998 printk("idle interval=%d\n",pdata->idle_interval);
999 ret=of_property_read_string(pdev->dev.of_node,"dev_name",&pdata->name);
1000 if (ret){
1001 dev_err(&pdev->dev, "read %s error\n","dev_name");
1002 goto err;
1003 }
1004 printk("pdata->name:%s, pdata:%p\n",pdata->name, pdata);
1005 pdata->mode=THERMAL_DEVICE_ENABLED;
1006 if(tmp_level)
1007 kfree(tmp_level);
1008 printk("%s, %d\n", __func__, __LINE__);
1009 return pdata;
1010 }
err:
	kfree(tmp_level);		/* kfree() is safe on NULL */
	if (pdata)
		kfree(pdata->tmp_trip);
	kfree(pdata);
	return NULL;
1018}
1019
1020static struct amlogic_thermal_platform_data * amlogic_thermal_initialize(struct platform_device *pdev, int trim_flag)
1021{
1022 struct amlogic_thermal_platform_data *pdata=NULL;
1023 pdata=amlogic_thermal_init_from_dts(pdev, trim_flag);
1024 printk("%s, %d, pdata:%p\n", __func__, __LINE__, pdata);
1025 return pdata;
1026}
1027
1028static const struct of_device_id amlogic_thermal_match[] = {
1029 {
1030 .compatible = "amlogic, amlogic-thermal",
1031 },
1032 {},
1033};
1034
1035#ifdef CONFIG_HIBERNATION
1036static int amlogic_thermal_freeze(struct device *dev)
1037{
1038 return 0;
1039}
1040
1041static int amlogic_thermal_thaw(struct device *dev)
1042{
1043 return 0;
1044}
1045
1046static int amlogic_thermal_restore(struct device *dev)
1047{
1048 thermal_firmware_init();
1049
1050 return 0;
1051}
1052
static struct dev_pm_ops amlogic_thermal_pm = {
1054 .freeze = amlogic_thermal_freeze,
1055 .thaw = amlogic_thermal_thaw,
1056 .restore = amlogic_thermal_restore,
1057};
1058#endif
1059
1060static int amlogic_thermal_probe(struct platform_device *pdev)
1061{
1062 int ret, trim_flag;
1063 struct amlogic_thermal_platform_data *pdata=NULL;
1064
1065 ret=thermal_firmware_init();
1066 if(ret<0){
1067 printk("%s, this chip is not trimmed, can't use thermal\n", __func__);
1068 trim_flag = 0;
1069 return -ENODEV;
1070 }else{
1071 printk("%s, this chip is trimmed, use thermal\n", __func__);
1072 trim_flag = 1;
1073 }
1074
1075 dev_info(&pdev->dev, "amlogic thermal probe start\n");
1076 pdata = amlogic_thermal_initialize(pdev, trim_flag);
	if (!pdata) {
		dev_err(&pdev->dev, "Failed to initialize thermal\n");
		ret = -ENOMEM;	/* fail the probe with an error code, not the value left over from thermal_firmware_init() */
		goto err;
	}
1081 mutex_init(&pdata->lock);
1082 pdev->dev.platform_data=pdata;
1083 platform_set_drvdata(pdev, pdata);
1084 ret = amlogic_register_thermal(pdata, pdev);
1085 if (ret) {
1086 dev_err(&pdev->dev, "Failed to register thermal interface\n");
1087 goto err;
1088 }
1089 dev_info(&pdev->dev, "amlogic thermal probe done\n");
1090 return 0;
1091err:
1092 platform_set_drvdata(pdev, NULL);
1093 return ret;
1094}
1095
1096static int amlogic_thermal_remove(struct platform_device *pdev)
1097{
1098 struct amlogic_thermal_platform_data *pdata = platform_get_drvdata(pdev);
1099
1100 aml_virtual_thermal_remove(pdata);
1101
1102 amlogic_unregister_thermal(pdata);
1103
1104 platform_set_drvdata(pdev, NULL);
1105
1106 return 0;
1107}
1108
1109struct platform_driver amlogic_thermal_driver = {
1110 .driver = {
1111 .name = "amlogic-thermal",
1112 .owner = THIS_MODULE,
1113 #ifdef CONFIG_HIBERNATION
		.pm = &amlogic_thermal_pm,
1115 #endif
1116 .of_match_table = of_match_ptr(amlogic_thermal_match),
1117 },
1118 .probe = amlogic_thermal_probe,
1119 .remove = amlogic_thermal_remove,
1120};
1121
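/*
 * Exported helpers that wrap thermal_cooling_device fields and ops so
 * other modules can use them without reaching into thermal core
 * structures directly.
 */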
1122void *aml_get_cdevdata(struct thermal_cooling_device *cdev)
1123{
1124 return cdev->devdata;
1125}
1126EXPORT_SYMBOL(aml_get_cdevdata);
1127
1128void aml_set_cdev_update(struct thermal_cooling_device *cdev, bool update)
1129{
1130 cdev->updated = update;
1131}
1132EXPORT_SYMBOL(aml_set_cdev_update);
1133
1134void aml_cdev_lockop(struct thermal_cooling_device *cdev, bool lock)
1135{
1136 if (lock) {
1137 thermal_lock(&cdev->lock);
1138 } else {
1139 thermal_unlock(&cdev->lock);
1140 }
1141}
1142EXPORT_SYMBOL(aml_cdev_lockop);
1143
1144void aml_cdev_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *ret)
1145{
1146 cdev->ops->get_cur_state(cdev, ret);
1147}
1148EXPORT_SYMBOL(aml_cdev_get_cur_state);
1149
1150static int __init amlogic_thermal_driver_init(void)
1151{
1152 return platform_driver_register(&(amlogic_thermal_driver));
1153}
1154late_initcall(amlogic_thermal_driver_init);
1155static void __exit amlogic_thermal_driver_exit(void)
1156{
1157 platform_driver_unregister(&(amlogic_thermal_driver) );
1158}
1159module_exit(amlogic_thermal_driver_exit);
1160
1161MODULE_DESCRIPTION("amlogic thermal Driver");
1162MODULE_AUTHOR("Amlogic SH platform team");
1163MODULE_ALIAS("platform:amlogic-thermal");
1164MODULE_LICENSE("GPL");
1165
1166