/*
 * amlogic_thermal.c - Samsung amlogic thermal (Thermal Management Unit)
 *
 * Copyright (C) 2011 Samsung Electronics
 * Donggeun Kim <dg77.kim@samsung.com>
 * Amit Daniel Kachhap <amit.kachhap@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/workqueue.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/thermal.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/of.h>
#include <linux/amlogic/saradc.h>
#include <linux/random.h>
#include <linux/gpu_cooling.h>
#include <linux/cpucore_cooling.h>
#include <linux/gpucore_cooling.h>
#include <linux/thermal_core.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 10, 33)
#include <linux/amlogic/aml_thermal_hw.h>
#else
#include <mach/thermal.h>
#endif
#include "amlogic_thermal.h"

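/*
 * Overview: this module registers an "amlogic-thermal" platform driver that
 * creates a thermal zone from device-tree trip points and binds it to the
 * cpufreq, cpucore, gpufreq and gpucore cooling devices.  Chips whose
 * temperature sensor is trimmed read the real sensor; untrimmed chips can
 * fall back to a "virtual thermal" estimate derived from the average CPU/GPU
 * frequency.  The optional keep_mode path drives the keep_mode_* helpers
 * from a 100 ms delayed work.
 */
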
#define DBG_VIRTUAL 0
#define MIN_TEMP (-273)
int thermal_debug_enable = 0;
int high_temp_protect = 0;
atomic_t freq_update_flag;
EXPORT_SYMBOL(thermal_debug_enable);
EXPORT_SYMBOL(high_temp_protect);
EXPORT_SYMBOL(freq_update_flag);

#define THERMAL_DBG(format, args...) \
	do { \
		if (thermal_debug_enable) \
			printk("[THERMAL]" format, ##args); \
	} while (0)

static struct aml_virtual_thermal_device cpu_virtual_thermal = {};
static struct aml_virtual_thermal_device gpu_virtual_thermal = {};
static unsigned int report_interval[4] = {};

/* CPU Zone information */
#define PANIC_ZONE 4
#define WARN_ZONE 3
#define MONITOR_ZONE 2
#define SAFE_ZONE 1

#define GET_ZONE(trip) (trip + 2)
#define GET_TRIP(zone) (zone - 2)

static void amlogic_unregister_thermal(struct amlogic_thermal_platform_data *pdata);
static int amlogic_register_thermal(struct amlogic_thermal_platform_data *pdata, struct platform_device *pdev);

void thermal_lock(struct mutex *lock)
{
	mutex_lock(lock);
}
EXPORT_SYMBOL(thermal_lock);

void thermal_unlock(struct mutex *lock)
{
	mutex_unlock(lock);
}
EXPORT_SYMBOL(thermal_unlock);

/* Get mode callback functions for thermal zone */
static int amlogic_get_mode(struct thermal_zone_device *thermal,
			    enum thermal_device_mode *mode)
{
	struct amlogic_thermal_platform_data *pdata = thermal->devdata;

	if (pdata)
		*mode = pdata->mode;
	return 0;
}

/* Set mode callback functions for thermal zone */
static int amlogic_set_mode(struct thermal_zone_device *thermal,
			    enum thermal_device_mode mode)
{
	struct amlogic_thermal_platform_data *pdata = thermal->devdata;
	struct cpucore_cooling_device *cpucore_device = NULL;
	struct gpucore_cooling_device *gpucore_device = NULL;

	if (!pdata)
		return -EINVAL;

	//mutex_lock(&pdata->therm_dev->lock);

	if (mode == THERMAL_DEVICE_ENABLED) {
		pdata->therm_dev->polling_delay = pdata->idle_interval;
		if (pdata->cpucore_cool_dev) {
			cpucore_device = pdata->cpucore_cool_dev->devdata;
			cpucore_device->stop_flag = 0;
		}
		if (pdata->gpucore_cool_dev) {
			gpucore_device = pdata->gpucore_cool_dev->devdata;
			gpucore_device->stop_flag = 0;
		}
		if (pdata->keep_mode) { /* start the keep_mode worker */
			schedule_delayed_work(&pdata->thermal_work, msecs_to_jiffies(100));
		}
	} else {
		pdata->therm_dev->polling_delay = 0;
		if (pdata->keep_mode) {
			cancel_delayed_work_sync(&pdata->thermal_work);
			keep_mode_set_mode(pdata);
		}
		if (pdata->cpucore_cool_dev)
			pdata->cpucore_cool_dev->ops->set_cur_state(pdata->cpucore_cool_dev, (0 | CPU_STOP));
		if (pdata->gpucore_cool_dev)
			pdata->gpucore_cool_dev->ops->set_cur_state(pdata->gpucore_cool_dev, (0 | GPU_STOP));
	}

	//mutex_unlock(&pdata->therm_dev->lock);

	pdata->mode = mode;
	thermal_zone_device_update(pdata->therm_dev);
	pr_info("thermal polling set for duration=%d msec\n",
		pdata->therm_dev->polling_delay);
	return 0;
}

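/*
 * Trip point layout used by this zone: every trip except the last one is an
 * ACTIVE trip handled by the cooling devices bound in amlogic_bind(); the
 * last trip is the CRITICAL (shutdown) trip returned by
 * amlogic_get_crit_temp().
 */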
/* Get trip type callback functions for thermal zone */
static int amlogic_get_trip_type(struct thermal_zone_device *thermal, int trip,
				 enum thermal_trip_type *type)
{
	if (trip < thermal->trips - 1)
		*type = THERMAL_TRIP_ACTIVE;
	else if (trip == thermal->trips - 1)
		*type = THERMAL_TRIP_CRITICAL;
	else
		return -EINVAL;
	return 0;
}

/* Get trip temperature callback functions for thermal zone */
static int amlogic_get_trip_temp(struct thermal_zone_device *thermal, int trip,
				 unsigned long *temp)
{
	struct amlogic_thermal_platform_data *pdata = thermal->devdata;

	if (trip >= pdata->temp_trip_count || trip < 0)
		return -EINVAL;
	mutex_lock(&pdata->lock);
	*temp = pdata->tmp_trip[trip].temperature;
	/* convert the temperature into millicelsius */
	mutex_unlock(&pdata->lock);

	return 0;
}

static int amlogic_set_trip_temp(struct thermal_zone_device *thermal, int trip,
				 unsigned long temp)
{
	struct amlogic_thermal_platform_data *pdata = thermal->devdata;

	if (trip >= pdata->temp_trip_count || trip < 0)
		return -EINVAL;
	mutex_lock(&pdata->lock);
	pdata->tmp_trip[trip].temperature = temp;
	/* convert the temperature into millicelsius */
	mutex_unlock(&pdata->lock);
	return 0;
}

/* Get critical temperature callback functions for thermal zone */
static int amlogic_get_crit_temp(struct thermal_zone_device *thermal,
				 unsigned long *temp)
{
	int ret;

	/* Panic zone */
	ret = amlogic_get_trip_temp(thermal, thermal->trips - 1, temp);

	return ret;
}

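/*
 * amlogic_bind() is called by the thermal core for every registered cooling
 * device.  The device type string is parsed as "thermal-<kind>-<id>" and,
 * depending on <kind> (cpufreq, gpufreq, cpucore, gpucore), each trip point
 * is bound with the upper/lower cooling states derived from the device tree.
 * When keep_mode is enabled, the cooling device's max state is also handed to
 * keep_mode_bind() (slots 0..3, in the same order as above).
 */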
/* Bind callback functions for thermal zone */
static int amlogic_bind(struct thermal_zone_device *thermal,
			struct thermal_cooling_device *cdev)
{
	int ret = 0, i;
	struct amlogic_thermal_platform_data *pdata = thermal->devdata;
	int id;
	char type[THERMAL_NAME_LENGTH];
	unsigned long max;

	if (!sscanf(cdev->type, "thermal-%7s-%d", type, &id))
		return -EINVAL;
	if (!strcmp(type, "cpufreq")) {
		/* Bind the thermal zone to the cpufreq cooling device */
		for (i = 0; i < pdata->temp_trip_count; i++) {
			if (pdata->tmp_trip[0].cpu_upper_level == THERMAL_CSTATE_INVALID) {
				printk("cpu cooling device disabled by dts\n");
				ret = -EINVAL;
				goto out;
			}
			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
					pdata->tmp_trip[i].cpu_upper_level,
					pdata->tmp_trip[i].cpu_lower_level)) {
				pr_err("error binding cdev inst %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		pr_info("%s bind %s okay !\n", thermal->type, cdev->type);
		if (pdata->keep_mode) {
			cdev->ops->get_max_state(cdev, &max);
			keep_mode_bind(pdata, max, 0);
		}
	}

	if (!strcmp(type, "gpufreq")) {
		struct gpufreq_cooling_device *gpufreq_dev =
			(struct gpufreq_cooling_device *)cdev->devdata;
		/* Bind the thermal zone to the gpufreq cooling device */
		for (i = 0; i < pdata->temp_trip_count; i++) {
			if (!gpufreq_dev->get_gpu_freq_level) {
				ret = -EINVAL;
				pr_info("invalid pointer %p\n", gpufreq_dev->get_gpu_freq_level);
				goto out;
			}
			pdata->tmp_trip[i].gpu_lower_level = gpufreq_dev->get_gpu_freq_level(pdata->tmp_trip[i].gpu_upper_freq);
			pdata->tmp_trip[i].gpu_upper_level = gpufreq_dev->get_gpu_freq_level(pdata->tmp_trip[i].gpu_lower_freq);
			printk("pdata->tmp_trip[%d].gpu_lower_level=%d\n", i, pdata->tmp_trip[i].gpu_lower_level);
			printk("pdata->tmp_trip[%d].gpu_upper_level=%d\n", i, pdata->tmp_trip[i].gpu_upper_level);
			if (pdata->tmp_trip[0].gpu_lower_level == THERMAL_CSTATE_INVALID) {
				printk("gpu cooling device disabled by dts\n");
				ret = -EINVAL;
				goto out;
			}
			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
					pdata->tmp_trip[i].gpu_upper_level,
					pdata->tmp_trip[i].gpu_lower_level)) {
				pr_err("error binding cdev inst %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		pdata->gpu_cool_dev = cdev;
		pr_info("%s bind %s okay !\n", thermal->type, cdev->type);
		if (pdata->keep_mode) {
			cdev->ops->get_max_state(cdev, &max);
			keep_mode_bind(pdata, max, 1);
		}
	}

	if (!strcmp(type, "cpucore")) {
		/* Bind the thermal zone to the cpucore cooling device */
		struct cpucore_cooling_device *cpucore_dev =
			(struct cpucore_cooling_device *)cdev->devdata;
		for (i = 0; i < pdata->temp_trip_count; i++) {
			if (pdata->tmp_trip[0].cpu_core_num == THERMAL_CSTATE_INVALID) {
				printk("cpucore cooling device disabled by dts\n");
				ret = -EINVAL;
				goto out;
			}
			if (pdata->tmp_trip[i].cpu_core_num != -1)
				pdata->tmp_trip[i].cpu_core_upper = cpucore_dev->max_cpu_core_num - pdata->tmp_trip[i].cpu_core_num;
			else
				pdata->tmp_trip[i].cpu_core_upper = pdata->tmp_trip[i].cpu_core_num;
			printk("tmp_trip[%d].cpu_core_upper=%d\n", i, pdata->tmp_trip[i].cpu_core_upper);
			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
					pdata->tmp_trip[i].cpu_core_upper,
					pdata->tmp_trip[i].cpu_core_upper)) {
				pr_err("error binding cdev inst %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		pr_info("%s bind %s okay !\n", thermal->type, cdev->type);
		if (pdata->keep_mode) {
			cdev->ops->get_max_state(cdev, &max);
			keep_mode_bind(pdata, max, 2);
		}
	}

	if (!strcmp(type, "gpucore")) {
		/* Bind the thermal zone to the gpucore cooling device */
		struct gpucore_cooling_device *gpucore_dev =
			(struct gpucore_cooling_device *)cdev->devdata;
		for (i = 0; i < pdata->temp_trip_count; i++) {
			if (pdata->tmp_trip[0].gpu_core_num == THERMAL_CSTATE_INVALID) {
				printk("gpucore cooling device disabled by dts\n");
				ret = -EINVAL;
				goto out;
			}
			if (pdata->tmp_trip[i].gpu_core_num != -1)
				pdata->tmp_trip[i].gpu_core_upper = gpucore_dev->max_gpu_core_num - pdata->tmp_trip[i].gpu_core_num;
			else
				pdata->tmp_trip[i].gpu_core_upper = pdata->tmp_trip[i].gpu_core_num;

			printk("tmp_trip[%d].gpu_core_upper=%d\n", i, pdata->tmp_trip[i].gpu_core_upper);
			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
					pdata->tmp_trip[i].gpu_core_upper,
					pdata->tmp_trip[i].gpu_core_upper)) {
				pr_err("error binding cdev inst %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		pdata->gpucore_cool_dev = cdev;
		pr_info("%s bind %s okay !\n", thermal->type, cdev->type);
		if (pdata->keep_mode) {
			cdev->ops->get_max_state(cdev, &max);
			keep_mode_bind(pdata, max, 3);
		}
	}
	return ret;
out:
	return ret;
}

/* Unbind callback functions for thermal zone */
static int amlogic_unbind(struct thermal_zone_device *thermal,
			  struct thermal_cooling_device *cdev)
{
	int i;

	if (thermal && cdev) {
		struct amlogic_thermal_platform_data *pdata = thermal->devdata;

		for (i = 0; i < pdata->temp_trip_count; i++) {
			pr_info("\n%s unbinding %s ", thermal->type, cdev->type);
			if (thermal_zone_unbind_cooling_device(thermal, i, cdev)) {
				pr_err(" error %d \n", i);
				return -EINVAL;
			}
			pr_info(" okay\n");
		}
		return 0;
	}
	return -EINVAL;
}

#define ABS(a) ((a) > 0 ? (a) : -(a))

void *thermal_alloc(size_t len)
{
	return kzalloc(len, GFP_KERNEL);
}
EXPORT_SYMBOL(thermal_alloc);

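/*
 * Delayed work scheduled every 100 ms while the zone is enabled: when the
 * last temperature read was valid it passes the current CPU0 frequency to
 * keep_mode_work(), then reschedules itself.
 */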
static void thermal_work(struct work_struct *work)
{
	struct amlogic_thermal_platform_data *pdata;
	int cpu_freq = cpufreq_quick_get(0);

	pdata = container_of((struct delayed_work *)work, struct amlogic_thermal_platform_data, thermal_work);
	if (pdata->temp_valid)
		keep_mode_work(pdata, cpu_freq);
	if (pdata->mode == THERMAL_DEVICE_ENABLED) { /* no need to reschedule if thermal is disabled */
		schedule_delayed_work(&pdata->thermal_work, msecs_to_jiffies(100));
	}
}

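/*
 * Parse the optional virtual-thermal configuration from the device tree:
 * "use_virtual_thermal" enables it, "freq_sample_period" (default 30) and
 * "report_time" (4 values) control sampling, and "cpu_virtual"/"gpu_virtual"
 * are tables of struct aml_virtual_thermal entries (a frequency plus four
 * temperature/time values) used by aml_cal_virtual_temp() below.
 */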
static int aml_virtual_thermal_probe(struct platform_device *pdev, struct amlogic_thermal_platform_data *pdata)
{
	int ret, len, cells;
	struct property *prop;
	void *buf;

	if (!of_property_read_bool(pdev->dev.of_node, "use_virtual_thermal")) {
		printk("%s, virtual thermal is not enabled\n", __func__);
		pdata->virtual_thermal_en = 0;
		return 0;
	} else {
		printk("%s, virtual thermal enabled\n", __func__);
	}

	ret = of_property_read_u32(pdev->dev.of_node,
				   "freq_sample_period",
				   &pdata->freq_sample_period);
	if (ret) {
		printk("%s, get freq_sample_period failed, use 30 as default\n", __func__);
		pdata->freq_sample_period = 30;
	} else {
		printk("%s, get freq_sample_period with value:%d\n", __func__, pdata->freq_sample_period);
	}
	ret = of_property_read_u32_array(pdev->dev.of_node,
					 "report_time",
					 report_interval, sizeof(report_interval) / sizeof(u32));
	if (ret) {
		printk("%s, get report_time failed\n", __func__);
		goto error;
	} else {
		printk("[virtual_thermal] report interval:%4d, %4d, %4d, %4d\n",
		       report_interval[0], report_interval[1], report_interval[2], report_interval[3]);
	}
	/*
	 * read cpu_virtual
	 */
	prop = of_find_property(pdev->dev.of_node, "cpu_virtual", &len);
	if (!prop) {
		printk("%s, cpu virtual not found\n", __func__);
		goto error;
	}
	cells = len / sizeof(struct aml_virtual_thermal);
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		printk("%s, no memory\n", __func__);
		return -ENOMEM;
	}
	ret = of_property_read_u32_array(pdev->dev.of_node,
					 "cpu_virtual",
					 buf, len / sizeof(u32));
	if (ret) {
		printk("%s, read cpu_virtual failed\n", __func__);
		kfree(buf);
		goto error;
	}
	cpu_virtual_thermal.count = cells;
	cpu_virtual_thermal.thermal = buf;

	/*
	 * read gpu_virtual
	 */
	prop = of_find_property(pdev->dev.of_node, "gpu_virtual", &len);
	if (!prop) {
		printk("%s, gpu virtual not found\n", __func__);
		goto error;
	}
	cells = len / sizeof(struct aml_virtual_thermal);
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		printk("%s, no memory\n", __func__);
		return -ENOMEM;
	}
	ret = of_property_read_u32_array(pdev->dev.of_node,
					 "gpu_virtual",
					 buf, len / sizeof(u32));
	if (ret) {
		printk("%s, read gpu_virtual failed\n", __func__);
		kfree(buf);
		goto error;
	}
	gpu_virtual_thermal.count = cells;
	gpu_virtual_thermal.thermal = buf;

#if DBG_VIRTUAL
	printk("cpu_virtual cells:%d, table:\n", cpu_virtual_thermal.count);
	for (len = 0; len < cpu_virtual_thermal.count; len++) {
		printk("%2d, %8d, %4d, %4d, %4d, %4d\n",
		       len,
		       cpu_virtual_thermal.thermal[len].freq,
		       cpu_virtual_thermal.thermal[len].temp_time[0],
		       cpu_virtual_thermal.thermal[len].temp_time[1],
		       cpu_virtual_thermal.thermal[len].temp_time[2],
		       cpu_virtual_thermal.thermal[len].temp_time[3]);
	}
	printk("gpu_virtual cells:%d, table:\n", gpu_virtual_thermal.count);
	for (len = 0; len < gpu_virtual_thermal.count; len++) {
		printk("%2d, %8d, %4d, %4d, %4d, %4d\n",
		       len,
		       gpu_virtual_thermal.thermal[len].freq,
		       gpu_virtual_thermal.thermal[len].temp_time[0],
		       gpu_virtual_thermal.thermal[len].temp_time[1],
		       gpu_virtual_thermal.thermal[len].temp_time[2],
		       gpu_virtual_thermal.thermal[len].temp_time[3]);
	}
#endif

	pdata->virtual_thermal_en = 1;
	return 0;

error:
	pdata->virtual_thermal_en = 0;
	return -1;
}

static void aml_virtual_thermal_remove(struct amlogic_thermal_platform_data *pdata)
{
	kfree(cpu_virtual_thermal.thermal);
	kfree(gpu_virtual_thermal.thermal);
	pdata->virtual_thermal_en = 0;
}

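/*
 * Map an average frequency to a row of the virtual-thermal table: returns the
 * index of the entry whose frequency bracket contains @freq, or the last
 * index when @freq is at or above the top entry.
 */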
static int check_freq_level(struct aml_virtual_thermal_device *dev, unsigned int freq)
{
	int i = 0;

	if (freq >= dev->thermal[dev->count - 1].freq) {
		return dev->count - 1;
	}
	for (i = 0; i < dev->count - 1; i++) {
		if (freq > dev->thermal[i].freq && freq <= dev->thermal[i + 1].freq) {
			return i + 1;
		}
	}
	return 0;
}

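/*
 * Map the number of consecutive samples spent at (roughly) the same frequency
 * level to one of the four report_interval buckets, i.e. the column of the
 * temp_time[] array to use.
 */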
static int check_freq_level_cnt(unsigned int cnt)
{
	int i;

	if (cnt >= report_interval[3]) {
		return 3;
	}
	for (i = 0; i < 3; i++) {
		if (cnt >= report_interval[i] && cnt < report_interval[i + 1]) {
			return i;
		}
	}
	return 0;
}

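/*
 * Estimate the die temperature without a trimmed sensor: whenever the
 * frequency monitor sets freq_update_flag, look up the average CPU and GPU
 * frequencies in their virtual-thermal tables, pick the column according to
 * how long the load has stayed at that level, and return the larger of the
 * two estimates (defaulting to 40 degrees C).
 */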
static unsigned long aml_cal_virtual_temp(struct amlogic_thermal_platform_data *pdata)
{
	static unsigned int cpu_freq_level_cnt = 0, gpu_freq_level_cnt = 0;
	static unsigned int last_cpu_freq_level = 0, last_gpu_freq_level = 0;
	static unsigned int cpu_temp = 40, gpu_temp = 40; /* default to 40 when at the homescreen */
	unsigned int curr_cpu_avg_freq, curr_gpu_avg_freq;
	int curr_cpu_freq_level, curr_gpu_freq_level;
	int cnt_level, level_diff;
	int temp_update = 0, final_temp;

	/*
	 * CPU temp
	 */
	if (atomic_read(&freq_update_flag)) {
		curr_cpu_avg_freq = pdata->monitor.avg_cpu_freq;
		curr_cpu_freq_level = check_freq_level(&cpu_virtual_thermal, curr_cpu_avg_freq);
		level_diff = curr_cpu_freq_level - last_cpu_freq_level;
		if (ABS(level_diff) <= 1) { /* freq change is not large */
			cpu_freq_level_cnt++;
			cnt_level = check_freq_level_cnt(cpu_freq_level_cnt);
			cpu_temp = cpu_virtual_thermal.thermal[curr_cpu_freq_level].temp_time[cnt_level];
#if DBG_VIRTUAL
			printk("%s, cur_freq:%7d, freq_level:%d, cnt_level:%d, cnt:%d, cpu_temp:%d\n",
			       __func__, curr_cpu_avg_freq, curr_cpu_freq_level, cnt_level, cpu_freq_level_cnt, cpu_temp);
#endif
		} else { /* level does not match */
			cpu_temp = cpu_virtual_thermal.thermal[curr_cpu_freq_level].temp_time[0];
#if DBG_VIRTUAL
			printk("%s, cur_freq:%7d, cur_level:%d, last_level:%d, last_cnt_level:%d, cpu_temp:%d\n",
			       __func__, curr_cpu_avg_freq, curr_cpu_freq_level, last_cpu_freq_level, cpu_freq_level_cnt, cpu_temp);
#endif
			cpu_freq_level_cnt = 0;
		}
		last_cpu_freq_level = curr_cpu_freq_level;

		curr_gpu_avg_freq = pdata->monitor.avg_gpu_freq;
		curr_gpu_freq_level = check_freq_level(&gpu_virtual_thermal, curr_gpu_avg_freq);
		level_diff = curr_gpu_freq_level - last_gpu_freq_level;
		if (ABS(level_diff) <= 1) { /* freq change is not large */
			gpu_freq_level_cnt++;
			cnt_level = check_freq_level_cnt(gpu_freq_level_cnt);
			gpu_temp = gpu_virtual_thermal.thermal[curr_gpu_freq_level].temp_time[cnt_level];
#if DBG_VIRTUAL
			printk("%s, cur_freq:%7d, freq_level:%d, cnt_level:%d, cnt:%d, gpu_temp:%d\n",
			       __func__, curr_gpu_avg_freq, curr_gpu_freq_level, cnt_level, gpu_freq_level_cnt, gpu_temp);
#endif
		} else { /* level does not match */
			gpu_temp = gpu_virtual_thermal.thermal[curr_gpu_freq_level].temp_time[0];
			gpu_freq_level_cnt = 0;
#if DBG_VIRTUAL
			printk("%s, cur_freq:%7d, cur_level:%d, last_level:%d, gpu_temp:%d\n",
			       __func__, curr_gpu_avg_freq, curr_gpu_freq_level, last_gpu_freq_level, gpu_temp);
#endif
		}
		last_gpu_freq_level = curr_gpu_freq_level;

		atomic_set(&freq_update_flag, 0);
		temp_update = 1;
	}

	if (cpu_temp <= 0 && gpu_temp <= 0) {
		printk("%s, Bug here, cpu & gpu temp can't be 0, cpu_temp:%d, gpu_temp:%d\n", __func__, cpu_temp, gpu_temp);
		final_temp = 40;
	} else {
		final_temp = (cpu_temp >= gpu_temp ? cpu_temp : gpu_temp);
	}
	if (temp_update) {
#if DBG_VIRTUAL
		printk("final temp:%d\n", final_temp);
#endif
	}
	return final_temp;
}

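/*
 * Temperature callback: trimmed chips read the on-chip sensor through
 * get_cpu_temp(); untrimmed chips either use the virtual estimate above or a
 * fixed 45 degrees C when virtual thermal is disabled as well.
 */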
/* Get temperature callback functions for thermal zone */
static int amlogic_get_temp(struct thermal_zone_device *thermal,
			    unsigned long *temp)
{
	struct amlogic_thermal_platform_data *pdata = thermal->devdata;
	int tmp;

	if (pdata->trim_flag) {
		tmp = get_cpu_temp();
		if (tmp < MIN_TEMP) {
			pdata->temp_valid = 0;
			return -EINVAL;
		}
		pdata->temp_valid = 1;
		*temp = (unsigned long)tmp;
		pdata->current_temp = *temp;
	} else if (pdata->virtual_thermal_en) {
		*temp = aml_cal_virtual_temp(pdata);
	} else {
		*temp = 45; /* fixed value when the chip is not trimmed and virtual thermal is disabled */
	}
	return 0;
}

/* Get the temperature trend */
static int amlogic_get_trend(struct thermal_zone_device *thermal,
			     int trip, enum thermal_trend *trend)
{
	return 1;
}

/* Operation callback functions for thermal zone */
static struct thermal_zone_device_ops amlogic_dev_ops = {
	.bind = amlogic_bind,
	.unbind = amlogic_unbind,
	.get_temp = amlogic_get_temp,
	.get_trend = amlogic_get_trend,
	.get_mode = amlogic_get_mode,
	.set_mode = amlogic_set_mode,
	.get_trip_type = amlogic_get_trip_type,
	.get_trip_temp = amlogic_get_trip_temp,
	.set_trip_temp = amlogic_set_trip_temp,
	.get_crit_temp = amlogic_get_crit_temp,
};

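/*
 * The attributes below are created on the thermal zone device when keep_mode
 * is enabled: thermal_debug, keep_mode, keep_mode_threshold and
 * high_temp_protect (plus a read-only max_cpu_num when the hotplug cpufreq
 * governor is built in).  high_temp_protect moves trip point 1 to
 * keep_mode_threshold + 25 when set, and back to 260 when cleared.
 */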
/*
 * sysfs for keep_mode
 */
#ifdef CONFIG_CPU_FREQ_GOV_HOTPLUG /* for DEBUG */
extern unsigned int max_cpu_num;
static ssize_t max_cpu_num_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", max_cpu_num);
}
#endif

static ssize_t thermal_debug_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", thermal_debug_enable);
}

static ssize_t thermal_debug_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	int32_t data = simple_strtol(buf, NULL, 10);

	if (data) {
		thermal_debug_enable = 1;
	} else {
		thermal_debug_enable = 0;
	}
	return count;
}

static ssize_t keep_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device);
	struct amlogic_thermal_platform_data *pdata = tz->devdata;

	return sprintf(buf, "%s\n", pdata->keep_mode ? "enabled" : "disabled");
}

static ssize_t keep_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device);
	struct amlogic_thermal_platform_data *pdata = tz->devdata;

	if (!strncmp(buf, "enabled", sizeof("enabled") - 1)) {
		pdata->keep_mode = 1;
	} else if (!strncmp(buf, "disabled", sizeof("disabled") - 1)) {
		pdata->keep_mode = 0;
	}
	return count;
}

static ssize_t keep_mode_threshold_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device);
	struct amlogic_thermal_platform_data *pdata = tz->devdata;

	return sprintf(buf, "%d\n", pdata->keep_mode_threshold);
}

static ssize_t keep_mode_threshold_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device);
	struct amlogic_thermal_platform_data *pdata = tz->devdata;
	int32_t data = simple_strtol(buf, NULL, 10);

	if (data > 200) {
		printk("input is %d, seems too large, invalid\n", data);
		return count;
	}
	keep_mode_update_threshold(pdata, data);
	printk("set keep_mode_threshold to %d\n", data);
	return count;
}

static ssize_t high_temp_protect_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", high_temp_protect);
}

static ssize_t high_temp_protect_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device);
	struct amlogic_thermal_platform_data *pdata = tz->devdata;
	int32_t data = simple_strtol(buf, NULL, 10);

	high_temp_protect = data ? 1 : 0;
	if (high_temp_protect) {
		pdata->tmp_trip[1].temperature = pdata->keep_mode_threshold + 25;
	} else {
		pdata->tmp_trip[1].temperature = 260;
	}
	printk("high temperature protect %s\n", high_temp_protect ? "enabled" : "disabled");
	return count;
}

static struct device_attribute amlogic_thermal_attr[] = {
#ifdef CONFIG_CPU_FREQ_GOV_HOTPLUG
	__ATTR(max_cpu_num, 0444, max_cpu_num_show, NULL),
#endif
	__ATTR(thermal_debug, 0644, thermal_debug_show, thermal_debug_store),
	__ATTR(keep_mode, 0644, keep_mode_show, keep_mode_store),
	__ATTR(keep_mode_threshold, 0644, keep_mode_threshold_show, keep_mode_threshold_store),
	__ATTR(high_temp_protect, 0644, high_temp_protect_show, high_temp_protect_store)
};

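/*
 * Register the cpufreq (CPU0 mask) and cpucore cooling devices and then the
 * thermal zone itself; the mask argument ((1 << trip_count) - 1) marks every
 * trip point writable from sysfs.  The gpufreq/gpucore cooling devices are
 * not registered here (presumably the GPU driver registers them) and are
 * picked up in amlogic_bind() when the thermal core announces them.
 */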
/* Register with the in-kernel thermal management */
static int amlogic_register_thermal(struct amlogic_thermal_platform_data *pdata, struct platform_device *pdev)
{
	int ret = 0, j;
	struct cpumask mask_val;

	memset(&mask_val, 0, sizeof(struct cpumask));
	cpumask_set_cpu(0, &mask_val);
	pdata->cpu_cool_dev = cpufreq_cooling_register(&mask_val);
	if (IS_ERR(pdata->cpu_cool_dev)) {
		pr_err("Failed to register cpufreq cooling device\n");
		ret = -EINVAL;
		goto err_unregister;
	}
	pdata->cpucore_cool_dev = cpucore_cooling_register();
	if (IS_ERR(pdata->cpucore_cool_dev)) {
		pr_err("Failed to register cpucore cooling device\n");
		ret = -EINVAL;
		goto err_unregister;
	}

	pdata->therm_dev = thermal_zone_device_register(pdata->name,
							pdata->temp_trip_count,
							((1 << pdata->temp_trip_count) - 1),
							pdata,
							&amlogic_dev_ops,
							NULL,
							0,
							pdata->idle_interval);

	if (IS_ERR(pdata->therm_dev)) {
		pr_err("Failed to register thermal zone device, err:%p\n", pdata->therm_dev);
		ret = -EINVAL;
		goto err_unregister;
	}

	if (pdata->keep_mode) { /* create sysfs nodes for keep_mode */
		for (j = 0; j < ARRAY_SIZE(amlogic_thermal_attr); j++) {
			device_create_file(&pdata->therm_dev->device, &amlogic_thermal_attr[j]);
		}
	}
	pr_info("amlogic: Kernel Thermal management registered\n");

	return 0;

err_unregister:
	amlogic_unregister_thermal(pdata);
	return ret;
}

/* Un-Register with the in-kernel thermal management */
static void amlogic_unregister_thermal(struct amlogic_thermal_platform_data *pdata)
{
	if (pdata->therm_dev)
		thermal_zone_device_unregister(pdata->therm_dev);
	if (pdata->cpu_cool_dev)
		cpufreq_cooling_unregister(pdata->cpu_cool_dev);

	pr_info("amlogic: Kernel Thermal management unregistered\n");
}

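/*
 * Determine the ordering of CPU0's cpufreq table: returns 1 when the table is
 * sorted in descending order, 0 for ascending, and -EINVAL when no table is
 * available.
 */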
int get_desend(void)
{
	int i;
	unsigned int freq = CPUFREQ_ENTRY_INVALID;
	int descend = -1;
	struct cpufreq_frequency_table *table =
		cpufreq_frequency_get_table(0);

	if (!table)
		return -EINVAL;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		/* ignore invalid entries */
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		/* ignore duplicate entry */
		if (freq == table[i].frequency)
			continue;

		/* get the frequency order */
		if (freq != CPUFREQ_ENTRY_INVALID && descend == -1) {
			descend = !!(freq > table[i].frequency);
			break;
		}

		freq = table[i].frequency;
	}
	return descend;
}
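
/*
 * Snap a frequency taken from the device tree onto an entry of the cpufreq
 * table (scanning in the direction given by @descend) so that
 * cpufreq_cooling_get_level() can resolve it; returns -EINVAL when no bracket
 * matches.
 */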
int fix_to_freq(int freqold, int descend)
{
	int i;
	unsigned int freq = CPUFREQ_ENTRY_INVALID;
	struct cpufreq_frequency_table *table =
		cpufreq_frequency_get_table(0);

	if (!table)
		return -EINVAL;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		/* ignore invalid entry */
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		/* ignore duplicate entry */
		if (freq == table[i].frequency)
			continue;
		freq = table[i].frequency;
		if (descend) {
			if (freqold >= table[i + 1].frequency && freqold <= table[i].frequency)
				return table[i + 1].frequency;
		} else {
			if (freqold >= table[i].frequency && freqold <= table[i + 1].frequency)
				return table[i].frequency;
		}
	}
	return -EINVAL;
}

void thermal_atomic_set(atomic_t *a, int value)
{
	atomic_set(a, value);
}
EXPORT_SYMBOL(thermal_atomic_set);

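/*
 * Build the platform data from the device tree: "#thermal-cells" and
 * "trip_point" describe the trip table (temperature, CPU/GPU frequency limits
 * and core counts per trip), "idle_interval" sets the polling delay and
 * "dev_name" names the zone.  keep_mode and the virtual-thermal tables are
 * probed here as well; keep_mode reduces the trip table to two entries filled
 * in by keep_mode_temp_level_init().
 */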
static struct amlogic_thermal_platform_data *amlogic_thermal_init_from_dts(struct platform_device *pdev, int trim_flag)
{
	int i = 0, ret = -1, val = 0, cells, descend, error = 0;
	struct property *prop;
	struct temp_level *tmp_level = NULL;
	struct amlogic_thermal_platform_data *pdata = NULL;

	if (!of_property_read_u32(pdev->dev.of_node, "trip_point", &val)) {
		/* init from the device tree */
		pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
		if (!pdata) {
			goto err;
		}
		ret = of_property_read_u32(pdev->dev.of_node, "#thermal-cells", &val);
		if (ret) {
			dev_err(&pdev->dev, "dt probe #thermal-cells failed: %d\n", ret);
			goto err;
		}
		printk("#thermal-cells=%d\n", val);
		cells = val;

		/*
		 * process for KEEP_MODE and virtual thermal
		 * Logic: If virtual thermal is enabled, then ignore keep_mode
		 */
		pdata->trim_flag = trim_flag;
		if (!pdata->trim_flag) { /* chip is not trimmed, use virtual thermal */
			aml_virtual_thermal_probe(pdev, pdata);
		} else if (of_property_read_bool(pdev->dev.of_node, "keep_mode")) {
			if (of_property_read_u32(pdev->dev.of_node, "keep_mode_threshold", &pdata->keep_mode_threshold)) {
				printk("ERROR: keep_mode is set but 'keep_mode_threshold' not found\n");
				error = 1;
			}
			if (of_property_read_u32_array(pdev->dev.of_node,
						       "keep_mode_max_range",
						       pdata->keep_mode_max_range,
						       sizeof(pdata->keep_mode_max_range) / sizeof(u32))) {
				printk("ERROR: keep_mode is set but 'keep_mode_max_range' not found\n");
				error = 1;
			}
			if (!error && pdata->trim_flag) { /* keep_mode is not used with virtual thermal right now */
				printk("keep_mode enabled\n");
				printk("keep_mode_max_range: [%7d, %3d, %d, %d]\n",
				       pdata->keep_mode_max_range[0], pdata->keep_mode_max_range[1],
				       pdata->keep_mode_max_range[2], pdata->keep_mode_max_range[3]);
				pdata->keep_mode = 1;
				pdata->freq_sample_period = 5;
			}
		} else {
			printk("keep_mode is disabled\n");
		}
		if (pdata->keep_mode || !pdata->trim_flag) {
			INIT_DELAYED_WORK(&pdata->thermal_work, thermal_work);
			schedule_delayed_work(&pdata->thermal_work, msecs_to_jiffies(100));
			atomic_set(&freq_update_flag, 0);
		}

		prop = of_find_property(pdev->dev.of_node, "trip_point", &val);
		if (!prop) {
			dev_err(&pdev->dev, "read %s length error\n", "trip_point");
			goto err;
		}
		if (pdata->keep_mode) {
			pdata->temp_trip_count = 2;
		} else {
			pdata->temp_trip_count = val / cells / sizeof(u32);
		}
		printk("pdata->temp_trip_count=%d\n", pdata->temp_trip_count);
		tmp_level = kzalloc(sizeof(*tmp_level) * pdata->temp_trip_count, GFP_KERNEL);
		pdata->tmp_trip = kzalloc(sizeof(struct temp_trip) * pdata->temp_trip_count, GFP_KERNEL);
		if (!tmp_level || !pdata->tmp_trip) {
			goto err;
		}

		if (pdata->keep_mode) { /* keep_mode only needs one point */
			keep_mode_temp_level_init(pdata, tmp_level);
		} else {
			ret = of_property_read_u32_array(pdev->dev.of_node, "trip_point", (u32 *)tmp_level, val / sizeof(u32));
			if (ret) {
				dev_err(&pdev->dev, "read %s data error\n", "trip_point");
				goto err;
			}
		}
		descend = get_desend();
		for (i = 0; i < pdata->temp_trip_count; i++) {
			printk("temperature=%d on trip point=%d\n", tmp_level[i].temperature, i);
			pdata->tmp_trip[i].temperature = tmp_level[i].temperature;
			printk("fixing high_freq=%d to ", tmp_level[i].cpu_high_freq);
			tmp_level[i].cpu_high_freq = fix_to_freq(tmp_level[i].cpu_high_freq, descend);
			pdata->tmp_trip[i].cpu_lower_level = cpufreq_cooling_get_level(0, tmp_level[i].cpu_high_freq);
			printk("%d at trip point %d,level=%d\n", tmp_level[i].cpu_high_freq, i, pdata->tmp_trip[i].cpu_lower_level);

			printk("fixing low_freq=%d to ", tmp_level[i].cpu_low_freq);
			tmp_level[i].cpu_low_freq = fix_to_freq(tmp_level[i].cpu_low_freq, descend);
			pdata->tmp_trip[i].cpu_upper_level = cpufreq_cooling_get_level(0, tmp_level[i].cpu_low_freq);
			printk("%d at trip point %d,level=%d\n", tmp_level[i].cpu_low_freq, i, pdata->tmp_trip[i].cpu_upper_level);
			pdata->tmp_trip[i].gpu_lower_freq = tmp_level[i].gpu_low_freq;
			pdata->tmp_trip[i].gpu_upper_freq = tmp_level[i].gpu_high_freq;
			printk("gpu[%d].gpu_high_freq=%d,tmp_level[%d].gpu_low_freq=%d\n", i, tmp_level[i].gpu_high_freq, i, tmp_level[i].gpu_low_freq);

			pdata->tmp_trip[i].cpu_core_num = tmp_level[i].cpu_core_num;
			printk("cpu[%d] core num==%d\n", i, pdata->tmp_trip[i].cpu_core_num);
			pdata->tmp_trip[i].gpu_core_num = tmp_level[i].gpu_core_num;
			printk("gpu[%d] core num==%d\n", i, pdata->tmp_trip[i].gpu_core_num);
		}

		ret = of_property_read_u32(pdev->dev.of_node, "idle_interval", &val);
		if (ret) {
			dev_err(&pdev->dev, "read %s error\n", "idle_interval");
			goto err;
		}
		pdata->idle_interval = val;
		printk("idle interval=%d\n", pdata->idle_interval);
		ret = of_property_read_string(pdev->dev.of_node, "dev_name", &pdata->name);
		if (ret) {
			dev_err(&pdev->dev, "read %s error\n", "dev_name");
			goto err;
		}
		printk("pdata->name:%s, pdata:%p\n", pdata->name, pdata);
		pdata->mode = THERMAL_DEVICE_ENABLED;
		kfree(tmp_level);
		printk("%s, %d\n", __func__, __LINE__);
		return pdata;
	}
err:
	kfree(tmp_level);
	kfree(pdata);
	pdata = NULL;
	return pdata;
}

static struct amlogic_thermal_platform_data *amlogic_thermal_initialize(struct platform_device *pdev, int trim_flag)
{
	struct amlogic_thermal_platform_data *pdata = NULL;

	pdata = amlogic_thermal_init_from_dts(pdev, trim_flag);
	printk("%s, %d, pdata:%p\n", __func__, __LINE__, pdata);
	return pdata;
}

static const struct of_device_id amlogic_thermal_match[] = {
	{
		.compatible = "amlogic, amlogic-thermal",
	},
	{},
};

#ifdef CONFIG_HIBERNATION
static int amlogic_thermal_freeze(struct device *dev)
{
	return 0;
}

static int amlogic_thermal_thaw(struct device *dev)
{
	return 0;
}

static int amlogic_thermal_restore(struct device *dev)
{
	thermal_firmware_init();

	return 0;
}

static struct dev_pm_ops amlogic_thermal_pm = {
	.freeze = amlogic_thermal_freeze,
	.thaw = amlogic_thermal_thaw,
	.restore = amlogic_thermal_restore,
};
#endif

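/*
 * Probe: thermal_firmware_init() reports whether the chip's temperature
 * sensor is trimmed.  Untrimmed chips currently bail out with -ENODEV, which
 * means the virtual-thermal fallback above is not reachable from this probe
 * path.  For trimmed chips the platform data is built from the device tree
 * and the zone plus cooling devices are registered.
 */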
static int amlogic_thermal_probe(struct platform_device *pdev)
{
	int ret, trim_flag;
	struct amlogic_thermal_platform_data *pdata = NULL;

	ret = thermal_firmware_init();
	if (ret < 0) {
		printk("%s, this chip is not trimmed, can't use thermal\n", __func__);
		trim_flag = 0;
		return -ENODEV;
	} else {
		printk("%s, this chip is trimmed, use thermal\n", __func__);
		trim_flag = 1;
	}

	dev_info(&pdev->dev, "amlogic thermal probe start\n");
	pdata = amlogic_thermal_initialize(pdev, trim_flag);
	if (!pdata) {
		dev_err(&pdev->dev, "Failed to initialize thermal\n");
		ret = -EINVAL;
		goto err;
	}
	mutex_init(&pdata->lock);
	pdev->dev.platform_data = pdata;
	platform_set_drvdata(pdev, pdata);
	ret = amlogic_register_thermal(pdata, pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register thermal interface\n");
		goto err;
	}
	dev_info(&pdev->dev, "amlogic thermal probe done\n");
	return 0;
err:
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int amlogic_thermal_remove(struct platform_device *pdev)
{
	struct amlogic_thermal_platform_data *pdata = platform_get_drvdata(pdev);

	aml_virtual_thermal_remove(pdata);

	amlogic_unregister_thermal(pdata);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

struct platform_driver amlogic_thermal_driver = {
	.driver = {
		.name = "amlogic-thermal",
		.owner = THIS_MODULE,
#ifdef CONFIG_HIBERNATION
		.pm = &amlogic_thermal_pm,
#endif
		.of_match_table = of_match_ptr(amlogic_thermal_match),
	},
	.probe = amlogic_thermal_probe,
	.remove = amlogic_thermal_remove,
};

void *aml_get_cdevdata(struct thermal_cooling_device *cdev)
{
	return cdev->devdata;
}
EXPORT_SYMBOL(aml_get_cdevdata);

void aml_set_cdev_update(struct thermal_cooling_device *cdev, bool update)
{
	cdev->updated = update;
}
EXPORT_SYMBOL(aml_set_cdev_update);

void aml_cdev_lockop(struct thermal_cooling_device *cdev, bool lock)
{
	if (lock) {
		thermal_lock(&cdev->lock);
	} else {
		thermal_unlock(&cdev->lock);
	}
}
EXPORT_SYMBOL(aml_cdev_lockop);

void aml_cdev_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *ret)
{
	cdev->ops->get_cur_state(cdev, ret);
}
EXPORT_SYMBOL(aml_cdev_get_cur_state);

static int __init amlogic_thermal_driver_init(void)
{
	return platform_driver_register(&amlogic_thermal_driver);
}
late_initcall(amlogic_thermal_driver_init);

static void __exit amlogic_thermal_driver_exit(void)
{
	platform_driver_unregister(&amlogic_thermal_driver);
}
module_exit(amlogic_thermal_driver_exit);

MODULE_DESCRIPTION("amlogic thermal Driver");
MODULE_AUTHOR("Amlogic SH platform team");
MODULE_ALIAS("platform:amlogic-thermal");
MODULE_LICENSE("GPL");