blob: ecd7ff92c0a1d518d15b7fad4a3e481a48c9c944
1 | /* |
 * amlogic_thermal.c - Amlogic thermal (Thermal Management Unit)
3 | * |
4 | * Copyright (C) 2011 Samsung Electronics |
5 | * Donggeun Kim <dg77.kim@samsung.com> |
6 | * Amit Daniel Kachhap <amit.kachhap@linaro.org> |
7 | * |
8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by |
10 | * the Free Software Foundation; either version 2 of the License, or |
11 | * (at your option) any later version. |
12 | * |
13 | * This program is distributed in the hope that it will be useful, |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. |
17 | * |
18 | * You should have received a copy of the GNU General Public License |
19 | * along with this program; if not, write to the Free Software |
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
21 | * |
22 | */ |
23 | |
24 | #include <linux/module.h> |
25 | #include <linux/err.h> |
26 | #include <linux/kernel.h> |
27 | #include <linux/slab.h> |
28 | #include <linux/platform_device.h> |
29 | #include <linux/interrupt.h> |
30 | #include <linux/clk.h> |
31 | #include <linux/workqueue.h> |
32 | #include <linux/sysfs.h> |
33 | #include <linux/kobject.h> |
34 | #include <linux/io.h> |
35 | #include <linux/mutex.h> |
36 | #include <linux/thermal.h> |
37 | #include <linux/cpufreq.h> |
38 | #include <linux/cpu_cooling.h> |
39 | #include <linux/of.h> |
40 | #include <linux/amlogic/saradc.h> |
41 | #include <plat/cpu.h> |
42 | #include <linux/random.h> |
43 | #include <linux/gpu_cooling.h> |
44 | #include <linux/cpucore_cooling.h> |
45 | #include <linux/gpucore_cooling.h> |
46 | #include <linux/thermal_core.h> |
47 | #include <mach/thermal.h> |
48 | #include "amlogic_thermal.h" |
49 | |
#define DBG_VIRTUAL 0
/* Toggled via the 'thermal_debug' sysfs attribute; gates THERMAL_DBG(). */
int thermal_debug_enable = 0;
/* Toggled via the 'high_temp_protect' sysfs attribute; arms trip 1. */
int high_temp_protect = 0;
/* Set non-zero by the freq monitor when fresh average freqs are available. */
atomic_t freq_update_flag;
EXPORT_SYMBOL(thermal_debug_enable);
EXPORT_SYMBOL(high_temp_protect);
EXPORT_SYMBOL(freq_update_flag);
57 | |
/*
 * Debug print gated by thermal_debug_enable.
 * Wrapped in do { } while (0) so the macro expands safely inside un-braced
 * if/else statements; the original bare if-block could capture a following
 * 'else' belonging to the caller's code.
 */
#define THERMAL_DBG(format, args...)				\
	do {							\
		if (thermal_debug_enable)			\
			printk("[THERMAL]" format, ##args);	\
	} while (0)
62 | |
/* Virtual-thermal lookup tables parsed from the device tree in probe. */
static struct aml_virtual_thermal_device cpu_virtual_thermal = {};
static struct aml_virtual_thermal_device gpu_virtual_thermal = {};
/* Sample-count boundaries ("report_time" DT property) for the 4 buckets. */
static unsigned int report_interval[4] = {};
66 | |
67 | /* CPU Zone information */ |
68 | #define PANIC_ZONE 4 |
69 | #define WARN_ZONE 3 |
70 | #define MONITOR_ZONE 2 |
71 | #define SAFE_ZONE 1 |
72 | |
73 | #define GET_ZONE(trip) (trip + 2) |
74 | #define GET_TRIP(zone) (zone - 2) |
75 | |
76 | static void amlogic_unregister_thermal(struct amlogic_thermal_platform_data *pdata); |
77 | static int amlogic_register_thermal(struct amlogic_thermal_platform_data *pdata); |
78 | |
/* Exported mutex_lock() wrapper (presumably for other thermal modules). */
void thermal_lock(struct mutex *lock)
{
	mutex_lock(lock);
}
EXPORT_SYMBOL(thermal_lock);
84 | |
/* Exported mutex_unlock() wrapper, counterpart of thermal_lock(). */
void thermal_unlock(struct mutex *lock)
{
	mutex_unlock(lock);
}
EXPORT_SYMBOL(thermal_unlock);
90 | |
91 | /* Get mode callback functions for thermal zone */ |
92 | static int amlogic_get_mode(struct thermal_zone_device *thermal, |
93 | enum thermal_device_mode *mode) |
94 | { |
95 | struct amlogic_thermal_platform_data *pdata= thermal->devdata; |
96 | |
97 | if (pdata) |
98 | *mode = pdata->mode; |
99 | return 0; |
100 | } |
101 | |
/*
 * Set mode callback for the thermal zone.
 * Enabling restores the polling interval, clears the stop flags of the
 * core-count cooling devices, and (in keep-mode) starts the sampling work.
 * Disabling stops polling, cancels the work, and parks the cpucore/gpucore
 * cooling devices via their CPU_STOP/GPU_STOP states.
 */
static int amlogic_set_mode(struct thermal_zone_device *thermal,
			enum thermal_device_mode mode)
{
	struct amlogic_thermal_platform_data *pdata = thermal->devdata;
	struct cpucore_cooling_device *cpucore_device = NULL;
	struct gpucore_cooling_device *gpucore_device = NULL;
	if (!pdata)
		return -EINVAL;

	//mutex_lock(&pdata->therm_dev->lock);

	if (mode == THERMAL_DEVICE_ENABLED) {
		/* resume periodic polling at the configured interval */
		pdata->therm_dev->polling_delay = pdata->idle_interval;
		if (pdata->cpucore_cool_dev) {
			cpucore_device = pdata->cpucore_cool_dev->devdata;
			cpucore_device->stop_flag = 0;	/* allow core hotplug again */
		}
		if (pdata->gpucore_cool_dev) {
			gpucore_device = pdata->gpucore_cool_dev->devdata;
			gpucore_device->stop_flag = 0;
		}
		if (pdata->keep_mode) {	// start work
			schedule_delayed_work(&pdata->thermal_work, msecs_to_jiffies(100));
		}
	}
	else {
		/* a polling_delay of 0 stops the thermal core's polling */
		pdata->therm_dev->polling_delay = 0;
		if (pdata->keep_mode) {
			cancel_delayed_work_sync(&pdata->thermal_work);
			keep_mode_set_mode(pdata);
		}
		/* park the core-count cooling devices at their STOP states */
		if (pdata->cpucore_cool_dev)
			pdata->cpucore_cool_dev->ops->set_cur_state(pdata->cpucore_cool_dev, (0 | CPU_STOP));
		if (pdata->gpucore_cool_dev)
			pdata->gpucore_cool_dev->ops->set_cur_state(pdata->gpucore_cool_dev, (0 | GPU_STOP));
	}

	//mutex_unlock(&pdata->therm_dev->lock);

	pdata->mode = mode;
	thermal_zone_device_update(pdata->therm_dev);
	pr_info("thermal polling set for duration=%d msec\n",
			pdata->therm_dev->polling_delay);
	return 0;
}
148 | |
149 | /* Get trip type callback functions for thermal zone */ |
150 | static int amlogic_get_trip_type(struct thermal_zone_device *thermal, int trip, |
151 | enum thermal_trip_type *type) |
152 | { |
153 | if(trip < thermal->trips-1) |
154 | *type = THERMAL_TRIP_ACTIVE; |
155 | else if(trip == thermal->trips-1) |
156 | *type = THERMAL_TRIP_CRITICAL; |
157 | else |
158 | return -EINVAL; |
159 | return 0; |
160 | } |
161 | |
162 | /* Get trip temperature callback functions for thermal zone */ |
163 | static int amlogic_get_trip_temp(struct thermal_zone_device *thermal, int trip, |
164 | unsigned long *temp) |
165 | { |
166 | struct amlogic_thermal_platform_data *pdata= thermal->devdata; |
167 | |
168 | if(trip > pdata->temp_trip_count ||trip<0) |
169 | return -EINVAL; |
170 | mutex_lock(&pdata->lock); |
171 | *temp =pdata->tmp_trip[trip].temperature; |
172 | /* convert the temperature into millicelsius */ |
173 | mutex_unlock(&pdata->lock); |
174 | |
175 | return 0; |
176 | } |
177 | |
178 | static int amlogic_set_trip_temp(struct thermal_zone_device *thermal, int trip, |
179 | unsigned long temp) |
180 | { |
181 | struct amlogic_thermal_platform_data *pdata= thermal->devdata; |
182 | |
183 | if(trip > pdata->temp_trip_count ||trip<0) |
184 | return -EINVAL; |
185 | mutex_lock(&pdata->lock); |
186 | pdata->tmp_trip[trip].temperature=temp; |
187 | /* convert the temperature into millicelsius */ |
188 | mutex_unlock(&pdata->lock); |
189 | return 0; |
190 | } |
191 | |
192 | /* Get critical temperature callback functions for thermal zone */ |
193 | static int amlogic_get_crit_temp(struct thermal_zone_device *thermal, |
194 | unsigned long *temp) |
195 | { |
196 | int ret; |
197 | /* Panic zone */ |
198 | ret =amlogic_get_trip_temp(thermal, thermal->trips-1, temp); |
199 | |
200 | return ret; |
201 | } |
202 | |
203 | |
/*
 * Bind callback for the thermal zone.  Cooling device names are expected
 * to look like "thermal-<kind>-<id>"; the <kind> selects one of four
 * parallel binding paths (cpufreq, gpufreq, cpucore, gpucore).  Each path
 * binds the cooling device to every trip with per-trip upper/lower state
 * limits taken from the DT-derived trip table, then (in keep-mode) records
 * the device's max state via keep_mode_bind() in a fixed slot (0..3).
 */
static int amlogic_bind(struct thermal_zone_device *thermal,
			struct thermal_cooling_device *cdev)
{
	int ret = 0, i;
	struct amlogic_thermal_platform_data *pdata = thermal->devdata;
	int id;
	char type[THERMAL_NAME_LENGTH];
	unsigned long max;

	/* parse "thermal-<kind>-<id>"; reject anything else */
	if (!sscanf(cdev->type, "thermal-%7s-%d", type, &id))
		return -EINVAL;
	if (!strcmp(type, "cpufreq")) {
		/* Bind the thermal zone to the cpufreq cooling device */
		for (i = 0; i < pdata->temp_trip_count; i++) {
			/* an invalid level on trip 0 means DT disabled this device */
			if (pdata->tmp_trip[0].cpu_upper_level == THERMAL_CSTATE_INVALID)
			{
				printk("disable cpu cooling device by dtd\n");
				ret = -EINVAL;
				goto out;
			}
			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
				pdata->tmp_trip[i].cpu_upper_level,
				pdata->tmp_trip[i].cpu_lower_level)) {
				pr_err("error binding cdev inst %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		pr_info("%s bind %s okay !\n", thermal->type, cdev->type);
		if (pdata->keep_mode) {
			cdev->ops->get_max_state(cdev, &max);
			keep_mode_bind(pdata, max, 0);	/* slot 0: cpufreq */
		}
	}

	if (!strcmp(type, "gpufreq")) {
		struct gpufreq_cooling_device *gpufreq_dev =
			(struct gpufreq_cooling_device *)cdev->devdata;
		/* Bind the thermal zone to the gpufreq cooling device */
		for (i = 0; i < pdata->temp_trip_count; i++) {
			if (!gpufreq_dev->get_gpu_freq_level) {
				ret = -EINVAL;
				pr_info("invalidate pointer %p\n", gpufreq_dev->get_gpu_freq_level);
				goto out;
			}
			/*
			 * Translate the DT freq bounds into cooling-state levels.
			 * Note: higher freq -> lower state, hence upper_freq feeds
			 * lower_level and vice versa (intentional inversion).
			 */
			pdata->tmp_trip[i].gpu_lower_level = gpufreq_dev->get_gpu_freq_level(pdata->tmp_trip[i].gpu_upper_freq);
			pdata->tmp_trip[i].gpu_upper_level = gpufreq_dev->get_gpu_freq_level(pdata->tmp_trip[i].gpu_lower_freq);
			printk("pdata->tmp_trip[%d].gpu_lower_level=%d\n", i, pdata->tmp_trip[i].gpu_lower_level);
			printk("pdata->tmp_trip[%d].gpu_upper_level=%d\n", i, pdata->tmp_trip[i].gpu_upper_level);
			if (pdata->tmp_trip[0].gpu_lower_level == THERMAL_CSTATE_INVALID)
			{
				printk("disable gpu cooling device by dtd\n");
				ret = -EINVAL;
				goto out;
			}
			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
				pdata->tmp_trip[i].gpu_upper_level,
				pdata->tmp_trip[i].gpu_lower_level)) {
				pr_err("error binding cdev inst %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		pdata->gpu_cool_dev = cdev;
		pr_info("%s bind %s okay !\n", thermal->type, cdev->type);
		if (pdata->keep_mode) {
			cdev->ops->get_max_state(cdev, &max);
			keep_mode_bind(pdata, max, 1);	/* slot 1: gpufreq */
		}
	}

	if (!strcmp(type, "cpucore")) {
		/* Bind the thermal zone to the cpucore (hotplug) cooling device */
		struct cpucore_cooling_device *cpucore_dev =
			(struct cpucore_cooling_device *)cdev->devdata;
		for (i = 0; i < pdata->temp_trip_count; i++) {
			if (pdata->tmp_trip[0].cpu_core_num == THERMAL_CSTATE_INVALID)
			{
				printk("disable cpu cooling device by dtd\n");
				ret = -EINVAL;
				goto out;
			}
			/* DT stores cores to keep; state is cores to remove (-1 passes through) */
			if (pdata->tmp_trip[i].cpu_core_num != -1)
				pdata->tmp_trip[i].cpu_core_upper = cpucore_dev->max_cpu_core_num - pdata->tmp_trip[i].cpu_core_num;
			else
				pdata->tmp_trip[i].cpu_core_upper = pdata->tmp_trip[i].cpu_core_num;
			printk("tmp_trip[%d].cpu_core_upper=%d\n", i, pdata->tmp_trip[i].cpu_core_upper);
			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
				pdata->tmp_trip[i].cpu_core_upper,
				pdata->tmp_trip[i].cpu_core_upper)) {
				pr_err("error binding cdev inst %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		pr_info("%s bind %s okay !\n", thermal->type, cdev->type);
		if (pdata->keep_mode) {
			cdev->ops->get_max_state(cdev, &max);
			keep_mode_bind(pdata, max, 2);	/* slot 2: cpucore */
		}
	}

	if (!strcmp(type, "gpucore")) {
		/* Bind the thermal zone to the gpucore cooling device */
		struct gpucore_cooling_device *gpucore_dev =
			(struct gpucore_cooling_device *)cdev->devdata;
		for (i = 0; i < pdata->temp_trip_count; i++) {
			/*
			 * NOTE(review): this guard tests cpu_core_num, not
			 * gpu_core_num -- looks like a copy/paste from the
			 * cpucore branch; confirm against the DT bindings.
			 */
			if (pdata->tmp_trip[0].cpu_core_num == THERMAL_CSTATE_INVALID)
			{
				printk("disable cpu cooling device by dtd\n");
				ret = -EINVAL;
				goto out;
			}
			if (pdata->tmp_trip[i].gpu_core_num != -1)
				pdata->tmp_trip[i].gpu_core_upper = gpucore_dev->max_gpu_core_num - pdata->tmp_trip[i].gpu_core_num;
			else
				pdata->tmp_trip[i].gpu_core_upper = pdata->tmp_trip[i].gpu_core_num;

			printk("tmp_trip[%d].gpu_core_upper=%d\n", i, pdata->tmp_trip[i].gpu_core_upper);
			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
				pdata->tmp_trip[i].gpu_core_upper,
				pdata->tmp_trip[i].gpu_core_upper)) {
				pr_err("error binding cdev inst %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		pdata->gpucore_cool_dev = cdev;
		pr_info("%s bind %s okay !\n", thermal->type, cdev->type);
		if (pdata->keep_mode) {
			cdev->ops->get_max_state(cdev, &max);
			keep_mode_bind(pdata, max, 3);	/* slot 3: gpucore */
		}
	}
	return ret;
out:
	return ret;
}
343 | |
344 | /* Unbind callback functions for thermal zone */ |
345 | static int amlogic_unbind(struct thermal_zone_device *thermal, |
346 | struct thermal_cooling_device *cdev) |
347 | { |
348 | int i; |
349 | if(thermal && cdev){ |
350 | struct amlogic_thermal_platform_data *pdata= thermal->devdata; |
351 | for (i = 0; i < pdata->temp_trip_count; i++) { |
352 | pr_info("\n%s unbinding %s ",thermal->type,cdev->type); |
353 | if (thermal_zone_unbind_cooling_device(thermal, i, cdev)) { |
354 | pr_err(" error %d \n", i); |
355 | return -EINVAL; |
356 | } |
357 | pr_info(" okay\n"); |
358 | return 0; |
359 | } |
360 | }else{ |
361 | return -EINVAL; |
362 | } |
363 | return -EINVAL; |
364 | } |
/* Absolute value for signed scalars; note: evaluates its argument twice. */
#define ABS(a) ((a) > 0 ? (a) : -(a))
366 | |
367 | void *thermal_alloc(size_t len) |
368 | { |
369 | return kzalloc(len, GFP_KERNEL); |
370 | } |
371 | EXPORT_SYMBOL(thermal_alloc); |
372 | |
373 | static void thermal_work(struct work_struct *work) |
374 | { |
375 | struct cpufreq_policy *policy = cpufreq_cpu_get(0); |
376 | struct amlogic_thermal_platform_data *pdata; |
377 | int cpu_freq = 0; |
378 | |
379 | pdata = container_of((struct delayed_work *)work, struct amlogic_thermal_platform_data, thermal_work); |
380 | if (policy) { |
381 | cpu_freq = policy->cur; |
382 | } |
383 | keep_mode_work(pdata, cpu_freq); |
384 | if (pdata->mode == THERMAL_DEVICE_ENABLED) { // no need to do this work again if thermal disabled |
385 | schedule_delayed_work(&pdata->thermal_work, msecs_to_jiffies(100)); |
386 | } |
387 | } |
388 | |
/*
 * Parse the virtual-thermal configuration from the device tree:
 *  - "use_virtual_thermal": opt-in flag; absent -> feature disabled, 0.
 *  - "freq_sample_period": monitor sampling period (default 30 if absent).
 *  - "report_time": the four report_interval[] bucket boundaries.
 *  - "cpu_virtual"/"gpu_virtual": freq->temp tables, stored in the static
 *    cpu_virtual_thermal/gpu_virtual_thermal devices (freed in
 *    aml_virtual_thermal_remove()).
 * Returns 0 on success, -ENOMEM on allocation failure, -1 on any other
 * parse error (with virtual_thermal_en forced to 0).
 */
static int aml_virtaul_thermal_probe(struct platform_device *pdev, struct amlogic_thermal_platform_data *pdata)
{
	int ret, len, cells;
	struct property *prop;
	void *buf;

	if (!of_property_read_bool(pdev->dev.of_node, "use_virtual_thermal")) {
		printk("%s, virtual thermal is not enabled\n", __func__);
		pdata->virtual_thermal_en = 0;
		return 0;
	} else {
		printk("%s, virtual thermal enabled\n", __func__);
	}

	/* sampling period for the freq monitors; default 30 when absent */
	ret = of_property_read_u32(pdev->dev.of_node,
				   "freq_sample_period",
				   &pdata->freq_sample_period);
	if (ret) {
		printk("%s, get freq_sample_period failed, us 30 as default\n", __func__);
		pdata->freq_sample_period = 30;
	} else {
		printk("%s, get freq_sample_period with value:%d\n", __func__, pdata->freq_sample_period);
	}
	/* four stability-count boundaries used by check_freq_level_cnt() */
	ret = of_property_read_u32_array(pdev->dev.of_node,
					 "report_time",
					 report_interval, sizeof(report_interval) / sizeof(u32));
	if (ret) {
		printk("%s, get report_time failed\n", __func__);
		goto error;
	} else {
		printk("[virtual_thermal] report interval:%4d, %4d, %4d, %4d\n",
			report_interval[0], report_interval[1], report_interval[2], report_interval[3]);
	}
	/*
	 * read cpu_virtal
	 */
	prop = of_find_property(pdev->dev.of_node, "cpu_virtual", &len);
	if (!prop) {
		printk("%s, cpu virtual not found\n", __func__);
		goto error;
	}
	cells = len / sizeof(struct aml_virtual_thermal);
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		/* NOTE(review): returns without the 'goto error' path, so
		 * virtual_thermal_en relies on pdata being pre-zeroed */
		printk("%s, no memory\n", __func__);
		return -ENOMEM;
	}
	ret = of_property_read_u32_array(pdev->dev.of_node,
					 "cpu_virtual",
					 buf, len/sizeof(u32));
	if (ret) {
		printk("%s, read cpu_virtual failed\n", __func__);
		kfree(buf);
		goto error;
	}
	cpu_virtual_thermal.count = cells;
	cpu_virtual_thermal.thermal = buf;

	/*
	 * read gpu_virtal
	 * NOTE(review): error paths from here leave the cpu table allocated;
	 * it is released later by aml_virtual_thermal_remove().
	 */
	prop = of_find_property(pdev->dev.of_node, "gpu_virtual", &len);
	if (!prop) {
		printk("%s, gpu virtual not found\n", __func__);
		goto error;
	}
	cells = len / sizeof(struct aml_virtual_thermal);
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		printk("%s, no memory\n", __func__);
		return -ENOMEM;
	}
	ret = of_property_read_u32_array(pdev->dev.of_node,
					 "gpu_virtual",
					 buf, len/sizeof(u32));
	if (ret) {
		printk("%s, read gpu_virtual failed\n", __func__);
		kfree(buf);
		goto error;
	}
	gpu_virtual_thermal.count = cells;
	gpu_virtual_thermal.thermal = buf;

#if DBG_VIRTUAL
	printk("cpu_virtal cells:%d, table:\n", cpu_virtual_thermal.count);
	for (len = 0; len < cpu_virtual_thermal.count; len++) {
		printk("%2d, %8d, %4d, %4d, %4d, %4d\n",
			len,
			cpu_virtual_thermal.thermal[len].freq,
			cpu_virtual_thermal.thermal[len].temp_time[0],
			cpu_virtual_thermal.thermal[len].temp_time[1],
			cpu_virtual_thermal.thermal[len].temp_time[2],
			cpu_virtual_thermal.thermal[len].temp_time[3]);
	}
	printk("gpu_virtal cells:%d, table:\n", gpu_virtual_thermal.count);
	for (len = 0; len < gpu_virtual_thermal.count; len++) {
		printk("%2d, %8d, %4d, %4d, %4d, %4d\n",
			len,
			gpu_virtual_thermal.thermal[len].freq,
			gpu_virtual_thermal.thermal[len].temp_time[0],
			gpu_virtual_thermal.thermal[len].temp_time[1],
			gpu_virtual_thermal.thermal[len].temp_time[2],
			gpu_virtual_thermal.thermal[len].temp_time[3]);
	}
#endif

	pdata->virtual_thermal_en = 1;
	return 0;

error:
	pdata->virtual_thermal_en = 0;
	return -1;
}
502 | |
503 | static void aml_virtual_thermal_remove(struct amlogic_thermal_platform_data *pdata) |
504 | { |
505 | kfree(cpu_virtual_thermal.thermal); |
506 | kfree(gpu_virtual_thermal.thermal); |
507 | pdata->virtual_thermal_en = 0; |
508 | } |
509 | |
510 | static int check_freq_level(struct aml_virtual_thermal_device *dev, unsigned int freq) |
511 | { |
512 | int i = 0; |
513 | |
514 | if (freq >= dev->thermal[dev->count-1].freq) { |
515 | return dev->count - 1; |
516 | } |
517 | for (i = 0; i < dev->count - 1; i++) { |
518 | if (freq > dev->thermal[i].freq && freq <= dev->thermal[i + 1].freq) { |
519 | return i + 1; |
520 | } |
521 | } |
522 | return 0; |
523 | } |
524 | |
525 | static int check_freq_level_cnt(unsigned int cnt) |
526 | { |
527 | int i; |
528 | |
529 | if (cnt >= report_interval[3]) { |
530 | return 3; |
531 | } |
532 | for (i = 0; i < 3; i++) { |
533 | if (cnt >= report_interval[i] && cnt < report_interval[i + 1]) { |
534 | return i; |
535 | } |
536 | } |
537 | return 0; |
538 | } |
539 | |
540 | static unsigned long aml_cal_virtual_temp(struct amlogic_thermal_platform_data *pdata) |
541 | { |
542 | static unsigned int cpu_freq_level_cnt = 0, gpu_freq_level_cnt = 0; |
543 | static unsigned int last_cpu_freq_level = 0, last_gpu_freq_level = 0; |
544 | static unsigned int cpu_temp = 40, gpu_temp = 40; // default set to 40 when at homescreen |
545 | unsigned int curr_cpu_avg_freq, curr_gpu_avg_freq; |
546 | int curr_cpu_freq_level, curr_gpu_freq_level; |
547 | int cnt_level, level_diff; |
548 | int temp_update = 0, final_temp; |
549 | |
550 | /* |
551 | * CPU temp |
552 | */ |
553 | if (atomic_read(&freq_update_flag)) { |
554 | curr_cpu_avg_freq = pdata->monitor.avg_cpu_freq; |
555 | curr_cpu_freq_level = check_freq_level(&cpu_virtual_thermal, curr_cpu_avg_freq); |
556 | level_diff = curr_cpu_freq_level - last_cpu_freq_level; |
557 | if (ABS(level_diff) <= 1) { // freq change is not large |
558 | cpu_freq_level_cnt++; |
559 | cnt_level = check_freq_level_cnt(cpu_freq_level_cnt); |
560 | cpu_temp = cpu_virtual_thermal.thermal[curr_cpu_freq_level].temp_time[cnt_level]; |
561 | #if DBG_VIRTUAL |
562 | printk("%s, cur_freq:%7d, freq_level:%d, cnt_level:%d, cnt:%d, cpu_temp:%d\n", |
563 | __func__, curr_cpu_avg_freq, curr_cpu_freq_level, cnt_level, cpu_freq_level_cnt, cpu_temp); |
564 | #endif |
565 | } else { // level not match |
566 | cpu_temp = cpu_virtual_thermal.thermal[curr_cpu_freq_level].temp_time[0]; |
567 | #if DBG_VIRTUAL |
568 | printk("%s, cur_freq:%7d, cur_level:%d, last_level:%d, last_cnt_level:%d, cpu_temp:%d\n", |
569 | __func__, curr_cpu_avg_freq, curr_cpu_freq_level, last_cpu_freq_level, cpu_freq_level_cnt, cpu_temp); |
570 | #endif |
571 | cpu_freq_level_cnt = 0; |
572 | } |
573 | last_cpu_freq_level = curr_cpu_freq_level; |
574 | |
575 | curr_gpu_avg_freq = pdata->monitor.avg_gpu_freq; |
576 | curr_gpu_freq_level = check_freq_level(&gpu_virtual_thermal, curr_gpu_avg_freq); |
577 | level_diff = curr_gpu_freq_level - last_gpu_freq_level; |
578 | if (ABS(level_diff) <= 1) { // freq change is not large |
579 | gpu_freq_level_cnt++; |
580 | cnt_level = check_freq_level_cnt(gpu_freq_level_cnt); |
581 | gpu_temp = gpu_virtual_thermal.thermal[curr_gpu_freq_level].temp_time[cnt_level]; |
582 | #if DBG_VIRTUAL |
583 | printk("%s, cur_freq:%7d, freq_level:%d, cnt_level:%d, cnt:%d, gpu_temp:%d\n", |
584 | __func__, curr_gpu_avg_freq, curr_gpu_freq_level, cnt_level, gpu_freq_level_cnt, gpu_temp); |
585 | #endif |
586 | } else { // level not match |
587 | gpu_temp = gpu_virtual_thermal.thermal[curr_gpu_freq_level].temp_time[0]; |
588 | gpu_freq_level_cnt = 0; |
589 | #if DBG_VIRTUAL |
590 | printk("%s, cur_freq:%7d, cur_level:%d, last_level:%d, gpu_temp:%d\n", |
591 | __func__, curr_gpu_avg_freq, curr_gpu_freq_level, last_gpu_freq_level, gpu_temp); |
592 | #endif |
593 | } |
594 | last_gpu_freq_level = curr_gpu_freq_level; |
595 | |
596 | atomic_set(&freq_update_flag, 0); |
597 | temp_update = 1; |
598 | } |
599 | |
600 | if (cpu_temp <= 0 && gpu_temp <= 0) { |
601 | printk("%s, Bug here, cpu & gpu temp can't be 0, cpu_temp:%d, gpu_temp:%d\n", __func__, cpu_temp, gpu_temp); |
602 | final_temp = 40; |
603 | } |
604 | final_temp = (cpu_temp >= gpu_temp ? cpu_temp : gpu_temp); |
605 | if (temp_update) { |
606 | #if DBG_VIRTUAL |
607 | printk("final temp:%d\n", final_temp); |
608 | #endif |
609 | } |
610 | return final_temp; |
611 | } |
612 | |
613 | /* Get temperature callback functions for thermal zone */ |
614 | static int amlogic_get_temp(struct thermal_zone_device *thermal, |
615 | unsigned long *temp) |
616 | { |
617 | struct amlogic_thermal_platform_data *pdata = thermal->devdata; |
618 | |
619 | if (pdata->trim_flag) { |
620 | *temp = (unsigned long)get_cpu_temp(); |
621 | pdata->current_temp = *temp; |
622 | } else if (pdata->virtual_thermal_en) { |
623 | *temp = aml_cal_virtual_temp(pdata); |
624 | } else { |
625 | *temp = 45; // fix cpu temperature to 45 if not trimed && disable virtual thermal |
626 | } |
627 | return 0; |
628 | } |
629 | |
/*
 * Get the temperature trend.  Always returns non-zero without filling
 * *trend, which makes the thermal core fall back to computing the trend
 * from its own temperature history.
 * NOTE(review): the unconditional 'return 1' looks deliberate (delegate to
 * the core) -- confirm before changing it to set *trend and return 0.
 */
static int amlogic_get_trend(struct thermal_zone_device *thermal,
			int trip, enum thermal_trend *trend)
{
	return 1;
}
/* Operation callback functions for the thermal zone (thermal core ops). */
static struct thermal_zone_device_ops const amlogic_dev_ops = {
	.bind = amlogic_bind,
	.unbind = amlogic_unbind,
	.get_temp = amlogic_get_temp,
	.get_trend = amlogic_get_trend,
	.get_mode = amlogic_get_mode,
	.set_mode = amlogic_set_mode,
	.get_trip_type = amlogic_get_trip_type,
	.get_trip_temp = amlogic_get_trip_temp,
	.set_trip_temp = amlogic_set_trip_temp,
	.get_crit_temp = amlogic_get_crit_temp,
};
649 | |
650 | /* |
651 | * sysfs for keep_mode |
652 | */ |
#ifdef CONFIG_CPU_FREQ_GOV_HOTPLUG	// for DEBUG
/* Read-only debug view of the hotplug governor's max online CPU count. */
/* NOTE(review): %d prints an unsigned int; %u would be the exact match. */
extern unsigned int max_cpu_num;
static ssize_t max_cpu_num_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", max_cpu_num);
}
#endif
660 | |
661 | static ssize_t thermal_debug_show(struct device *dev, struct device_attribute *attr, char *buf) |
662 | { |
663 | return sprintf(buf, "%d\n", thermal_debug_enable); |
664 | } |
665 | |
666 | static ssize_t thermal_debug_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) |
667 | { |
668 | int32_t data = simple_strtol(buf, NULL, 10); |
669 | |
670 | if (data) { |
671 | thermal_debug_enable = 1; |
672 | } else { |
673 | thermal_debug_enable = 0; |
674 | } |
675 | return count; |
676 | } |
677 | |
678 | static ssize_t keep_mode_show(struct device *dev, struct device_attribute *attr, char *buf) |
679 | { |
680 | struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device); |
681 | struct amlogic_thermal_platform_data *pdata = tz->devdata; |
682 | |
683 | return sprintf(buf, "%s\n", pdata->keep_mode ? "enabled": "disabled"); |
684 | } |
685 | |
686 | static ssize_t keep_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) |
687 | { |
688 | struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device); |
689 | struct amlogic_thermal_platform_data *pdata = tz->devdata; |
690 | if (!strncmp(buf, "enabled", sizeof("enabled") - 1)) { |
691 | pdata->keep_mode = 1; |
692 | } else if (!strncmp(buf, "disabled", sizeof("disabled") - 1)) { |
693 | pdata->keep_mode = 0; |
694 | } |
695 | return count; |
696 | } |
697 | |
698 | static ssize_t keep_mode_threshold_show(struct device *dev, struct device_attribute *attr, char *buf) |
699 | { |
700 | struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device); |
701 | struct amlogic_thermal_platform_data *pdata = tz->devdata; |
702 | |
703 | return sprintf(buf, "%d\n", pdata->keep_mode_threshold); |
704 | } |
705 | |
706 | static ssize_t keep_mode_threshold_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) |
707 | { |
708 | struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device); |
709 | struct amlogic_thermal_platform_data *pdata = tz->devdata; |
710 | int32_t data = simple_strtol(buf, NULL, 10); |
711 | |
712 | if (data > 200) { |
713 | printk("input is %d, seems too large, invalid\n", data); |
714 | } |
715 | keep_mode_update_threshold(pdata, data); |
716 | printk("set keep_mode_threshold to %d\n", data); |
717 | return count; |
718 | } |
719 | |
720 | static ssize_t high_temp_protect_show(struct device *dev, struct device_attribute *attr, char *buf) |
721 | { |
722 | return sprintf(buf, "%d\n", high_temp_protect); |
723 | } |
724 | |
725 | static ssize_t high_temp_protect_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) |
726 | { |
727 | struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device); |
728 | struct amlogic_thermal_platform_data *pdata = tz->devdata; |
729 | int32_t data = simple_strtol(buf, NULL, 10); |
730 | |
731 | high_temp_protect = data ? 1 : 0; |
732 | if (high_temp_protect) { |
733 | pdata->tmp_trip[1].temperature = pdata->keep_mode_threshold + 25; |
734 | } else { |
735 | pdata->tmp_trip[1].temperature = 260; |
736 | } |
737 | printk("high temperature protect %s\n", high_temp_protect ? "enabled" : "disabled"); |
738 | return count; |
739 | } |
740 | |
/* sysfs attributes created on the thermal zone device when keep_mode is on. */
static struct device_attribute amlogic_thermal_attr[] = {
#ifdef CONFIG_CPU_FREQ_GOV_HOTPLUG
	__ATTR(max_cpu_num, 0444, max_cpu_num_show, NULL),
#endif
	__ATTR(thermal_debug, 0644, thermal_debug_show, thermal_debug_store),
	__ATTR(keep_mode, 0644, keep_mode_show, keep_mode_store),
	__ATTR(keep_mode_threshold, 0644, keep_mode_threshold_show, keep_mode_threshold_store),
	__ATTR(high_temp_protect, 0644, high_temp_protect_show, high_temp_protect_store)
};
750 | |
751 | /* Register with the in-kernel thermal management */ |
752 | static int amlogic_register_thermal(struct amlogic_thermal_platform_data *pdata) |
753 | { |
754 | int ret=0, j; |
755 | struct cpumask mask_val; |
756 | |
757 | memset(&mask_val,0,sizeof(struct cpumask)); |
758 | cpumask_set_cpu(0, &mask_val); |
759 | pdata->cpu_cool_dev= cpufreq_cooling_register(&mask_val); |
760 | if (IS_ERR(pdata->cpu_cool_dev)) { |
761 | pr_err("Failed to register cpufreq cooling device\n"); |
762 | ret = -EINVAL; |
763 | goto err_unregister; |
764 | } |
765 | pdata->cpucore_cool_dev = cpucore_cooling_register(); |
766 | if (IS_ERR(pdata->cpucore_cool_dev)) { |
767 | pr_err("Failed to register cpufreq cooling device\n"); |
768 | ret = -EINVAL; |
769 | goto err_unregister; |
770 | } |
771 | |
772 | pdata->therm_dev = thermal_zone_device_register(pdata->name, |
773 | pdata->temp_trip_count, ((1 << pdata->temp_trip_count) - 1), pdata, &amlogic_dev_ops, NULL, 0, |
774 | pdata->idle_interval); |
775 | |
776 | if (IS_ERR(pdata->therm_dev)) { |
777 | pr_err("Failed to register thermal zone device, err:%p\n", pdata->therm_dev); |
778 | ret = -EINVAL; |
779 | goto err_unregister; |
780 | } |
781 | |
782 | if (pdata->keep_mode) { // create sysfs for keep_mode |
783 | for (j = 0; j < ARRAY_SIZE(amlogic_thermal_attr); j++) { |
784 | device_create_file(&pdata->therm_dev->device, &amlogic_thermal_attr[j]); |
785 | } |
786 | } |
787 | pr_info("amlogic: Kernel Thermal management registered\n"); |
788 | |
789 | return 0; |
790 | |
791 | err_unregister: |
792 | amlogic_unregister_thermal(pdata); |
793 | return ret; |
794 | } |
795 | |
/* Un-Register with the in-kernel thermal management */
static void amlogic_unregister_thermal(struct amlogic_thermal_platform_data *pdata)
{
	if (pdata->therm_dev)
		thermal_zone_device_unregister(pdata->therm_dev);
	if (pdata->cpu_cool_dev)
		cpufreq_cooling_unregister(pdata->cpu_cool_dev);

	/*
	 * NOTE(review): the cpucore cooling device registered in
	 * amlogic_register_thermal() is never released here -- check whether
	 * a matching cpucore_cooling_unregister() exists and is needed.
	 */
	pr_info("amlogic: Kernel Thermal management unregistered\n");
}
806 | |
/*
 * Determine the ordering of CPU0's cpufreq table by comparing the first
 * two distinct valid entries: returns 1 for descending, 0 for ascending,
 * -1 when fewer than two usable entries exist, and -EINVAL when there is
 * no table at all.
 */
int get_desend(void)
{
	int i;
	unsigned int freq = CPUFREQ_ENTRY_INVALID;
	int descend = -1;
	struct cpufreq_frequency_table *table =
					cpufreq_frequency_get_table(0);

	if (!table)
		return -EINVAL;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		/* ignore invalid entries */
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		/* ignore duplicate entry */
		if (freq == table[i].frequency)
			continue;

		/* get the frequency order from the first distinct pair */
		if (freq != CPUFREQ_ENTRY_INVALID && descend == -1) {
			descend = !!(freq > table[i].frequency);
			break;
		}

		freq = table[i].frequency;
	}
	return descend;
}
/*
 * Snap an arbitrary frequency onto an entry of CPU0's cpufreq table,
 * rounding "downwards" in the table's own ordering (descend selects which
 * neighbour wins).  Returns the snapped frequency or -EINVAL when no table
 * exists or no bracketing pair was found.
 * NOTE(review): the loop reads table[i+1].frequency, which for the last
 * valid entry is the CPUFREQ_TABLE_END sentinel -- relies on the sentinel
 * value comparing as a huge frequency; confirm this is intended.
 */
int fix_to_freq(int freqold, int descend)
{
	int i;
	unsigned int freq = CPUFREQ_ENTRY_INVALID;
	struct cpufreq_frequency_table *table =
					cpufreq_frequency_get_table(0);

	if (!table)
		return -EINVAL;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		/* ignore invalid entry */
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		/* ignore duplicate entry */
		if (freq == table[i].frequency)
			continue;
		freq = table[i].frequency;
		if (descend) {
			/* descending table: pick the lower (next) neighbour */
			if (freqold >= table[i+1].frequency && freqold <= table[i].frequency)
				return table[i+1].frequency;
		}
		else {
			/* ascending table: pick the lower (current) neighbour */
			if (freqold >= table[i].frequency && freqold <= table[i+1].frequency)
				return table[i].frequency;
		}
	}
	return -EINVAL;
}
867 | |
868 | void thermal_atomic_set(atomic_t *a, int value) |
869 | { |
870 | atomic_set(a, 1); |
871 | } |
872 | EXPORT_SYMBOL(thermal_atomic_set); |
873 | |
874 | static struct amlogic_thermal_platform_data * amlogic_thermal_init_from_dts(struct platform_device *pdev, int trim_flag) |
875 | { |
876 | int i = 0, ret = -1, val = 0, cells, descend, error = 0; |
877 | struct property *prop; |
878 | struct temp_level *tmp_level = NULL; |
879 | struct amlogic_thermal_platform_data *pdata = NULL; |
880 | |
881 | if(!of_property_read_u32(pdev->dev.of_node, "trip_point", &val)){ |
882 | //INIT FROM DTS |
883 | pdata=kzalloc(sizeof(*pdata),GFP_KERNEL); |
884 | if(!pdata){ |
885 | goto err; |
886 | } |
887 | memset((void* )pdata,0,sizeof(*pdata)); |
888 | ret=of_property_read_u32(pdev->dev.of_node, "#thermal-cells", &val); |
889 | if(ret){ |
890 | dev_err(&pdev->dev, "dt probe #thermal-cells failed: %d\n", ret); |
891 | goto err; |
892 | } |
893 | printk("#thermal-cells=%d\n",val); |
894 | cells=val; |
895 | |
896 | /* |
897 | * process for KEEP_MODE and virtual thermal |
898 | * Logic: If virtual thermal is enabled, then ignore keep_mode |
899 | * |
900 | */ |
901 | pdata->trim_flag = trim_flag; |
902 | if (!pdata->trim_flag) { // chip is not trimmed, use virtual thermal |
903 | aml_virtaul_thermal_probe(pdev, pdata); |
904 | } else if (of_property_read_bool(pdev->dev.of_node, "keep_mode")) { |
905 | if (of_property_read_u32(pdev->dev.of_node, "keep_mode_threshold", &pdata->keep_mode_threshold)) { |
906 | printk("ERROR:keep_mode is set but not found 'keep_mode_threshold'\n"); |
907 | error = 1; |
908 | } |
909 | if (of_property_read_u32_array(pdev->dev.of_node, |
910 | "keep_mode_max_range", |
911 | pdata->keep_mode_max_range, |
912 | sizeof(pdata->keep_mode_max_range)/sizeof(u32))) { |
913 | printk("ERROR:keep_mode is set but not found 'keep_mode_max_range'\n"); |
914 | error = 1; |
915 | } |
916 | if (!error && pdata->trim_flag) { // keep mode should not used for virtual thermal right now |
917 | printk("keep_mode enabled\n"); |
918 | printk("keep_mode_max_range: [%7d, %3d, %d, %d]\n", |
919 | pdata->keep_mode_max_range[0], pdata->keep_mode_max_range[1], |
920 | pdata->keep_mode_max_range[2], pdata->keep_mode_max_range[3]); |
921 | pdata->keep_mode = 1; |
922 | pdata->freq_sample_period = 5; |
923 | } |
924 | } else { |
925 | printk("keep_mode is disabled\n"); |
926 | } |
927 | if(pdata->keep_mode || !pdata->trim_flag){ |
928 | INIT_DELAYED_WORK(&pdata->thermal_work, thermal_work); |
929 | schedule_delayed_work(&pdata->thermal_work, msecs_to_jiffies(100)); |
930 | atomic_set(&freq_update_flag, 0); |
931 | } |
932 | |
933 | prop = of_find_property(pdev->dev.of_node, "trip_point", &val); |
934 | if (!prop){ |
935 | dev_err(&pdev->dev, "read %s length error\n","trip_point"); |
936 | goto err; |
937 | } |
938 | if (pdata->keep_mode) { |
939 | pdata->temp_trip_count = 2; |
940 | } else { |
941 | pdata->temp_trip_count=val/cells/sizeof(u32); |
942 | } |
943 | printk("pdata->temp_trip_count=%d\n",pdata->temp_trip_count); |
944 | tmp_level=kzalloc(sizeof(*tmp_level)*pdata->temp_trip_count,GFP_KERNEL); |
945 | pdata->tmp_trip=kzalloc(sizeof(struct temp_trip)*pdata->temp_trip_count,GFP_KERNEL); |
946 | if(!tmp_level){ |
947 | goto err; |
948 | } |
949 | |
950 | if (pdata->keep_mode) { // keep mode only need one point |
951 | keep_mode_temp_level_init(pdata, tmp_level); |
952 | } else { |
953 | ret=of_property_read_u32_array(pdev->dev.of_node,"trip_point",(u32 *)tmp_level,val/sizeof(u32)); |
954 | if (ret){ |
955 | dev_err(&pdev->dev, "read %s data error\n","trip_point"); |
956 | goto err; |
957 | } |
958 | } |
959 | descend=get_desend(); |
960 | for (i = 0; i < pdata->temp_trip_count; i++) { |
961 | printk("temperature=%d on trip point=%d\n",tmp_level[i].temperature,i); |
962 | pdata->tmp_trip[i].temperature=tmp_level[i].temperature; |
963 | printk("fixing high_freq=%d to ",tmp_level[i].cpu_high_freq); |
964 | tmp_level[i].cpu_high_freq=fix_to_freq(tmp_level[i].cpu_high_freq,descend); |
965 | pdata->tmp_trip[i].cpu_lower_level=cpufreq_cooling_get_level(0,tmp_level[i].cpu_high_freq); |
966 | printk("%d at trip point %d,level=%d\n",tmp_level[i].cpu_high_freq,i,pdata->tmp_trip[i].cpu_lower_level); |
967 | |
968 | printk("fixing low_freq=%d to ",tmp_level[i].cpu_low_freq); |
969 | tmp_level[i].cpu_low_freq=fix_to_freq(tmp_level[i].cpu_low_freq,descend); |
970 | pdata->tmp_trip[i].cpu_upper_level=cpufreq_cooling_get_level(0,tmp_level[i].cpu_low_freq); |
971 | printk("%d at trip point %d,level=%d\n",tmp_level[i].cpu_low_freq,i,pdata->tmp_trip[i].cpu_upper_level); |
972 | pdata->tmp_trip[i].gpu_lower_freq=tmp_level[i].gpu_low_freq; |
973 | pdata->tmp_trip[i].gpu_upper_freq=tmp_level[i].gpu_high_freq; |
974 | printk("gpu[%d].gpu_high_freq=%d,tmp_level[%d].gpu_high_freq=%d\n",i,tmp_level[i].gpu_high_freq,i,tmp_level[i].gpu_low_freq); |
975 | |
976 | pdata->tmp_trip[i].cpu_core_num=tmp_level[i].cpu_core_num; |
977 | printk("cpu[%d] core num==%d\n",i,pdata->tmp_trip[i].cpu_core_num); |
978 | pdata->tmp_trip[i].gpu_core_num=tmp_level[i].gpu_core_num; |
979 | printk("gpu[%d] core num==%d\n",i,pdata->tmp_trip[i].gpu_core_num); |
980 | } |
981 | |
982 | ret= of_property_read_u32(pdev->dev.of_node, "idle_interval", &val); |
983 | if (ret){ |
984 | dev_err(&pdev->dev, "read %s error\n","idle_interval"); |
985 | goto err; |
986 | } |
987 | pdata->idle_interval=val; |
988 | printk("idle interval=%d\n",pdata->idle_interval); |
989 | ret=of_property_read_string(pdev->dev.of_node,"dev_name",&pdata->name); |
990 | if (ret){ |
991 | dev_err(&pdev->dev, "read %s error\n","dev_name"); |
992 | goto err; |
993 | } |
994 | printk("pdata->name:%s, pdata:%p\n",pdata->name, pdata); |
995 | pdata->mode=THERMAL_DEVICE_ENABLED; |
996 | if(tmp_level) |
997 | kfree(tmp_level); |
998 | printk("%s, %d\n", __func__, __LINE__); |
999 | return pdata; |
1000 | } |
1001 | err: |
1002 | if(tmp_level) |
1003 | kfree(tmp_level); |
1004 | if(pdata) |
1005 | kfree(pdata); |
1006 | pdata= NULL; |
1007 | return pdata; |
1008 | } |
1009 | |
/* Thin wrapper around the DT parser; logs the resulting pdata pointer. */
static struct amlogic_thermal_platform_data * amlogic_thermal_initialize(struct platform_device *pdev, int trim_flag)
{
	struct amlogic_thermal_platform_data *pdata;

	pdata = amlogic_thermal_init_from_dts(pdev, trim_flag);
	printk("%s, %d, pdata:%p\n", __func__, __LINE__, pdata);
	return pdata;
}
1017 | |
/* Device-tree match table: binds this driver to "amlogic-thermal" nodes. */
static const struct of_device_id amlogic_thermal_match[] = {
	{
		.compatible = "amlogic-thermal",
	},
	{},
};
1024 | |
#ifdef CONFIG_HIBERNATION
/* Nothing to save across hibernation: state is rebuilt on restore. */
static int amlogic_thermal_freeze(struct device *dev)
{
	return 0;
}

static int amlogic_thermal_thaw(struct device *dev)
{
	return 0;
}

/* Re-run the trim/calibration init after resuming from a hibernation image. */
static int amlogic_thermal_restore(struct device *dev)
{
	thermal_firmware_init();

	return 0;
}

/*
 * Made const: the driver core takes a const pointer (.pm) and the table
 * is never modified at runtime.  NOTE: "theraml" is a historic typo kept
 * because the driver struct below references this identifier.
 */
static const struct dev_pm_ops amlogic_theraml_pm = {
	.freeze = amlogic_thermal_freeze,
	.thaw = amlogic_thermal_thaw,
	.restore = amlogic_thermal_restore,
};
#endif
1049 | |
1050 | static int amlogic_thermal_probe(struct platform_device *pdev) |
1051 | { |
1052 | int ret, trim_flag; |
1053 | struct amlogic_thermal_platform_data *pdata=NULL; |
1054 | |
1055 | ret=thermal_firmware_init(); |
1056 | if(ret<0){ |
1057 | printk("%s, this chip is not trimmed, use virtual thermal\n", __func__); |
1058 | trim_flag = 0; |
1059 | }else{ |
1060 | printk("%s, this chip is trimmed, use thermal\n", __func__); |
1061 | trim_flag = 1; |
1062 | } |
1063 | |
1064 | dev_info(&pdev->dev, "amlogic thermal probe start\n"); |
1065 | pdata = amlogic_thermal_initialize(pdev, trim_flag); |
1066 | if (!pdata) { |
1067 | dev_err(&pdev->dev, "Failed to initialize thermal\n"); |
1068 | goto err; |
1069 | } |
1070 | mutex_init(&pdata->lock); |
1071 | pdev->dev.platform_data=pdata; |
1072 | platform_set_drvdata(pdev, pdata); |
1073 | ret = amlogic_register_thermal(pdata); |
1074 | if (ret) { |
1075 | dev_err(&pdev->dev, "Failed to register thermal interface\n"); |
1076 | goto err; |
1077 | } |
1078 | dev_info(&pdev->dev, "amlogic thermal probe done\n"); |
1079 | return 0; |
1080 | err: |
1081 | platform_set_drvdata(pdev, NULL); |
1082 | return ret; |
1083 | } |
1084 | |
1085 | static int amlogic_thermal_remove(struct platform_device *pdev) |
1086 | { |
1087 | struct amlogic_thermal_platform_data *pdata = platform_get_drvdata(pdev); |
1088 | |
1089 | aml_virtual_thermal_remove(pdata); |
1090 | |
1091 | amlogic_unregister_thermal(pdata); |
1092 | |
1093 | platform_set_drvdata(pdev, NULL); |
1094 | |
1095 | return 0; |
1096 | } |
1097 | |
/* Platform driver glue; PM ops are only wired up when hibernation is built in. */
struct platform_driver amlogic_thermal_driver = {
	.driver = {
		.name = "amlogic-thermal",
		.owner = THIS_MODULE,
#ifdef CONFIG_HIBERNATION
		.pm = &amlogic_theraml_pm,
#endif
		.of_match_table = of_match_ptr(amlogic_thermal_match),
	},
	.probe = amlogic_thermal_probe,
	.remove = amlogic_thermal_remove,
};
1110 | |
1111 | static int __init amlogic_thermal_driver_init(void) |
1112 | { |
1113 | return platform_driver_register(&(amlogic_thermal_driver)); |
1114 | } |
1115 | late_initcall(amlogic_thermal_driver_init); |
1116 | static void __exit amlogic_thermal_driver_exit(void) |
1117 | { |
1118 | platform_driver_unregister(&(amlogic_thermal_driver) ); |
1119 | } |
1120 | module_exit(amlogic_thermal_driver_exit); |
1121 | |
/* Module metadata; MODULE_ALIAS enables autoload for the platform device. */
MODULE_DESCRIPTION("amlogic thermal Driver");
MODULE_AUTHOR("Amlogic SH platform team");
MODULE_ALIAS("platform:amlogic-thermal");
MODULE_LICENSE("GPL");
1126 | |
1127 |