blob: afd2cac1b1ae7eb6d371bd92edab2a56728a8339
1 | /* |
2 | * amlogic_thermal.c - Samsung amlogic thermal (Thermal Management Unit) |
3 | * |
4 | * Copyright (C) 2011 Samsung Electronics |
5 | * Donggeun Kim <dg77.kim@samsung.com> |
6 | * Amit Daniel Kachhap <amit.kachhap@linaro.org> |
7 | * |
8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by |
10 | * the Free Software Foundation; either version 2 of the License, or |
11 | * (at your option) any later version. |
12 | * |
13 | * This program is distributed in the hope that it will be useful, |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. |
17 | * |
18 | * You should have received a copy of the GNU General Public License |
19 | * along with this program; if not, write to the Free Software |
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
21 | * |
22 | */ |
23 | |
24 | #include <linux/module.h> |
25 | #include <linux/err.h> |
26 | #include <linux/kernel.h> |
27 | #include <linux/slab.h> |
28 | #include <linux/platform_device.h> |
29 | #include <linux/interrupt.h> |
30 | #include <linux/clk.h> |
31 | #include <linux/workqueue.h> |
32 | #include <linux/sysfs.h> |
33 | #include <linux/kobject.h> |
34 | #include <linux/io.h> |
35 | #include <linux/mutex.h> |
36 | #include <linux/thermal.h> |
37 | #include <linux/cpufreq.h> |
38 | #include <linux/cpu_cooling.h> |
39 | #include <linux/of.h> |
40 | #include <linux/amlogic/saradc.h> |
41 | #include <linux/random.h> |
42 | #include <linux/gpu_cooling.h> |
43 | #include <linux/cpucore_cooling.h> |
44 | #include <linux/gpucore_cooling.h> |
45 | #include <linux/thermal_core.h> |
46 | #include <linux/version.h> |
47 | #if LINUX_VERSION_CODE > KERNEL_VERSION(3, 10, 33) |
48 | #include <linux/amlogic/aml_thermal_hw.h> |
49 | #else |
50 | #include <mach/thermal.h> |
51 | #endif |
52 | #include <linux/version.h> |
53 | #include "amlogic_thermal.h" |
54 | |
/* Compile-time switch: non-zero enables verbose virtual-thermal table dumps. */
#define DBG_VIRTUAL 0
/* Lowest physically meaningful temperature (absolute zero, in Celsius). */
#define MIN_TEMP (-273)
/* Runtime switch for THERMAL_DBG() prints; toggled via the thermal_debug sysfs node. */
int thermal_debug_enable = 0;
/* Runtime switch for high-temperature protection; toggled via sysfs (see high_temp_protect_store). */
int high_temp_protect = 0;
/* Set by an external frequency monitor when new avg cpu/gpu freqs are available
 * (consumed and cleared in aml_cal_virtual_temp). */
atomic_t freq_update_flag;
EXPORT_SYMBOL(thermal_debug_enable);
EXPORT_SYMBOL(high_temp_protect);
EXPORT_SYMBOL(freq_update_flag);
63 | |
/*
 * Debug print gated on thermal_debug_enable.
 * Wrapped in do { } while (0) so the macro behaves as a single statement:
 * the previous bare "if { }" form silently captured the else branch in
 * call sites like "if (x) THERMAL_DBG(...); else ...".
 */
#define THERMAL_DBG(format,args...) \
	do { \
		if (thermal_debug_enable) { \
			printk("[THERMAL]"format, ##args); \
		} \
	} while (0)
68 | |
/* Frequency->temperature lookup tables parsed from DT (see aml_virtaul_thermal_probe). */
static struct aml_virtual_thermal_device cpu_virtual_thermal = {};
static struct aml_virtual_thermal_device gpu_virtual_thermal = {};
/* DT "report_time" thresholds used to bucket how long a freq level has been held. */
static unsigned int report_interval[4] = {};
/* Hook into the gpufreq cooling driver, captured during amlogic_bind(). */
static int (*gpu_freq_level)(int ) = NULL;

/* CPU Zone information */
#define PANIC_ZONE 4
#define WARN_ZONE 3
#define MONITOR_ZONE 2
#define SAFE_ZONE 1

/* Zone numbers are trip index + 2 (trip 0 == MONITOR_ZONE). */
#define GET_ZONE(trip) (trip + 2)
#define GET_TRIP(zone) (zone - 2)

static void amlogic_unregister_thermal(struct amlogic_thermal_platform_data *pdata);
static int amlogic_register_thermal(struct amlogic_thermal_platform_data *pdata, struct platform_device *pdev);
85 | |
/* Exported helper so cooling-device modules can take a pdata mutex
 * without needing the mutex API inline at their call sites. */
void thermal_lock(struct mutex *lock)
{
	mutex_lock(lock);
}
EXPORT_SYMBOL(thermal_lock);
91 | |
/* Counterpart of thermal_lock(); releases a mutex taken by a cooling module. */
void thermal_unlock(struct mutex *lock)
{
	mutex_unlock(lock);
}
EXPORT_SYMBOL(thermal_unlock);
97 | |
98 | /* Get mode callback functions for thermal zone */ |
99 | static int amlogic_get_mode(struct thermal_zone_device *thermal, |
100 | enum thermal_device_mode *mode) |
101 | { |
102 | struct amlogic_thermal_platform_data *pdata= thermal->devdata; |
103 | |
104 | if (pdata) |
105 | *mode = pdata->mode; |
106 | return 0; |
107 | } |
108 | |
109 | /* Set mode callback functions for thermal zone */ |
/* Set mode callback functions for thermal zone.
 * ENABLED: restore polling, clear the cpucore/gpucore stop flags, and (in
 * keep_mode) kick the periodic thermal_work.
 * DISABLED: stop polling, cancel the work, restore keep_mode settings, and
 * park both core-cooling devices via their *_STOP states. */
static int amlogic_set_mode(struct thermal_zone_device *thermal,
			enum thermal_device_mode mode)
{
	struct amlogic_thermal_platform_data *pdata= thermal->devdata;
	struct cpucore_cooling_device *cpucore_device =NULL;
	struct gpucore_cooling_device *gpucore_device = NULL;
	if(!pdata)
		return -EINVAL;

	//mutex_lock(&pdata->therm_dev->lock);

	if (mode == THERMAL_DEVICE_ENABLED){
		/* resume periodic polling at the configured idle interval */
		pdata->therm_dev->polling_delay = pdata->idle_interval;
		if(pdata->cpucore_cool_dev){
			cpucore_device=pdata->cpucore_cool_dev->devdata;
			cpucore_device->stop_flag=0;
		}
		if(pdata->gpucore_cool_dev){
			gpucore_device=pdata->gpucore_cool_dev->devdata;
			gpucore_device->stop_flag=0;
		}
		if (pdata->keep_mode) { // start work
			schedule_delayed_work(&pdata->thermal_work, msecs_to_jiffies(100));
		}
	}
	else{
		/* polling_delay of 0 disables periodic temperature polling */
		pdata->therm_dev->polling_delay = 0;
		if (pdata->keep_mode) {
			cancel_delayed_work_sync(&pdata->thermal_work);
			keep_mode_set_mode(pdata);
		}
		/* OR-ing the *_STOP bit tells the cooling device to park itself */
		if(pdata->cpucore_cool_dev)
			pdata->cpucore_cool_dev->ops->set_cur_state(pdata->cpucore_cool_dev,(0|CPU_STOP));
		if(pdata->gpucore_cool_dev)
			pdata->gpucore_cool_dev->ops->set_cur_state(pdata->gpucore_cool_dev,(0|GPU_STOP));
	}

	//mutex_unlock(&pdata->therm_dev->lock);

	pdata->mode = mode;
	/* force the thermal core to re-evaluate trips with the new mode */
	thermal_zone_device_update(pdata->therm_dev);
	pr_info("thermal polling set for duration=%d msec\n",
				pdata->therm_dev->polling_delay);
	return 0;
}
155 | |
156 | /* Get trip type callback functions for thermal zone */ |
157 | static int amlogic_get_trip_type(struct thermal_zone_device *thermal, int trip, |
158 | enum thermal_trip_type *type) |
159 | { |
160 | if(trip < thermal->trips-1) |
161 | *type = THERMAL_TRIP_ACTIVE; |
162 | else if(trip == thermal->trips-1) |
163 | *type = THERMAL_TRIP_CRITICAL; |
164 | else |
165 | return -EINVAL; |
166 | return 0; |
167 | } |
168 | |
169 | /* Get trip temperature callback functions for thermal zone */ |
170 | static int amlogic_get_trip_temp(struct thermal_zone_device *thermal, int trip, |
171 | unsigned long *temp) |
172 | { |
173 | struct amlogic_thermal_platform_data *pdata= thermal->devdata; |
174 | |
175 | if(trip > pdata->temp_trip_count ||trip<0) |
176 | return -EINVAL; |
177 | mutex_lock(&pdata->lock); |
178 | *temp =pdata->tmp_trip[trip].temperature; |
179 | /* convert the temperature into millicelsius */ |
180 | mutex_unlock(&pdata->lock); |
181 | |
182 | return 0; |
183 | } |
184 | |
185 | static int amlogic_set_trip_temp(struct thermal_zone_device *thermal, int trip, |
186 | unsigned long temp) |
187 | { |
188 | struct amlogic_thermal_platform_data *pdata= thermal->devdata; |
189 | |
190 | if(trip > pdata->temp_trip_count ||trip<0) |
191 | return -EINVAL; |
192 | mutex_lock(&pdata->lock); |
193 | pdata->tmp_trip[trip].temperature=temp; |
194 | /* convert the temperature into millicelsius */ |
195 | mutex_unlock(&pdata->lock); |
196 | return 0; |
197 | } |
198 | |
199 | /* Get critical temperature callback functions for thermal zone */ |
200 | static int amlogic_get_crit_temp(struct thermal_zone_device *thermal, |
201 | unsigned long *temp) |
202 | { |
203 | int ret; |
204 | /* Panic zone */ |
205 | ret =amlogic_get_trip_temp(thermal, thermal->trips-1, temp); |
206 | |
207 | return ret; |
208 | } |
209 | |
210 | int gpu_get_freq_level(int freq) |
211 | { |
212 | if (gpu_freq_level) |
213 | return gpu_freq_level(freq); |
214 | else |
215 | return -1; |
216 | } |
217 | |
218 | /* Bind callback functions for thermal zone */ |
/* Bind callback for the thermal zone: attach a cooling device to every trip.
 * The cooling device kind is parsed out of cdev->type ("thermal-<kind>-<id>");
 * each kind configures its per-trip upper/lower cooling states differently.
 * A THERMAL_CSTATE_INVALID entry in trip 0 is the DT convention for
 * "this cooling device is disabled". */
static int amlogic_bind(struct thermal_zone_device *thermal,
			struct thermal_cooling_device *cdev)
{
	int ret = 0, i;
	struct amlogic_thermal_platform_data *pdata= thermal->devdata;
	int id;
	char type[THERMAL_NAME_LENGTH];
	unsigned long max;

	if (!sscanf(cdev->type, "thermal-%7s-%d", type,&id))
		return -EINVAL;
	if(!strcmp(type,"cpufreq")){
		/* Bind the thermal zone to the cpufreq cooling device */
		for (i = 0; i < pdata->temp_trip_count; i++) {
			if(pdata->tmp_trip[0].cpu_upper_level==THERMAL_CSTATE_INVALID)
			{
				printk("disable cpu cooling device by dtd\n");
				ret = -EINVAL;
				goto out;
			}
			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
								pdata->tmp_trip[i].cpu_upper_level,
								pdata->tmp_trip[i].cpu_lower_level)) {
				pr_err("error binding cdev inst %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		pr_info("%s bind %s okay !\n",thermal->type,cdev->type);
		if (pdata->keep_mode) {
			/* record this cdev's max state for keep_mode (slot 0 = cpufreq) */
			cdev->ops->get_max_state(cdev, &max);
			keep_mode_bind(pdata, max, 0);
		}
	}

	if(!strcmp(type,"gpufreq")){
		struct gpufreq_cooling_device *gpufreq_dev=
			(struct gpufreq_cooling_device *)cdev->devdata;
		/* Bind the thermal zone to the gpufreq cooling device */
		for (i = 0; i < pdata->temp_trip_count; i++) {
			if(!gpufreq_dev->get_gpu_freq_level){
				ret = -EINVAL;
				pr_info("invalidate pointer %p\n",gpufreq_dev->get_gpu_freq_level);
				goto out;
			} else {
				/* cache the hook for gpu_get_freq_level() */
				gpu_freq_level = gpufreq_dev->get_gpu_freq_level;
			}
			/* NOTE(review): lower_level comes from upper_freq and vice
			 * versa — presumably because higher frequency maps to a
			 * lower cooling level; confirm against the gpufreq driver. */
			pdata->tmp_trip[i].gpu_lower_level=gpufreq_dev->get_gpu_freq_level(pdata->tmp_trip[i].gpu_upper_freq);
			pdata->tmp_trip[i].gpu_upper_level=gpufreq_dev->get_gpu_freq_level(pdata->tmp_trip[i].gpu_lower_freq);
			printk("pdata->tmp_trip[%d].gpu_lower_level=%d\n",i,pdata->tmp_trip[i].gpu_lower_level);
			printk("pdata->tmp_trip[%d].gpu_upper_level=%d\n",i,pdata->tmp_trip[i].gpu_upper_level);
			if(pdata->tmp_trip[0].gpu_lower_level==THERMAL_CSTATE_INVALID)
			{
				printk("disable gpu cooling device by dtd\n");
				ret = -EINVAL;
				goto out;
			}
			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
								pdata->tmp_trip[i].gpu_upper_level,
								pdata->tmp_trip[i].gpu_lower_level)) {
				pr_err("error binding cdev inst %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		pdata->gpu_cool_dev=cdev;
		pr_info("%s bind %s okay !\n",thermal->type,cdev->type);
		if (pdata->keep_mode) {
			cdev->ops->get_max_state(cdev, &max);
			keep_mode_bind(pdata, max, 1);
		}
	}

	if(!strcmp(type,"cpucore")){
		/* Bind the thermal zone to the cpucore cooling device */
		struct cpucore_cooling_device *cpucore_dev=
			(struct cpucore_cooling_device *)cdev->devdata;
		for (i = 0; i < pdata->temp_trip_count; i++) {
			if(pdata->tmp_trip[0].cpu_core_num==THERMAL_CSTATE_INVALID)
			{
				printk("disable cpucore cooling device by dtd\n");
				ret = -EINVAL;
				goto out;
			}
			/* DT stores the number of cores to KEEP; convert to the
			 * number of cores to unplug (-1 means "invalid/passthrough") */
			if(pdata->tmp_trip[i].cpu_core_num !=-1)
				pdata->tmp_trip[i].cpu_core_upper=cpucore_dev->max_cpu_core_num-pdata->tmp_trip[i].cpu_core_num;
			else
				pdata->tmp_trip[i].cpu_core_upper=pdata->tmp_trip[i].cpu_core_num;
			printk("tmp_trip[%d].cpu_core_upper=%d\n",i,pdata->tmp_trip[i].cpu_core_upper);
			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
								pdata->tmp_trip[i].cpu_core_upper,
								pdata->tmp_trip[i].cpu_core_upper)) {
				pr_err("error binding cdev inst %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		pr_info("%s bind %s okay !\n",thermal->type,cdev->type);
		if (pdata->keep_mode) {
			cdev->ops->get_max_state(cdev, &max);
			keep_mode_bind(pdata, max, 2);
		}
	}

	if(!strcmp(type,"gpucore")){
		/* Bind the thermal zone to the gpucore cooling device */
		struct gpucore_cooling_device *gpucore_dev=
			(struct gpucore_cooling_device *)cdev->devdata;
		for (i = 0; i < pdata->temp_trip_count; i++) {
			if(pdata->tmp_trip[0].cpu_core_num==THERMAL_CSTATE_INVALID)
			{
				printk("disable gpucore cooling device by dtd\n");
				ret = -EINVAL;
				goto out;
			}
			if(pdata->tmp_trip[i].gpu_core_num != -1)
				pdata->tmp_trip[i].gpu_core_upper=gpucore_dev->max_gpu_core_num-pdata->tmp_trip[i].gpu_core_num;
			else
				pdata->tmp_trip[i].gpu_core_upper=pdata->tmp_trip[i].gpu_core_num;

			printk("tmp_trip[%d].gpu_core_upper=%d\n",i,pdata->tmp_trip[i].gpu_core_upper);
			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
								pdata->tmp_trip[i].gpu_core_upper,
								pdata->tmp_trip[i].gpu_core_upper)) {
				pr_err("error binding cdev inst %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		pdata->gpucore_cool_dev=cdev;
		pr_info("%s bind %s okay !\n",thermal->type,cdev->type);
		if (pdata->keep_mode) {
			cdev->ops->get_max_state(cdev, &max);
			keep_mode_bind(pdata, max, 3);
		}
	}
	return ret;
out:
	return ret;
}
359 | |
360 | /* Unbind callback functions for thermal zone */ |
361 | static int amlogic_unbind(struct thermal_zone_device *thermal, |
362 | struct thermal_cooling_device *cdev) |
363 | { |
364 | int i; |
365 | if(thermal && cdev){ |
366 | struct amlogic_thermal_platform_data *pdata= thermal->devdata; |
367 | for (i = 0; i < pdata->temp_trip_count; i++) { |
368 | pr_info("\n%s unbinding %s ",thermal->type,cdev->type); |
369 | if (thermal_zone_unbind_cooling_device(thermal, i, cdev)) { |
370 | pr_err(" error %d \n", i); |
371 | return -EINVAL; |
372 | } |
373 | pr_info(" okay\n"); |
374 | return 0; |
375 | } |
376 | }else{ |
377 | return -EINVAL; |
378 | } |
379 | return -EINVAL; |
380 | } |
/* Absolute value. NOTE: evaluates its argument twice — do not pass
 * expressions with side effects. */
#define ABS(a) ((a) > 0 ? (a) : -(a))

/* Exported zeroed-allocation helper for cooling-device modules.
 * Caller owns the returned buffer and must kfree() it; may return NULL. */
void *thermal_alloc(size_t len)
{
	return kzalloc(len, GFP_KERNEL);
}
EXPORT_SYMBOL(thermal_alloc);
388 | |
/* Periodic keep_mode worker (every 100 ms while the zone is enabled):
 * feeds the current CPU0 frequency and last valid temperature into the
 * keep_mode controller, then re-arms itself. */
static void thermal_work(struct work_struct *work)
{
	struct amlogic_thermal_platform_data *pdata;
	int cpu_freq = cpufreq_quick_get(0);

	pdata = container_of((struct delayed_work *)work, struct amlogic_thermal_platform_data, thermal_work);
	/* skip the control step while the sensor reading is invalid */
	if (pdata->temp_valid)
		keep_mode_work(pdata, cpu_freq);
	if (pdata->mode == THERMAL_DEVICE_ENABLED) { // no need to do this work again if thermal disabled
		schedule_delayed_work(&pdata->thermal_work, msecs_to_jiffies(100));
	}
}
401 | |
/* Parse the "virtual thermal" configuration from the device tree:
 * sampling period, report-interval buckets, and the cpu/gpu
 * frequency->temperature tables. On any parse failure virtual thermal is
 * simply disabled (returns -1) rather than failing the probe.
 * The parsed tables are stored in the file-scope cpu_/gpu_virtual_thermal
 * and freed by aml_virtual_thermal_remove(). */
static int aml_virtaul_thermal_probe(struct platform_device *pdev, struct amlogic_thermal_platform_data *pdata)
{
	int ret, len, cells;
	struct property *prop;
	void *buf;

	if (!of_property_read_bool(pdev->dev.of_node, "use_virtual_thermal")) {
		printk("%s, virtual thermal is not enabled\n", __func__);
		pdata->virtual_thermal_en = 0;
		return 0;
	} else {
		printk("%s, virtual thermal enabled\n", __func__);
	}

	ret = of_property_read_u32(pdev->dev.of_node,
				   "freq_sample_period",
				   &pdata->freq_sample_period);
	if (ret) {
		/* missing property is not fatal: fall back to 30 */
		printk("%s, get freq_sample_period failed, us 30 as default\n", __func__);
		pdata->freq_sample_period = 30;
	} else {
		printk("%s, get freq_sample_period with value:%d\n", __func__, pdata->freq_sample_period);
	}
	ret = of_property_read_u32_array(pdev->dev.of_node,
					 "report_time",
					 report_interval, sizeof(report_interval) / sizeof(u32));
	if (ret) {
		printk("%s, get report_time failed\n", __func__);
		goto error;
	} else {
		printk("[virtual_thermal] report interval:%4d, %4d, %4d, %4d\n",
			report_interval[0], report_interval[1], report_interval[2], report_interval[3]);
	}
	/*
	 * read cpu_virtal
	 */
	prop = of_find_property(pdev->dev.of_node, "cpu_virtual", &len);
	if (!prop) {
		printk("%s, cpu virtual not found\n", __func__);
		goto error;
	}
	/* property is an array of aml_virtual_thermal records */
	cells = len / sizeof(struct aml_virtual_thermal);
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		printk("%s, no memory\n", __func__);
		return -ENOMEM;
	}
	ret = of_property_read_u32_array(pdev->dev.of_node,
					 "cpu_virtual",
					 buf, len/sizeof(u32));
	if (ret) {
		printk("%s, read cpu_virtual failed\n", __func__);
		kfree(buf);
		goto error;
	}
	/* ownership of buf transfers to the file-scope table */
	cpu_virtual_thermal.count = cells;
	cpu_virtual_thermal.thermal = buf;

	/*
	 * read gpu_virtal
	 */
	prop = of_find_property(pdev->dev.of_node, "gpu_virtual", &len);
	if (!prop) {
		printk("%s, gpu virtual not found\n", __func__);
		goto error;
	}
	cells = len / sizeof(struct aml_virtual_thermal);
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		printk("%s, no memory\n", __func__);
		return -ENOMEM;
	}
	ret = of_property_read_u32_array(pdev->dev.of_node,
					 "gpu_virtual",
					 buf, len/sizeof(u32));
	if (ret) {
		printk("%s, read gpu_virtual failed\n", __func__);
		kfree(buf);
		goto error;
	}
	gpu_virtual_thermal.count = cells;
	gpu_virtual_thermal.thermal = buf;

#if DBG_VIRTUAL
	printk("cpu_virtal cells:%d, table:\n", cpu_virtual_thermal.count);
	for (len = 0; len < cpu_virtual_thermal.count; len++) {
		printk("%2d, %8d, %4d, %4d, %4d, %4d\n",
			len,
			cpu_virtual_thermal.thermal[len].freq,
			cpu_virtual_thermal.thermal[len].temp_time[0],
			cpu_virtual_thermal.thermal[len].temp_time[1],
			cpu_virtual_thermal.thermal[len].temp_time[2],
			cpu_virtual_thermal.thermal[len].temp_time[3]);
	}
	printk("gpu_virtal cells:%d, table:\n", gpu_virtual_thermal.count);
	for (len = 0; len < gpu_virtual_thermal.count; len++) {
		printk("%2d, %8d, %4d, %4d, %4d, %4d\n",
			len,
			gpu_virtual_thermal.thermal[len].freq,
			gpu_virtual_thermal.thermal[len].temp_time[0],
			gpu_virtual_thermal.thermal[len].temp_time[1],
			gpu_virtual_thermal.thermal[len].temp_time[2],
			gpu_virtual_thermal.thermal[len].temp_time[3]);
	}
#endif

	pdata->virtual_thermal_en = 1;
	return 0;

error:
	pdata->virtual_thermal_en = 0;
	return -1;
}
515 | |
516 | static void aml_virtual_thermal_remove(struct amlogic_thermal_platform_data *pdata) |
517 | { |
518 | kfree(cpu_virtual_thermal.thermal); |
519 | kfree(gpu_virtual_thermal.thermal); |
520 | pdata->virtual_thermal_en = 0; |
521 | } |
522 | |
523 | static int check_freq_level(struct aml_virtual_thermal_device *dev, unsigned int freq) |
524 | { |
525 | int i = 0; |
526 | |
527 | if (freq >= dev->thermal[dev->count-1].freq) { |
528 | return dev->count - 1; |
529 | } |
530 | for (i = 0; i < dev->count - 1; i++) { |
531 | if (freq > dev->thermal[i].freq && freq <= dev->thermal[i + 1].freq) { |
532 | return i + 1; |
533 | } |
534 | } |
535 | return 0; |
536 | } |
537 | |
538 | static int check_freq_level_cnt(unsigned int cnt) |
539 | { |
540 | int i; |
541 | |
542 | if (cnt >= report_interval[3]) { |
543 | return 3; |
544 | } |
545 | for (i = 0; i < 3; i++) { |
546 | if (cnt >= report_interval[i] && cnt < report_interval[i + 1]) { |
547 | return i; |
548 | } |
549 | } |
550 | return 0; |
551 | } |
552 | |
553 | static unsigned long aml_cal_virtual_temp(struct amlogic_thermal_platform_data *pdata) |
554 | { |
555 | static unsigned int cpu_freq_level_cnt = 0, gpu_freq_level_cnt = 0; |
556 | static unsigned int last_cpu_freq_level = 0, last_gpu_freq_level = 0; |
557 | static unsigned int cpu_temp = 40, gpu_temp = 40; // default set to 40 when at homescreen |
558 | unsigned int curr_cpu_avg_freq, curr_gpu_avg_freq; |
559 | int curr_cpu_freq_level, curr_gpu_freq_level; |
560 | int cnt_level, level_diff; |
561 | int temp_update = 0, final_temp; |
562 | |
563 | /* |
564 | * CPU temp |
565 | */ |
566 | if (atomic_read(&freq_update_flag)) { |
567 | curr_cpu_avg_freq = pdata->monitor.avg_cpu_freq; |
568 | curr_cpu_freq_level = check_freq_level(&cpu_virtual_thermal, curr_cpu_avg_freq); |
569 | level_diff = curr_cpu_freq_level - last_cpu_freq_level; |
570 | if (ABS(level_diff) <= 1) { // freq change is not large |
571 | cpu_freq_level_cnt++; |
572 | cnt_level = check_freq_level_cnt(cpu_freq_level_cnt); |
573 | cpu_temp = cpu_virtual_thermal.thermal[curr_cpu_freq_level].temp_time[cnt_level]; |
574 | #if DBG_VIRTUAL |
575 | printk("%s, cur_freq:%7d, freq_level:%d, cnt_level:%d, cnt:%d, cpu_temp:%d\n", |
576 | __func__, curr_cpu_avg_freq, curr_cpu_freq_level, cnt_level, cpu_freq_level_cnt, cpu_temp); |
577 | #endif |
578 | } else { // level not match |
579 | cpu_temp = cpu_virtual_thermal.thermal[curr_cpu_freq_level].temp_time[0]; |
580 | #if DBG_VIRTUAL |
581 | printk("%s, cur_freq:%7d, cur_level:%d, last_level:%d, last_cnt_level:%d, cpu_temp:%d\n", |
582 | __func__, curr_cpu_avg_freq, curr_cpu_freq_level, last_cpu_freq_level, cpu_freq_level_cnt, cpu_temp); |
583 | #endif |
584 | cpu_freq_level_cnt = 0; |
585 | } |
586 | last_cpu_freq_level = curr_cpu_freq_level; |
587 | |
588 | curr_gpu_avg_freq = pdata->monitor.avg_gpu_freq; |
589 | curr_gpu_freq_level = check_freq_level(&gpu_virtual_thermal, curr_gpu_avg_freq); |
590 | level_diff = curr_gpu_freq_level - last_gpu_freq_level; |
591 | if (ABS(level_diff) <= 1) { // freq change is not large |
592 | gpu_freq_level_cnt++; |
593 | cnt_level = check_freq_level_cnt(gpu_freq_level_cnt); |
594 | gpu_temp = gpu_virtual_thermal.thermal[curr_gpu_freq_level].temp_time[cnt_level]; |
595 | #if DBG_VIRTUAL |
596 | printk("%s, cur_freq:%7d, freq_level:%d, cnt_level:%d, cnt:%d, gpu_temp:%d\n", |
597 | __func__, curr_gpu_avg_freq, curr_gpu_freq_level, cnt_level, gpu_freq_level_cnt, gpu_temp); |
598 | #endif |
599 | } else { // level not match |
600 | gpu_temp = gpu_virtual_thermal.thermal[curr_gpu_freq_level].temp_time[0]; |
601 | gpu_freq_level_cnt = 0; |
602 | #if DBG_VIRTUAL |
603 | printk("%s, cur_freq:%7d, cur_level:%d, last_level:%d, gpu_temp:%d\n", |
604 | __func__, curr_gpu_avg_freq, curr_gpu_freq_level, last_gpu_freq_level, gpu_temp); |
605 | #endif |
606 | } |
607 | last_gpu_freq_level = curr_gpu_freq_level; |
608 | |
609 | atomic_set(&freq_update_flag, 0); |
610 | temp_update = 1; |
611 | } |
612 | |
613 | if (cpu_temp <= 0 && gpu_temp <= 0) { |
614 | printk("%s, Bug here, cpu & gpu temp can't be 0, cpu_temp:%d, gpu_temp:%d\n", __func__, cpu_temp, gpu_temp); |
615 | final_temp = 40; |
616 | } |
617 | final_temp = (cpu_temp >= gpu_temp ? cpu_temp : gpu_temp); |
618 | if (temp_update) { |
619 | #if DBG_VIRTUAL |
620 | printk("final temp:%d\n", final_temp); |
621 | #endif |
622 | } |
623 | return final_temp; |
624 | } |
625 | |
626 | /* Get temperature callback functions for thermal zone */ |
627 | static int amlogic_get_temp(struct thermal_zone_device *thermal, |
628 | unsigned long *temp) |
629 | { |
630 | struct amlogic_thermal_platform_data *pdata = thermal->devdata; |
631 | int tmp; |
632 | |
633 | if (pdata->trim_flag) { |
634 | tmp = get_cpu_temp(); |
635 | if (tmp < MIN_TEMP) { |
636 | pdata->temp_valid = 0; |
637 | return -EINVAL; |
638 | } |
639 | pdata->temp_valid = 1; |
640 | *temp = (unsigned long)get_cpu_temp(); |
641 | pdata->current_temp = *temp; |
642 | } else if (pdata->virtual_thermal_en) { |
643 | *temp = aml_cal_virtual_temp(pdata); |
644 | } else { |
645 | *temp = 45; // fix cpu temperature to 45 if not trimed && disable virtual thermal |
646 | } |
647 | return 0; |
648 | } |
649 | |
650 | /* Get the temperature trend */ |
/* Get the temperature trend.
 * NOTE(review): always returns non-zero, which signals the thermal core to
 * fall back to its own trend estimation rather than using *trend —
 * presumably intentional, but confirm against the thermal core in use. */
static int amlogic_get_trend(struct thermal_zone_device *thermal,
		int trip, enum thermal_trend *trend)
{
	return 1;
}
656 | /* Operation callback functions for thermal zone */ |
/* Operation callbacks registered with the kernel thermal core for this zone. */
static struct thermal_zone_device_ops amlogic_dev_ops = {
	.bind = amlogic_bind,
	.unbind = amlogic_unbind,
	.get_temp = amlogic_get_temp,
	.get_trend = amlogic_get_trend,
	.get_mode = amlogic_get_mode,
	.set_mode = amlogic_set_mode,
	.get_trip_type = amlogic_get_trip_type,
	.get_trip_temp = amlogic_get_trip_temp,
	.set_trip_temp = amlogic_set_trip_temp,
	.get_crit_temp = amlogic_get_crit_temp,
};
669 | |
670 | /* |
671 | * sysfs for keep_mode |
672 | */ |
#ifdef CONFIG_CPU_FREQ_GOV_HOTPLUG // for DEBUG
/* Read-only sysfs view of the hotplug governor's max_cpu_num (debug aid). */
extern unsigned int max_cpu_num;
static ssize_t max_cpu_num_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", max_cpu_num);
}
#endif
680 | |
/* sysfs read: current state of the thermal_debug_enable flag. */
static ssize_t thermal_debug_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", thermal_debug_enable);
}
685 | |
686 | static ssize_t thermal_debug_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) |
687 | { |
688 | int32_t data = simple_strtol(buf, NULL, 10); |
689 | |
690 | if (data) { |
691 | thermal_debug_enable = 1; |
692 | } else { |
693 | thermal_debug_enable = 0; |
694 | } |
695 | return count; |
696 | } |
697 | |
/* sysfs read: report keep_mode as "enabled"/"disabled" for this zone. */
static ssize_t keep_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device);
	struct amlogic_thermal_platform_data *pdata = tz->devdata;

	return sprintf(buf, "%s\n", pdata->keep_mode ? "enabled": "disabled");
}
705 | |
706 | static ssize_t keep_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) |
707 | { |
708 | struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device); |
709 | struct amlogic_thermal_platform_data *pdata = tz->devdata; |
710 | if (!strncmp(buf, "enabled", sizeof("enabled") - 1)) { |
711 | pdata->keep_mode = 1; |
712 | } else if (!strncmp(buf, "disabled", sizeof("disabled") - 1)) { |
713 | pdata->keep_mode = 0; |
714 | } |
715 | return count; |
716 | } |
717 | |
/* sysfs read: current keep_mode temperature threshold (Celsius). */
static ssize_t keep_mode_threshold_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device);
	struct amlogic_thermal_platform_data *pdata = tz->devdata;

	return sprintf(buf, "%d\n", pdata->keep_mode_threshold);
}
725 | |
726 | static ssize_t keep_mode_threshold_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) |
727 | { |
728 | struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device); |
729 | struct amlogic_thermal_platform_data *pdata = tz->devdata; |
730 | int32_t data = simple_strtol(buf, NULL, 10); |
731 | |
732 | if (data > 200) { |
733 | printk("input is %d, seems too large, invalid\n", data); |
734 | } |
735 | keep_mode_update_threshold(pdata, data); |
736 | printk("set keep_mode_threshold to %d\n", data); |
737 | return count; |
738 | } |
739 | |
/* sysfs read: current state of the high_temp_protect flag. */
static ssize_t high_temp_protect_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", high_temp_protect);
}
744 | |
/* sysfs write: toggle high-temperature protection. When enabled, trip 1 is
 * pulled down to keep_mode_threshold + 25C so the zone reacts earlier;
 * when disabled it is parked at 260C (effectively never fires).
 * NOTE(review): 25 and 260 are magic values inherited from the original
 * tuning — confirm against board thermal specs before changing. */
static ssize_t high_temp_protect_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device);
	struct amlogic_thermal_platform_data *pdata = tz->devdata;
	int32_t data = simple_strtol(buf, NULL, 10);

	high_temp_protect = data ? 1 : 0;
	if (high_temp_protect) {
		pdata->tmp_trip[1].temperature = pdata->keep_mode_threshold + 25;
	} else {
		pdata->tmp_trip[1].temperature = 260;
	}
	printk("high temperature protect %s\n", high_temp_protect ? "enabled" : "disabled");
	return count;
}
760 | |
/* sysfs attributes created on the thermal zone device when keep_mode is on
 * (see amlogic_register_thermal). */
static struct device_attribute amlogic_thermal_attr[] = {
#ifdef CONFIG_CPU_FREQ_GOV_HOTPLUG
	__ATTR(max_cpu_num, 0444, max_cpu_num_show, NULL),
#endif
	__ATTR(thermal_debug, 0644, thermal_debug_show, thermal_debug_store),
	__ATTR(keep_mode, 0644, keep_mode_show, keep_mode_store),
	__ATTR(keep_mode_threshold, 0644, keep_mode_threshold_show, keep_mode_threshold_store),
	__ATTR(high_temp_protect, 0644, high_temp_protect_show, high_temp_protect_store)
};
770 | |
771 | /* Register with the in-kernel thermal management */ |
772 | static int amlogic_register_thermal(struct amlogic_thermal_platform_data *pdata, struct platform_device *pdev) |
773 | { |
774 | int ret=0, j; |
775 | struct cpumask mask_val; |
776 | |
777 | memset(&mask_val,0,sizeof(struct cpumask)); |
778 | cpumask_set_cpu(0, &mask_val); |
779 | pdata->cpu_cool_dev= cpufreq_cooling_register(&mask_val); |
780 | if (IS_ERR(pdata->cpu_cool_dev)) { |
781 | pr_err("Failed to register cpufreq cooling device\n"); |
782 | ret = -EINVAL; |
783 | goto err_unregister; |
784 | } |
785 | pdata->cpucore_cool_dev = cpucore_cooling_register(); |
786 | if (IS_ERR(pdata->cpucore_cool_dev)) { |
787 | pr_err("Failed to register cpufreq cooling device\n"); |
788 | ret = -EINVAL; |
789 | goto err_unregister; |
790 | } |
791 | |
792 | pdata->therm_dev = thermal_zone_device_register(pdata->name, |
793 | pdata->temp_trip_count, |
794 | ((1 << pdata->temp_trip_count) - 1), |
795 | pdata, |
796 | &amlogic_dev_ops, |
797 | NULL, |
798 | 0, |
799 | pdata->idle_interval); |
800 | |
801 | if (IS_ERR(pdata->therm_dev)) { |
802 | pr_err("Failed to register thermal zone device, err:%p\n", pdata->therm_dev); |
803 | ret = -EINVAL; |
804 | goto err_unregister; |
805 | } |
806 | |
807 | if (pdata->keep_mode) { // create sysfs for keep_mode |
808 | for (j = 0; j < ARRAY_SIZE(amlogic_thermal_attr); j++) { |
809 | device_create_file(&pdata->therm_dev->device, &amlogic_thermal_attr[j]); |
810 | } |
811 | } |
812 | pr_info("amlogic: Kernel Thermal management registered\n"); |
813 | |
814 | return 0; |
815 | |
816 | err_unregister: |
817 | amlogic_unregister_thermal(pdata); |
818 | return ret; |
819 | } |
820 | |
821 | /* Un-Register with the in-kernel thermal management */ |
/* Un-Register with the in-kernel thermal management.
 * NOTE(review): the cpucore cooling device registered in
 * amlogic_register_thermal() is not unregistered here — verify whether
 * cpucore_cooling_register() has a matching unregister that should be
 * called. */
static void amlogic_unregister_thermal(struct amlogic_thermal_platform_data *pdata)
{
	if (pdata->therm_dev)
		thermal_zone_device_unregister(pdata->therm_dev);
	if (pdata->cpu_cool_dev)
		cpufreq_cooling_unregister(pdata->cpu_cool_dev);

	pr_info("amlogic: Kernel Thermal management unregistered\n");
}
831 | |
/* Determine whether CPU0's cpufreq table is sorted in descending order.
 * Returns 1 (descending), 0 (ascending), -1 if the order could not be
 * determined (fewer than two distinct valid entries), or -EINVAL if there
 * is no frequency table. */
int get_desend(void)
{
	int i;
	unsigned int freq = CPUFREQ_ENTRY_INVALID;
	int descend = -1;
	struct cpufreq_frequency_table *table =
		cpufreq_frequency_get_table(0);

	if (!table)
		return -EINVAL;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		/* ignore invalid entries */
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		/* ignore duplicate entry */
		if (freq == table[i].frequency)
			continue;

		/* get the frequency order from the first two distinct entries */
		if (freq != CPUFREQ_ENTRY_INVALID && descend == -1){
			descend = !!(freq > table[i].frequency);
			break;
		}

		freq = table[i].frequency;
	}
	return descend;
}
/* Snap an arbitrary frequency onto the nearest-not-above entry of CPU0's
 * cpufreq table, honouring the table's sort order (see get_desend()).
 * Returns the snapped frequency or -EINVAL if no table / no bracket found.
 * NOTE(review): table[i+1] is read inside the loop without checking for
 * CPUFREQ_TABLE_END, so the last comparison touches the END marker's
 * .frequency field — verify this is benign for the tables in use. */
int fix_to_freq(int freqold,int descend)
{
	int i;
	unsigned int freq = CPUFREQ_ENTRY_INVALID;
	struct cpufreq_frequency_table *table =
		cpufreq_frequency_get_table(0);

	if (!table)
		return -EINVAL;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		/* ignore invalid entry */
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		/* ignore duplicate entry */
		if (freq == table[i].frequency)
			continue;
		freq = table[i].frequency;
		if(descend){
			if(freqold>=table[i+1].frequency && freqold<=table[i].frequency)
				return table[i+1].frequency;
		}
		else{
			if(freqold>=table[i].frequency && freqold<=table[i+1].frequency)
				return table[i].frequency;
		}
	}
	return -EINVAL;
}
892 | |
893 | void thermal_atomic_set(atomic_t *a, int value) |
894 | { |
895 | atomic_set(a, 1); |
896 | } |
897 | EXPORT_SYMBOL(thermal_atomic_set); |
898 | |
899 | static struct amlogic_thermal_platform_data * amlogic_thermal_init_from_dts(struct platform_device *pdev, int trim_flag) |
900 | { |
901 | int i = 0, ret = -1, val = 0, cells, descend, error = 0; |
902 | struct property *prop; |
903 | struct temp_level *tmp_level = NULL; |
904 | struct amlogic_thermal_platform_data *pdata = NULL; |
905 | |
906 | if(!of_property_read_u32(pdev->dev.of_node, "trip_point", &val)){ |
907 | //INIT FROM DTS |
908 | pdata=kzalloc(sizeof(*pdata),GFP_KERNEL); |
909 | if(!pdata){ |
910 | goto err; |
911 | } |
912 | memset((void* )pdata,0,sizeof(*pdata)); |
913 | ret=of_property_read_u32(pdev->dev.of_node, "#thermal-cells", &val); |
914 | if(ret){ |
915 | dev_err(&pdev->dev, "dt probe #thermal-cells failed: %d\n", ret); |
916 | goto err; |
917 | } |
918 | printk("#thermal-cells=%d\n",val); |
919 | cells=val; |
920 | |
921 | /* |
922 | * process for KEEP_MODE and virtual thermal |
923 | * Logic: If virtual thermal is enabled, then ignore keep_mode |
924 | * |
925 | */ |
926 | pdata->trim_flag = trim_flag; |
927 | if (!pdata->trim_flag) { // chip is not trimmed, use virtual thermal |
928 | aml_virtaul_thermal_probe(pdev, pdata); |
929 | } else if (of_property_read_bool(pdev->dev.of_node, "keep_mode")) { |
930 | if (of_property_read_u32(pdev->dev.of_node, "keep_mode_threshold", &pdata->keep_mode_threshold)) { |
931 | printk("ERROR:keep_mode is set but not found 'keep_mode_threshold'\n"); |
932 | error = 1; |
933 | } |
934 | if (of_property_read_u32_array(pdev->dev.of_node, |
935 | "keep_mode_max_range", |
936 | pdata->keep_mode_max_range, |
937 | sizeof(pdata->keep_mode_max_range)/sizeof(u32))) { |
938 | printk("ERROR:keep_mode is set but not found 'keep_mode_max_range'\n"); |
939 | error = 1; |
940 | } |
941 | if (!error && pdata->trim_flag) { // keep mode should not used for virtual thermal right now |
942 | printk("keep_mode enabled\n"); |
943 | printk("keep_mode_max_range: [%7d, %3d, %d, %d]\n", |
944 | pdata->keep_mode_max_range[0], pdata->keep_mode_max_range[1], |
945 | pdata->keep_mode_max_range[2], pdata->keep_mode_max_range[3]); |
946 | pdata->keep_mode = 1; |
947 | pdata->freq_sample_period = 5; |
948 | } |
949 | } else { |
950 | printk("keep_mode is disabled\n"); |
951 | } |
952 | if(pdata->keep_mode || !pdata->trim_flag){ |
953 | INIT_DELAYED_WORK(&pdata->thermal_work, thermal_work); |
954 | schedule_delayed_work(&pdata->thermal_work, msecs_to_jiffies(100)); |
955 | atomic_set(&freq_update_flag, 0); |
956 | } |
957 | |
958 | prop = of_find_property(pdev->dev.of_node, "trip_point", &val); |
959 | if (!prop){ |
960 | dev_err(&pdev->dev, "read %s length error\n","trip_point"); |
961 | goto err; |
962 | } |
963 | if (pdata->keep_mode) { |
964 | pdata->temp_trip_count = 2; |
965 | } else { |
966 | pdata->temp_trip_count=val/cells/sizeof(u32); |
967 | } |
968 | printk("pdata->temp_trip_count=%d\n",pdata->temp_trip_count); |
969 | tmp_level=kzalloc(sizeof(*tmp_level)*pdata->temp_trip_count,GFP_KERNEL); |
970 | pdata->tmp_trip=kzalloc(sizeof(struct temp_trip)*pdata->temp_trip_count,GFP_KERNEL); |
971 | if(!tmp_level){ |
972 | goto err; |
973 | } |
974 | |
975 | if (pdata->keep_mode) { // keep mode only need one point |
976 | keep_mode_temp_level_init(pdata, tmp_level); |
977 | } else { |
978 | ret=of_property_read_u32_array(pdev->dev.of_node,"trip_point",(u32 *)tmp_level,val/sizeof(u32)); |
979 | if (ret){ |
980 | dev_err(&pdev->dev, "read %s data error\n","trip_point"); |
981 | goto err; |
982 | } |
983 | } |
984 | descend=get_desend(); |
985 | for (i = 0; i < pdata->temp_trip_count; i++) { |
986 | printk("temperature=%d on trip point=%d\n",tmp_level[i].temperature,i); |
987 | pdata->tmp_trip[i].temperature=tmp_level[i].temperature; |
988 | printk("fixing high_freq=%d to ",tmp_level[i].cpu_high_freq); |
989 | tmp_level[i].cpu_high_freq=fix_to_freq(tmp_level[i].cpu_high_freq,descend); |
990 | pdata->tmp_trip[i].cpu_lower_level=cpufreq_cooling_get_level(0,tmp_level[i].cpu_high_freq); |
991 | printk("%d at trip point %d,level=%d\n",tmp_level[i].cpu_high_freq,i,pdata->tmp_trip[i].cpu_lower_level); |
992 | |
993 | printk("fixing low_freq=%d to ",tmp_level[i].cpu_low_freq); |
994 | tmp_level[i].cpu_low_freq=fix_to_freq(tmp_level[i].cpu_low_freq,descend); |
995 | pdata->tmp_trip[i].cpu_upper_level=cpufreq_cooling_get_level(0,tmp_level[i].cpu_low_freq); |
996 | printk("%d at trip point %d,level=%d\n",tmp_level[i].cpu_low_freq,i,pdata->tmp_trip[i].cpu_upper_level); |
997 | pdata->tmp_trip[i].gpu_lower_freq=tmp_level[i].gpu_low_freq; |
998 | pdata->tmp_trip[i].gpu_upper_freq=tmp_level[i].gpu_high_freq; |
999 | printk("gpu[%d].gpu_high_freq=%d,tmp_level[%d].gpu_high_freq=%d\n",i,tmp_level[i].gpu_high_freq,i,tmp_level[i].gpu_low_freq); |
1000 | |
1001 | pdata->tmp_trip[i].cpu_core_num=tmp_level[i].cpu_core_num; |
1002 | printk("cpu[%d] core num==%d\n",i,pdata->tmp_trip[i].cpu_core_num); |
1003 | pdata->tmp_trip[i].gpu_core_num=tmp_level[i].gpu_core_num; |
1004 | printk("gpu[%d] core num==%d\n",i,pdata->tmp_trip[i].gpu_core_num); |
1005 | } |
1006 | |
1007 | ret= of_property_read_u32(pdev->dev.of_node, "idle_interval", &val); |
1008 | if (ret){ |
1009 | dev_err(&pdev->dev, "read %s error\n","idle_interval"); |
1010 | goto err; |
1011 | } |
1012 | pdata->idle_interval=val; |
1013 | printk("idle interval=%d\n",pdata->idle_interval); |
1014 | ret=of_property_read_string(pdev->dev.of_node,"dev_name",&pdata->name); |
1015 | if (ret){ |
1016 | dev_err(&pdev->dev, "read %s error\n","dev_name"); |
1017 | goto err; |
1018 | } |
1019 | printk("pdata->name:%s, pdata:%p\n",pdata->name, pdata); |
1020 | pdata->mode=THERMAL_DEVICE_ENABLED; |
1021 | if(tmp_level) |
1022 | kfree(tmp_level); |
1023 | printk("%s, %d\n", __func__, __LINE__); |
1024 | return pdata; |
1025 | } |
1026 | err: |
1027 | if(tmp_level) |
1028 | kfree(tmp_level); |
1029 | if(pdata) |
1030 | kfree(pdata); |
1031 | pdata= NULL; |
1032 | return pdata; |
1033 | } |
1034 | |
/*
 * Build platform data for the probe path.  Device-tree is currently the
 * only supported configuration source, so this simply delegates.
 */
static struct amlogic_thermal_platform_data * amlogic_thermal_initialize(struct platform_device *pdev, int trim_flag)
{
	struct amlogic_thermal_platform_data *result;

	result = amlogic_thermal_init_from_dts(pdev, trim_flag);
	printk("%s, %d, pdata:%p\n", __func__, __LINE__, result);
	return result;
}
1042 | |
/*
 * Device-tree compatible strings this driver binds to.
 * NOTE(review): the space inside "amlogic, amlogic-thermal" is unusual but
 * must match the DTS byte-for-byte -- do not "fix" it without checking the
 * board dts files.
 */
static const struct of_device_id amlogic_thermal_match[] = {
	{
		.compatible = "amlogic, amlogic-thermal",
	},
	{},
};
1049 | |
#ifdef CONFIG_HIBERNATION
/* Hibernation hook: no driver state needs saving before the disk image. */
static int amlogic_thermal_freeze(struct device *dev)
{
	return 0;
}

/* Hibernation hook: nothing to undo if image creation is aborted. */
static int amlogic_thermal_thaw(struct device *dev)
{
	return 0;
}

/* After resume from disk, re-run thermal_firmware_init() (the same call
 * probe uses to detect chip trim) -- presumably to restore sensor state
 * lost across hibernation; confirm against its definition. */
static int amlogic_thermal_restore(struct device *dev)
{
	thermal_firmware_init();

	return 0;
}

/* NOTE(review): "theraml" is a typo for "thermal", but the name is also
 * referenced by amlogic_thermal_driver below -- rename both together. */
static struct dev_pm_ops amlogic_theraml_pm = {
	.freeze = amlogic_thermal_freeze,
	.thaw = amlogic_thermal_thaw,
	.restore = amlogic_thermal_restore,
};
#endif
1074 | |
1075 | static int amlogic_thermal_probe(struct platform_device *pdev) |
1076 | { |
1077 | int ret, trim_flag; |
1078 | struct amlogic_thermal_platform_data *pdata=NULL; |
1079 | |
1080 | ret=thermal_firmware_init(); |
1081 | if(ret<0){ |
1082 | printk("%s, this chip is not trimmed, can't use thermal\n", __func__); |
1083 | trim_flag = 0; |
1084 | return -ENODEV; |
1085 | }else{ |
1086 | printk("%s, this chip is trimmed, use thermal\n", __func__); |
1087 | trim_flag = 1; |
1088 | } |
1089 | |
1090 | dev_info(&pdev->dev, "amlogic thermal probe start\n"); |
1091 | pdata = amlogic_thermal_initialize(pdev, trim_flag); |
1092 | if (!pdata) { |
1093 | dev_err(&pdev->dev, "Failed to initialize thermal\n"); |
1094 | goto err; |
1095 | } |
1096 | mutex_init(&pdata->lock); |
1097 | pdev->dev.platform_data=pdata; |
1098 | platform_set_drvdata(pdev, pdata); |
1099 | ret = amlogic_register_thermal(pdata, pdev); |
1100 | if (ret) { |
1101 | dev_err(&pdev->dev, "Failed to register thermal interface\n"); |
1102 | goto err; |
1103 | } |
1104 | dev_info(&pdev->dev, "amlogic thermal probe done\n"); |
1105 | return 0; |
1106 | err: |
1107 | platform_set_drvdata(pdev, NULL); |
1108 | return ret; |
1109 | } |
1110 | |
/*
 * Remove: tear down in reverse probe order.  Always returns 0.
 */
static int amlogic_thermal_remove(struct platform_device *pdev)
{
	struct amlogic_thermal_platform_data *pdata = platform_get_drvdata(pdev);

	/* presumably releases resources set up by aml_virtaul_thermal_probe();
	 * body not visible here -- confirm */
	aml_virtual_thermal_remove(pdata);

	/* unregisters the thermal zone and cpufreq cooling device */
	amlogic_unregister_thermal(pdata);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
1123 | |
/* Platform-driver glue; bound via the DT table amlogic_thermal_match. */
struct platform_driver amlogic_thermal_driver = {
	.driver = {
		.name = "amlogic-thermal",
		.owner = THIS_MODULE,
#ifdef CONFIG_HIBERNATION
		.pm = &amlogic_theraml_pm,
#endif
		.of_match_table = of_match_ptr(amlogic_thermal_match),
	},
	.probe = amlogic_thermal_probe,
	.remove = amlogic_thermal_remove,
};
1136 | |
1137 | void *aml_get_cdevdata(struct thermal_cooling_device *cdev) |
1138 | { |
1139 | return cdev->devdata; |
1140 | } |
1141 | EXPORT_SYMBOL(aml_get_cdevdata); |
1142 | |
1143 | void aml_set_cdev_update(struct thermal_cooling_device *cdev, bool update) |
1144 | { |
1145 | cdev->updated = update; |
1146 | } |
1147 | EXPORT_SYMBOL(aml_set_cdev_update); |
1148 | |
1149 | void aml_cdev_lockop(struct thermal_cooling_device *cdev, bool lock) |
1150 | { |
1151 | if (lock) { |
1152 | thermal_lock(&cdev->lock); |
1153 | } else { |
1154 | thermal_unlock(&cdev->lock); |
1155 | } |
1156 | } |
1157 | EXPORT_SYMBOL(aml_cdev_lockop); |
1158 | |
1159 | void aml_cdev_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *ret) |
1160 | { |
1161 | cdev->ops->get_cur_state(cdev, ret); |
1162 | } |
1163 | EXPORT_SYMBOL(aml_cdev_get_cur_state); |
1164 | |
1165 | static int __init amlogic_thermal_driver_init(void) |
1166 | { |
1167 | return platform_driver_register(&(amlogic_thermal_driver)); |
1168 | } |
1169 | late_initcall(amlogic_thermal_driver_init); |
1170 | static void __exit amlogic_thermal_driver_exit(void) |
1171 | { |
1172 | platform_driver_unregister(&(amlogic_thermal_driver) ); |
1173 | } |
1174 | module_exit(amlogic_thermal_driver_exit); |
1175 | |
/* Module metadata consumed by modinfo / modprobe. */
MODULE_DESCRIPTION("amlogic thermal Driver");
MODULE_AUTHOR("Amlogic SH platform team");
MODULE_ALIAS("platform:amlogic-thermal");
MODULE_LICENSE("GPL");
1180 | |
1181 |