blob: f9cdafb0f4fe5ad0a331f978f3148542a371c859
1 | /* |
2 | * amlogic_thermal.c - Samsung amlogic thermal (Thermal Management Unit) |
3 | * |
4 | * Copyright (C) 2011 Samsung Electronics |
5 | * Donggeun Kim <dg77.kim@samsung.com> |
6 | * Amit Daniel Kachhap <amit.kachhap@linaro.org> |
7 | * |
8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by |
10 | * the Free Software Foundation; either version 2 of the License, or |
11 | * (at your option) any later version. |
12 | * |
13 | * This program is distributed in the hope that it will be useful, |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. |
17 | * |
18 | * You should have received a copy of the GNU General Public License |
19 | * along with this program; if not, write to the Free Software |
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
21 | * |
22 | */ |
23 | |
24 | #include <linux/module.h> |
25 | #include <linux/err.h> |
26 | #include <linux/kernel.h> |
27 | #include <linux/slab.h> |
28 | #include <linux/platform_device.h> |
29 | #include <linux/interrupt.h> |
30 | #include <linux/clk.h> |
31 | #include <linux/workqueue.h> |
32 | #include <linux/sysfs.h> |
33 | #include <linux/kobject.h> |
34 | #include <linux/io.h> |
35 | #include <linux/mutex.h> |
36 | #include <linux/thermal.h> |
37 | #include <linux/cpufreq.h> |
38 | #include <linux/cpu_cooling.h> |
39 | #include <linux/of.h> |
40 | #include <linux/amlogic/saradc.h> |
41 | #include <linux/random.h> |
42 | #include <linux/gpu_cooling.h> |
43 | #include <linux/cpucore_cooling.h> |
44 | #include <linux/gpucore_cooling.h> |
45 | #include <linux/thermal_core.h> |
46 | #include <linux/version.h> |
47 | #if LINUX_VERSION_CODE > KERNEL_VERSION(3, 10, 33) |
48 | #include <linux/amlogic/aml_thermal_hw.h> |
49 | #else |
50 | #include <mach/thermal.h> |
51 | #endif |
52 | #include <linux/version.h> |
53 | #include "amlogic_thermal.h" |
54 | |
#define DBG_VIRTUAL 0
#define MIN_TEMP (-273)	/* absolute zero (Celsius); readings below this are invalid */
/* Runtime switch for THERMAL_DBG() logging; toggled via the thermal_debug sysfs node. */
int thermal_debug_enable = 0;
/* When set, trip 1 is lowered to keep_mode_threshold + 25 (see high_temp_protect_store). */
int high_temp_protect = 0;
/* Set (elsewhere, via thermal_atomic_set) when fresh average frequencies are available;
 * consumed and cleared by aml_cal_virtual_temp(). */
atomic_t freq_update_flag;
EXPORT_SYMBOL(thermal_debug_enable);
EXPORT_SYMBOL(high_temp_protect);
EXPORT_SYMBOL(freq_update_flag);
63 | |
/*
 * Logging helpers.  Wrapped in do { } while (0) so each macro expands to a
 * single statement: the original bare-if / bare-brace forms broke inside
 * unbraced if/else constructs (dangling-else, stray ';' after a block).
 */
#define THERMAL_DBG(format, args...) \
	do { \
		if (thermal_debug_enable) { \
			printk("[THERMAL]"format, ##args); \
		} \
	} while (0)

/* Device used for dev_err()/dev_info() output; assigned at probe time. */
static struct device *dbg_dev;

#define THERMAL_ERR(format, args...) \
	do { \
		if (dbg_dev) \
			dev_err(dbg_dev, format, ##args); \
	} while (0)

#define THERMAL_INFO(format, args...) \
	do { \
		if (dbg_dev) \
			dev_info(dbg_dev, format, ##args); \
	} while (0)
80 | |
81 | static struct aml_virtual_thermal_device cpu_virtual_thermal = {}; |
82 | static struct aml_virtual_thermal_device gpu_virtual_thermal = {}; |
83 | static unsigned int report_interval[4] = {}; |
84 | static int (*gpu_freq_level)(int ) = NULL; |
85 | |
86 | /* CPU Zone information */ |
87 | #define PANIC_ZONE 4 |
88 | #define WARN_ZONE 3 |
89 | #define MONITOR_ZONE 2 |
90 | #define SAFE_ZONE 1 |
91 | |
92 | #define GET_ZONE(trip) (trip + 2) |
93 | #define GET_TRIP(zone) (zone - 2) |
94 | |
95 | static void amlogic_unregister_thermal(struct amlogic_thermal_platform_data *pdata); |
96 | static int amlogic_register_thermal(struct amlogic_thermal_platform_data *pdata, struct platform_device *pdev); |
97 | |
/* Exported helper: take a thermal mutex on behalf of cooling-device modules. */
void thermal_lock(struct mutex *lock)
{
	mutex_lock(lock);
}
EXPORT_SYMBOL(thermal_lock);
103 | |
/* Exported helper: release a mutex previously taken with thermal_lock(). */
void thermal_unlock(struct mutex *lock)
{
	mutex_unlock(lock);
}
EXPORT_SYMBOL(thermal_unlock);
109 | |
110 | /* Get mode callback functions for thermal zone */ |
111 | static int amlogic_get_mode(struct thermal_zone_device *thermal, |
112 | enum thermal_device_mode *mode) |
113 | { |
114 | struct amlogic_thermal_platform_data *pdata= thermal->devdata; |
115 | |
116 | if (pdata) |
117 | *mode = pdata->mode; |
118 | return 0; |
119 | } |
120 | |
/* Set mode callback functions for thermal zone */
/*
 * Enabling restores periodic polling, clears the stop flags of the
 * cpucore/gpucore cooling devices, and (in keep_mode) re-arms the worker.
 * Disabling stops polling, cancels the worker, and parks both core-count
 * cooling devices via the CPU_STOP/GPU_STOP state bits.
 */
static int amlogic_set_mode(struct thermal_zone_device *thermal,
			enum thermal_device_mode mode)
{
	struct amlogic_thermal_platform_data *pdata= thermal->devdata;
	struct cpucore_cooling_device *cpucore_device =NULL;
	struct gpucore_cooling_device *gpucore_device = NULL;
	if(!pdata)
		return -EINVAL;

	//mutex_lock(&pdata->therm_dev->lock);

	if (mode == THERMAL_DEVICE_ENABLED){
		/* resume polling at the configured idle interval */
		pdata->therm_dev->polling_delay = pdata->idle_interval;
		if(pdata->cpucore_cool_dev){
			cpucore_device=pdata->cpucore_cool_dev->devdata;
			cpucore_device->stop_flag=0;
		}
		if(pdata->gpucore_cool_dev){
			gpucore_device=pdata->gpucore_cool_dev->devdata;
			gpucore_device->stop_flag=0;
		}
		if (pdata->keep_mode) { // start work
			schedule_delayed_work(&pdata->thermal_work, msecs_to_jiffies(100));
		}
	}
	else{
		/* polling_delay of 0 stops the thermal core's periodic updates */
		pdata->therm_dev->polling_delay = 0;
		if (pdata->keep_mode) {
			/* must cancel before keep_mode_set_mode so the worker
			 * cannot run concurrently with the mode change */
			cancel_delayed_work_sync(&pdata->thermal_work);
			keep_mode_set_mode(pdata);
		}
		/* OR-ing the STOP bit into state 0 parks the cooling device */
		if(pdata->cpucore_cool_dev)
			pdata->cpucore_cool_dev->ops->set_cur_state(pdata->cpucore_cool_dev,(0|CPU_STOP));
		if(pdata->gpucore_cool_dev)
			pdata->gpucore_cool_dev->ops->set_cur_state(pdata->gpucore_cool_dev,(0|GPU_STOP));
	}

	//mutex_unlock(&pdata->therm_dev->lock);

	pdata->mode = mode;
	/* force an immediate re-evaluation of the zone with the new mode */
	thermal_zone_device_update(pdata->therm_dev);
	THERMAL_INFO("thermal polling set for duration=%d msec\n",
			pdata->therm_dev->polling_delay);
	return 0;
}
167 | |
168 | /* Get trip type callback functions for thermal zone */ |
169 | static int amlogic_get_trip_type(struct thermal_zone_device *thermal, int trip, |
170 | enum thermal_trip_type *type) |
171 | { |
172 | if(trip < thermal->trips-1) |
173 | *type = THERMAL_TRIP_ACTIVE; |
174 | else if(trip == thermal->trips-1) |
175 | *type = THERMAL_TRIP_CRITICAL; |
176 | else |
177 | return -EINVAL; |
178 | return 0; |
179 | } |
180 | |
181 | /* Get trip temperature callback functions for thermal zone */ |
182 | static int amlogic_get_trip_temp(struct thermal_zone_device *thermal, int trip, |
183 | unsigned long *temp) |
184 | { |
185 | struct amlogic_thermal_platform_data *pdata= thermal->devdata; |
186 | |
187 | if(trip > pdata->temp_trip_count ||trip<0) |
188 | return -EINVAL; |
189 | mutex_lock(&pdata->lock); |
190 | *temp =pdata->tmp_trip[trip].temperature; |
191 | /* convert the temperature into millicelsius */ |
192 | mutex_unlock(&pdata->lock); |
193 | |
194 | return 0; |
195 | } |
196 | |
197 | static int amlogic_set_trip_temp(struct thermal_zone_device *thermal, int trip, |
198 | unsigned long temp) |
199 | { |
200 | struct amlogic_thermal_platform_data *pdata= thermal->devdata; |
201 | |
202 | if(trip > pdata->temp_trip_count ||trip<0) |
203 | return -EINVAL; |
204 | mutex_lock(&pdata->lock); |
205 | pdata->tmp_trip[trip].temperature=temp; |
206 | /* convert the temperature into millicelsius */ |
207 | mutex_unlock(&pdata->lock); |
208 | return 0; |
209 | } |
210 | |
211 | /* Get critical temperature callback functions for thermal zone */ |
212 | static int amlogic_get_crit_temp(struct thermal_zone_device *thermal, |
213 | unsigned long *temp) |
214 | { |
215 | int ret; |
216 | /* Panic zone */ |
217 | ret =amlogic_get_trip_temp(thermal, thermal->trips-1, temp); |
218 | |
219 | return ret; |
220 | } |
221 | |
222 | int gpu_get_freq_level(int freq) |
223 | { |
224 | if (gpu_freq_level) |
225 | return gpu_freq_level(freq); |
226 | else |
227 | return -1; |
228 | } |
229 | |
/* Bind callback functions for thermal zone */
/*
 * Called by the thermal core once per registered cooling device.  The
 * device kind is parsed out of cdev->type ("thermal-<kind>-<id>"); each
 * branch binds every trip point to the device with per-trip upper/lower
 * cooling states, and in keep_mode records the device's max state via
 * keep_mode_bind() (slot 0=cpufreq, 1=gpufreq, 2=cpucore, 3=gpucore).
 */
static int amlogic_bind(struct thermal_zone_device *thermal,
			struct thermal_cooling_device *cdev)
{
	int ret = 0, i;
	struct amlogic_thermal_platform_data *pdata= thermal->devdata;
	int id;
	char type[THERMAL_NAME_LENGTH];
	unsigned long max;

	/* %7s stops after 7 chars, exactly the length of "cpufreq"/"gpufreq"/
	 * "cpucore"/"gpucore", so the trailing "-<id>" still matches */
	if (!sscanf(cdev->type, "thermal-%7s-%d", type,&id))
		return -EINVAL;
	if(!strcmp(type,"cpufreq")){
		/* Bind the thermal zone to the cpufreq cooling device */
		for (i = 0; i < pdata->temp_trip_count; i++) {
			/* trip 0 unset means the whole cpufreq table is absent */
			if(pdata->tmp_trip[0].cpu_upper_level==THERMAL_CSTATE_INVALID)
			{
				ret = -EINVAL;
				goto out;
			}
			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
				pdata->tmp_trip[i].cpu_upper_level,
				pdata->tmp_trip[i].cpu_lower_level)) {
				THERMAL_ERR("error binding cdev inst %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		if (pdata->keep_mode) {
			cdev->ops->get_max_state(cdev, &max);
			keep_mode_bind(pdata, max, 0);
		}
	}

	if(!strcmp(type,"gpufreq")){
		struct gpufreq_cooling_device *gpufreq_dev=
			(struct gpufreq_cooling_device *)cdev->devdata;
		/* Bind the thermal zone to the gpufreq cooling device */
		for (i = 0; i < pdata->temp_trip_count; i++) {
			if(!gpufreq_dev->get_gpu_freq_level){
				ret = -EINVAL;
				THERMAL_ERR("invalidate pointer %p\n",gpufreq_dev->get_gpu_freq_level);
				goto out;
			} else {
				/* cache the translator for gpu_get_freq_level() */
				gpu_freq_level = gpufreq_dev->get_gpu_freq_level;
			}
			/* higher frequency maps to a LOWER cooling level, hence
			 * upper_freq -> lower_level and lower_freq -> upper_level */
			pdata->tmp_trip[i].gpu_lower_level=gpufreq_dev->get_gpu_freq_level(pdata->tmp_trip[i].gpu_upper_freq);
			pdata->tmp_trip[i].gpu_upper_level=gpufreq_dev->get_gpu_freq_level(pdata->tmp_trip[i].gpu_lower_freq);
			if(pdata->tmp_trip[0].gpu_lower_level==THERMAL_CSTATE_INVALID)
			{
				ret = -EINVAL;
				goto out;
			}
			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
				pdata->tmp_trip[i].gpu_upper_level,
				pdata->tmp_trip[i].gpu_lower_level)) {
				THERMAL_ERR("error binding cdev inst %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		pdata->gpu_cool_dev=cdev;
		if (pdata->keep_mode) {
			cdev->ops->get_max_state(cdev, &max);
			keep_mode_bind(pdata, max, 1);
		}
	}

	if(!strcmp(type,"cpucore")){
		/* Bind the thermal zone to the cpucore cooling device */
		struct cpucore_cooling_device *cpucore_dev=
			(struct cpucore_cooling_device *)cdev->devdata;
		for (i = 0; i < pdata->temp_trip_count; i++) {
			if(pdata->tmp_trip[0].cpu_core_num==THERMAL_CSTATE_INVALID)
			{
				ret = -EINVAL;
				goto out;
			}
			/* cooling state counts cores taken OFFLINE, so convert
			 * "cores to keep" into max - keep; -1 passes through */
			if(pdata->tmp_trip[i].cpu_core_num !=-1)
				pdata->tmp_trip[i].cpu_core_upper=cpucore_dev->max_cpu_core_num-pdata->tmp_trip[i].cpu_core_num;
			else
				pdata->tmp_trip[i].cpu_core_upper=pdata->tmp_trip[i].cpu_core_num;
			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
				pdata->tmp_trip[i].cpu_core_upper,
				pdata->tmp_trip[i].cpu_core_upper)) {
				THERMAL_ERR("error binding cdev inst %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		if (pdata->keep_mode) {
			cdev->ops->get_max_state(cdev, &max);
			keep_mode_bind(pdata, max, 2);
		}
	}

	if(!strcmp(type,"gpucore")){
		/* Bind the thermal zone to the gpucore cooling device */
		struct gpucore_cooling_device *gpucore_dev=
			(struct gpucore_cooling_device *)cdev->devdata;
		for (i = 0; i < pdata->temp_trip_count; i++) {
			if(pdata->tmp_trip[0].cpu_core_num==THERMAL_CSTATE_INVALID)
			{
				ret = -EINVAL;
				goto out;
			}
			/* same max-minus-keep conversion as the cpucore branch */
			if(pdata->tmp_trip[i].gpu_core_num != -1)
				pdata->tmp_trip[i].gpu_core_upper=gpucore_dev->max_gpu_core_num-pdata->tmp_trip[i].gpu_core_num;
			else
				pdata->tmp_trip[i].gpu_core_upper=pdata->tmp_trip[i].gpu_core_num;

			if (thermal_zone_bind_cooling_device(thermal, i, cdev,
				pdata->tmp_trip[i].gpu_core_upper,
				pdata->tmp_trip[i].gpu_core_upper)) {
				THERMAL_ERR("error binding cdev inst %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		pdata->gpucore_cool_dev=cdev;
		if (pdata->keep_mode) {
			cdev->ops->get_max_state(cdev, &max);
			keep_mode_bind(pdata, max, 3);
		}
	}
	return ret;
out:
	/* NOTE(review): trips bound before the failure are not unbound here;
	 * the core calls the unbind callback on zone teardown -- confirm */
	return ret;
}
359 | |
360 | /* Unbind callback functions for thermal zone */ |
361 | static int amlogic_unbind(struct thermal_zone_device *thermal, |
362 | struct thermal_cooling_device *cdev) |
363 | { |
364 | int i; |
365 | if(thermal && cdev){ |
366 | struct amlogic_thermal_platform_data *pdata= thermal->devdata; |
367 | for (i = 0; i < pdata->temp_trip_count; i++) { |
368 | if (thermal_zone_unbind_cooling_device(thermal, i, cdev)) { |
369 | THERMAL_ERR(" error %d \n", i); |
370 | return -EINVAL; |
371 | } |
372 | return 0; |
373 | } |
374 | }else{ |
375 | return -EINVAL; |
376 | } |
377 | return -EINVAL; |
378 | } |
379 | #define ABS(a) ((a) > 0 ? (a) : -(a)) |
380 | |
381 | void *thermal_alloc(size_t len) |
382 | { |
383 | return kzalloc(len, GFP_KERNEL); |
384 | } |
385 | EXPORT_SYMBOL(thermal_alloc); |
386 | |
387 | static void thermal_work(struct work_struct *work) |
388 | { |
389 | struct amlogic_thermal_platform_data *pdata; |
390 | int cpu_freq = cpufreq_quick_get(0); |
391 | |
392 | pdata = container_of((struct delayed_work *)work, struct amlogic_thermal_platform_data, thermal_work); |
393 | if (pdata->temp_valid) |
394 | keep_mode_work(pdata, cpu_freq); |
395 | if (pdata->mode == THERMAL_DEVICE_ENABLED) { // no need to do this work again if thermal disabled |
396 | schedule_delayed_work(&pdata->thermal_work, msecs_to_jiffies(100)); |
397 | } |
398 | } |
399 | |
/*
 * Parse the "virtual thermal" configuration from the device tree: a
 * frequency-sampling period, four report-time thresholds, and two
 * freq->temperature tables (cpu_virtual / gpu_virtual) stored in the
 * file-scope cpu_virtual_thermal / gpu_virtual_thermal devices.
 * Returns 0 with virtual_thermal_en set on success; on any parse error
 * virtual_thermal_en is cleared and -1 is returned.
 */
static int aml_virtaul_thermal_probe(struct platform_device *pdev, struct amlogic_thermal_platform_data *pdata)
{
	int ret, len, cells;
	struct property *prop;
	void *buf;

	/* virtual thermal is strictly opt-in via the DT bool property */
	if (!of_property_read_bool(pdev->dev.of_node, "use_virtual_thermal")) {
		pdata->virtual_thermal_en = 0;
		return 0;
	}

	ret = of_property_read_u32(pdev->dev.of_node,
				"freq_sample_period",
				&pdata->freq_sample_period);
	if (ret) {
		pdata->freq_sample_period = 30;	/* default sample period */
	}
	ret = of_property_read_u32_array(pdev->dev.of_node,
				"report_time",
				report_interval, sizeof(report_interval) / sizeof(u32));
	if (ret) {
		goto error;
	}
	/*
	 * read cpu_virtal
	 */
	prop = of_find_property(pdev->dev.of_node, "cpu_virtual", &len);
	if (!prop) {
		goto error;
	}
	cells = len / sizeof(struct aml_virtual_thermal);
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		THERMAL_ERR("%s, no memory\n", __func__);
		return -ENOMEM;
	}
	ret = of_property_read_u32_array(pdev->dev.of_node,
				"cpu_virtual",
				buf, len/sizeof(u32));
	if (ret) {
		kfree(buf);
		goto error;
	}
	cpu_virtual_thermal.count = cells;
	cpu_virtual_thermal.thermal = buf;

	/*
	 * read gpu_virtal
	 */
	prop = of_find_property(pdev->dev.of_node, "gpu_virtual", &len);
	if (!prop) {
		/* NOTE(review): the cpu table published above is not freed on
		 * this and the following error paths; presumably the caller is
		 * expected to invoke aml_virtual_thermal_remove() -- confirm */
		goto error;
	}
	cells = len / sizeof(struct aml_virtual_thermal);
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		/* NOTE(review): unlike the cpu path, no error message here */
		return -ENOMEM;
	}
	ret = of_property_read_u32_array(pdev->dev.of_node,
				"gpu_virtual",
				buf, len/sizeof(u32));
	if (ret) {
		kfree(buf);
		goto error;
	}
	gpu_virtual_thermal.count = cells;
	gpu_virtual_thermal.thermal = buf;

	pdata->virtual_thermal_en = 1;
	return 0;

error:
	pdata->virtual_thermal_en = 0;
	return -1;
}
475 | |
476 | static void aml_virtual_thermal_remove(struct amlogic_thermal_platform_data *pdata) |
477 | { |
478 | kfree(cpu_virtual_thermal.thermal); |
479 | kfree(gpu_virtual_thermal.thermal); |
480 | pdata->virtual_thermal_en = 0; |
481 | } |
482 | |
483 | static int check_freq_level(struct aml_virtual_thermal_device *dev, unsigned int freq) |
484 | { |
485 | int i = 0; |
486 | |
487 | if (freq >= dev->thermal[dev->count-1].freq) { |
488 | return dev->count - 1; |
489 | } |
490 | for (i = 0; i < dev->count - 1; i++) { |
491 | if (freq > dev->thermal[i].freq && freq <= dev->thermal[i + 1].freq) { |
492 | return i + 1; |
493 | } |
494 | } |
495 | return 0; |
496 | } |
497 | |
498 | static int check_freq_level_cnt(unsigned int cnt) |
499 | { |
500 | int i; |
501 | |
502 | if (cnt >= report_interval[3]) { |
503 | return 3; |
504 | } |
505 | for (i = 0; i < 3; i++) { |
506 | if (cnt >= report_interval[i] && cnt < report_interval[i + 1]) { |
507 | return i; |
508 | } |
509 | } |
510 | return 0; |
511 | } |
512 | |
513 | static unsigned long aml_cal_virtual_temp(struct amlogic_thermal_platform_data *pdata) |
514 | { |
515 | static unsigned int cpu_freq_level_cnt = 0, gpu_freq_level_cnt = 0; |
516 | static unsigned int last_cpu_freq_level = 0, last_gpu_freq_level = 0; |
517 | static unsigned int cpu_temp = 40, gpu_temp = 40; // default set to 40 when at homescreen |
518 | unsigned int curr_cpu_avg_freq, curr_gpu_avg_freq; |
519 | int curr_cpu_freq_level, curr_gpu_freq_level; |
520 | int cnt_level, level_diff; |
521 | int temp_update = 0, final_temp; |
522 | |
523 | /* |
524 | * CPU temp |
525 | */ |
526 | if (atomic_read(&freq_update_flag)) { |
527 | curr_cpu_avg_freq = pdata->monitor.avg_cpu_freq; |
528 | curr_cpu_freq_level = check_freq_level(&cpu_virtual_thermal, curr_cpu_avg_freq); |
529 | level_diff = curr_cpu_freq_level - last_cpu_freq_level; |
530 | if (ABS(level_diff) <= 1) { // freq change is not large |
531 | cpu_freq_level_cnt++; |
532 | cnt_level = check_freq_level_cnt(cpu_freq_level_cnt); |
533 | cpu_temp = cpu_virtual_thermal.thermal[curr_cpu_freq_level].temp_time[cnt_level]; |
534 | } else { // level not match |
535 | cpu_temp = cpu_virtual_thermal.thermal[curr_cpu_freq_level].temp_time[0]; |
536 | cpu_freq_level_cnt = 0; |
537 | } |
538 | last_cpu_freq_level = curr_cpu_freq_level; |
539 | |
540 | curr_gpu_avg_freq = pdata->monitor.avg_gpu_freq; |
541 | curr_gpu_freq_level = check_freq_level(&gpu_virtual_thermal, curr_gpu_avg_freq); |
542 | level_diff = curr_gpu_freq_level - last_gpu_freq_level; |
543 | if (ABS(level_diff) <= 1) { // freq change is not large |
544 | gpu_freq_level_cnt++; |
545 | cnt_level = check_freq_level_cnt(gpu_freq_level_cnt); |
546 | gpu_temp = gpu_virtual_thermal.thermal[curr_gpu_freq_level].temp_time[cnt_level]; |
547 | } else { // level not match |
548 | gpu_temp = gpu_virtual_thermal.thermal[curr_gpu_freq_level].temp_time[0]; |
549 | gpu_freq_level_cnt = 0; |
550 | } |
551 | last_gpu_freq_level = curr_gpu_freq_level; |
552 | |
553 | atomic_set(&freq_update_flag, 0); |
554 | temp_update = 1; |
555 | } |
556 | |
557 | if (cpu_temp <= 0 && gpu_temp <= 0) { |
558 | final_temp = 40; |
559 | } |
560 | final_temp = (cpu_temp >= gpu_temp ? cpu_temp : gpu_temp); |
561 | return final_temp; |
562 | } |
563 | |
564 | /* Get temperature callback functions for thermal zone */ |
565 | static int amlogic_get_temp(struct thermal_zone_device *thermal, |
566 | unsigned long *temp) |
567 | { |
568 | struct amlogic_thermal_platform_data *pdata = thermal->devdata; |
569 | int tmp; |
570 | |
571 | if (pdata->trim_flag) { |
572 | tmp = get_cpu_temp(); |
573 | if (tmp < MIN_TEMP) { |
574 | pdata->temp_valid = 0; |
575 | return -EINVAL; |
576 | } |
577 | pdata->temp_valid = 1; |
578 | *temp = (unsigned long)get_cpu_temp(); |
579 | pdata->current_temp = *temp; |
580 | } else if (pdata->virtual_thermal_en) { |
581 | *temp = aml_cal_virtual_temp(pdata); |
582 | } else { |
583 | *temp = 45; // fix cpu temperature to 45 if not trimed && disable virtual thermal |
584 | } |
585 | return 0; |
586 | } |
587 | |
/* Get the temperature trend */
static int amlogic_get_trend(struct thermal_zone_device *thermal,
		int trip, enum thermal_trend *trend)
{
	/*
	 * NOTE(review): *trend is never written and a non-zero value is
	 * returned; the thermal core treats that as a failed callback and
	 * falls back to its own trend computation.  Presumably intentional —
	 * confirm before "fixing".
	 */
	return 1;
}
/* Operation callback functions for thermal zone */
/* Callback table handed to thermal_zone_device_register() below. */
static struct thermal_zone_device_ops amlogic_dev_ops = {
	.bind = amlogic_bind,
	.unbind = amlogic_unbind,
	.get_temp = amlogic_get_temp,
	.get_trend = amlogic_get_trend,
	.get_mode = amlogic_get_mode,
	.set_mode = amlogic_set_mode,
	.get_trip_type = amlogic_get_trip_type,
	.get_trip_temp = amlogic_get_trip_temp,
	.set_trip_temp = amlogic_set_trip_temp,
	.get_crit_temp = amlogic_get_crit_temp,
};
607 | |
608 | /* |
609 | * sysfs for keep_mode |
610 | */ |
/* sysfs: max_cpu_num (read-only, debug) — exposes the hotplug governor's
 * max_cpu_num variable; its semantics are defined by that governor. */
#ifdef CONFIG_CPU_FREQ_GOV_HOTPLUG // for DEBUG
extern unsigned int max_cpu_num;
static ssize_t max_cpu_num_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", max_cpu_num);
}
#endif
618 | |
619 | static ssize_t thermal_debug_show(struct device *dev, struct device_attribute *attr, char *buf) |
620 | { |
621 | return sprintf(buf, "%d\n", thermal_debug_enable); |
622 | } |
623 | |
624 | static ssize_t thermal_debug_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) |
625 | { |
626 | int32_t data = simple_strtol(buf, NULL, 10); |
627 | |
628 | if (data) { |
629 | thermal_debug_enable = 1; |
630 | } else { |
631 | thermal_debug_enable = 0; |
632 | } |
633 | return count; |
634 | } |
635 | |
636 | static ssize_t keep_mode_show(struct device *dev, struct device_attribute *attr, char *buf) |
637 | { |
638 | struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device); |
639 | struct amlogic_thermal_platform_data *pdata = tz->devdata; |
640 | |
641 | return sprintf(buf, "%s\n", pdata->keep_mode ? "enabled": "disabled"); |
642 | } |
643 | |
644 | static ssize_t keep_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) |
645 | { |
646 | struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device); |
647 | struct amlogic_thermal_platform_data *pdata = tz->devdata; |
648 | if (!strncmp(buf, "enabled", sizeof("enabled") - 1)) { |
649 | pdata->keep_mode = 1; |
650 | } else if (!strncmp(buf, "disabled", sizeof("disabled") - 1)) { |
651 | pdata->keep_mode = 0; |
652 | } |
653 | return count; |
654 | } |
655 | |
656 | static ssize_t keep_mode_threshold_show(struct device *dev, struct device_attribute *attr, char *buf) |
657 | { |
658 | struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device); |
659 | struct amlogic_thermal_platform_data *pdata = tz->devdata; |
660 | |
661 | return sprintf(buf, "%d\n", pdata->keep_mode_threshold); |
662 | } |
663 | |
664 | static ssize_t keep_mode_threshold_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) |
665 | { |
666 | struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device); |
667 | struct amlogic_thermal_platform_data *pdata = tz->devdata; |
668 | int32_t data = simple_strtol(buf, NULL, 10); |
669 | |
670 | if (data > 200) { |
671 | THERMAL_INFO("input is %d, seems too large, invalid\n", data); |
672 | } |
673 | keep_mode_update_threshold(pdata, data); |
674 | THERMAL_INFO("set keep_mode_threshold to %d\n", data); |
675 | return count; |
676 | } |
677 | |
678 | static ssize_t high_temp_protect_show(struct device *dev, struct device_attribute *attr, char *buf) |
679 | { |
680 | return sprintf(buf, "%d\n", high_temp_protect); |
681 | } |
682 | |
/*
 * sysfs: toggle high-temperature protection by moving trip point 1.
 * Enabled: trip 1 = keep_mode_threshold + 25.  Disabled: trip 1 = 260,
 * presumably a value high enough to never fire — confirm the unit/intent.
 */
static ssize_t high_temp_protect_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct thermal_zone_device *tz = container_of(dev, struct thermal_zone_device, device);
	struct amlogic_thermal_platform_data *pdata = tz->devdata;
	int32_t data = simple_strtol(buf, NULL, 10);

	high_temp_protect = data ? 1 : 0;
	if (high_temp_protect) {
		/* protective trip sits 25 degrees above the keep-mode target */
		pdata->tmp_trip[1].temperature = pdata->keep_mode_threshold + 25;
	} else {
		pdata->tmp_trip[1].temperature = 260;
	}
	THERMAL_INFO("high temperature protect %s\n", high_temp_protect ? "enabled" : "disabled");
	return count;
}
698 | |
/* sysfs attributes created on the thermal zone device when keep_mode is set
 * (see amlogic_register_thermal). */
static struct device_attribute amlogic_thermal_attr[] = {
#ifdef CONFIG_CPU_FREQ_GOV_HOTPLUG
	__ATTR(max_cpu_num, 0444, max_cpu_num_show, NULL),
#endif
	__ATTR(thermal_debug, 0644, thermal_debug_show, thermal_debug_store),
	__ATTR(keep_mode, 0644, keep_mode_show, keep_mode_store),
	__ATTR(keep_mode_threshold, 0644, keep_mode_threshold_show, keep_mode_threshold_store),
	__ATTR(high_temp_protect, 0644, high_temp_protect_show, high_temp_protect_store)
};
708 | |
/* Register with the in-kernel thermal management */
/*
 * Registers the cpufreq cooling device (for CPU0's policy), the cpucore
 * cooling device, and the thermal zone itself; the gpufreq/gpucore devices
 * register themselves elsewhere and arrive via the bind callback.
 */
static int amlogic_register_thermal(struct amlogic_thermal_platform_data *pdata, struct platform_device *pdev)
{
	int ret=0, j;
	struct cpumask mask_val;

	memset(&mask_val,0,sizeof(struct cpumask));
	cpumask_set_cpu(0, &mask_val);
	pdata->cpu_cool_dev= cpufreq_cooling_register(&mask_val);
	if (IS_ERR(pdata->cpu_cool_dev)) {
		THERMAL_ERR("Failed to register cpufreq cooling device\n");
		ret = -EINVAL;
		goto err_unregister;
	}
	pdata->cpucore_cool_dev = cpucore_cooling_register();
	if (IS_ERR(pdata->cpucore_cool_dev)) {
		/* NOTE(review): message says "cpufreq" but this is the cpucore
		 * cooling device failing */
		THERMAL_ERR("Failed to register cpufreq cooling device\n");
		ret = -EINVAL;
		goto err_unregister;
	}

	/* trip mask (1<<count)-1 marks every trip temperature as writable */
	pdata->therm_dev = thermal_zone_device_register(pdata->name,
			pdata->temp_trip_count,
			((1 << pdata->temp_trip_count) - 1),
			pdata,
			&amlogic_dev_ops,
			NULL,
			0,
			pdata->idle_interval);

	if (IS_ERR(pdata->therm_dev)) {
		THERMAL_ERR("Failed to register thermal zone device, err:%p\n", pdata->therm_dev);
		ret = -EINVAL;
		goto err_unregister;
	}

	if (pdata->keep_mode) { // create sysfs for keep_mode
		for (j = 0; j < ARRAY_SIZE(amlogic_thermal_attr); j++) {
			/* NOTE(review): device_create_file return value ignored */
			device_create_file(&pdata->therm_dev->device, &amlogic_thermal_attr[j]);
		}
	}

	return 0;

err_unregister:
	/* NOTE(review): amlogic_unregister_thermal tears down only the zone
	 * and the cpufreq cooling device; cpucore_cool_dev is not released
	 * here — confirm whether an unregister API exists for it */
	amlogic_unregister_thermal(pdata);
	return ret;
}
757 | |
/* Un-Register with the in-kernel thermal management */
/*
 * NOTE(review): only the zone and the cpufreq cooling device are released;
 * pdata->cpucore_cool_dev (registered in amlogic_register_thermal) is not
 * unregistered here — confirm whether that is handled elsewhere.
 */
static void amlogic_unregister_thermal(struct amlogic_thermal_platform_data *pdata)
{
	if (pdata->therm_dev)
		thermal_zone_device_unregister(pdata->therm_dev);
	if (pdata->cpu_cool_dev)
		cpufreq_cooling_unregister(pdata->cpu_cool_dev);

}
767 | |
768 | int get_desend(void) |
769 | { |
770 | int i; |
771 | unsigned int freq = CPUFREQ_ENTRY_INVALID; |
772 | int descend = -1; |
773 | struct cpufreq_frequency_table *table = |
774 | cpufreq_frequency_get_table(0); |
775 | |
776 | if (!table) |
777 | return -EINVAL; |
778 | |
779 | for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { |
780 | /* ignore invalid entries */ |
781 | if (table[i].frequency == CPUFREQ_ENTRY_INVALID) |
782 | continue; |
783 | |
784 | /* ignore duplicate entry */ |
785 | if (freq == table[i].frequency) |
786 | continue; |
787 | |
788 | /* get the frequency order */ |
789 | if (freq != CPUFREQ_ENTRY_INVALID && descend == -1){ |
790 | descend = !!(freq > table[i].frequency); |
791 | break; |
792 | } |
793 | |
794 | freq = table[i].frequency; |
795 | } |
796 | return descend; |
797 | } |
/*
 * Snap @freqold onto an entry of CPU0's cpufreq frequency table.
 * @descend: table ordering as returned by get_desend() (1 = high->low).
 * Returns the matched table frequency, or -EINVAL when no table exists or
 * no bracket contains @freqold.
 *
 * NOTE(review): the loop reads table[i+1].frequency; when i is the last
 * valid entry that dereferences the CPUFREQ_TABLE_END sentinel's frequency
 * field.  Presumably harmless because the comparisons then fail, but
 * confirm before restructuring.
 */
int fix_to_freq(int freqold,int descend)
{
	int i;
	unsigned int freq = CPUFREQ_ENTRY_INVALID;
	struct cpufreq_frequency_table *table =
			cpufreq_frequency_get_table(0);

	if (!table)
		return -EINVAL;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		/* ignore invalid entry */
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		/* ignore duplicate entry */
		if (freq == table[i].frequency)
			continue;
		freq = table[i].frequency;
		if(descend){
			/* descending: round down to the next (lower) entry */
			if(freqold>=table[i+1].frequency && freqold<=table[i].frequency)
				return table[i+1].frequency;
		}
		else{
			/* ascending: round down to the current entry */
			if(freqold>=table[i].frequency && freqold<=table[i+1].frequency)
				return table[i].frequency;
		}
	}
	return -EINVAL;
}
828 | |
829 | void thermal_atomic_set(atomic_t *a, int value) |
830 | { |
831 | atomic_set(a, 1); |
832 | } |
833 | EXPORT_SYMBOL(thermal_atomic_set); |
834 | |
835 | static struct amlogic_thermal_platform_data * amlogic_thermal_init_from_dts(struct platform_device *pdev, int trim_flag) |
836 | { |
837 | int i = 0, ret = -1, val = 0, cells, descend, error = 0; |
838 | struct property *prop; |
839 | struct temp_level *tmp_level = NULL; |
840 | struct amlogic_thermal_platform_data *pdata = NULL; |
841 | |
842 | if(!of_property_read_u32(pdev->dev.of_node, "trip_point", &val)){ |
843 | //INIT FROM DTS |
844 | pdata=kzalloc(sizeof(*pdata),GFP_KERNEL); |
845 | if(!pdata){ |
846 | goto err; |
847 | } |
848 | memset((void* )pdata,0,sizeof(*pdata)); |
849 | ret=of_property_read_u32(pdev->dev.of_node, "#thermal-cells", &val); |
850 | if(ret){ |
851 | dev_err(&pdev->dev, "dt probe #thermal-cells failed: %d\n", ret); |
852 | goto err; |
853 | } |
854 | cells=val; |
855 | |
856 | /* |
857 | * process for KEEP_MODE and virtual thermal |
858 | * Logic: If virtual thermal is enabled, then ignore keep_mode |
859 | * |
860 | */ |
861 | pdata->trim_flag = trim_flag; |
862 | if (!pdata->trim_flag) { // chip is not trimmed, use virtual thermal |
863 | aml_virtaul_thermal_probe(pdev, pdata); |
864 | } else if (of_property_read_bool(pdev->dev.of_node, "keep_mode")) { |
865 | if (of_property_read_u32(pdev->dev.of_node, "keep_mode_threshold", &pdata->keep_mode_threshold)) { |
866 | error = 1; |
867 | } |
868 | if (of_property_read_u32_array(pdev->dev.of_node, |
869 | "keep_mode_max_range", |
870 | pdata->keep_mode_max_range, |
871 | sizeof(pdata->keep_mode_max_range)/sizeof(u32))) { |
872 | error = 1; |
873 | } |
874 | if (!error && pdata->trim_flag) { // keep mode should not used for virtual thermal right now |
875 | THERMAL_INFO("keep_mode_max_range: [%7d, %3d, %d, %d]\n", |
876 | pdata->keep_mode_max_range[0], pdata->keep_mode_max_range[1], |
877 | pdata->keep_mode_max_range[2], pdata->keep_mode_max_range[3]); |
878 | pdata->keep_mode = 1; |
879 | pdata->freq_sample_period = 5; |
880 | } |
881 | if (!of_property_read_u32_array(pdev->dev.of_node, |
882 | "keep_mode_min_range", |
883 | pdata->keep_mode_min_range, |
884 | sizeof(pdata->keep_mode_min_range)/sizeof(u32))) { |
885 | pdata->keep_min_exist = 1; |
886 | THERMAL_INFO("keep_mode_min_range: [%7d, %3d, %d, %d]\n", |
887 | pdata->keep_mode_min_range[0], pdata->keep_mode_min_range[1], |
888 | pdata->keep_mode_min_range[2], pdata->keep_mode_min_range[3]); |
889 | } |
890 | } else { |
891 | THERMAL_INFO("keep_mode is disabled\n"); |
892 | } |
893 | if(pdata->keep_mode || !pdata->trim_flag){ |
894 | INIT_DELAYED_WORK(&pdata->thermal_work, thermal_work); |
895 | schedule_delayed_work(&pdata->thermal_work, msecs_to_jiffies(100)); |
896 | atomic_set(&freq_update_flag, 0); |
897 | } |
898 | |
899 | prop = of_find_property(pdev->dev.of_node, "trip_point", &val); |
900 | if (!prop){ |
901 | dev_err(&pdev->dev, "read %s length error\n","trip_point"); |
902 | goto err; |
903 | } |
904 | if (pdata->keep_mode) { |
905 | pdata->temp_trip_count = 2; |
906 | } else { |
907 | pdata->temp_trip_count=val/cells/sizeof(u32); |
908 | } |
909 | tmp_level=kzalloc(sizeof(*tmp_level)*pdata->temp_trip_count,GFP_KERNEL); |
910 | pdata->tmp_trip=kzalloc(sizeof(struct temp_trip)*pdata->temp_trip_count,GFP_KERNEL); |
911 | if(!tmp_level){ |
912 | goto err; |
913 | } |
914 | |
915 | if (pdata->keep_mode) { // keep mode only need one point |
916 | keep_mode_temp_level_init(pdata, tmp_level); |
917 | } else { |
918 | ret=of_property_read_u32_array(pdev->dev.of_node,"trip_point",(u32 *)tmp_level,val/sizeof(u32)); |
919 | if (ret){ |
920 | dev_err(&pdev->dev, "read %s data error\n","trip_point"); |
921 | goto err; |
922 | } |
923 | } |
924 | descend=get_desend(); |
925 | for (i = 0; i < pdata->temp_trip_count; i++) { |
926 | pdata->tmp_trip[i].temperature=tmp_level[i].temperature; |
927 | tmp_level[i].cpu_high_freq=fix_to_freq(tmp_level[i].cpu_high_freq,descend); |
928 | pdata->tmp_trip[i].cpu_lower_level=cpufreq_cooling_get_level(0,tmp_level[i].cpu_high_freq); |
929 | |
930 | tmp_level[i].cpu_low_freq=fix_to_freq(tmp_level[i].cpu_low_freq,descend); |
931 | pdata->tmp_trip[i].cpu_upper_level=cpufreq_cooling_get_level(0,tmp_level[i].cpu_low_freq); |
932 | pdata->tmp_trip[i].gpu_lower_freq=tmp_level[i].gpu_low_freq; |
933 | pdata->tmp_trip[i].gpu_upper_freq=tmp_level[i].gpu_high_freq; |
934 | |
935 | pdata->tmp_trip[i].cpu_core_num=tmp_level[i].cpu_core_num; |
936 | pdata->tmp_trip[i].gpu_core_num=tmp_level[i].gpu_core_num; |
937 | } |
938 | |
939 | ret= of_property_read_u32(pdev->dev.of_node, "idle_interval", &val); |
940 | if (ret){ |
941 | dev_err(&pdev->dev, "read %s error\n","idle_interval"); |
942 | goto err; |
943 | } |
944 | pdata->idle_interval=val; |
945 | ret=of_property_read_string(pdev->dev.of_node,"dev_name",&pdata->name); |
946 | if (ret){ |
947 | dev_err(&pdev->dev, "read %s error\n","dev_name"); |
948 | goto err; |
949 | } |
950 | pdata->mode=THERMAL_DEVICE_ENABLED; |
951 | if(tmp_level) |
952 | kfree(tmp_level); |
953 | return pdata; |
954 | } |
955 | err: |
956 | if(tmp_level) |
957 | kfree(tmp_level); |
958 | if(pdata) |
959 | kfree(pdata); |
960 | pdata= NULL; |
961 | return pdata; |
962 | } |
963 | |
/*
 * Thin wrapper around amlogic_thermal_init_from_dts(): all platform
 * data comes from the device tree.  Returns NULL on failure.
 */
static struct amlogic_thermal_platform_data *amlogic_thermal_initialize(struct platform_device *pdev, int trim_flag)
{
	return amlogic_thermal_init_from_dts(pdev, trim_flag);
}
970 | |
/*
 * Device-tree match table.  NOTE: the compatible string really does
 * contain a space after the comma ("amlogic, ") — it must match the
 * DTS node exactly, so do not "fix" it here without changing the DTS.
 */
static const struct of_device_id amlogic_thermal_match[] = {
	{
		.compatible = "amlogic, amlogic-thermal",
	},
	{},
};
977 | |
978 | #ifdef CONFIG_HIBERNATION |
/*
 * Hibernation freeze hook: intentionally a no-op — the sensor setup is
 * redone from scratch in amlogic_thermal_restore().
 */
static int amlogic_thermal_freeze(struct device *dev)
{
	return 0;
}
983 | |
/*
 * Hibernation thaw hook (resume after a failed/aborted hibernation):
 * intentionally a no-op, nothing was torn down in freeze.
 */
static int amlogic_thermal_thaw(struct device *dev)
{
	return 0;
}
988 | |
/*
 * Hibernation restore hook: re-run the thermal firmware/trimming init,
 * presumably because the sensor hardware state does not survive the
 * hibernation image load — confirm against thermal_firmware_init().
 * The return value of thermal_firmware_init() is deliberately ignored
 * here; restore always reports success.
 */
static int amlogic_thermal_restore(struct device *dev)
{
	thermal_firmware_init();

	return 0;
}
995 | |
996 | static struct dev_pm_ops amlogic_theraml_pm = { |
997 | .freeze = amlogic_thermal_freeze, |
998 | .thaw = amlogic_thermal_thaw, |
999 | .restore = amlogic_thermal_restore, |
1000 | }; |
1001 | #endif |
1002 | |
1003 | static int amlogic_thermal_probe(struct platform_device *pdev) |
1004 | { |
1005 | int ret, trim_flag; |
1006 | struct amlogic_thermal_platform_data *pdata=NULL; |
1007 | |
1008 | device_rename(&pdev->dev, "thermal"); |
1009 | dbg_dev = &pdev->dev; |
1010 | ret = thermal_firmware_init(); |
1011 | if (ret < 0) { |
1012 | THERMAL_INFO("this chip is not trimmed, can't use thermal\n"); |
1013 | trim_flag = 0; |
1014 | return -ENODEV; |
1015 | } else { |
1016 | THERMAL_INFO("this chip is trimmed, use thermal\n"); |
1017 | trim_flag = 1; |
1018 | } |
1019 | |
1020 | pdata = amlogic_thermal_initialize(pdev, trim_flag); |
1021 | if (!pdata) { |
1022 | dev_err(&pdev->dev, "Failed to initialize thermal\n"); |
1023 | goto err; |
1024 | } |
1025 | mutex_init(&pdata->lock); |
1026 | pdev->dev.platform_data=pdata; |
1027 | platform_set_drvdata(pdev, pdata); |
1028 | ret = amlogic_register_thermal(pdata, pdev); |
1029 | if (ret) { |
1030 | dev_err(&pdev->dev, "Failed to register thermal interface\n"); |
1031 | goto err; |
1032 | } |
1033 | return 0; |
1034 | err: |
1035 | platform_set_drvdata(pdev, NULL); |
1036 | return ret; |
1037 | } |
1038 | |
1039 | static int amlogic_thermal_remove(struct platform_device *pdev) |
1040 | { |
1041 | struct amlogic_thermal_platform_data *pdata = platform_get_drvdata(pdev); |
1042 | |
1043 | aml_virtual_thermal_remove(pdata); |
1044 | |
1045 | amlogic_unregister_thermal(pdata); |
1046 | |
1047 | platform_set_drvdata(pdev, NULL); |
1048 | |
1049 | return 0; |
1050 | } |
1051 | |
/*
 * Platform driver definition.  Non-static on purpose (referenced by
 * name elsewhere); registered from a late_initcall below.  The PM ops
 * field points at the (historically misspelled) amlogic_theraml_pm.
 */
struct platform_driver amlogic_thermal_driver = {
	.driver = {
		.name = "amlogic-thermal",
		.owner = THIS_MODULE,
#ifdef CONFIG_HIBERNATION
		.pm = &amlogic_theraml_pm,
#endif
		.of_match_table = of_match_ptr(amlogic_thermal_match),
	},
	.probe = amlogic_thermal_probe,
	.remove = amlogic_thermal_remove,
};
1064 | |
/*
 * Exported accessor: return the driver-private data attached to a
 * cooling device, so out-of-tree users need not know the struct layout.
 */
void *aml_get_cdevdata(struct thermal_cooling_device *cdev)
{
	return cdev->devdata;
}
EXPORT_SYMBOL(aml_get_cdevdata);
1070 | |
/*
 * Exported setter for the cooling device's `updated` flag — presumably
 * the thermal core's "state already applied" marker; confirm against
 * the thermal core before relying on its semantics.
 */
void aml_set_cdev_update(struct thermal_cooling_device *cdev, bool update)
{
	cdev->updated = update;
}
EXPORT_SYMBOL(aml_set_cdev_update);
1076 | |
1077 | void aml_cdev_lockop(struct thermal_cooling_device *cdev, bool lock) |
1078 | { |
1079 | if (lock) { |
1080 | thermal_lock(&cdev->lock); |
1081 | } else { |
1082 | thermal_unlock(&cdev->lock); |
1083 | } |
1084 | } |
1085 | EXPORT_SYMBOL(aml_cdev_lockop); |
1086 | |
/*
 * Exported query of the cooling device's current throttle state,
 * written into *ret via the device's ops.
 * NOTE(review): the get_cur_state() return code is ignored, so on
 * failure *ret is left unmodified — confirm callers pre-initialize it.
 */
void aml_cdev_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *ret)
{
	cdev->ops->get_cur_state(cdev, ret);
}
EXPORT_SYMBOL(aml_cdev_get_cur_state);
1092 | |
1093 | static int __init amlogic_thermal_driver_init(void) |
1094 | { |
1095 | return platform_driver_register(&(amlogic_thermal_driver)); |
1096 | } |
1097 | late_initcall(amlogic_thermal_driver_init); |
1098 | static void __exit amlogic_thermal_driver_exit(void) |
1099 | { |
1100 | platform_driver_unregister(&(amlogic_thermal_driver) ); |
1101 | } |
1102 | module_exit(amlogic_thermal_driver_exit); |
1103 | |
/* Module metadata */
MODULE_DESCRIPTION("amlogic thermal Driver");
MODULE_AUTHOR("Amlogic SH platform team");
MODULE_ALIAS("platform:amlogic-thermal");
MODULE_LICENSE("GPL");
1108 | |
1109 |