path: root/drivers/thermal/cpu_cooling.c (plain)
blob: f49d2989d0005e80fd7619e9ad7ffaef762ed57f
/*
 * linux/drivers/thermal/cpu_cooling.c
 *
 * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
 * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
 *
 * Copyright (C) 2014 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/module.h>
#include <linux/thermal.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>

#include <trace/events/thermal.h>

/*
 * Cooling state <-> CPUFreq frequency
 *
 * Cooling states are translated to frequencies throughout this driver and this
 * is the relation between them.
 *
 * Highest cooling state corresponds to lowest possible frequency.
 *
 * i.e.
 * level 0 --> 1st Max Freq
 * level 1 --> 2nd Max Freq
 * ...
 */
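
/*
 * For instance (illustrative values only, not taken from any particular
 * platform): if the cpufreq table exposes 1800 MHz, 1500 MHz and 1200 MHz,
 * then level 0 maps to 1800 MHz, level 1 to 1500 MHz and level 2 (the
 * highest, i.e. coldest, cooling state) to 1200 MHz.
 */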

/**
 * struct power_table - frequency to power conversion
 * @frequency: frequency in KHz
 * @power: power in mW
 *
 * This structure is built when the cooling device registers and helps
 * in translating frequency to power and vice versa.
 */
struct power_table {
        u32 frequency;
        u32 power;
};

/**
 * struct cpufreq_cooling_device - data for cooling device with cpufreq
 * @id: unique integer value corresponding to each cpufreq_cooling_device
 *      registered.
 * @cool_dev: thermal_cooling_device pointer to keep track of the
 *      registered cooling device.
 * @cpufreq_state: integer value representing the current state of cpufreq
 *      cooling devices.
 * @clipped_freq: integer value representing the absolute value of the clipped
 *      frequency.
 * @max_level: maximum cooling level. One less than total number of valid
 *      cpufreq frequencies.
 * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
 * @node: list_head to link all cpufreq_cooling_device together.
 * @last_load: load measured by the latest call to cpufreq_get_requested_power()
 * @time_in_idle: previous reading of the absolute time that this cpu was idle
 * @time_in_idle_timestamp: wall time of the last invocation of
 *      get_cpu_idle_time_us()
 * @dyn_power_table: array of struct power_table for frequency to power
 *      conversion, sorted in ascending order.
 * @dyn_power_table_entries: number of entries in the @dyn_power_table array
 * @cpu_dev: the first cpu_device from @allowed_cpus that has OPPs registered
 * @plat_get_static_power: callback to calculate the static power
 *
 * This structure is required for keeping information of each registered
 * cpufreq_cooling_device.
 */
struct cpufreq_cooling_device {
        int id;
        struct thermal_cooling_device *cool_dev;
        unsigned int cpufreq_state;
        unsigned int clipped_freq;
        unsigned int max_level;
        unsigned int *freq_table;       /* In descending order */
        struct cpumask allowed_cpus;
        struct list_head node;
        u32 last_load;
        u64 *time_in_idle;
        u64 *time_in_idle_timestamp;
        struct power_table *dyn_power_table;
        int dyn_power_table_entries;
        struct device *cpu_dev;
        get_static_t plat_get_static_power;
};
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);

static unsigned int cpufreq_dev_count;

static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_dev_list);

/**
 * get_idr - function to get a unique id.
 * @idr: struct idr * handle used to create an id.
 * @id: int * value generated by this function.
 *
 * This function will populate @id with a unique
 * id, using the idr API.
 *
 * Return: 0 on success, an error code on failure.
 */
static int get_idr(struct idr *idr, int *id)
{
        int ret;

        mutex_lock(&cooling_cpufreq_lock);
        ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
        mutex_unlock(&cooling_cpufreq_lock);
        if (unlikely(ret < 0))
                return ret;
        *id = ret;

        return 0;
}

/**
 * release_idr - function to free the unique id.
 * @idr: struct idr * handle used for creating the id.
 * @id: int value representing the unique id.
 */
static void release_idr(struct idr *idr, int id)
{
        mutex_lock(&cooling_cpufreq_lock);
        idr_remove(idr, id);
        mutex_unlock(&cooling_cpufreq_lock);
}

/* The code below defines the functions used for cpufreq as a cooling device */

/**
 * get_level() - Find the level for a particular frequency
 * @cpufreq_dev: cpufreq_dev for which the property is required
 * @freq: Frequency
 *
 * Return: level on success, THERMAL_CSTATE_INVALID on error.
 */
static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_dev,
                               unsigned int freq)
{
        unsigned long level;

        for (level = 0; level <= cpufreq_dev->max_level; level++) {
                if (freq == cpufreq_dev->freq_table[level])
                        return level;

                if (freq > cpufreq_dev->freq_table[level])
                        break;
        }

        return THERMAL_CSTATE_INVALID;
}

/**
 * cpufreq_cooling_get_level - for a given cpu, return the cooling level.
 * @cpu: cpu for which the level is required
 * @freq: the frequency of interest
 *
 * This function will match the cooling level corresponding to the
 * requested @freq and return it.
 *
 * Return: The matched cooling level on success or THERMAL_CSTATE_INVALID
 * otherwise.
 */
unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
{
        struct cpufreq_cooling_device *cpufreq_dev;

        mutex_lock(&cooling_list_lock);
        list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
                if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
                        unsigned long level = get_level(cpufreq_dev, freq);

                        mutex_unlock(&cooling_list_lock);
                        return level;
                }
        }
        mutex_unlock(&cooling_list_lock);

        pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
        return THERMAL_CSTATE_INVALID;
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);

/**
 * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
 * @nb: struct notifier_block * with callback info.
 * @event: value showing the cpufreq event for which this function is invoked.
 * @data: callback-specific data
 *
 * Callback to hijack the notification on cpufreq policy transition.
 * Every time there is a change in policy, we will intercept and
 * update the cpufreq policy with thermal constraints.
 *
 * Return: NOTIFY_DONE if the event is not CPUFREQ_ADJUST, NOTIFY_OK otherwise.
 */
static int cpufreq_thermal_notifier(struct notifier_block *nb,
                                    unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;
        unsigned long clipped_freq;
        struct cpufreq_cooling_device *cpufreq_dev;

        if (event != CPUFREQ_ADJUST)
                return NOTIFY_DONE;

        mutex_lock(&cooling_list_lock);
        list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
                if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
                        continue;

                /*
                 * policy->max is the maximum allowed frequency defined by the
                 * user and clipped_freq is the maximum that thermal
                 * constraints allow.
                 *
                 * If clipped_freq is lower than policy->max, then we need to
                 * readjust policy->max.
                 *
                 * But, if clipped_freq is greater than policy->max, we don't
                 * need to do anything.
                 */
                clipped_freq = cpufreq_dev->clipped_freq;

                if (policy->max > clipped_freq)
                        cpufreq_verify_within_limits(policy, 0, clipped_freq);
                break;
        }
        mutex_unlock(&cooling_list_lock);

        return NOTIFY_OK;
}
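
/*
 * Illustrative example (hypothetical values): if userspace has set
 * policy->max to 1800000 KHz but the cooling device has been asked to clip
 * this cluster to 1200000 KHz, the notifier above lowers the effective
 * policy maximum to 1200000 KHz; if the thermal clip is above policy->max,
 * the policy is left untouched.
 */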

/**
 * build_dyn_power_table() - create a dynamic power to frequency table
 * @cpufreq_device: the cpufreq cooling device in which to store the table
 * @capacitance: dynamic power coefficient for these cpus
 *
 * Build a dynamic power to frequency table for this cpu and store it
 * in @cpufreq_device. This table will be used in cpu_power_to_freq() and
 * cpu_freq_to_power() to convert between power and frequency
 * efficiently. Power is stored in mW, frequency in KHz. The
 * resulting table is in ascending order.
 *
 * Return: 0 on success, -EINVAL if there are no OPPs for any CPUs,
 * -ENOMEM if we run out of memory or -EAGAIN if an OPP was
 * added/enabled while the function was executing.
 */
static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
                                 u32 capacitance)
{
        struct power_table *power_table;
        struct dev_pm_opp *opp;
        struct device *dev = NULL;
        int num_opps = 0, cpu, i, ret = 0;
        unsigned long freq;

        for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
                dev = get_cpu_device(cpu);
                if (!dev) {
                        dev_warn(&cpufreq_device->cool_dev->device,
                                 "No cpu device for cpu %d\n", cpu);
                        continue;
                }

                num_opps = dev_pm_opp_get_opp_count(dev);
                if (num_opps > 0)
                        break;
                else if (num_opps < 0)
                        return num_opps;
        }

        if (num_opps == 0)
                return -EINVAL;

        power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL);
        if (!power_table)
                return -ENOMEM;

        rcu_read_lock();

        for (freq = 0, i = 0;
             opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp);
             freq++, i++) {
                u32 freq_mhz, voltage_mv;
                u64 power;

                if (i >= num_opps) {
                        rcu_read_unlock();
                        ret = -EAGAIN;
                        goto free_power_table;
                }

                freq_mhz = freq / 1000000;
                voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;

                /*
                 * Do the multiplication with MHz and millivolt so as
                 * to not overflow.
                 */
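                /*
                 * Illustrative numbers (not from any particular platform):
                 * with capacitance = 100, freq_mhz = 1000 and
                 * voltage_mv = 1100, the line below computes
                 * 100 * 1000 * 1100 * 1100 / 10^9 = 121 mW.
                 */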
                power = (u64)capacitance * freq_mhz * voltage_mv * voltage_mv;
                do_div(power, 1000000000);

                /* frequency is stored in power_table in KHz */
                power_table[i].frequency = freq / 1000;

                /* power is stored in mW */
                power_table[i].power = power;
        }

        rcu_read_unlock();

        if (i != num_opps) {
                ret = PTR_ERR(opp);
                goto free_power_table;
        }

        cpufreq_device->cpu_dev = dev;
        cpufreq_device->dyn_power_table = power_table;
        cpufreq_device->dyn_power_table_entries = i;

        return 0;

free_power_table:
        kfree(power_table);

        return ret;
}

static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_device,
                             u32 freq)
{
        int i;
        struct power_table *pt = cpufreq_device->dyn_power_table;

        for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++)
                if (freq < pt[i].frequency)
                        break;

        return pt[i - 1].power;
}

static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device,
                             u32 power)
{
        int i;
        struct power_table *pt = cpufreq_device->dyn_power_table;

        for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++)
                if (power < pt[i].power)
                        break;

        return pt[i - 1].frequency;
}

/**
 * get_load() - get load for a cpu since last updated
 * @cpufreq_device: &struct cpufreq_cooling_device for this cpu
 * @cpu: cpu number
 * @cpu_idx: index of the cpu in cpufreq_device->allowed_cpus
 *
 * Return: The average load of cpu @cpu in percentage since this
 * function was last called.
 */
static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu,
                    int cpu_idx)
{
        u32 load;
        u64 now, now_idle, delta_time, delta_idle;

        now_idle = get_cpu_idle_time(cpu, &now, 0);
        delta_idle = now_idle - cpufreq_device->time_in_idle[cpu_idx];
        delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu_idx];

        if (delta_time <= delta_idle)
                load = 0;
        else
                load = div64_u64(100 * (delta_time - delta_idle), delta_time);

        cpufreq_device->time_in_idle[cpu_idx] = now_idle;
        cpufreq_device->time_in_idle_timestamp[cpu_idx] = now;

        return load;
}
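
/*
 * Illustrative example for get_load() (hypothetical numbers): if 80 ms of
 * wall time elapsed since the previous call and the cpu spent 20 ms of that
 * time idle, the function returns 100 * (80 - 20) / 80 = 75 (percent load).
 */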

/**
 * get_static_power() - calculate the static power consumed by the cpus
 * @cpufreq_device: &struct cpufreq_cooling_device for this cpu cdev
 * @tz: thermal zone device in which we're operating
 * @freq: frequency in KHz
 * @power: pointer in which to store the calculated static power
 *
 * Calculate the static power consumed by the cpus described by
 * @cpufreq_device running at frequency @freq. This function relies on a
 * platform specific function that should have been provided when the
 * actor was registered. If it wasn't, the static power is assumed to
 * be negligible. The calculated static power is stored in @power.
 *
 * Return: 0 on success, -E* on failure.
 */
static int get_static_power(struct cpufreq_cooling_device *cpufreq_device,
                            struct thermal_zone_device *tz, unsigned long freq,
                            u32 *power)
{
        struct dev_pm_opp *opp;
        unsigned long voltage;
        struct cpumask *cpumask = &cpufreq_device->allowed_cpus;
        unsigned long freq_hz = freq * 1000;

        if (!cpufreq_device->plat_get_static_power ||
            !cpufreq_device->cpu_dev) {
                *power = 0;
                return 0;
        }

        rcu_read_lock();

        opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz,
                                         true);
        voltage = dev_pm_opp_get_voltage(opp);

        rcu_read_unlock();

        if (voltage == 0) {
                dev_warn_ratelimited(cpufreq_device->cpu_dev,
                                     "Failed to get voltage for frequency %lu: %ld\n",
                                     freq_hz, IS_ERR(opp) ? PTR_ERR(opp) : 0);
                return -EINVAL;
        }

        return cpufreq_device->plat_get_static_power(cpumask, tz->passive_delay,
                                                     voltage, power);
}
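
/*
 * A minimal sketch of what a platform static power callback might look like,
 * inferred from the call site above; the name, leakage model and constants
 * are hypothetical and would normally come from platform characterisation:
 *
 *      static int foo_get_static_power(cpumask_t *cpumask, int interval,
 *                                      unsigned long u_volt, u32 *power)
 *      {
 *              // e.g. leakage roughly proportional to voltage and to the
 *              // number of cpus in the cluster
 *              *power = cpumask_weight(cpumask) * (u_volt / 100000);
 *              return 0;
 *      }
 *
 * Such a callback is passed to cpufreq_power_cooling_register() or
 * of_cpufreq_power_cooling_register() as @plat_static_func.
 */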

/**
 * get_dynamic_power() - calculate the dynamic power
 * @cpufreq_device: &cpufreq_cooling_device for this cdev
 * @freq: current frequency
 *
 * Return: the dynamic power consumed by the cpus described by
 * @cpufreq_device.
 */
static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_device,
                             unsigned long freq)
{
        u32 raw_cpu_power;

        raw_cpu_power = cpu_freq_to_power(cpufreq_device, freq);
        return (raw_cpu_power * cpufreq_device->last_load) / 100;
}

/* cpufreq cooling device callback functions are defined below */

/**
 * cpufreq_get_max_state - callback function to get the max cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the max cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * max cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
                                 unsigned long *state)
{
        struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;

        *state = cpufreq_device->max_level;
        return 0;
}

/**
 * cpufreq_get_cur_state - callback function to get the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the current cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
                                 unsigned long *state)
{
        struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;

        *state = cpufreq_device->cpufreq_state;

        return 0;
}

/**
 * cpufreq_set_cur_state - callback function to set the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: set this variable to the current cooling state.
 *
 * Callback for the thermal cooling device to change the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
                                 unsigned long state)
{
        struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
        unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
        unsigned int clip_freq;

        /* The requested state must not exceed max_level */
        if (WARN_ON(state > cpufreq_device->max_level))
                return -EINVAL;

        /* Check if the old cooling action is same as new cooling action */
        if (cpufreq_device->cpufreq_state == state)
                return 0;

        clip_freq = cpufreq_device->freq_table[state];
        cpufreq_device->cpufreq_state = state;
        cpufreq_device->clipped_freq = clip_freq;

        cpufreq_update_policy(cpu);

        return 0;
}

/**
 * cpufreq_get_requested_power() - get the current power
 * @cdev: &thermal_cooling_device pointer
 * @tz: a valid thermal zone device pointer
 * @power: pointer in which to store the resulting power
 *
 * Calculate the current power consumption of the cpus in milliwatts
 * and store it in @power. This function should actually calculate
 * the requested power, but it's hard to get the frequency that
 * cpufreq would have assigned if there were no thermal limits.
 * Instead, we calculate the current power on the assumption that the
 * immediate future will look like the immediate past.
 *
 * We use the current frequency and the average load since this
 * function was last called. In reality, there could have been
 * multiple opps since this function was last called and that affects
 * the load calculation. While it's not perfectly accurate, this
 * simplification is good enough and works. REVISIT this, as more
 * complex code may be needed if experiments show that it's not
 * accurate enough.
 *
 * Return: 0 on success, -E* if getting the static power failed.
 */
static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
                                       struct thermal_zone_device *tz,
                                       u32 *power)
{
        unsigned long freq;
        int i = 0, cpu, ret;
        u32 static_power, dynamic_power, total_load = 0;
        struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
        u32 *load_cpu = NULL;

        cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask);

        /*
         * All the CPUs are offline, thus the requested power by
         * the cdev is 0
         */
        if (cpu >= nr_cpu_ids) {
                *power = 0;
                return 0;
        }

        freq = cpufreq_quick_get(cpu);

        if (trace_thermal_power_cpu_get_power_enabled()) {
                u32 ncpus = cpumask_weight(&cpufreq_device->allowed_cpus);

                load_cpu = kcalloc(ncpus, sizeof(*load_cpu), GFP_KERNEL);
        }

        for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
                u32 load;

                if (cpu_online(cpu))
                        load = get_load(cpufreq_device, cpu, i);
                else
                        load = 0;

                total_load += load;
                if (trace_thermal_power_cpu_limit_enabled() && load_cpu)
                        load_cpu[i] = load;

                i++;
        }

        cpufreq_device->last_load = total_load;

        dynamic_power = get_dynamic_power(cpufreq_device, freq);
        ret = get_static_power(cpufreq_device, tz, freq, &static_power);
        if (ret) {
                kfree(load_cpu);
                return ret;
        }

        if (load_cpu) {
                trace_thermal_power_cpu_get_power(
                        &cpufreq_device->allowed_cpus,
                        freq, load_cpu, i, dynamic_power, static_power);

                kfree(load_cpu);
        }

        *power = static_power + dynamic_power;
        return 0;
}

/**
 * cpufreq_state2power() - convert a cpu cdev state to power consumed
 * @cdev: &thermal_cooling_device pointer
 * @tz: a valid thermal zone device pointer
 * @state: cooling device state to be converted
 * @power: pointer in which to store the resulting power
 *
 * Convert cooling device state @state into power consumption in
 * milliwatts assuming 100% load. Store the calculated power in
 * @power.
 *
 * Return: 0 on success, -EINVAL if the cooling device state could not
 * be converted into a frequency or other -E* if there was an error
 * when calculating the static power.
 */
static int cpufreq_state2power(struct thermal_cooling_device *cdev,
                               struct thermal_zone_device *tz,
                               unsigned long state, u32 *power)
{
        unsigned int freq, num_cpus;
        cpumask_t cpumask;
        u32 static_power, dynamic_power;
        int ret;
        struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;

        cpumask_and(&cpumask, &cpufreq_device->allowed_cpus, cpu_online_mask);
        num_cpus = cpumask_weight(&cpumask);

        /* None of our cpus are online, so no power */
        if (num_cpus == 0) {
                *power = 0;
                return 0;
        }

        freq = cpufreq_device->freq_table[state];
        if (!freq)
                return -EINVAL;

        dynamic_power = cpu_freq_to_power(cpufreq_device, freq) * num_cpus;
        ret = get_static_power(cpufreq_device, tz, freq, &static_power);
        if (ret)
                return ret;

        *power = static_power + dynamic_power;
        return 0;
}

/**
 * cpufreq_power2state() - convert power to a cooling device state
 * @cdev: &thermal_cooling_device pointer
 * @tz: a valid thermal zone device pointer
 * @power: power in milliwatts to be converted
 * @state: pointer in which to store the resulting state
 *
 * Calculate a cooling device state for the cpus described by @cdev
 * that would allow them to consume at most @power mW and store it in
 * @state. Note that this calculation depends on external factors
 * such as the cpu load or the current static power. Calling this
 * function with the same power as input can yield different cooling
 * device states depending on those external factors.
 *
 * Return: 0 on success, -ENODEV if no cpus are online or -EINVAL if
 * the calculated frequency could not be converted to a valid state.
 * The latter should not happen unless the frequencies available to
 * cpufreq have changed since the initialization of the cpu cooling
 * device.
 */
static int cpufreq_power2state(struct thermal_cooling_device *cdev,
                               struct thermal_zone_device *tz, u32 power,
                               unsigned long *state)
{
        unsigned int cpu, cur_freq, target_freq;
        int ret;
        s32 dyn_power;
        u32 last_load, normalised_power, static_power;
        struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;

        cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask);

        /* None of our cpus are online */
        if (cpu >= nr_cpu_ids)
                return -ENODEV;

        cur_freq = cpufreq_quick_get(cpu);
        ret = get_static_power(cpufreq_device, tz, cur_freq, &static_power);
        if (ret)
                return ret;

        dyn_power = power - static_power;
        dyn_power = dyn_power > 0 ? dyn_power : 0;
        last_load = cpufreq_device->last_load ?: 1;
        normalised_power = (dyn_power * 100) / last_load;
        target_freq = cpu_power_to_freq(cpufreq_device, normalised_power);

        *state = cpufreq_cooling_get_level(cpu, target_freq);
        if (*state == THERMAL_CSTATE_INVALID) {
                dev_warn_ratelimited(&cdev->device,
                                     "Failed to convert %dKHz for cpu %d into a cdev state\n",
                                     target_freq, cpu);
                return -EINVAL;
        }

        trace_thermal_power_cpu_limit(&cpufreq_device->allowed_cpus,
                                      target_freq, *state, power);
        return 0;
}
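
/*
 * Illustrative walk-through of cpufreq_power2state() (hypothetical numbers):
 * with a power budget of 1000 mW, a static power of 100 mW and a last
 * measured load of 50, the dynamic budget is 900 mW and the normalised power
 * is 900 * 100 / 50 = 1800 mW, which is then mapped back to a frequency via
 * the dynamic power table and finally to a cooling state.
 */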

/* Bind cpufreq callbacks to thermal cooling device ops */

static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
        .get_max_state = cpufreq_get_max_state,
        .get_cur_state = cpufreq_get_cur_state,
        .set_cur_state = cpufreq_set_cur_state,
};

static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
        .get_max_state = cpufreq_get_max_state,
        .get_cur_state = cpufreq_get_cur_state,
        .set_cur_state = cpufreq_set_cur_state,
        .get_requested_power = cpufreq_get_requested_power,
        .state2power = cpufreq_state2power,
        .power2state = cpufreq_power2state,
};

/* Notifier for cpufreq policy change */
static struct notifier_block thermal_cpufreq_notifier_block = {
        .notifier_call = cpufreq_thermal_notifier,
};

static unsigned int find_next_max(struct cpufreq_frequency_table *table,
                                  unsigned int prev_max)
{
        struct cpufreq_frequency_table *pos;
        unsigned int max = 0;

        cpufreq_for_each_valid_entry(pos, table) {
                if (pos->frequency > max && pos->frequency < prev_max)
                        max = pos->frequency;
        }

        return max;
}
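
/*
 * Illustrative example for find_next_max() (hypothetical table): for valid
 * entries of 500000, 1000000 and 1500000 KHz, successive calls with
 * prev_max = UINT_MAX, 1500000 and 1000000 return 1500000, 1000000 and
 * 500000 respectively, which is how the registration code below fills
 * freq_table in descending order.
 */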

/**
 * __cpufreq_cooling_register - helper function to create cpufreq cooling device
 * @np: a valid struct device_node to the cooling device device tree node
 * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
 *             Normally this should be the same as cpufreq policy->related_cpus.
 * @capacitance: dynamic power coefficient for these cpus
 * @plat_static_func: function to calculate the static power consumed by these
 *                    cpus (optional)
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This API can support multiple instances of cpufreq
 * cooling devices. It also gives the opportunity to link the cooling device
 * with a device tree node, in order to bind it via the thermal DT code.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
                           const struct cpumask *clip_cpus, u32 capacitance,
                           get_static_t plat_static_func)
{
        struct cpufreq_policy *policy;
        struct thermal_cooling_device *cool_dev;
        struct cpufreq_cooling_device *cpufreq_dev;
        char dev_name[THERMAL_NAME_LENGTH];
        struct cpufreq_frequency_table *pos, *table;
        struct cpumask temp_mask;
        unsigned int freq, i, num_cpus;
        int ret;
        struct thermal_cooling_device_ops *cooling_ops;

        cpumask_and(&temp_mask, clip_cpus, cpu_online_mask);
        policy = cpufreq_cpu_get(cpumask_first(&temp_mask));
        if (!policy) {
                pr_debug("%s: CPUFreq policy not found\n", __func__);
                return ERR_PTR(-EPROBE_DEFER);
        }

        table = policy->freq_table;
        if (!table) {
                pr_debug("%s: CPUFreq table not found\n", __func__);
                cool_dev = ERR_PTR(-ENODEV);
                goto put_policy;
        }

        cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
        if (!cpufreq_dev) {
                cool_dev = ERR_PTR(-ENOMEM);
                goto put_policy;
        }

        num_cpus = cpumask_weight(clip_cpus);
        cpufreq_dev->time_in_idle = kcalloc(num_cpus,
                                            sizeof(*cpufreq_dev->time_in_idle),
                                            GFP_KERNEL);
        if (!cpufreq_dev->time_in_idle) {
                cool_dev = ERR_PTR(-ENOMEM);
                goto free_cdev;
        }

        cpufreq_dev->time_in_idle_timestamp =
                kcalloc(num_cpus, sizeof(*cpufreq_dev->time_in_idle_timestamp),
                        GFP_KERNEL);
        if (!cpufreq_dev->time_in_idle_timestamp) {
                cool_dev = ERR_PTR(-ENOMEM);
                goto free_time_in_idle;
        }

        /* Find max levels */
        cpufreq_for_each_valid_entry(pos, table)
                cpufreq_dev->max_level++;

        cpufreq_dev->freq_table = kmalloc(sizeof(*cpufreq_dev->freq_table) *
                                          cpufreq_dev->max_level, GFP_KERNEL);
        if (!cpufreq_dev->freq_table) {
                cool_dev = ERR_PTR(-ENOMEM);
                goto free_time_in_idle_timestamp;
        }

        /* max_level is an index, not a counter */
        cpufreq_dev->max_level--;

        cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);

        if (capacitance) {
                cpufreq_dev->plat_get_static_power = plat_static_func;

                ret = build_dyn_power_table(cpufreq_dev, capacitance);
                if (ret) {
                        cool_dev = ERR_PTR(ret);
                        goto free_table;
                }

                cooling_ops = &cpufreq_power_cooling_ops;
        } else {
                cooling_ops = &cpufreq_cooling_ops;
        }

        ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
        if (ret) {
                cool_dev = ERR_PTR(ret);
                goto free_power_table;
        }

        /* Fill freq-table in descending order of frequencies */
        for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
                freq = find_next_max(table, freq);
                cpufreq_dev->freq_table[i] = freq;

                /* Warn for duplicate entries */
                if (!freq)
                        pr_warn("%s: table has duplicate entries\n", __func__);
                else
                        pr_debug("%s: freq:%u KHz\n", __func__, freq);
        }

        snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
                 cpufreq_dev->id);

        cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
                                                      cooling_ops);
        if (IS_ERR(cool_dev))
                goto remove_idr;

        cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
        cpufreq_dev->cool_dev = cool_dev;

        mutex_lock(&cooling_cpufreq_lock);

        mutex_lock(&cooling_list_lock);
        list_add(&cpufreq_dev->node, &cpufreq_dev_list);
        mutex_unlock(&cooling_list_lock);

        /* Register the notifier for first cpufreq cooling device */
        if (!cpufreq_dev_count++)
                cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
                                          CPUFREQ_POLICY_NOTIFIER);
        mutex_unlock(&cooling_cpufreq_lock);

        goto put_policy;

remove_idr:
        release_idr(&cpufreq_idr, cpufreq_dev->id);
free_power_table:
        kfree(cpufreq_dev->dyn_power_table);
free_table:
        kfree(cpufreq_dev->freq_table);
free_time_in_idle_timestamp:
        kfree(cpufreq_dev->time_in_idle_timestamp);
free_time_in_idle:
        kfree(cpufreq_dev->time_in_idle);
free_cdev:
        kfree(cpufreq_dev);
put_policy:
        cpufreq_cpu_put(policy);

        return cool_dev;
}

/**
 * cpufreq_cooling_register - function to create cpufreq cooling device.
 * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This API can support multiple instances of cpufreq
 * cooling devices.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
cpufreq_cooling_register(const struct cpumask *clip_cpus)
{
        return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
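
/*
 * A minimal usage sketch (illustrative only; error handling and the source
 * of the cpumask are platform specific):
 *
 *      struct thermal_cooling_device *cdev;
 *
 *      cdev = cpufreq_cooling_register(policy->related_cpus);
 *      if (IS_ERR(cdev))
 *              return PTR_ERR(cdev);
 *
 * The device is later removed with cpufreq_cooling_unregister(cdev).
 */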

/**
 * of_cpufreq_cooling_register - function to create cpufreq cooling device.
 * @np: a valid struct device_node to the cooling device device tree node
 * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This API can support multiple instances of cpufreq
 * cooling devices. Using this API, the cpufreq cooling device will be
 * linked to the device tree node provided.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
                            const struct cpumask *clip_cpus)
{
        if (!np)
                return ERR_PTR(-EINVAL);

        return __cpufreq_cooling_register(np, clip_cpus, 0, NULL);
}
EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);

/**
 * cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
 * @clip_cpus: cpumask of cpus where the frequency constraints will happen
 * @capacitance: dynamic power coefficient for these cpus
 * @plat_static_func: function to calculate the static power consumed by these
 *                    cpus (optional)
 *
 * This interface function registers the cpufreq cooling device with
 * the name "thermal-cpufreq-%x". This API can support multiple
 * instances of cpufreq cooling devices. Using this function, the
 * cooling device will implement the power extensions by using a
 * simple cpu power model. The cpus must have registered their OPPs
 * using the OPP library.
 *
 * An optional @plat_static_func may be provided to calculate the
 * static power consumed by these cpus. If the platform's static
 * power consumption is unknown or negligible, make it NULL.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
cpufreq_power_cooling_register(const struct cpumask *clip_cpus, u32 capacitance,
                               get_static_t plat_static_func)
{
        return __cpufreq_cooling_register(NULL, clip_cpus, capacitance,
                                          plat_static_func);
}
EXPORT_SYMBOL(cpufreq_power_cooling_register);
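
/*
 * Illustrative call (the capacitance value and the callback name are
 * hypothetical; in practice the coefficient comes from platform
 * characterisation data):
 *
 *      cdev = cpufreq_power_cooling_register(policy->related_cpus, 100,
 *                                            foo_get_static_power);
 *
 * Passing NULL instead of foo_get_static_power makes the driver treat the
 * static power as negligible.
 */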

/**
 * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
 * @np: a valid struct device_node to the cooling device device tree node
 * @clip_cpus: cpumask of cpus where the frequency constraints will happen
 * @capacitance: dynamic power coefficient for these cpus
 * @plat_static_func: function to calculate the static power consumed by these
 *                    cpus (optional)
 *
 * This interface function registers the cpufreq cooling device with
 * the name "thermal-cpufreq-%x". This API can support multiple
 * instances of cpufreq cooling devices. Using this API, the cpufreq
 * cooling device will be linked to the device tree node provided.
 * Using this function, the cooling device will implement the power
 * extensions by using a simple cpu power model. The cpus must have
 * registered their OPPs using the OPP library.
 *
 * An optional @plat_static_func may be provided to calculate the
 * static power consumed by these cpus. If the platform's static
 * power consumption is unknown or negligible, make it NULL.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
of_cpufreq_power_cooling_register(struct device_node *np,
                                  const struct cpumask *clip_cpus,
                                  u32 capacitance,
                                  get_static_t plat_static_func)
{
        if (!np)
                return ERR_PTR(-EINVAL);

        return __cpufreq_cooling_register(np, clip_cpus, capacitance,
                                          plat_static_func);
}
EXPORT_SYMBOL(of_cpufreq_power_cooling_register);

/**
 * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
 * @cdev: thermal cooling device pointer.
 *
 * This interface function unregisters the "thermal-cpufreq-%x" cooling device.
 */
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
        struct cpufreq_cooling_device *cpufreq_dev;

        if (!cdev)
                return;

        cpufreq_dev = cdev->devdata;

        /* Unregister the notifier for the last cpufreq cooling device */
        mutex_lock(&cooling_cpufreq_lock);
        if (!--cpufreq_dev_count)
                cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
                                            CPUFREQ_POLICY_NOTIFIER);

        mutex_lock(&cooling_list_lock);
        list_del(&cpufreq_dev->node);
        mutex_unlock(&cooling_list_lock);

        mutex_unlock(&cooling_cpufreq_lock);

        thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
        release_idr(&cpufreq_idr, cpufreq_dev->id);
        kfree(cpufreq_dev->dyn_power_table);
        kfree(cpufreq_dev->time_in_idle_timestamp);
        kfree(cpufreq_dev->time_in_idle);
        kfree(cpufreq_dev->freq_table);
        kfree(cpufreq_dev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);