source: src/linux/universal/linux-4.9/drivers/cpufreq/cpufreq.c @ 31885

Last change on this file since 31885 was 31885, checked in by brainslayer, 5 weeks ago

update

File size: 65.9 KB
Line 
1/*
2 *  linux/drivers/cpufreq/cpufreq.c
3 *
4 *  Copyright (C) 2001 Russell King
5 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
7 *
8 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 *      Added handling for CPU hotplug
10 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 *      Fix handling for CPU hotplug -- affected CPUs
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/cpu.h>
21#include <linux/cpufreq.h>
22#include <linux/delay.h>
23#include <linux/device.h>
24#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
27#include <linux/mutex.h>
28#include <linux/slab.h>
29#include <linux/suspend.h>
30#include <linux/syscore_ops.h>
31#include <linux/tick.h>
32#include <trace/events/power.h>
33
34static LIST_HEAD(cpufreq_policy_list);
35
36static inline bool policy_is_inactive(struct cpufreq_policy *policy)
37{
38        return cpumask_empty(policy->cpus);
39}
40
41/* Macros to iterate over CPU policies */
42#define for_each_suitable_policy(__policy, __active)                     \
43        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
44                if ((__active) == !policy_is_inactive(__policy))
45
46#define for_each_active_policy(__policy)                \
47        for_each_suitable_policy(__policy, true)
48#define for_each_inactive_policy(__policy)              \
49        for_each_suitable_policy(__policy, false)
50
51#define for_each_policy(__policy)                       \
52        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
53
54/* Iterate over governors */
55static LIST_HEAD(cpufreq_governor_list);
56#define for_each_governor(__governor)                           \
57        list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
58
59/**
60 * The "cpufreq driver" - the arch- or hardware-dependent low
61 * level driver of CPUFreq support, and its spinlock. This lock
62 * also protects the cpufreq_cpu_data array.
63 */
64static struct cpufreq_driver *cpufreq_driver;
65static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
66static DEFINE_RWLOCK(cpufreq_driver_lock);
67
68/* Flag to suspend/resume CPUFreq governors */
69static bool cpufreq_suspended;
70
71static inline bool has_target(void)
72{
73        return cpufreq_driver->target_index || cpufreq_driver->target;
74}
75
76/* internal prototypes */
77static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
78static int cpufreq_init_governor(struct cpufreq_policy *policy);
79static void cpufreq_exit_governor(struct cpufreq_policy *policy);
80static int cpufreq_start_governor(struct cpufreq_policy *policy);
81static void cpufreq_stop_governor(struct cpufreq_policy *policy);
82static void cpufreq_governor_limits(struct cpufreq_policy *policy);
83
84/**
85 * Two notifier lists: the "policy" list is involved in the
86 * validation process for a new CPU frequency policy; the
87 * "transition" list for kernel code that needs to handle
88 * changes to devices when the CPU clock speed changes.
89 * The mutex locks both lists.
90 */
91static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
92static struct srcu_notifier_head cpufreq_transition_notifier_list;
93
94static bool init_cpufreq_transition_notifier_list_called;
95static int __init init_cpufreq_transition_notifier_list(void)
96{
97        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
98        init_cpufreq_transition_notifier_list_called = true;
99        return 0;
100}
101pure_initcall(init_cpufreq_transition_notifier_list);
102
103static int off __read_mostly;
104static int cpufreq_disabled(void)
105{
106        return off;
107}
108void disable_cpufreq(void)
109{
110        off = 1;
111}
112static DEFINE_MUTEX(cpufreq_governor_mutex);
113
114bool have_governor_per_policy(void)
115{
116        return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
117}
118EXPORT_SYMBOL_GPL(have_governor_per_policy);
119
120struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
121{
122        if (have_governor_per_policy())
123                return &policy->kobj;
124        else
125                return cpufreq_global_kobject;
126}
127EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
128
129static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
130{
131        u64 idle_time;
132        u64 cur_wall_time;
133        u64 busy_time;
134
135        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
136
137        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
138        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
139        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
140        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
141        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
142        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
143
144        idle_time = cur_wall_time - busy_time;
145        if (wall)
146                *wall = cputime_to_usecs(cur_wall_time);
147
148        return cputime_to_usecs(idle_time);
149}
150
151u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
152{
153        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
154
155        if (idle_time == -1ULL)
156                return get_cpu_idle_time_jiffy(cpu, wall);
157        else if (!io_busy)
158                idle_time += get_cpu_iowait_time_us(cpu, wall);
159
160        return idle_time;
161}
162EXPORT_SYMBOL_GPL(get_cpu_idle_time);
163
164/*
165 * This is a generic cpufreq init() routine which can be used by cpufreq
166 * drivers of SMP systems. It will do following:
167 * - validate & show freq table passed
168 * - set policies transition latency
169 * - policy->cpus with all possible CPUs
170 */
171int cpufreq_generic_init(struct cpufreq_policy *policy,
172                struct cpufreq_frequency_table *table,
173                unsigned int transition_latency)
174{
175        int ret;
176
177        ret = cpufreq_table_validate_and_show(policy, table);
178        if (ret) {
179                pr_err("%s: invalid frequency table: %d\n", __func__, ret);
180                return ret;
181        }
182
183        policy->cpuinfo.transition_latency = transition_latency;
184
185        /*
186         * The driver only supports the SMP configuration where all processors
187         * share the clock and voltage and clock.
188         */
189        cpumask_setall(policy->cpus);
190
191        return 0;
192}
193EXPORT_SYMBOL_GPL(cpufreq_generic_init);
194
195struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
196{
197        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
198
199        return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
200}
201EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
202
203unsigned int cpufreq_generic_get(unsigned int cpu)
204{
205        struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
206
207        if (!policy || IS_ERR(policy->clk)) {
208                pr_err("%s: No %s associated to cpu: %d\n",
209                       __func__, policy ? "clk" : "policy", cpu);
210                return 0;
211        }
212
213        return clk_get_rate(policy->clk) / 1000;
214}
215EXPORT_SYMBOL_GPL(cpufreq_generic_get);
216
217/**
218 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
219 *
220 * @cpu: cpu to find policy for.
221 *
222 * This returns policy for 'cpu', returns NULL if it doesn't exist.
223 * It also increments the kobject reference count to mark it busy and so would
224 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
225 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
226 * freed as that depends on the kobj count.
227 *
228 * Return: A valid policy on success, otherwise NULL on failure.
229 */
230struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
231{
232        struct cpufreq_policy *policy = NULL;
233        unsigned long flags;
234
235        if (WARN_ON(cpu >= nr_cpu_ids))
236                return NULL;
237
238        /* get the cpufreq driver */
239        read_lock_irqsave(&cpufreq_driver_lock, flags);
240
241        if (cpufreq_driver) {
242                /* get the CPU */
243                policy = cpufreq_cpu_get_raw(cpu);
244                if (policy)
245                        kobject_get(&policy->kobj);
246        }
247
248        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
249
250        return policy;
251}
252EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
253
254/**
255 * cpufreq_cpu_put: Decrements the usage count of a policy
256 *
257 * @policy: policy earlier returned by cpufreq_cpu_get().
258 *
259 * This decrements the kobject reference count incremented earlier by calling
260 * cpufreq_cpu_get().
261 */
262void cpufreq_cpu_put(struct cpufreq_policy *policy)
263{
264        kobject_put(&policy->kobj);
265}
266EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
267
268/*********************************************************************
269 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
270 *********************************************************************/
271
272/**
273 * adjust_jiffies - adjust the system "loops_per_jiffy"
274 *
275 * This function alters the system "loops_per_jiffy" for the clock
276 * speed change. Note that loops_per_jiffy cannot be updated on SMP
277 * systems as each CPU might be scaled differently. So, use the arch
278 * per-CPU loops_per_jiffy value wherever possible.
279 */
280static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
281{
282#ifndef CONFIG_SMP
283        static unsigned long l_p_j_ref;
284        static unsigned int l_p_j_ref_freq;
285
286        if (ci->flags & CPUFREQ_CONST_LOOPS)
287                return;
288
289        if (!l_p_j_ref_freq) {
290                l_p_j_ref = loops_per_jiffy;
291                l_p_j_ref_freq = ci->old;
292                pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
293                         l_p_j_ref, l_p_j_ref_freq);
294        }
295        if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
296                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
297                                                                ci->new);
298                pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
299                         loops_per_jiffy, ci->new);
300        }
301#endif
302}
303
304static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
305                struct cpufreq_freqs *freqs, unsigned int state)
306{
307        BUG_ON(irqs_disabled());
308
309        if (cpufreq_disabled())
310                return;
311
312        freqs->flags = cpufreq_driver->flags;
313        pr_debug("notification %u of frequency transition to %u kHz\n",
314                 state, freqs->new);
315
316        switch (state) {
317
318        case CPUFREQ_PRECHANGE:
319                /* detect if the driver reported a value as "old frequency"
320                 * which is not equal to what the cpufreq core thinks is
321                 * "old frequency".
322                 */
323                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
324                        if ((policy) && (policy->cpu == freqs->cpu) &&
325                            (policy->cur) && (policy->cur != freqs->old)) {
326                                pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
327                                         freqs->old, policy->cur);
328                                freqs->old = policy->cur;
329                        }
330                }
331                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
332                                CPUFREQ_PRECHANGE, freqs);
333                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
334                break;
335
336        case CPUFREQ_POSTCHANGE:
337                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
338                pr_debug("FREQ: %lu - CPU: %lu\n",
339                         (unsigned long)freqs->new, (unsigned long)freqs->cpu);
340                trace_cpu_frequency(freqs->new, freqs->cpu);
341                cpufreq_stats_record_transition(policy, freqs->new);
342                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
343                                CPUFREQ_POSTCHANGE, freqs);
344                if (likely(policy) && likely(policy->cpu == freqs->cpu))
345                        policy->cur = freqs->new;
346                break;
347        }
348}
349
350/**
351 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
352 * on frequency transition.
353 *
354 * This function calls the transition notifiers and the "adjust_jiffies"
355 * function. It is called twice on all CPU frequency changes that have
356 * external effects.
357 */
358static void cpufreq_notify_transition(struct cpufreq_policy *policy,
359                struct cpufreq_freqs *freqs, unsigned int state)
360{
361        for_each_cpu(freqs->cpu, policy->cpus)
362                __cpufreq_notify_transition(policy, freqs, state);
363}
364
365/* Do post notifications when there are chances that transition has failed */
366static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
367                struct cpufreq_freqs *freqs, int transition_failed)
368{
369        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
370        if (!transition_failed)
371                return;
372
373        swap(freqs->old, freqs->new);
374        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
375        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
376}
377
378void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
379                struct cpufreq_freqs *freqs)
380{
381
382        /*
383         * Catch double invocations of _begin() which lead to self-deadlock.
384         * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
385         * doesn't invoke _begin() on their behalf, and hence the chances of
386         * double invocations are very low. Moreover, there are scenarios
387         * where these checks can emit false-positive warnings in these
388         * drivers; so we avoid that by skipping them altogether.
389         */
390        WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
391                                && current == policy->transition_task);
392
393wait:
394        wait_event(policy->transition_wait, !policy->transition_ongoing);
395
396        spin_lock(&policy->transition_lock);
397
398        if (unlikely(policy->transition_ongoing)) {
399                spin_unlock(&policy->transition_lock);
400                goto wait;
401        }
402
403        policy->transition_ongoing = true;
404        policy->transition_task = current;
405
406        spin_unlock(&policy->transition_lock);
407
408        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
409}
410EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
411
412void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
413                struct cpufreq_freqs *freqs, int transition_failed)
414{
415        if (unlikely(WARN_ON(!policy->transition_ongoing)))
416                return;
417
418        cpufreq_notify_post_transition(policy, freqs, transition_failed);
419
420        policy->transition_ongoing = false;
421        policy->transition_task = NULL;
422
423        wake_up(&policy->transition_wait);
424}
425EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
426
427/*
428 * Fast frequency switching status count.  Positive means "enabled", negative
429 * means "disabled" and 0 means "not decided yet".
430 */
431static int cpufreq_fast_switch_count;
432static DEFINE_MUTEX(cpufreq_fast_switch_lock);
433
434static void cpufreq_list_transition_notifiers(void)
435{
436        struct notifier_block *nb;
437
438        pr_info("Registered transition notifiers:\n");
439
440        mutex_lock(&cpufreq_transition_notifier_list.mutex);
441
442        for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
443                pr_info("%pF\n", nb->notifier_call);
444
445        mutex_unlock(&cpufreq_transition_notifier_list.mutex);
446}
447
448/**
449 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
450 * @policy: cpufreq policy to enable fast frequency switching for.
451 *
452 * Try to enable fast frequency switching for @policy.
453 *
454 * The attempt will fail if there is at least one transition notifier registered
455 * at this point, as fast frequency switching is quite fundamentally at odds
456 * with transition notifiers.  Thus if successful, it will make registration of
457 * transition notifiers fail going forward.
458 */
459void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
460{
461        lockdep_assert_held(&policy->rwsem);
462
463        if (!policy->fast_switch_possible)
464                return;
465
466        mutex_lock(&cpufreq_fast_switch_lock);
467        if (cpufreq_fast_switch_count >= 0) {
468                cpufreq_fast_switch_count++;
469                policy->fast_switch_enabled = true;
470        } else {
471                pr_warn("CPU%u: Fast frequency switching not enabled\n",
472                        policy->cpu);
473                cpufreq_list_transition_notifiers();
474        }
475        mutex_unlock(&cpufreq_fast_switch_lock);
476}
477EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
478
479/**
480 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
481 * @policy: cpufreq policy to disable fast frequency switching for.
482 */
483void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
484{
485        mutex_lock(&cpufreq_fast_switch_lock);
486        if (policy->fast_switch_enabled) {
487                policy->fast_switch_enabled = false;
488                if (!WARN_ON(cpufreq_fast_switch_count <= 0))
489                        cpufreq_fast_switch_count--;
490        }
491        mutex_unlock(&cpufreq_fast_switch_lock);
492}
493EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
494
495/**
496 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
497 * one.
498 * @target_freq: target frequency to resolve.
499 *
500 * The target to driver frequency mapping is cached in the policy.
501 *
502 * Return: Lowest driver-supported frequency greater than or equal to the
503 * given target_freq, subject to policy (min/max) and driver limitations.
504 */
505unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
506                                         unsigned int target_freq)
507{
508        target_freq = clamp_val(target_freq, policy->min, policy->max);
509        policy->cached_target_freq = target_freq;
510
511        if (cpufreq_driver->target_index) {
512                int idx;
513
514                idx = cpufreq_frequency_table_target(policy, target_freq,
515                                                     CPUFREQ_RELATION_L);
516                policy->cached_resolved_idx = idx;
517                return policy->freq_table[idx].frequency;
518        }
519
520        if (cpufreq_driver->resolve_freq)
521                return cpufreq_driver->resolve_freq(policy, target_freq);
522
523        return target_freq;
524}
525EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
526
527/*********************************************************************
528 *                          SYSFS INTERFACE                          *
529 *********************************************************************/
530static ssize_t show_boost(struct kobject *kobj,
531                                 struct attribute *attr, char *buf)
532{
533        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
534}
535
536static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
537                                  const char *buf, size_t count)
538{
539        int ret, enable;
540
541        ret = sscanf(buf, "%d", &enable);
542        if (ret != 1 || enable < 0 || enable > 1)
543                return -EINVAL;
544
545        if (cpufreq_boost_trigger_state(enable)) {
546                pr_err("%s: Cannot %s BOOST!\n",
547                       __func__, enable ? "enable" : "disable");
548                return -EINVAL;
549        }
550
551        pr_debug("%s: cpufreq BOOST %s\n",
552                 __func__, enable ? "enabled" : "disabled");
553
554        return count;
555}
556define_one_global_rw(boost);
557
558static struct cpufreq_governor *find_governor(const char *str_governor)
559{
560        struct cpufreq_governor *t;
561
562        for_each_governor(t)
563                if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
564                        return t;
565
566        return NULL;
567}
568
569/**
570 * cpufreq_parse_governor - parse a governor string
571 */
572static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
573                                struct cpufreq_governor **governor)
574{
575        int err = -EINVAL;
576
577        if (cpufreq_driver->setpolicy) {
578                if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
579                        *policy = CPUFREQ_POLICY_PERFORMANCE;
580                        err = 0;
581                } else if (!strncasecmp(str_governor, "powersave",
582                                                CPUFREQ_NAME_LEN)) {
583                        *policy = CPUFREQ_POLICY_POWERSAVE;
584                        err = 0;
585                }
586        } else {
587                struct cpufreq_governor *t;
588
589                mutex_lock(&cpufreq_governor_mutex);
590
591                t = find_governor(str_governor);
592
593                if (t == NULL) {
594                        int ret;
595
596                        mutex_unlock(&cpufreq_governor_mutex);
597                        ret = request_module("cpufreq_%s", str_governor);
598                        mutex_lock(&cpufreq_governor_mutex);
599
600                        if (ret == 0)
601                                t = find_governor(str_governor);
602                }
603
604                if (t != NULL) {
605                        *governor = t;
606                        err = 0;
607                }
608
609                mutex_unlock(&cpufreq_governor_mutex);
610        }
611        return err;
612}
613
614/**
615 * cpufreq_per_cpu_attr_read() / show_##file_name() -
616 * print out cpufreq information
617 *
618 * Write out information from cpufreq_driver->policy[cpu]; object must be
619 * "unsigned int".
620 */
621
622#define show_one(file_name, object)                     \
623static ssize_t show_##file_name                         \
624(struct cpufreq_policy *policy, char *buf)              \
625{                                                       \
626        return sprintf(buf, "%u\n", policy->object);    \
627}
628
629show_one(cpuinfo_min_freq, cpuinfo.min_freq);
630show_one(cpuinfo_max_freq, cpuinfo.max_freq);
631show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
632show_one(scaling_min_freq, min);
633show_one(scaling_max_freq, max);
634
635static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
636{
637        ssize_t ret;
638
639        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
640                ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
641        else
642                ret = sprintf(buf, "%u\n", policy->cur);
643        return ret;
644}
645
646static int cpufreq_set_policy(struct cpufreq_policy *policy,
647                                struct cpufreq_policy *new_policy);
648
649/**
650 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
651 */
652#define store_one(file_name, object)                    \
653static ssize_t store_##file_name                                        \
654(struct cpufreq_policy *policy, const char *buf, size_t count)          \
655{                                                                       \
656        int ret, temp;                                                  \
657        struct cpufreq_policy new_policy;                               \
658                                                                        \
659        memcpy(&new_policy, policy, sizeof(*policy));                   \
660                                                                        \
661        ret = sscanf(buf, "%u", &new_policy.object);                    \
662        if (ret != 1)                                                   \
663                return -EINVAL;                                         \
664                                                                        \
665        temp = new_policy.object;                                       \
666        ret = cpufreq_set_policy(policy, &new_policy);          \
667        if (!ret)                                                       \
668                policy->user_policy.object = temp;                      \
669                                                                        \
670        return ret ? ret : count;                                       \
671}
672
673store_one(scaling_min_freq, min);
674store_one(scaling_max_freq, max);
675
676/**
677 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
678 */
679static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
680                                        char *buf)
681{
682        unsigned int cur_freq = __cpufreq_get(policy);
683
684        if (cur_freq)
685                return sprintf(buf, "%u\n", cur_freq);
686
687        return sprintf(buf, "<unknown>\n");
688}
689
690/**
691 * show_scaling_governor - show the current policy for the specified CPU
692 */
693static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
694{
695        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
696                return sprintf(buf, "powersave\n");
697        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
698                return sprintf(buf, "performance\n");
699        else if (policy->governor)
700                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
701                                policy->governor->name);
702        return -EINVAL;
703}
704
705/**
706 * store_scaling_governor - store policy for the specified CPU
707 */
708static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
709                                        const char *buf, size_t count)
710{
711        int ret;
712        char    str_governor[16];
713        struct cpufreq_policy new_policy;
714
715        memcpy(&new_policy, policy, sizeof(*policy));
716
717        ret = sscanf(buf, "%15s", str_governor);
718        if (ret != 1)
719                return -EINVAL;
720
721        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
722                                                &new_policy.governor))
723                return -EINVAL;
724
725        ret = cpufreq_set_policy(policy, &new_policy);
726        return ret ? ret : count;
727}
728
729/**
730 * show_scaling_driver - show the cpufreq driver currently loaded
731 */
732static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
733{
734        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
735}
736
737/**
738 * show_scaling_available_governors - show the available CPUfreq governors
739 */
740static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
741                                                char *buf)
742{
743        ssize_t i = 0;
744        struct cpufreq_governor *t;
745
746        if (!has_target()) {
747                i += sprintf(buf, "performance powersave");
748                goto out;
749        }
750
751        for_each_governor(t) {
752                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
753                    - (CPUFREQ_NAME_LEN + 2)))
754                        goto out;
755                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
756        }
757out:
758        i += sprintf(&buf[i], "\n");
759        return i;
760}
761
762ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
763{
764        ssize_t i = 0;
765        unsigned int cpu;
766
767        for_each_cpu(cpu, mask) {
768                if (i)
769                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
770                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
771                if (i >= (PAGE_SIZE - 5))
772                        break;
773        }
774        i += sprintf(&buf[i], "\n");
775        return i;
776}
777EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
778
779/**
780 * show_related_cpus - show the CPUs affected by each transition even if
781 * hw coordination is in use
782 */
783static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
784{
785        return cpufreq_show_cpus(policy->related_cpus, buf);
786}
787
788/**
789 * show_affected_cpus - show the CPUs affected by each transition
790 */
791static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
792{
793        return cpufreq_show_cpus(policy->cpus, buf);
794}
795
796static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
797                                        const char *buf, size_t count)
798{
799        unsigned int freq = 0;
800        unsigned int ret;
801
802        if (!policy->governor || !policy->governor->store_setspeed)
803                return -EINVAL;
804
805        ret = sscanf(buf, "%u", &freq);
806        if (ret != 1)
807                return -EINVAL;
808
809        policy->governor->store_setspeed(policy, freq);
810
811        return count;
812}
813
814static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
815{
816        if (!policy->governor || !policy->governor->show_setspeed)
817                return sprintf(buf, "<unsupported>\n");
818
819        return policy->governor->show_setspeed(policy, buf);
820}
821
822/**
823 * show_bios_limit - show the current cpufreq HW/BIOS limitation
824 */
825static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
826{
827        unsigned int limit;
828        int ret;
829        if (cpufreq_driver->bios_limit) {
830                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
831                if (!ret)
832                        return sprintf(buf, "%u\n", limit);
833        }
834        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
835}
836
837cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
838cpufreq_freq_attr_ro(cpuinfo_min_freq);
839cpufreq_freq_attr_ro(cpuinfo_max_freq);
840cpufreq_freq_attr_ro(cpuinfo_transition_latency);
841cpufreq_freq_attr_ro(scaling_available_governors);
842cpufreq_freq_attr_ro(scaling_driver);
843cpufreq_freq_attr_ro(scaling_cur_freq);
844cpufreq_freq_attr_ro(bios_limit);
845cpufreq_freq_attr_ro(related_cpus);
846cpufreq_freq_attr_ro(affected_cpus);
847cpufreq_freq_attr_rw(scaling_min_freq);
848cpufreq_freq_attr_rw(scaling_max_freq);
849cpufreq_freq_attr_rw(scaling_governor);
850cpufreq_freq_attr_rw(scaling_setspeed);
851
852static struct attribute *default_attrs[] = {
853        &cpuinfo_min_freq.attr,
854        &cpuinfo_max_freq.attr,
855        &cpuinfo_transition_latency.attr,
856        &scaling_min_freq.attr,
857        &scaling_max_freq.attr,
858        &affected_cpus.attr,
859        &related_cpus.attr,
860        &scaling_governor.attr,
861        &scaling_driver.attr,
862        &scaling_available_governors.attr,
863        &scaling_setspeed.attr,
864        NULL
865};
866
867#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
868#define to_attr(a) container_of(a, struct freq_attr, attr)
869
870static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
871{
872        struct cpufreq_policy *policy = to_policy(kobj);
873        struct freq_attr *fattr = to_attr(attr);
874        ssize_t ret;
875
876        down_read(&policy->rwsem);
877        ret = fattr->show(policy, buf);
878        up_read(&policy->rwsem);
879
880        return ret;
881}
882
883static ssize_t store(struct kobject *kobj, struct attribute *attr,
884                     const char *buf, size_t count)
885{
886        struct cpufreq_policy *policy = to_policy(kobj);
887        struct freq_attr *fattr = to_attr(attr);
888        ssize_t ret = -EINVAL;
889
890        get_online_cpus();
891
892        if (cpu_online(policy->cpu)) {
893                down_write(&policy->rwsem);
894                ret = fattr->store(policy, buf, count);
895                up_write(&policy->rwsem);
896        }
897
898        put_online_cpus();
899
900        return ret;
901}
902
903static void cpufreq_sysfs_release(struct kobject *kobj)
904{
905        struct cpufreq_policy *policy = to_policy(kobj);
906        pr_debug("last reference is dropped\n");
907        complete(&policy->kobj_unregister);
908}
909
910static const struct sysfs_ops sysfs_ops = {
911        .show   = show,
912        .store  = store,
913};
914
915static struct kobj_type ktype_cpufreq = {
916        .sysfs_ops      = &sysfs_ops,
917        .default_attrs  = default_attrs,
918        .release        = cpufreq_sysfs_release,
919};
920
921static int add_cpu_dev_symlink(struct cpufreq_policy *policy,
922                               struct device *dev)
923{
924        dev_dbg(dev, "%s: Adding symlink\n", __func__);
925        return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
926}
927
928static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
929                                   struct device *dev)
930{
931        dev_dbg(dev, "%s: Removing symlink\n", __func__);
932        sysfs_remove_link(&dev->kobj, "cpufreq");
933}
934
935static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
936{
937        struct freq_attr **drv_attr;
938        int ret = 0;
939
940        /* set up files for this cpu device */
941        drv_attr = cpufreq_driver->attr;
942        while (drv_attr && *drv_attr) {
943                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
944                if (ret)
945                        return ret;
946                drv_attr++;
947        }
948        if (cpufreq_driver->get) {
949                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
950                if (ret)
951                        return ret;
952        }
953
954        ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
955        if (ret)
956                return ret;
957
958        if (cpufreq_driver->bios_limit) {
959                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
960                if (ret)
961                        return ret;
962        }
963
964        return 0;
965}
966
967__weak struct cpufreq_governor *cpufreq_default_governor(void)
968{
969        return NULL;
970}
971
972static int cpufreq_init_policy(struct cpufreq_policy *policy)
973{
974        struct cpufreq_governor *gov = NULL;
975        struct cpufreq_policy new_policy;
976
977        memcpy(&new_policy, policy, sizeof(*policy));
978
979        /* Update governor of new_policy to the governor used before hotplug */
980        gov = find_governor(policy->last_governor);
981        if (gov) {
982                pr_debug("Restoring governor %s for cpu %d\n",
983                                policy->governor->name, policy->cpu);
984        } else {
985                gov = cpufreq_default_governor();
986                if (!gov)
987                        return -ENODATA;
988        }
989
990        new_policy.governor = gov;
991
992        /* Use the default policy if there is no last_policy. */
993        if (cpufreq_driver->setpolicy) {
994                if (policy->last_policy)
995                        new_policy.policy = policy->last_policy;
996                else
997                        cpufreq_parse_governor(gov->name, &new_policy.policy,
998                                               NULL);
999        }
1000        /* set default policy */
1001        return cpufreq_set_policy(policy, &new_policy);
1002}
1003
1004static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1005{
1006        int ret = 0;
1007
1008        /* Has this CPU been taken care of already? */
1009        if (cpumask_test_cpu(cpu, policy->cpus))
1010                return 0;
1011
1012        down_write(&policy->rwsem);
1013        if (has_target())
1014                cpufreq_stop_governor(policy);
1015
1016        cpumask_set_cpu(cpu, policy->cpus);
1017
1018        if (has_target()) {
1019                ret = cpufreq_start_governor(policy);
1020                if (ret)
1021                        pr_err("%s: Failed to start governor\n", __func__);
1022        }
1023        up_write(&policy->rwsem);
1024        return ret;
1025}
1026
1027static void handle_update(struct work_struct *work)
1028{
1029        struct cpufreq_policy *policy =
1030                container_of(work, struct cpufreq_policy, update);
1031        unsigned int cpu = policy->cpu;
1032        pr_debug("handle_update for cpu %u called\n", cpu);
1033        cpufreq_update_policy(cpu);
1034}
1035
1036static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1037{
1038        struct cpufreq_policy *policy;
1039        int ret;
1040
1041        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1042        if (!policy)
1043                return NULL;
1044
1045        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1046                goto err_free_policy;
1047
1048        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1049                goto err_free_cpumask;
1050
1051        if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1052                goto err_free_rcpumask;
1053
1054        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1055                                   cpufreq_global_kobject, "policy%u", cpu);
1056        if (ret) {
1057                pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1058                goto err_free_real_cpus;
1059        }
1060
1061        INIT_LIST_HEAD(&policy->policy_list);
1062        init_rwsem(&policy->rwsem);
1063        spin_lock_init(&policy->transition_lock);
1064        init_waitqueue_head(&policy->transition_wait);
1065        init_completion(&policy->kobj_unregister);
1066        INIT_WORK(&policy->update, handle_update);
1067
1068        policy->cpu = cpu;
1069        return policy;
1070
1071err_free_real_cpus:
1072        free_cpumask_var(policy->real_cpus);
1073err_free_rcpumask:
1074        free_cpumask_var(policy->related_cpus);
1075err_free_cpumask:
1076        free_cpumask_var(policy->cpus);
1077err_free_policy:
1078        kfree(policy);
1079
1080        return NULL;
1081}
1082
1083static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
1084{
1085        struct kobject *kobj;
1086        struct completion *cmp;
1087
1088        if (notify)
1089                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1090                                             CPUFREQ_REMOVE_POLICY, policy);
1091
1092        down_write(&policy->rwsem);
1093        cpufreq_stats_free_table(policy);
1094        kobj = &policy->kobj;
1095        cmp = &policy->kobj_unregister;
1096        up_write(&policy->rwsem);
1097        kobject_put(kobj);
1098
1099        /*
1100         * We need to make sure that the underlying kobj is
1101         * actually not referenced anymore by anybody before we
1102         * proceed with unloading.
1103         */
1104        pr_debug("waiting for dropping of refcount\n");
1105        wait_for_completion(cmp);
1106        pr_debug("wait complete\n");
1107}
1108
1109static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1110{
1111        unsigned long flags;
1112        int cpu;
1113
1114        /* Remove policy from list */
1115        write_lock_irqsave(&cpufreq_driver_lock, flags);
1116        list_del(&policy->policy_list);
1117
1118        for_each_cpu(cpu, policy->related_cpus)
1119                per_cpu(cpufreq_cpu_data, cpu) = NULL;
1120        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1121
1122        cpufreq_policy_put_kobj(policy, notify);
1123        free_cpumask_var(policy->real_cpus);
1124        free_cpumask_var(policy->related_cpus);
1125        free_cpumask_var(policy->cpus);
1126        kfree(policy);
1127}
1128
1129static int cpufreq_online(unsigned int cpu)
1130{
1131        struct cpufreq_policy *policy;
1132        bool new_policy;
1133        unsigned long flags;
1134        unsigned int j;
1135        int ret;
1136
1137        pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1138
1139        /* Check if this CPU already has a policy to manage it */
1140        policy = per_cpu(cpufreq_cpu_data, cpu);
1141        if (policy) {
1142                WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1143                if (!policy_is_inactive(policy))
1144                        return cpufreq_add_policy_cpu(policy, cpu);
1145
1146                /* This is the only online CPU for the policy.  Start over. */
1147                new_policy = false;
1148                down_write(&policy->rwsem);
1149                policy->cpu = cpu;
1150                policy->governor = NULL;
1151                up_write(&policy->rwsem);
1152        } else {
1153                new_policy = true;
1154                policy = cpufreq_policy_alloc(cpu);
1155                if (!policy)
1156                        return -ENOMEM;
1157        }
1158
1159        cpumask_copy(policy->cpus, cpumask_of(cpu));
1160
1161        /* call driver. From then on the cpufreq must be able
1162         * to accept all calls to ->verify and ->setpolicy for this CPU
1163         */
1164        ret = cpufreq_driver->init(policy);
1165        if (ret) {
1166                pr_debug("initialization failed\n");
1167                goto out_free_policy;
1168        }
1169
1170        down_write(&policy->rwsem);
1171
1172        if (new_policy) {
1173                /* related_cpus should at least include policy->cpus. */
1174                cpumask_copy(policy->related_cpus, policy->cpus);
1175                /* Clear mask of registered CPUs */
1176                cpumask_clear(policy->real_cpus);
1177        }
1178
1179        /*
1180         * affected cpus must always be the one, which are online. We aren't
1181         * managing offline cpus here.
1182         */
1183        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1184
1185        if (new_policy) {
1186                policy->user_policy.min = policy->min;
1187                policy->user_policy.max = policy->max;
1188
1189                write_lock_irqsave(&cpufreq_driver_lock, flags);
1190                for_each_cpu(j, policy->related_cpus)
1191                        per_cpu(cpufreq_cpu_data, j) = policy;
1192                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1193        } else {
1194                policy->min = policy->user_policy.min;
1195                policy->max = policy->user_policy.max;
1196        }
1197
1198        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1199                policy->cur = cpufreq_driver->get(policy->cpu);
1200                if (!policy->cur) {
1201                        pr_err("%s: ->get() failed\n", __func__);
1202                        goto out_exit_policy;
1203                }
1204        }
1205
1206        /*
1207         * Sometimes boot loaders set CPU frequency to a value outside of
1208         * frequency table present with cpufreq core. In such cases CPU might be
1209         * unstable if it has to run on that frequency for long duration of time
1210         * and so its better to set it to a frequency which is specified in
1211         * freq-table. This also makes cpufreq stats inconsistent as
1212         * cpufreq-stats would fail to register because current frequency of CPU
1213         * isn't found in freq-table.
1214         *
1215         * Because we don't want this change to effect boot process badly, we go
1216         * for the next freq which is >= policy->cur ('cur' must be set by now,
1217         * otherwise we will end up setting freq to lowest of the table as 'cur'
1218         * is initialized to zero).
1219         *
1220         * We are passing target-freq as "policy->cur - 1" otherwise
1221         * __cpufreq_driver_target() would simply fail, as policy->cur will be
1222         * equal to target-freq.
1223         */
1224        if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1225            && has_target()) {
1226                /* Are we running at unknown frequency ? */
1227                ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1228                if (ret == -EINVAL) {
1229                        /* Warn user and fix it */
1230                        pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1231                                __func__, policy->cpu, policy->cur);
1232                        ret = __cpufreq_driver_target(policy, policy->cur - 1,
1233                                CPUFREQ_RELATION_L);
1234
1235                        /*
1236                         * Reaching here after boot in a few seconds may not
1237                         * mean that system will remain stable at "unknown"
1238                         * frequency for longer duration. Hence, a BUG_ON().
1239                         */
1240                        BUG_ON(ret);
1241                        pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1242                                __func__, policy->cpu, policy->cur);
1243                }
1244        }
1245
1246        if (new_policy) {
1247                ret = cpufreq_add_dev_interface(policy);
1248                if (ret)
1249                        goto out_exit_policy;
1250
1251                cpufreq_stats_create_table(policy);
1252                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1253                                CPUFREQ_CREATE_POLICY, policy);
1254
1255                write_lock_irqsave(&cpufreq_driver_lock, flags);
1256                list_add(&policy->policy_list, &cpufreq_policy_list);
1257                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1258        }
1259
1260        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1261                                     CPUFREQ_START, policy);
1262
1263        ret = cpufreq_init_policy(policy);
1264        if (ret) {
1265                pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1266                       __func__, cpu, ret);
1267                /* cpufreq_policy_free() will notify based on this */
1268                new_policy = false;
1269                goto out_exit_policy;
1270        }
1271
1272        up_write(&policy->rwsem);
1273
1274        kobject_uevent(&policy->kobj, KOBJ_ADD);
1275
1276        /* Callback for handling stuff after policy is ready */
1277        if (cpufreq_driver->ready)
1278                cpufreq_driver->ready(policy);
1279
1280        pr_debug("initialization complete\n");
1281
1282        return 0;
1283
1284out_exit_policy:
1285        up_write(&policy->rwsem);
1286
1287        if (cpufreq_driver->exit)
1288                cpufreq_driver->exit(policy);
1289out_free_policy:
1290        cpufreq_policy_free(policy, !new_policy);
1291        return ret;
1292}
1293
1294static int cpufreq_offline(unsigned int cpu);
1295
1296/**
1297 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1298 * @dev: CPU device.
1299 * @sif: Subsystem interface structure pointer (not used)
1300 */
1301static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1302{
1303        struct cpufreq_policy *policy;
1304        unsigned cpu = dev->id;
1305        int ret;
1306
1307        dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1308
1309        if (cpu_online(cpu)) {
1310                ret = cpufreq_online(cpu);
1311                if (ret)
1312                        return ret;
1313        }
1314
1315        /* Create sysfs link on CPU registration */
1316        policy = per_cpu(cpufreq_cpu_data, cpu);
1317        if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus))
1318                return 0;
1319
1320        ret = add_cpu_dev_symlink(policy, dev);
1321        if (ret) {
1322                cpumask_clear_cpu(cpu, policy->real_cpus);
1323                cpufreq_offline(cpu);
1324        }
1325
1326        return ret;
1327}
1328
1329static int cpufreq_offline(unsigned int cpu)
1330{
1331        struct cpufreq_policy *policy;
1332        int ret;
1333
1334        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1335
1336        policy = cpufreq_cpu_get_raw(cpu);
1337        if (!policy) {
1338                pr_debug("%s: No cpu_data found\n", __func__);
1339                return 0;
1340        }
1341
1342        down_write(&policy->rwsem);
1343        if (has_target())
1344                cpufreq_stop_governor(policy);
1345
1346        cpumask_clear_cpu(cpu, policy->cpus);
1347
1348        if (policy_is_inactive(policy)) {
1349                if (has_target())
1350                        strncpy(policy->last_governor, policy->governor->name,
1351                                CPUFREQ_NAME_LEN);
1352                else
1353                        policy->last_policy = policy->policy;
1354        } else if (cpu == policy->cpu) {
1355                /* Nominate new CPU */
1356                policy->cpu = cpumask_any(policy->cpus);
1357        }
1358
1359        /* Start governor again for active policy */
1360        if (!policy_is_inactive(policy)) {
1361                if (has_target()) {
1362                        ret = cpufreq_start_governor(policy);
1363                        if (ret)
1364                                pr_err("%s: Failed to start governor\n", __func__);
1365                }
1366
1367                goto unlock;
1368        }
1369
1370        if (cpufreq_driver->stop_cpu)
1371                cpufreq_driver->stop_cpu(policy);
1372
1373        if (has_target())
1374                cpufreq_exit_governor(policy);
1375
1376        /*
1377         * Perform the ->exit() even during light-weight tear-down,
1378         * since this is a core component, and is essential for the
1379         * subsequent light-weight ->init() to succeed.
1380         */
1381        if (cpufreq_driver->exit) {
1382                cpufreq_driver->exit(policy);
1383                policy->freq_table = NULL;
1384        }
1385
1386unlock:
1387        up_write(&policy->rwsem);
1388        return 0;
1389}
1390
1391/**
1392 * cpufreq_remove_dev - remove a CPU device
1393 *
1394 * Removes the cpufreq interface for a CPU device.
1395 */
1396static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1397{
1398        unsigned int cpu = dev->id;
1399        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1400
1401        if (!policy)
1402                return;
1403
1404        if (cpu_online(cpu))
1405                cpufreq_offline(cpu);
1406
1407        cpumask_clear_cpu(cpu, policy->real_cpus);
1408        remove_cpu_dev_symlink(policy, dev);
1409
1410        if (cpumask_empty(policy->real_cpus))
1411                cpufreq_policy_free(policy, true);
1412}
1413
1414/**
1415 *      cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1416 *      in deep trouble.
1417 *      @policy: policy managing CPUs
1418 *      @new_freq: CPU frequency the CPU actually runs at
1419 *
1420 *      We adjust to current frequency first, and need to clean up later.
1421 *      So either call to cpufreq_update_policy() or schedule handle_update()).
1422 */
1423static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1424                                unsigned int new_freq)
1425{
1426        struct cpufreq_freqs freqs;
1427
1428        pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1429                 policy->cur, new_freq);
1430
1431        freqs.old = policy->cur;
1432        freqs.new = new_freq;
1433
1434        cpufreq_freq_transition_begin(policy, &freqs);
1435        cpufreq_freq_transition_end(policy, &freqs, 0);
1436}
1437
1438/**
1439 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1440 * @cpu: CPU number
1441 *
1442 * This is the last known freq, without actually getting it from the driver.
1443 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1444 */
1445unsigned int cpufreq_quick_get(unsigned int cpu)
1446{
1447        struct cpufreq_policy *policy;
1448        unsigned int ret_freq = 0;
1449        unsigned long flags;
1450
1451        read_lock_irqsave(&cpufreq_driver_lock, flags);
1452
1453        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
1454                ret_freq = cpufreq_driver->get(cpu);
1455                read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1456                return ret_freq;
1457        }
1458
1459        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1460
1461        policy = cpufreq_cpu_get(cpu);
1462        if (policy) {
1463                ret_freq = policy->cur;
1464                cpufreq_cpu_put(policy);
1465        }
1466
1467        return ret_freq;
1468}
1469EXPORT_SYMBOL(cpufreq_quick_get);
1470
1471/**
1472 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1473 * @cpu: CPU number
1474 *
1475 * Just return the max possible frequency for a given CPU.
1476 */
1477unsigned int cpufreq_quick_get_max(unsigned int cpu)
1478{
1479        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1480        unsigned int ret_freq = 0;
1481
1482        if (policy) {
1483                ret_freq = policy->max;
1484                cpufreq_cpu_put(policy);
1485        }
1486
1487        return ret_freq;
1488}
1489EXPORT_SYMBOL(cpufreq_quick_get_max);
1490
1491static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1492{
1493        unsigned int ret_freq = 0;
1494
1495        if (!cpufreq_driver->get)
1496                return ret_freq;
1497
1498        ret_freq = cpufreq_driver->get(policy->cpu);
1499
1500        /*
1501         * Updating inactive policies is invalid, so avoid doing that.  Also
1502         * if fast frequency switching is used with the given policy, the check
1503         * against policy->cur is pointless, so skip it in that case too.
1504         */
1505        if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
1506                return ret_freq;
1507
1508        if (ret_freq && policy->cur &&
1509                !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1510                /* verify no discrepancy between actual and
1511                                        saved value exists */
1512                if (unlikely(ret_freq != policy->cur)) {
1513                        cpufreq_out_of_sync(policy, ret_freq);
1514                        schedule_work(&policy->update);
1515                }
1516        }
1517
1518        return ret_freq;
1519}
1520
1521/**
1522 * cpufreq_get - get the current CPU frequency (in kHz)
1523 * @cpu: CPU number
1524 *
1525 * Get the CPU current (static) CPU frequency
1526 */
1527unsigned int cpufreq_get(unsigned int cpu)
1528{
1529        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1530        unsigned int ret_freq = 0;
1531
1532        if (policy) {
1533                down_read(&policy->rwsem);
1534                ret_freq = __cpufreq_get(policy);
1535                up_read(&policy->rwsem);
1536
1537                cpufreq_cpu_put(policy);
1538        }
1539
1540        return ret_freq;
1541}
1542EXPORT_SYMBOL(cpufreq_get);
1543
1544static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
1545{
1546        unsigned int new_freq;
1547
1548        new_freq = cpufreq_driver->get(policy->cpu);
1549        if (!new_freq)
1550                return 0;
1551
1552        if (!policy->cur) {
1553                pr_debug("cpufreq: Driver did not initialize current freq\n");
1554                policy->cur = new_freq;
1555        } else if (policy->cur != new_freq && has_target()) {
1556                cpufreq_out_of_sync(policy, new_freq);
1557        }
1558
1559        return new_freq;
1560}
1561
1562static struct subsys_interface cpufreq_interface = {
1563        .name           = "cpufreq",
1564        .subsys         = &cpu_subsys,
1565        .add_dev        = cpufreq_add_dev,
1566        .remove_dev     = cpufreq_remove_dev,
1567};
1568
1569/*
1570 * In case platform wants some specific frequency to be configured
1571 * during suspend..
1572 */
1573int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1574{
1575        int ret;
1576
1577        if (!policy->suspend_freq) {
1578                pr_debug("%s: suspend_freq not defined\n", __func__);
1579                return 0;
1580        }
1581
1582        pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1583                        policy->suspend_freq);
1584
1585        ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1586                        CPUFREQ_RELATION_H);
1587        if (ret)
1588                pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1589                                __func__, policy->suspend_freq, ret);
1590
1591        return ret;
1592}
1593EXPORT_SYMBOL(cpufreq_generic_suspend);
1594
1595/**
1596 * cpufreq_suspend() - Suspend CPUFreq governors
1597 *
1598 * Called during system wide Suspend/Hibernate cycles for suspending governors
1599 * as some platforms can't change frequency after this point in suspend cycle.
1600 * Because some of the devices (like: i2c, regulators, etc) they use for
1601 * changing frequency are suspended quickly after this point.
1602 */
1603void cpufreq_suspend(void)
1604{
1605        struct cpufreq_policy *policy;
1606
1607        if (!cpufreq_driver)
1608                return;
1609
1610        if (!has_target() && !cpufreq_driver->suspend)
1611                goto suspend;
1612
1613        pr_debug("%s: Suspending Governors\n", __func__);
1614
1615        for_each_active_policy(policy) {
1616                if (has_target()) {
1617                        down_write(&policy->rwsem);
1618                        cpufreq_stop_governor(policy);
1619                        up_write(&policy->rwsem);
1620                }
1621
1622                if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1623                        pr_err("%s: Failed to suspend driver: %p\n", __func__,
1624                                policy);
1625        }
1626
1627suspend:
1628        cpufreq_suspended = true;
1629}
1630
1631/**
1632 * cpufreq_resume() - Resume CPUFreq governors
1633 *
1634 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1635 * are suspended with cpufreq_suspend().
1636 */
1637void cpufreq_resume(void)
1638{
1639        struct cpufreq_policy *policy;
1640        int ret;
1641
1642        if (!cpufreq_driver)
1643                return;
1644
1645        cpufreq_suspended = false;
1646
1647        if (!has_target() && !cpufreq_driver->resume)
1648                return;
1649
1650        pr_debug("%s: Resuming Governors\n", __func__);
1651
1652        for_each_active_policy(policy) {
1653                if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
1654                        pr_err("%s: Failed to resume driver: %p\n", __func__,
1655                                policy);
1656                } else if (has_target()) {
1657                        down_write(&policy->rwsem);
1658                        ret = cpufreq_start_governor(policy);
1659                        up_write(&policy->rwsem);
1660
1661                        if (ret)
1662                                pr_err("%s: Failed to start governor for policy: %p\n",
1663                                       __func__, policy);
1664                }
1665        }
1666}
1667
1668/**
1669 *      cpufreq_get_current_driver - return current driver's name
1670 *
1671 *      Return the name string of the currently loaded cpufreq driver
1672 *      or NULL, if none.
1673 */
1674const char *cpufreq_get_current_driver(void)
1675{
1676        if (cpufreq_driver)
1677                return cpufreq_driver->name;
1678
1679        return NULL;
1680}
1681EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1682
1683/**
1684 *      cpufreq_get_driver_data - return current driver data
1685 *
1686 *      Return the private data of the currently loaded cpufreq
1687 *      driver, or NULL if no cpufreq driver is loaded.
1688 */
1689void *cpufreq_get_driver_data(void)
1690{
1691        if (cpufreq_driver)
1692                return cpufreq_driver->driver_data;
1693
1694        return NULL;
1695}
1696EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1697
1698/*********************************************************************
1699 *                     NOTIFIER LISTS INTERFACE                      *
1700 *********************************************************************/
1701
1702/**
1703 *      cpufreq_register_notifier - register a driver with cpufreq
1704 *      @nb: notifier function to register
1705 *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1706 *
1707 *      Add a driver to one of two lists: either a list of drivers that
1708 *      are notified about clock rate changes (once before and once after
1709 *      the transition), or a list of drivers that are notified about
1710 *      changes in cpufreq policy.
1711 *
1712 *      This function may sleep, and has the same return conditions as
1713 *      blocking_notifier_chain_register.
1714 */
1715int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1716{
1717        int ret;
1718
1719        if (cpufreq_disabled())
1720                return -EINVAL;
1721
1722        WARN_ON(!init_cpufreq_transition_notifier_list_called);
1723
1724        switch (list) {
1725        case CPUFREQ_TRANSITION_NOTIFIER:
1726                mutex_lock(&cpufreq_fast_switch_lock);
1727
1728                if (cpufreq_fast_switch_count > 0) {
1729                        mutex_unlock(&cpufreq_fast_switch_lock);
1730                        return -EBUSY;
1731                }
1732                ret = srcu_notifier_chain_register(
1733                                &cpufreq_transition_notifier_list, nb);
1734                if (!ret)
1735                        cpufreq_fast_switch_count--;
1736
1737                mutex_unlock(&cpufreq_fast_switch_lock);
1738                break;
1739        case CPUFREQ_POLICY_NOTIFIER:
1740                ret = blocking_notifier_chain_register(
1741                                &cpufreq_policy_notifier_list, nb);
1742                break;
1743        default:
1744                ret = -EINVAL;
1745        }
1746
1747        return ret;
1748}
1749EXPORT_SYMBOL(cpufreq_register_notifier);
1750
1751/**
1752 *      cpufreq_unregister_notifier - unregister a driver with cpufreq
1753 *      @nb: notifier block to be unregistered
1754 *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1755 *
1756 *      Remove a driver from the CPU frequency notifier list.
1757 *
1758 *      This function may sleep, and has the same return conditions as
1759 *      blocking_notifier_chain_unregister.
1760 */
1761int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1762{
1763        int ret;
1764
1765        if (cpufreq_disabled())
1766                return -EINVAL;
1767
1768        switch (list) {
1769        case CPUFREQ_TRANSITION_NOTIFIER:
1770                mutex_lock(&cpufreq_fast_switch_lock);
1771
1772                ret = srcu_notifier_chain_unregister(
1773                                &cpufreq_transition_notifier_list, nb);
1774                if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
1775                        cpufreq_fast_switch_count++;
1776
1777                mutex_unlock(&cpufreq_fast_switch_lock);
1778                break;
1779        case CPUFREQ_POLICY_NOTIFIER:
1780                ret = blocking_notifier_chain_unregister(
1781                                &cpufreq_policy_notifier_list, nb);
1782                break;
1783        default:
1784                ret = -EINVAL;
1785        }
1786
1787        return ret;
1788}
1789EXPORT_SYMBOL(cpufreq_unregister_notifier);
1790
1791
1792/*********************************************************************
1793 *                              GOVERNORS                            *
1794 *********************************************************************/
1795
1796/**
1797 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
1798 * @policy: cpufreq policy to switch the frequency for.
1799 * @target_freq: New frequency to set (may be approximate).
1800 *
1801 * Carry out a fast frequency switch without sleeping.
1802 *
1803 * The driver's ->fast_switch() callback invoked by this function must be
1804 * suitable for being called from within RCU-sched read-side critical sections
1805 * and it is expected to select the minimum available frequency greater than or
1806 * equal to @target_freq (CPUFREQ_RELATION_L).
1807 *
1808 * This function must not be called if policy->fast_switch_enabled is unset.
1809 *
1810 * Governors calling this function must guarantee that it will never be invoked
1811 * twice in parallel for the same policy and that it will never be called in
1812 * parallel with either ->target() or ->target_index() for the same policy.
1813 *
1814 * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
1815 * callback to indicate an error condition, the hardware configuration must be
1816 * preserved.
1817 */
1818unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
1819                                        unsigned int target_freq)
1820{
1821        target_freq = clamp_val(target_freq, policy->min, policy->max);
1822
1823        return cpufreq_driver->fast_switch(policy, target_freq);
1824}
1825EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
1826
1827/* Must set freqs->new to intermediate frequency */
1828static int __target_intermediate(struct cpufreq_policy *policy,
1829                                 struct cpufreq_freqs *freqs, int index)
1830{
1831        int ret;
1832
1833        freqs->new = cpufreq_driver->get_intermediate(policy, index);
1834
1835        /* We don't need to switch to intermediate freq */
1836        if (!freqs->new)
1837                return 0;
1838
1839        pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1840                 __func__, policy->cpu, freqs->old, freqs->new);
1841
1842        cpufreq_freq_transition_begin(policy, freqs);
1843        ret = cpufreq_driver->target_intermediate(policy, index);
1844        cpufreq_freq_transition_end(policy, freqs, ret);
1845
1846        if (ret)
1847                pr_err("%s: Failed to change to intermediate frequency: %d\n",
1848                       __func__, ret);
1849
1850        return ret;
1851}
1852
1853static int __target_index(struct cpufreq_policy *policy, int index)
1854{
1855        struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1856        unsigned int intermediate_freq = 0;
1857        unsigned int newfreq = policy->freq_table[index].frequency;
1858        int retval = -EINVAL;
1859        bool notify;
1860
1861        if (newfreq == policy->cur)
1862                return 0;
1863
1864        notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1865        if (notify) {
1866                /* Handle switching to intermediate frequency */
1867                if (cpufreq_driver->get_intermediate) {
1868                        retval = __target_intermediate(policy, &freqs, index);
1869                        if (retval)
1870                                return retval;
1871
1872                        intermediate_freq = freqs.new;
1873                        /* Set old freq to intermediate */
1874                        if (intermediate_freq)
1875                                freqs.old = freqs.new;
1876                }
1877
1878                freqs.new = newfreq;
1879                pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1880                         __func__, policy->cpu, freqs.old, freqs.new);
1881
1882                cpufreq_freq_transition_begin(policy, &freqs);
1883        }
1884
1885        retval = cpufreq_driver->target_index(policy, index);
1886        if (retval)
1887                pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1888                       retval);
1889
1890        if (notify) {
1891                cpufreq_freq_transition_end(policy, &freqs, retval);
1892
1893                /*
1894                 * Failed after setting to intermediate freq? Driver should have
1895                 * reverted back to initial frequency and so should we. Check
1896                 * here for intermediate_freq instead of get_intermediate, in
1897                 * case we haven't switched to intermediate freq at all.
1898                 */
1899                if (unlikely(retval && intermediate_freq)) {
1900                        freqs.old = intermediate_freq;
1901                        freqs.new = policy->restore_freq;
1902                        cpufreq_freq_transition_begin(policy, &freqs);
1903                        cpufreq_freq_transition_end(policy, &freqs, 0);
1904                }
1905        }
1906
1907        return retval;
1908}
1909
1910int __cpufreq_driver_target(struct cpufreq_policy *policy,
1911                            unsigned int target_freq,
1912                            unsigned int relation)
1913{
1914        unsigned int old_target_freq = target_freq;
1915        int index;
1916
1917        if (cpufreq_disabled())
1918                return -ENODEV;
1919
1920        /* Make sure that target_freq is within supported range */
1921        target_freq = clamp_val(target_freq, policy->min, policy->max);
1922
1923        pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1924                 policy->cpu, target_freq, relation, old_target_freq);
1925
1926        /*
1927         * This might look like a redundant call as we are checking it again
1928         * after finding index. But it is left intentionally for cases where
1929         * exactly same freq is called again and so we can save on few function
1930         * calls.
1931         */
1932        if (target_freq == policy->cur)
1933                return 0;
1934
1935        /* Save last value to restore later on errors */
1936        policy->restore_freq = policy->cur;
1937
1938        if (cpufreq_driver->target)
1939                return cpufreq_driver->target(policy, target_freq, relation);
1940
1941        if (!cpufreq_driver->target_index)
1942                return -EINVAL;
1943
1944        index = cpufreq_frequency_table_target(policy, target_freq, relation);
1945
1946        return __target_index(policy, index);
1947}
1948EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1949
1950int cpufreq_driver_target(struct cpufreq_policy *policy,
1951                          unsigned int target_freq,
1952                          unsigned int relation)
1953{
1954        int ret = -EINVAL;
1955
1956        down_write(&policy->rwsem);
1957
1958        ret = __cpufreq_driver_target(policy, target_freq, relation);
1959
1960        up_write(&policy->rwsem);
1961
1962        return ret;
1963}
1964EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1965
1966__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
1967{
1968        return NULL;
1969}
1970
1971static int cpufreq_init_governor(struct cpufreq_policy *policy)
1972{
1973        int ret;
1974
1975        /* Don't start any governor operations if we are entering suspend */
1976        if (cpufreq_suspended)
1977                return 0;
1978        /*
1979         * Governor might not be initiated here if ACPI _PPC changed
1980         * notification happened, so check it.
1981         */
1982        if (!policy->governor)
1983                return -EINVAL;
1984
1985        if (policy->governor->max_transition_latency &&
1986            policy->cpuinfo.transition_latency >
1987            policy->governor->max_transition_latency) {
1988                struct cpufreq_governor *gov = cpufreq_fallback_governor();
1989
1990                if (gov) {
1991                        pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
1992                                policy->governor->name, gov->name);
1993                        policy->governor = gov;
1994                } else {
1995                        return -EINVAL;
1996                }
1997        }
1998
1999        if (!try_module_get(policy->governor->owner))
2000                return -EINVAL;
2001
2002        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2003
2004        if (policy->governor->init) {
2005                ret = policy->governor->init(policy);
2006                if (ret) {
2007                        module_put(policy->governor->owner);
2008                        return ret;
2009                }
2010        }
2011
2012        return 0;
2013}
2014
2015static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2016{
2017        if (cpufreq_suspended || !policy->governor)
2018                return;
2019
2020        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2021
2022        if (policy->governor->exit)
2023                policy->governor->exit(policy);
2024
2025        module_put(policy->governor->owner);
2026}
2027
2028static int cpufreq_start_governor(struct cpufreq_policy *policy)
2029{
2030        int ret;
2031
2032        if (cpufreq_suspended)
2033                return 0;
2034
2035        if (!policy->governor)
2036                return -EINVAL;
2037
2038        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2039
2040        if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
2041                cpufreq_update_current_freq(policy);
2042
2043        if (policy->governor->start) {
2044                ret = policy->governor->start(policy);
2045                if (ret)
2046                        return ret;
2047        }
2048
2049        if (policy->governor->limits)
2050                policy->governor->limits(policy);
2051
2052        return 0;
2053}
2054
2055static void cpufreq_stop_governor(struct cpufreq_policy *policy)
2056{
2057        if (cpufreq_suspended || !policy->governor)
2058                return;
2059
2060        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2061
2062        if (policy->governor->stop)
2063                policy->governor->stop(policy);
2064}
2065
2066static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2067{
2068        if (cpufreq_suspended || !policy->governor)
2069                return;
2070
2071        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2072
2073        if (policy->governor->limits)
2074                policy->governor->limits(policy);
2075}
2076
2077int cpufreq_register_governor(struct cpufreq_governor *governor)
2078{
2079        int err;
2080
2081        if (!governor)
2082                return -EINVAL;
2083
2084        if (cpufreq_disabled())
2085                return -ENODEV;
2086
2087        mutex_lock(&cpufreq_governor_mutex);
2088
2089        err = -EBUSY;
2090        if (!find_governor(governor->name)) {
2091                err = 0;
2092                list_add(&governor->governor_list, &cpufreq_governor_list);
2093        }
2094
2095        mutex_unlock(&cpufreq_governor_mutex);
2096        return err;
2097}
2098EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2099
2100void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2101{
2102        struct cpufreq_policy *policy;
2103        unsigned long flags;
2104
2105        if (!governor)
2106                return;
2107
2108        if (cpufreq_disabled())
2109                return;
2110
2111        /* clear last_governor for all inactive policies */
2112        read_lock_irqsave(&cpufreq_driver_lock, flags);
2113        for_each_inactive_policy(policy) {
2114                if (!strcmp(policy->last_governor, governor->name)) {
2115                        policy->governor = NULL;
2116                        strcpy(policy->last_governor, "\0");
2117                }
2118        }
2119        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2120
2121        mutex_lock(&cpufreq_governor_mutex);
2122        list_del(&governor->governor_list);
2123        mutex_unlock(&cpufreq_governor_mutex);
2124        return;
2125}
2126EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2127
2128
2129/*********************************************************************
2130 *                          POLICY INTERFACE                         *
2131 *********************************************************************/
2132
2133/**
2134 * cpufreq_get_policy - get the current cpufreq_policy
2135 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2136 *      is written
2137 *
2138 * Reads the current cpufreq policy.
2139 */
2140int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2141{
2142        struct cpufreq_policy *cpu_policy;
2143        if (!policy)
2144                return -EINVAL;
2145
2146        cpu_policy = cpufreq_cpu_get(cpu);
2147        if (!cpu_policy)
2148                return -EINVAL;
2149
2150        memcpy(policy, cpu_policy, sizeof(*policy));
2151
2152        cpufreq_cpu_put(cpu_policy);
2153        return 0;
2154}
2155EXPORT_SYMBOL(cpufreq_get_policy);
2156
2157/*
2158 * policy : current policy.
2159 * new_policy: policy to be set.
2160 */
2161static int cpufreq_set_policy(struct cpufreq_policy *policy,
2162                                struct cpufreq_policy *new_policy)
2163{
2164        struct cpufreq_governor *old_gov;
2165        int ret;
2166
2167        pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2168                 new_policy->cpu, new_policy->min, new_policy->max);
2169
2170        memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2171
2172        /*
2173        * This check works well when we store new min/max freq attributes,
2174        * because new_policy is a copy of policy with one field updated.
2175        */
2176        if (new_policy->min > new_policy->max)
2177                return -EINVAL;
2178
2179        /* verify the cpu speed can be set within this limit */
2180        ret = cpufreq_driver->verify(new_policy);
2181        if (ret)
2182                return ret;
2183
2184        /* adjust if necessary - all reasons */
2185        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2186                        CPUFREQ_ADJUST, new_policy);
2187
2188        /*
2189         * verify the cpu speed can be set within this limit, which might be
2190         * different to the first one
2191         */
2192        ret = cpufreq_driver->verify(new_policy);
2193        if (ret)
2194                return ret;
2195
2196        /* notification of the new policy */
2197        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2198                        CPUFREQ_NOTIFY, new_policy);
2199
2200        policy->min = new_policy->min;
2201        policy->max = new_policy->max;
2202
2203        policy->cached_target_freq = UINT_MAX;
2204
2205        pr_debug("new min and max freqs are %u - %u kHz\n",
2206                 policy->min, policy->max);
2207
2208        if (cpufreq_driver->setpolicy) {
2209                policy->policy = new_policy->policy;
2210                pr_debug("setting range\n");
2211                return cpufreq_driver->setpolicy(new_policy);
2212        }
2213
2214        if (new_policy->governor == policy->governor) {
2215                pr_debug("cpufreq: governor limits update\n");
2216                cpufreq_governor_limits(policy);
2217                return 0;
2218        }
2219
2220        pr_debug("governor switch\n");
2221
2222        /* save old, working values */
2223        old_gov = policy->governor;
2224        /* end old governor */
2225        if (old_gov) {
2226                cpufreq_stop_governor(policy);
2227                cpufreq_exit_governor(policy);
2228        }
2229
2230        /* start new governor */
2231        policy->governor = new_policy->governor;
2232        ret = cpufreq_init_governor(policy);
2233        if (!ret) {
2234                ret = cpufreq_start_governor(policy);
2235                if (!ret) {
2236                        pr_debug("cpufreq: governor change\n");
2237                        return 0;
2238                }
2239                cpufreq_exit_governor(policy);
2240        }
2241
2242        /* new governor failed, so re-start old one */
2243        pr_debug("starting governor %s failed\n", policy->governor->name);
2244        if (old_gov) {
2245                policy->governor = old_gov;
2246                if (cpufreq_init_governor(policy))
2247                        policy->governor = NULL;
2248                else
2249                        cpufreq_start_governor(policy);
2250        }
2251
2252        return ret;
2253}
2254
2255/**
2256 *      cpufreq_update_policy - re-evaluate an existing cpufreq policy
2257 *      @cpu: CPU which shall be re-evaluated
2258 *
2259 *      Useful for policy notifiers which have different necessities
2260 *      at different times.
2261 */
2262int cpufreq_update_policy(unsigned int cpu)
2263{
2264        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2265        struct cpufreq_policy new_policy;
2266        int ret;
2267
2268        if (!policy)
2269                return -ENODEV;
2270
2271        down_write(&policy->rwsem);
2272
2273        pr_debug("updating policy for CPU %u\n", cpu);
2274        memcpy(&new_policy, policy, sizeof(*policy));
2275        new_policy.min = policy->user_policy.min;
2276        new_policy.max = policy->user_policy.max;
2277
2278        /*
2279         * BIOS might change freq behind our back
2280         * -> ask driver for current freq and notify governors about a change
2281         */
2282        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2283                if (cpufreq_suspended) {
2284                        ret = -EAGAIN;
2285                        goto unlock;
2286                }
2287                new_policy.cur = cpufreq_update_current_freq(policy);
2288                if (WARN_ON(!new_policy.cur)) {
2289                        ret = -EIO;
2290                        goto unlock;
2291                }
2292        }
2293
2294        ret = cpufreq_set_policy(policy, &new_policy);
2295
2296unlock:
2297        up_write(&policy->rwsem);
2298
2299        cpufreq_cpu_put(policy);
2300        return ret;
2301}
2302EXPORT_SYMBOL(cpufreq_update_policy);
2303
2304/*********************************************************************
2305 *               BOOST                                               *
2306 *********************************************************************/
2307static int cpufreq_boost_set_sw(int state)
2308{
2309        struct cpufreq_policy *policy;
2310        int ret = -EINVAL;
2311
2312        for_each_active_policy(policy) {
2313                if (!policy->freq_table)
2314                        continue;
2315
2316                ret = cpufreq_frequency_table_cpuinfo(policy,
2317                                                      policy->freq_table);
2318                if (ret) {
2319                        pr_err("%s: Policy frequency update failed\n",
2320                               __func__);
2321                        break;
2322                }
2323
2324                down_write(&policy->rwsem);
2325                policy->user_policy.max = policy->max;
2326                cpufreq_governor_limits(policy);
2327                up_write(&policy->rwsem);
2328        }
2329
2330        return ret;
2331}
2332
2333int cpufreq_boost_trigger_state(int state)
2334{
2335        unsigned long flags;
2336        int ret = 0;
2337
2338        if (cpufreq_driver->boost_enabled == state)
2339                return 0;
2340
2341        write_lock_irqsave(&cpufreq_driver_lock, flags);
2342        cpufreq_driver->boost_enabled = state;
2343        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2344
2345        ret = cpufreq_driver->set_boost(state);
2346        if (ret) {
2347                write_lock_irqsave(&cpufreq_driver_lock, flags);
2348                cpufreq_driver->boost_enabled = !state;
2349                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2350
2351                pr_err("%s: Cannot %s BOOST\n",
2352                       __func__, state ? "enable" : "disable");
2353        }
2354
2355        return ret;
2356}
2357
2358static bool cpufreq_boost_supported(void)
2359{
2360        return likely(cpufreq_driver) && cpufreq_driver->set_boost;
2361}
2362
2363static int create_boost_sysfs_file(void)
2364{
2365        int ret;
2366
2367        ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2368        if (ret)
2369                pr_err("%s: cannot register global BOOST sysfs file\n",
2370                       __func__);
2371
2372        return ret;
2373}
2374
2375static void remove_boost_sysfs_file(void)
2376{
2377        if (cpufreq_boost_supported())
2378                sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2379}
2380
2381int cpufreq_enable_boost_support(void)
2382{
2383        if (!cpufreq_driver)
2384                return -EINVAL;
2385
2386        if (cpufreq_boost_supported())
2387                return 0;
2388
2389        cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2390
2391        /* This will get removed on driver unregister */
2392        return create_boost_sysfs_file();
2393}
2394EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
2395
2396int cpufreq_boost_enabled(void)
2397{
2398        return cpufreq_driver->boost_enabled;
2399}
2400EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2401
2402/*********************************************************************
2403 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2404 *********************************************************************/
2405static enum cpuhp_state hp_online;
2406
2407static int cpuhp_cpufreq_online(unsigned int cpu)
2408{
2409        cpufreq_online(cpu);
2410
2411        return 0;
2412}
2413
2414static int cpuhp_cpufreq_offline(unsigned int cpu)
2415{
2416        cpufreq_offline(cpu);
2417
2418        return 0;
2419}
2420
2421/**
2422 * cpufreq_register_driver - register a CPU Frequency driver
2423 * @driver_data: A struct cpufreq_driver containing the values#
2424 * submitted by the CPU Frequency driver.
2425 *
2426 * Registers a CPU Frequency driver to this core code. This code
2427 * returns zero on success, -EEXIST when another driver got here first
2428 * (and isn't unregistered in the meantime).
2429 *
2430 */
2431int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2432{
2433        unsigned long flags;
2434        int ret;
2435
2436        if (cpufreq_disabled())
2437                return -ENODEV;
2438
2439        if (!driver_data || !driver_data->verify || !driver_data->init ||
2440            !(driver_data->setpolicy || driver_data->target_index ||
2441                    driver_data->target) ||
2442             (driver_data->setpolicy && (driver_data->target_index ||
2443                    driver_data->target)) ||
2444             (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2445                return -EINVAL;
2446
2447        printk(KERN_INFO "trying to register driver %s\n", driver_data->name);
2448
2449        /* Protect against concurrent CPU online/offline. */
2450        get_online_cpus();
2451
2452        write_lock_irqsave(&cpufreq_driver_lock, flags);
2453        if (cpufreq_driver) {
2454                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2455                ret = -EEXIST;
2456                goto out;
2457        }
2458        cpufreq_driver = driver_data;
2459        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2460
2461        if (driver_data->setpolicy)
2462                driver_data->flags |= CPUFREQ_CONST_LOOPS;
2463
2464        if (cpufreq_boost_supported()) {
2465                ret = create_boost_sysfs_file();
2466                if (ret)
2467                        goto err_null_driver;
2468        }
2469
2470        ret = subsys_interface_register(&cpufreq_interface);
2471        if (ret)
2472                goto err_boost_unreg;
2473
2474        if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2475            list_empty(&cpufreq_policy_list)) {
2476                /* if all ->init() calls failed, unregister */
2477                pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2478                         driver_data->name);
2479                goto err_if_unreg;
2480        }
2481
2482        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
2483                                        cpuhp_cpufreq_online,
2484                                        cpuhp_cpufreq_offline);
2485        if (ret < 0)
2486                goto err_if_unreg;
2487        hp_online = ret;
2488        ret = 0;
2489
2490        pr_debug("driver %s up and running\n", driver_data->name);
2491        goto out;
2492
2493err_if_unreg:
2494        subsys_interface_unregister(&cpufreq_interface);
2495err_boost_unreg:
2496        remove_boost_sysfs_file();
2497err_null_driver:
2498        write_lock_irqsave(&cpufreq_driver_lock, flags);
2499        cpufreq_driver = NULL;
2500        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2501out:
2502        put_online_cpus();
2503        return ret;
2504}
2505EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2506
2507/**
2508 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2509 *
2510 * Unregister the current CPUFreq driver. Only call this if you have
2511 * the right to do so, i.e. if you have succeeded in initialising before!
2512 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2513 * currently not initialised.
2514 */
2515int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2516{
2517        unsigned long flags;
2518
2519        if (!cpufreq_driver || (driver != cpufreq_driver))
2520                return -EINVAL;
2521
2522        pr_debug("unregistering driver %s\n", driver->name);
2523
2524        /* Protect against concurrent cpu hotplug */
2525        get_online_cpus();
2526        subsys_interface_unregister(&cpufreq_interface);
2527        remove_boost_sysfs_file();
2528        cpuhp_remove_state_nocalls(hp_online);
2529
2530        write_lock_irqsave(&cpufreq_driver_lock, flags);
2531
2532        cpufreq_driver = NULL;
2533
2534        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2535        put_online_cpus();
2536
2537        return 0;
2538}
2539EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2540
2541/*
2542 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2543 * or mutexes when secondary CPUs are halted.
2544 */
2545static struct syscore_ops cpufreq_syscore_ops = {
2546        .shutdown = cpufreq_suspend,
2547};
2548
2549struct kobject *cpufreq_global_kobject;
2550EXPORT_SYMBOL(cpufreq_global_kobject);
2551
2552static int __init cpufreq_core_init(void)
2553{
2554        if (cpufreq_disabled())
2555                return -ENODEV;
2556
2557        cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2558        BUG_ON(!cpufreq_global_kobject);
2559
2560        register_syscore_ops(&cpufreq_syscore_ops);
2561
2562        return 0;
2563}
2564core_initcall(cpufreq_core_init);
Note: See TracBrowser for help on using the repository browser.