source: src/linux/universal/linux-3.18/drivers/xen/xen-acpi-processor.c @ 31885

/*
 * Copyright 2012 by Oracle Inc
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code borrows ideas from https://lkml.org/lkml/2011/11/30/249
 * so many thanks go to Kevin Tian <kevin.tian@intel.com>
 * and Yu Ke <ke.yu@intel.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/syscore_ops.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <xen/xen.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>

static int no_hypercall;
MODULE_PARM_DESC(off, "Inhibit the hypercall.");
module_param_named(off, no_hypercall, int, 0400);

/*
 * Note: Do not convert the acpi_id* below to cpumask_var_t or use cpumask_bit
 * - as those shrink to nr_cpu_bits (which is dependent on possible_cpu), which
 * can be less than what we want to put in. Instead use the 'nr_acpi_bits'
 * which is dynamically computed based on the MADT or x2APIC table.
 */
static unsigned int nr_acpi_bits;
/* Mutex to protect the acpi_ids_done - for CPU hotplug use. */
static DEFINE_MUTEX(acpi_ids_mutex);
/* Which ACPI ID we have processed from 'struct acpi_processor'. */
static unsigned long *acpi_ids_done;
/* Which ACPI IDs exist in the SSDT/DSDT processor definitions. */
static unsigned long *acpi_id_present;
/* And if there is an _CST definition (or a PBLK) for the ACPI IDs. */
static unsigned long *acpi_id_cst_present;

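/*
 * Upload the C-state (_CST) data for one ACPI processor to the hypervisor
 * via XENPF_set_processor_pminfo. Only the entries marked valid are copied
 * into the xen_processor_cx array that is handed to Xen.
 */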
static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
{
        struct xen_platform_op op = {
                .cmd                    = XENPF_set_processor_pminfo,
                .interface_version      = XENPF_INTERFACE_VERSION,
                .u.set_pminfo.id        = _pr->acpi_id,
                .u.set_pminfo.type      = XEN_PM_CX,
        };
        struct xen_processor_cx *dst_cx, *dst_cx_states = NULL;
        struct acpi_processor_cx *cx;
        unsigned int i, ok;
        int ret = 0;

        dst_cx_states = kcalloc(_pr->power.count,
                                sizeof(struct xen_processor_cx), GFP_KERNEL);
        if (!dst_cx_states)
                return -ENOMEM;

        for (ok = 0, i = 1; i <= _pr->power.count; i++) {
                cx = &_pr->power.states[i];
                if (!cx->valid)
                        continue;

                dst_cx = &(dst_cx_states[ok++]);

                dst_cx->reg.space_id = ACPI_ADR_SPACE_SYSTEM_IO;
                if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
                        dst_cx->reg.bit_width = 8;
                        dst_cx->reg.bit_offset = 0;
                        dst_cx->reg.access_size = 1;
                } else {
                        dst_cx->reg.space_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
                        if (cx->entry_method == ACPI_CSTATE_FFH) {
                                /* NATIVE_CSTATE_BEYOND_HALT */
                                dst_cx->reg.bit_offset = 2;
                                dst_cx->reg.bit_width = 1; /* VENDOR_INTEL */
                        }
                        dst_cx->reg.access_size = 0;
                }
                dst_cx->reg.address = cx->address;

                dst_cx->type = cx->type;
                dst_cx->latency = cx->latency;

                dst_cx->dpcnt = 0;
                set_xen_guest_handle(dst_cx->dp, NULL);
        }
        if (!ok) {
                pr_debug("No _Cx for ACPI CPU %u\n", _pr->acpi_id);
                kfree(dst_cx_states);
                return -EINVAL;
        }
        op.u.set_pminfo.power.count = ok;
        op.u.set_pminfo.power.flags.bm_control = _pr->flags.bm_control;
        op.u.set_pminfo.power.flags.bm_check = _pr->flags.bm_check;
        op.u.set_pminfo.power.flags.has_cst = _pr->flags.has_cst;
        op.u.set_pminfo.power.flags.power_setup_done =
                _pr->flags.power_setup_done;

        set_xen_guest_handle(op.u.set_pminfo.power.states, dst_cx_states);

        if (!no_hypercall)
                ret = HYPERVISOR_dom0_op(&op);

        if (!ret) {
                pr_debug("ACPI CPU%u - C-states uploaded.\n", _pr->acpi_id);
                for (i = 1; i <= _pr->power.count; i++) {
                        cx = &_pr->power.states[i];
                        if (!cx->valid)
                                continue;
                        pr_debug("     C%d: %s %d uS\n",
                                 cx->type, cx->desc, (u32)cx->latency);
                }
        } else if ((ret != -EINVAL) && (ret != -ENOSYS))
                /* EINVAL means the ACPI ID is incorrect - meaning the ACPI
                 * table is referencing a non-existing CPU - which can happen
                 * with broken ACPI tables. */
                pr_err("(CX): Hypervisor error (%d) for ACPI CPU%u\n",
                       ret, _pr->acpi_id);

        kfree(dst_cx_states);

        return ret;
}
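/*
 * Copy the _PSS (performance states) table into a freshly allocated
 * xen_processor_px array. The Xen and ACPI per-state structures share the
 * same layout, so a straight memcpy per state is sufficient.
 */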
static struct xen_processor_px *
xen_copy_pss_data(struct acpi_processor *_pr,
                  struct xen_processor_performance *dst_perf)
{
        struct xen_processor_px *dst_states = NULL;
        unsigned int i;

        BUILD_BUG_ON(sizeof(struct xen_processor_px) !=
                     sizeof(struct acpi_processor_px));

        dst_states = kcalloc(_pr->performance->state_count,
                             sizeof(struct xen_processor_px), GFP_KERNEL);
        if (!dst_states)
                return ERR_PTR(-ENOMEM);

        dst_perf->state_count = _pr->performance->state_count;
        for (i = 0; i < _pr->performance->state_count; i++) {
                /* Fortunately for us, they are both the same size */
                memcpy(&(dst_states[i]), &(_pr->performance->states[i]),
                       sizeof(struct acpi_processor_px));
        }
        return dst_states;
}
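/*
 * Copy the _PSD (P-state dependency domain) information, translating the
 * coordination type into the CPUFREQ_SHARED_TYPE_* values Xen expects when
 * the domain covers only a single processor.
 */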
static int xen_copy_psd_data(struct acpi_processor *_pr,
                             struct xen_processor_performance *dst)
{
        struct acpi_psd_package *pdomain;

        BUILD_BUG_ON(sizeof(struct xen_psd_package) !=
                     sizeof(struct acpi_psd_package));

        /* This information is enumerated only if acpi_processor_preregister_performance
         * has been called.
         */
        dst->shared_type = _pr->performance->shared_type;

        pdomain = &(_pr->performance->domain_info);

        /* 'acpi_processor_preregister_performance' does not parse if the
         * num_processors <= 1, but Xen still requires it. Do it manually here.
         */
        if (pdomain->num_processors <= 1) {
                if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
                        dst->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
                        dst->shared_type = CPUFREQ_SHARED_TYPE_HW;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
                        dst->shared_type = CPUFREQ_SHARED_TYPE_ANY;

        }
        memcpy(&(dst->domain_info), pdomain, sizeof(struct acpi_psd_package));
        return 0;
}
static int xen_copy_pct_data(struct acpi_pct_register *pct,
                             struct xen_pct_register *dst_pct)
{
        /* It would be nice if you could just do 'memcpy(pct, dst_pct)' but
         * sadly the Xen structure did not have the proper padding so the
         * descriptor field takes two (dst_pct) bytes instead of one (pct).
         */
        dst_pct->descriptor = pct->descriptor;
        dst_pct->length = pct->length;
        dst_pct->space_id = pct->space_id;
        dst_pct->bit_width = pct->bit_width;
        dst_pct->bit_offset = pct->bit_offset;
        dst_pct->reserved = pct->reserved;
        dst_pct->address = pct->address;
        return 0;
}
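/*
 * Upload the P-state data (_PPC, _PCT, _PSS and _PSD) for one ACPI
 * processor via XENPF_set_processor_pminfo. All four pieces must be
 * present, otherwise the upload for this CPU is skipped.
 */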
static int push_pxx_to_hypervisor(struct acpi_processor *_pr)
{
        int ret = 0;
        struct xen_platform_op op = {
                .cmd                    = XENPF_set_processor_pminfo,
                .interface_version      = XENPF_INTERFACE_VERSION,
                .u.set_pminfo.id        = _pr->acpi_id,
                .u.set_pminfo.type      = XEN_PM_PX,
        };
        struct xen_processor_performance *dst_perf;
        struct xen_processor_px *dst_states = NULL;

        dst_perf = &op.u.set_pminfo.perf;

        dst_perf->platform_limit = _pr->performance_platform_limit;
        dst_perf->flags |= XEN_PX_PPC;
        xen_copy_pct_data(&(_pr->performance->control_register),
                          &dst_perf->control_register);
        xen_copy_pct_data(&(_pr->performance->status_register),
                          &dst_perf->status_register);
        dst_perf->flags |= XEN_PX_PCT;
        dst_states = xen_copy_pss_data(_pr, dst_perf);
        if (!IS_ERR_OR_NULL(dst_states)) {
                set_xen_guest_handle(dst_perf->states, dst_states);
                dst_perf->flags |= XEN_PX_PSS;
        }
        if (!xen_copy_psd_data(_pr, dst_perf))
                dst_perf->flags |= XEN_PX_PSD;

        if (dst_perf->flags != (XEN_PX_PSD | XEN_PX_PSS | XEN_PX_PCT | XEN_PX_PPC)) {
                pr_warn("ACPI CPU%u missing some P-state data (%x), skipping\n",
                        _pr->acpi_id, dst_perf->flags);
                ret = -ENODEV;
                goto err_free;
        }

        if (!no_hypercall)
                ret = HYPERVISOR_dom0_op(&op);

        if (!ret) {
                struct acpi_processor_performance *perf;
                unsigned int i;

                perf = _pr->performance;
                pr_debug("ACPI CPU%u - P-states uploaded.\n", _pr->acpi_id);
                for (i = 0; i < perf->state_count; i++) {
                        pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
                        (i == perf->state ? '*' : ' '), i,
                        (u32) perf->states[i].core_frequency,
                        (u32) perf->states[i].power,
                        (u32) perf->states[i].transition_latency);
                }
        } else if ((ret != -EINVAL) && (ret != -ENOSYS))
                /* EINVAL means the ACPI ID is incorrect - meaning the ACPI
                 * table is referencing a non-existing CPU - which can happen
                 * with broken ACPI tables. */
                pr_warn("(_PXX): Hypervisor error (%d) for ACPI CPU%u\n",
                        ret, _pr->acpi_id);
err_free:
        if (!IS_ERR_OR_NULL(dst_states))
                kfree(dst_states);

        return ret;
}
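/*
 * Push both the C-state and P-state data for one processor, recording its
 * ACPI ID in acpi_ids_done so the data is only uploaded once.
 */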
static int upload_pm_data(struct acpi_processor *_pr)
{
        int err = 0;

        mutex_lock(&acpi_ids_mutex);
        if (__test_and_set_bit(_pr->acpi_id, acpi_ids_done)) {
                mutex_unlock(&acpi_ids_mutex);
                return -EBUSY;
        }
        if (_pr->flags.power)
                err = push_cxx_to_hypervisor(_pr);

        if (_pr->performance && _pr->performance->states)
                err |= push_pxx_to_hypervisor(_pr);

        mutex_unlock(&acpi_ids_mutex);
        return err;
}
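/*
 * Ask the hypervisor (XENPF_get_cpuinfo) for every physical CPU's ACPI ID
 * and return the largest one seen, doubled to leave slack for CPU hotplug.
 */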
static unsigned int __init get_max_acpi_id(void)
{
        struct xenpf_pcpuinfo *info;
        struct xen_platform_op op = {
                .cmd = XENPF_get_cpuinfo,
                .interface_version = XENPF_INTERFACE_VERSION,
        };
        int ret = 0;
        unsigned int i, last_cpu, max_acpi_id = 0;

        info = &op.u.pcpu_info;
        info->xen_cpuid = 0;

        ret = HYPERVISOR_dom0_op(&op);
        if (ret)
                return NR_CPUS;

        /* The max_present is the same regardless of the xen_cpuid */
        last_cpu = op.u.pcpu_info.max_present;
        for (i = 0; i <= last_cpu; i++) {
                info->xen_cpuid = i;
                ret = HYPERVISOR_dom0_op(&op);
                if (ret)
                        continue;
                max_acpi_id = max(info->acpi_id, max_acpi_id);
        }
        max_acpi_id *= 2; /* Slack for CPU hotplug support. */
        pr_debug("Max ACPI ID: %u\n", max_acpi_id);
        return max_acpi_id;
}
/*
 * The read_acpi_id and check_acpi_ids are there to support the Xen
 * oddity of virtual CPUs != physical CPUs in the initial domain.
 * The user can supply 'xen_max_vcpus=X' on the Xen hypervisor line
 * which will bound the number of CPUs the initial domain can see.
 * In general that is OK, except it plays havoc with any of the
 * for_each_[present|online]_cpu macros which are bound to the virtual
 * CPU count.
 */
static acpi_status
read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
{
        u32 acpi_id;
        acpi_status status;
        acpi_object_type acpi_type;
        unsigned long long tmp;
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
        acpi_io_address pblk = 0;

        status = acpi_get_type(handle, &acpi_type);
        if (ACPI_FAILURE(status))
                return AE_OK;

        switch (acpi_type) {
        case ACPI_TYPE_PROCESSOR:
                status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
                if (ACPI_FAILURE(status))
                        return AE_OK;
                acpi_id = object.processor.proc_id;
                pblk = object.processor.pblk_address;
                break;
        case ACPI_TYPE_DEVICE:
                status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
                if (ACPI_FAILURE(status))
                        return AE_OK;
                acpi_id = tmp;
                break;
        default:
                return AE_OK;
        }
        /* There are more ACPI Processor objects than in x2APIC or MADT.
         * This can happen with incorrect ACPI SSDT declarations. */
        if (acpi_id > nr_acpi_bits) {
                pr_debug("We only have %u, trying to set %u\n",
                         nr_acpi_bits, acpi_id);
                return AE_OK;
        }
        /* OK, there is an ACPI Processor object */
        __set_bit(acpi_id, acpi_id_present);

        pr_debug("ACPI CPU%u w/ PBLK:0x%lx\n", acpi_id, (unsigned long)pblk);

        status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (!pblk)
                        return AE_OK;
        }
        /* .. and it has a C-state */
        __set_bit(acpi_id, acpi_id_cst_present);

        return AE_OK;
}
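/*
 * Walk the ACPI namespace for Processor objects and ACPI0007 devices, then
 * upload PM data (via the pr_backup template) for every ACPI ID that was
 * not already covered by the vCPUs the initial domain can see.
 */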
static int check_acpi_ids(struct acpi_processor *pr_backup)
{

        if (!pr_backup)
                return -ENODEV;

        if (acpi_id_present && acpi_id_cst_present)
                /* OK, done this once .. skip to uploading */
                goto upload;

        /* All online CPUs have been processed at this stage. Now verify
         * whether in fact "online CPUs" == physical CPUs.
         */
        acpi_id_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
        if (!acpi_id_present)
                return -ENOMEM;

        acpi_id_cst_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
        if (!acpi_id_cst_present) {
                kfree(acpi_id_present);
                return -ENOMEM;
        }

        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            read_acpi_id, NULL, NULL, NULL);
        acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL);

upload:
        if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) {
                unsigned int i;
                for_each_set_bit(i, acpi_id_present, nr_acpi_bits) {
                        pr_backup->acpi_id = i;
                        /* Mask out C-states if there are no _CST or PBLK */
                        pr_backup->flags.power = test_bit(i, acpi_id_cst_present);
                        (void)upload_pm_data(pr_backup);
                }
        }

        return 0;
}

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static void free_acpi_perf_data(void)
{
        unsigned int i;

        /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
        for_each_possible_cpu(i)
                free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
                                 ->shared_cpu_map);
        free_percpu(acpi_perf_data);
}

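/*
 * Upload PM data for every processor known to the initial domain, keeping a
 * copy of the first one as a template so check_acpi_ids() can cover the
 * ACPI IDs this domain cannot see.
 */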
static int xen_upload_processor_pm_data(void)
{
        struct acpi_processor *pr_backup = NULL;
        unsigned int i;
        int rc = 0;

        pr_info("Uploading Xen processor PM info\n");

        for_each_possible_cpu(i) {
                struct acpi_processor *_pr;
                _pr = per_cpu(processors, i /* APIC ID */);
                if (!_pr)
                        continue;

                if (!pr_backup) {
                        pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
                        if (pr_backup)
                                memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
                }
                (void)upload_pm_data(_pr);
        }

        rc = check_acpi_ids(pr_backup);
        kfree(pr_backup);

        return rc;
}

static void xen_acpi_processor_resume_worker(struct work_struct *dummy)
{
        int rc;

        bitmap_zero(acpi_ids_done, nr_acpi_bits);

        rc = xen_upload_processor_pm_data();
        if (rc != 0)
                pr_info("ACPI data upload failed, error = %d\n", rc);
}

static void xen_acpi_processor_resume(void)
{
        static DECLARE_WORK(wq, xen_acpi_processor_resume_worker);

        /*
         * xen_upload_processor_pm_data() calls non-atomic code.
         * However, the context for xen_acpi_processor_resume is syscore
         * with only the boot CPU online and in an atomic context.
         *
         * So defer the upload to a safer point.
         */
        schedule_work(&wq);
}

static struct syscore_ops xap_syscore_ops = {
        .resume = xen_acpi_processor_resume,
};

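/*
 * Module init: allocate the tracking bitmap and per-CPU performance data,
 * gather performance info from the ACPI core, upload everything to the
 * hypervisor and register a syscore op to re-upload it after resume.
 */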
static int __init xen_acpi_processor_init(void)
{
        unsigned int i;
        int rc;

        if (!xen_initial_domain())
                return -ENODEV;

        nr_acpi_bits = get_max_acpi_id() + 1;
        acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
        if (!acpi_ids_done)
                return -ENOMEM;

        acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
        if (!acpi_perf_data) {
                pr_debug("Memory allocation error for acpi_perf_data\n");
                kfree(acpi_ids_done);
                return -ENOMEM;
        }
        for_each_possible_cpu(i) {
                if (!zalloc_cpumask_var_node(
                        &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
                        GFP_KERNEL, cpu_to_node(i))) {
                        rc = -ENOMEM;
                        goto err_out;
                }
        }

        /* Do initialization in ACPI core. It is OK to fail here. */
        (void)acpi_processor_preregister_performance(acpi_perf_data);

        for_each_possible_cpu(i) {
                struct acpi_processor *pr;
                struct acpi_processor_performance *perf;

                pr = per_cpu(processors, i);
                perf = per_cpu_ptr(acpi_perf_data, i);
                if (!pr)
                        continue;

                pr->performance = perf;
                rc = acpi_processor_get_performance_info(pr);
                if (rc)
                        goto err_out;
        }

        rc = xen_upload_processor_pm_data();
        if (rc)
                goto err_unregister;

        register_syscore_ops(&xap_syscore_ops);

        return 0;
err_unregister:
        for_each_possible_cpu(i) {
                struct acpi_processor_performance *perf;
                perf = per_cpu_ptr(acpi_perf_data, i);
                acpi_processor_unregister_performance(perf, i);
        }
err_out:
        /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
        free_acpi_perf_data();
        kfree(acpi_ids_done);
        return rc;
}
static void __exit xen_acpi_processor_exit(void)
{
        int i;

        unregister_syscore_ops(&xap_syscore_ops);
        kfree(acpi_ids_done);
        kfree(acpi_id_present);
        kfree(acpi_id_cst_present);
        for_each_possible_cpu(i) {
                struct acpi_processor_performance *perf;
                perf = per_cpu_ptr(acpi_perf_data, i);
                acpi_processor_unregister_performance(perf, i);
        }
        free_acpi_perf_data();
}

MODULE_AUTHOR("Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>");
MODULE_DESCRIPTION("Xen ACPI Processor P-states (and Cx) driver which uploads PM data to Xen hypervisor");
MODULE_LICENSE("GPL");

/* We want to be loaded before the CPU freq scaling drivers are loaded.
 * They are loaded in late_initcall. */
device_initcall(xen_acpi_processor_init);
module_exit(xen_acpi_processor_exit);